author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:14:29 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:14:29 +0000
commit    fbaf0bb26397aa498eb9156f06d5a6fe34dd7dd8 (patch)
tree      4c1ccaf5486d4f2009f9a338a98a83e886e29c97 /js
parent    Releasing progress-linux version 124.0.1-1~progress7.99u1. (diff)
download  firefox-fbaf0bb26397aa498eb9156f06d5a6fe34dd7dd8.tar.xz
          firefox-fbaf0bb26397aa498eb9156f06d5a6fe34dd7dd8.zip
Merging upstream version 125.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js')
-rw-r--r--js/app.mozbuild5
-rw-r--r--js/loader/ModuleLoadRequest.cpp15
-rw-r--r--js/loader/ModuleLoadRequest.h2
-rw-r--r--js/loader/ModuleLoaderBase.cpp10
-rw-r--r--js/moz.configure109
-rw-r--r--js/public/ContextOptions.h15
-rw-r--r--js/public/GCAPI.h36
-rw-r--r--js/public/HeapAPI.h28
-rw-r--r--js/public/MemoryMetrics.h1
-rw-r--r--js/public/Modules.h10
-rw-r--r--js/public/ProfilingFrameIterator.h22
-rw-r--r--js/public/RegExpFlags.h43
-rw-r--r--js/public/RootingAPI.h8
-rw-r--r--js/public/Stack.h6
-rw-r--r--js/public/UbiNodeCensus.h6
-rw-r--r--js/public/Value.h16
-rw-r--r--js/public/WasmFeatures.h124
-rw-r--r--js/public/experimental/CompileScript.h58
-rw-r--r--js/public/experimental/JSStencil.h4
-rw-r--r--js/public/friend/ErrorNumbers.msg3
-rw-r--r--js/public/friend/UsageStatistics.h2
-rw-r--r--js/src/aclocal.m41
-rw-r--r--js/src/builtin/.eslintrc.js8
-rw-r--r--js/src/builtin/DataViewObject.cpp141
-rw-r--r--js/src/builtin/DataViewObject.h43
-rw-r--r--js/src/builtin/ModuleObject.cpp163
-rw-r--r--js/src/builtin/ModuleObject.h31
-rw-r--r--js/src/builtin/ReflectParse.cpp3
-rw-r--r--js/src/builtin/String.cpp9
-rw-r--r--js/src/builtin/TestingFunctions.cpp146
-rw-r--r--js/src/builtin/intl/NumberFormat.js91
-rw-r--r--js/src/builtin/intl/PluralRules.js42
-rwxr-xr-xjs/src/builtin/intl/make_intl_data.py8
-rw-r--r--js/src/builtin/temporal/PlainDate.cpp3
-rw-r--r--js/src/builtin/temporal/PlainDateTime.cpp3
-rw-r--r--js/src/builtin/temporal/PlainMonthDay.cpp3
-rw-r--r--js/src/builtin/temporal/PlainTime.cpp3
-rw-r--r--js/src/builtin/temporal/PlainYearMonth.cpp3
-rw-r--r--js/src/builtin/temporal/ZonedDateTime.cpp3
-rw-r--r--js/src/debugger/Object.cpp52
-rw-r--r--js/src/debugger/Object.h2
-rwxr-xr-xjs/src/devtools/automation/autospider.py65
-rw-r--r--js/src/devtools/automation/variants/pbl-debug7
-rw-r--r--js/src/devtools/gc-ubench/harness.js11
-rw-r--r--js/src/devtools/gc-ubench/index.html70
-rw-r--r--js/src/devtools/gc-ubench/ui.js180
-rw-r--r--js/src/doc/Debugger/Debugger.Memory.md15
-rw-r--r--js/src/doc/Debugger/Debugger.Object.md9
-rw-r--r--js/src/frontend/BytecodeEmitter.cpp255
-rw-r--r--js/src/frontend/CompileScript.cpp81
-rw-r--r--js/src/frontend/FoldConstants.cpp1
-rw-r--r--js/src/frontend/FullParseHandler.h18
-rw-r--r--js/src/frontend/NameFunctions.cpp1
-rw-r--r--js/src/frontend/ParseContext.cpp69
-rw-r--r--js/src/frontend/ParseContext.h11
-rw-r--r--js/src/frontend/ParseNode.h28
-rw-r--r--js/src/frontend/Parser.cpp36
-rw-r--r--js/src/frontend/SharedContext.cpp3
-rw-r--r--js/src/frontend/SharedContext.h9
-rw-r--r--js/src/frontend/Stencil.cpp12
-rw-r--r--js/src/frontend/SyntaxParseHandler.h25
-rw-r--r--js/src/frontend/UsedNameTracker.h4
-rwxr-xr-xjs/src/frontend/align_stack_comment.py2
-rw-r--r--js/src/fuzz-tests/testWasm.cpp11
-rw-r--r--js/src/gc/AllocKind.h5
-rw-r--r--js/src/gc/GC.cpp80
-rw-r--r--js/src/gc/GC.h12
-rw-r--r--js/src/gc/GCAPI.cpp10
-rw-r--r--js/src/gc/GCEnum.h1
-rw-r--r--js/src/gc/GCMarker.h7
-rw-r--r--js/src/gc/GCRuntime.h13
-rw-r--r--js/src/gc/Marking.cpp32
-rw-r--r--js/src/gc/MaybeRooted.h4
-rw-r--r--js/src/gc/Nursery.cpp52
-rw-r--r--js/src/gc/Nursery.h16
-rw-r--r--js/src/gc/Scheduling.h31
-rw-r--r--js/src/gc/StableCellHasher-inl.h2
-rw-r--r--js/src/gc/Statistics.cpp6
-rw-r--r--js/src/gc/Statistics.h18
-rw-r--r--js/src/gc/Sweeping.cpp8
-rw-r--r--js/src/gc/Tenuring.cpp77
-rw-r--r--js/src/gc/Tenuring.h4
-rw-r--r--js/src/gc/Zone.cpp12
-rw-r--r--js/src/gc/Zone.h16
-rw-r--r--js/src/gdb/mozilla/prettyprinters.py2
-rw-r--r--js/src/gdb/run-tests.py4
-rw-r--r--js/src/gdb/tests/test-ExecutableAllocator.py4
-rw-r--r--js/src/intgemm/IntegerGemmIntrinsic.cpp14
-rw-r--r--js/src/intgemm/moz.build6
-rw-r--r--js/src/irregexp/RegExpAPI.cpp4
-rw-r--r--js/src/irregexp/RegExpNativeMacroAssembler.cpp4
-rw-r--r--js/src/irregexp/RegExpShim.cpp6
-rw-r--r--js/src/irregexp/RegExpShim.h82
-rw-r--r--js/src/irregexp/RegExpTypes.h18
-rw-r--r--js/src/irregexp/imported/gen-regexp-special-case.cc48
-rw-r--r--js/src/irregexp/imported/regexp-ast.cc23
-rw-r--r--js/src/irregexp/imported/regexp-ast.h42
-rw-r--r--js/src/irregexp/imported/regexp-bytecode-generator.cc2
-rw-r--r--js/src/irregexp/imported/regexp-bytecode-peephole.cc4
-rw-r--r--js/src/irregexp/imported/regexp-compiler-tonode.cc99
-rw-r--r--js/src/irregexp/imported/regexp-compiler.cc43
-rw-r--r--js/src/irregexp/imported/regexp-compiler.h8
-rw-r--r--js/src/irregexp/imported/regexp-dotprinter.cc4
-rw-r--r--js/src/irregexp/imported/regexp-interpreter.cc84
-rw-r--r--js/src/irregexp/imported/regexp-interpreter.h13
-rw-r--r--js/src/irregexp/imported/regexp-macro-assembler.cc70
-rw-r--r--js/src/irregexp/imported/regexp-macro-assembler.h23
-rw-r--r--js/src/irregexp/imported/regexp-nodes.h18
-rw-r--r--js/src/irregexp/imported/regexp-parser.cc393
-rw-r--r--js/src/irregexp/imported/regexp.h12
-rw-r--r--js/src/irregexp/imported/special-case.cc23
-rw-r--r--js/src/irregexp/imported/special-case.h10
-rw-r--r--js/src/irregexp/moz.build8
-rw-r--r--js/src/irregexp/moz.yaml4
-rw-r--r--js/src/jit-test/etc/wasm/generate-spectests/Cargo.lock15
-rw-r--r--js/src/jit-test/etc/wasm/generate-spectests/config.toml18
-rw-r--r--js/src/jit-test/etc/wasm/generate-spectests/wast2js/Cargo.toml2
-rw-r--r--js/src/jit-test/etc/wasm/spec-tests.patch154
-rw-r--r--js/src/jit-test/lib/gen/wasm-gc-limits-gen.js71
-rw-r--r--js/src/jit-test/lib/gen/wasm-gc-limits-r1-t1M.wasmbin0 -> 11804 bytes
-rw-r--r--js/src/jit-test/lib/gen/wasm-gc-limits-r1-t1M1.wasmbin0 -> 11804 bytes
-rw-r--r--js/src/jit-test/lib/gen/wasm-gc-limits-r1M-t1.wasmbin0 -> 19647 bytes
-rw-r--r--js/src/jit-test/lib/gen/wasm-gc-limits-r1M1-t1.wasmbin0 -> 19647 bytes
-rw-r--r--js/src/jit-test/lib/gen/wasm-gc-limits-r2-t500K.wasmbin0 -> 11815 bytes
-rw-r--r--js/src/jit-test/lib/gen/wasm-gc-limits-r2-t500K1.wasmbin0 -> 11815 bytes
-rw-r--r--js/src/jit-test/lib/gen/wasm-gc-limits-s10K.wasmbin0 -> 258 bytes
-rw-r--r--js/src/jit-test/lib/gen/wasm-gc-limits-s10K1.wasmbin0 -> 258 bytes
-rw-r--r--js/src/jit-test/lib/prologue.js35
-rw-r--r--js/src/jit-test/lib/wasm-binary.js48
-rw-r--r--js/src/jit-test/lib/wasm.js25
-rw-r--r--js/src/jit-test/tests/Set/bug1729269.js2
-rw-r--r--js/src/jit-test/tests/arguments/1883837.js10
-rw-r--r--js/src/jit-test/tests/arguments/argumentsNaming.js3
-rw-r--r--js/src/jit-test/tests/arrays/from-async-oom.js2
-rw-r--r--js/src/jit-test/tests/asm.js/bug1219954.js2
-rw-r--r--js/src/jit-test/tests/asm.js/bug1385428.js2
-rw-r--r--js/src/jit-test/tests/asm.js/bug1421565.js2
-rw-r--r--js/src/jit-test/tests/asm.js/oom-helper-thread-plus-validation-error.js2
-rw-r--r--js/src/jit-test/tests/asm.js/oom-helper-thread.js2
-rw-r--r--js/src/jit-test/tests/asm.js/testBug1255954.js2
-rw-r--r--js/src/jit-test/tests/atomics/basic-tests.js20
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1263558.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1263865.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1263879.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1264823.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1268034.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1269074.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1375446.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1462341.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1466626-1.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1466626-2.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1466626-3.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1466626-4.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1562102.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1652148.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1652153.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1670378.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1791401.js2
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1798883.js4
-rw-r--r--js/src/jit-test/tests/auto-regress/bug1879688.js2
-rw-r--r--js/src/jit-test/tests/baseline/bug1209585.js2
-rw-r--r--js/src/jit-test/tests/baseline/bug1344334.js2
-rw-r--r--js/src/jit-test/tests/baseline/bug1491337.js2
-rw-r--r--js/src/jit-test/tests/baseline/bug1491350.js2
-rw-r--r--js/src/jit-test/tests/basic/bug-1198090.js2
-rw-r--r--js/src/jit-test/tests/basic/bug-1271507.js2
-rw-r--r--js/src/jit-test/tests/basic/bug-1665583.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1207863.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1219128-1.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1219128-2.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1219128-3.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1219128-4.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1219128-5.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1219128-6.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1219128-7.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1219128-8.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1234414.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1240502.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1263868.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1264954.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1265693.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1278839.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1296249.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1300904.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1316557.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1344265.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1348407.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1411294.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1447996.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1459258.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1493627.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1516406.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1532265.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1548759-1.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1548759-2.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1574725.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1644839-2.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1644839.js1
-rw-r--r--js/src/jit-test/tests/basic/bug1666856.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1877586.js2
-rw-r--r--js/src/jit-test/tests/basic/bug1883828.js5
-rw-r--r--js/src/jit-test/tests/basic/bug1884706.js5
-rw-r--r--js/src/jit-test/tests/basic/date-getLocale-oom.js2
-rw-r--r--js/src/jit-test/tests/basic/date-late-weekday-warning.js34
-rw-r--r--js/src/jit-test/tests/basic/dictionary-add-prop-oom.js1
-rw-r--r--js/src/jit-test/tests/basic/dumpValue.js4
-rw-r--r--js/src/jit-test/tests/basic/inflate-oom.js1
-rw-r--r--js/src/jit-test/tests/basic/property-error-message-fix-disabled.js2
-rw-r--r--js/src/jit-test/tests/basic/property-error-message-fix.js2
-rw-r--r--js/src/jit-test/tests/basic/string-substring-latin1rope-with-twobyte-children.js12
-rw-r--r--js/src/jit-test/tests/basic/testBug756919.js1
-rw-r--r--js/src/jit-test/tests/basic/testDetach.js (renamed from js/src/jit-test/tests/basic/testNeutering.js)0
-rw-r--r--js/src/jit-test/tests/basic/testNativeArgsRooting.js1
-rw-r--r--js/src/jit-test/tests/bug1636306.js2
-rw-r--r--js/src/jit-test/tests/bug1681258.js2
-rw-r--r--js/src/jit-test/tests/bug1787730.js2
-rw-r--r--js/src/jit-test/tests/bug1878098-serialization-log-oom.js8
-rw-r--r--js/src/jit-test/tests/dataview/resizable-dataview-bytelength-with-sab.js29
-rw-r--r--js/src/jit-test/tests/dataview/resizable-dataview-bytelength.js43
-rw-r--r--js/src/jit-test/tests/dataview/resizable-dataview-byteoffset-sab.js43
-rw-r--r--js/src/jit-test/tests/dataview/resizable-dataview-byteoffset.js67
-rw-r--r--js/src/jit-test/tests/dataview/resizable-dataview-get-elem-with-sab.js48
-rw-r--r--js/src/jit-test/tests/dataview/resizable-dataview-get-elem.js48
-rw-r--r--js/src/jit-test/tests/dataview/resizable-dataview-set-elem-with-sab.js47
-rw-r--r--js/src/jit-test/tests/dataview/resizable-dataview-set-elem.js47
-rw-r--r--js/src/jit-test/tests/debug/Debugger-findScripts-26.js1
-rw-r--r--js/src/jit-test/tests/debug/Memory-drainAllocationsLog-18.js2
-rw-r--r--js/src/jit-test/tests/debug/Memory-takeCensus-06.js26
-rw-r--r--js/src/jit-test/tests/debug/Object-getPromiseReactions-07.js14
-rw-r--r--js/src/jit-test/tests/debug/Object-isSameNativeWithJitInfo.js32
-rw-r--r--js/src/jit-test/tests/debug/breakpoint-oom-01.js2
-rw-r--r--js/src/jit-test/tests/debug/bug-1238610.js2
-rw-r--r--js/src/jit-test/tests/debug/bug-1248162.js2
-rw-r--r--js/src/jit-test/tests/debug/bug-1260725.js2
-rw-r--r--js/src/jit-test/tests/debug/bug-1565275.js2
-rw-r--r--js/src/jit-test/tests/debug/bug-1576862-2.js1
-rw-r--r--js/src/jit-test/tests/debug/bug-1584195.js1
-rw-r--r--js/src/jit-test/tests/debug/bug1216261.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1219905.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1240546.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1240803.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1242111.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1245862.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1251919.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1254123.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1254190.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1254578.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1264961.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1272908.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1370905.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1404710.js1
-rw-r--r--js/src/jit-test/tests/debug/bug1434391.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1647309.js2
-rw-r--r--js/src/jit-test/tests/debug/bug1878511.js1
-rw-r--r--js/src/jit-test/tests/debug/job-queue-04.js1
-rw-r--r--js/src/jit-test/tests/debug/wasm-14.js2
-rw-r--r--js/src/jit-test/tests/debug/wasm-15.js2
-rw-r--r--js/src/jit-test/tests/fields/private-proxy-oom.js3
-rw-r--r--js/src/jit-test/tests/fuses/with.js9
-rw-r--r--js/src/jit-test/tests/gc/bug-1108007.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1155455.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1161968.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1165966.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1171909.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1175755.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1191576.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1206677.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1208994.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1209001.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1210607.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1214006.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1214781.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1214846.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1215363-1.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1215363-2.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1215363-3.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1216607.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1221359.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1221747.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1223021.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1224710.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1226896.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1231386.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1232386.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1234410.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1236473.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1238555.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1238575-2.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1238575.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1238582.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1240503.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1240527.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1241731.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1242812.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1245520.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1252329.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1253124.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1259306.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1261329.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1263862.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1263871.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1263884.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1271110.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1280588.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1282986.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1287399.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1287869.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1292564.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1298356.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1303015.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1305220.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1310589.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1315946.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1325551.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1340010.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1384047.js3
-rw-r--r--js/src/jit-test/tests/gc/bug-1401141.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1411302.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1435295.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1449887.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1456536.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1462337.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1472734.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1490042.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1530643.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1556155.js1
-rw-r--r--js/src/jit-test/tests/gc/bug-1568119.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1574877.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1648901.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1654186.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1657554.js1
-rw-r--r--js/src/jit-test/tests/gc/bug-1660293.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1689039.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1692221.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1791975.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1802478.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1804629.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1865597.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1870925.js14
-rw-r--r--js/src/jit-test/tests/gc/bug-1871186.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1877406.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1880870.js6
-rw-r--r--js/src/jit-test/tests/gc/bug-1881417.js12
-rw-r--r--js/src/jit-test/tests/gc/bug-1884746.js7
-rw-r--r--js/src/jit-test/tests/gc/bug-978802.js2
-rw-r--r--js/src/jit-test/tests/gc/bug1246607.js2
-rw-r--r--js/src/jit-test/tests/gc/bug1326343-gcstats.js2
-rw-r--r--js/src/jit-test/tests/gc/bug1337324.js1
-rw-r--r--js/src/jit-test/tests/gc/bug1471949.js2
-rw-r--r--js/src/jit-test/tests/gc/bug1704451.js2
-rw-r--r--js/src/jit-test/tests/gc/finalizationRegistry-oom1.js2
-rw-r--r--js/src/jit-test/tests/gc/finalizationRegistry-oom2.js1
-rw-r--r--js/src/jit-test/tests/gc/finalizationRegistry-oom3.js1
-rw-r--r--js/src/jit-test/tests/gc/finalizationRegistry-oom4.js1
-rw-r--r--js/src/jit-test/tests/gc/gcparam.js7
-rw-r--r--js/src/jit-test/tests/gc/incremental-compacting.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInArrayProtoTest.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInByteSize.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInDebugger.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInDtoa.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInExceptionHandlerBailout.js1
-rw-r--r--js/src/jit-test/tests/gc/oomInFindPath.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInFormatStackDump.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInGetJumpLabelForBranch.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInNewGlobal.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInOffTheadCompile.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInOffTheadCompile2.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInOffTheadCompile3.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInParseAsmJS.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInParseFunction.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInRegExp.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInRegExp2.js2
-rw-r--r--js/src/jit-test/tests/gc/oomInWeakMap.js2
-rw-r--r--js/src/jit-test/tests/generators/bug1501722.js2
-rw-r--r--js/src/jit-test/tests/ion/array-push-multiple-frozen.js2
-rw-r--r--js/src/jit-test/tests/ion/array-push-multiple-with-funapply.js2
-rw-r--r--js/src/jit-test/tests/ion/array-push-multiple.js2
-rw-r--r--js/src/jit-test/tests/ion/bailout-oom-01.js2
-rw-r--r--js/src/jit-test/tests/ion/bug1207413.js2
-rw-r--r--js/src/jit-test/tests/ion/bug1216157.js2
-rw-r--r--js/src/jit-test/tests/ion/bug1233331.js2
-rw-r--r--js/src/jit-test/tests/ion/bug1240521.js2
-rw-r--r--js/src/jit-test/tests/ion/bug1269756.js2
-rw-r--r--js/src/jit-test/tests/ion/bug1284491.js2
-rw-r--r--js/src/jit-test/tests/ion/bug1394505.js2
-rw-r--r--js/src/jit-test/tests/ion/bug1479394.js1
-rw-r--r--js/src/jit-test/tests/ion/bug1492574.js2
-rw-r--r--js/src/jit-test/tests/ion/bug1568397.js2
-rw-r--r--js/src/jit-test/tests/ion/bug1877357.js17
-rw-r--r--js/src/jit-test/tests/ion/bug1877709.js22
-rw-r--r--js/src/jit-test/tests/ion/dce-with-rinstructions.js22
-rw-r--r--js/src/jit-test/tests/ion/scalar-replacement-oom.js2
-rw-r--r--js/src/jit-test/tests/modules/bug-1219044.js2
-rw-r--r--js/src/jit-test/tests/modules/bug-1402535.js2
-rw-r--r--js/src/jit-test/tests/modules/bug-1402649.js2
-rw-r--r--js/src/jit-test/tests/modules/bug-1420420-3.js2
-rw-r--r--js/src/jit-test/tests/modules/bug-1435327.js2
-rw-r--r--js/src/jit-test/tests/modules/bug-1771090.js2
-rw-r--r--js/src/jit-test/tests/modules/bug-1802479.js2
-rw-r--r--js/src/jit-test/tests/modules/bug1670236.js1
-rw-r--r--js/src/jit-test/tests/modules/bug1685992.js4
-rw-r--r--js/src/jit-test/tests/modules/bug1846247.js2
-rw-r--r--js/src/jit-test/tests/modules/dynamic-import-oom.js2
-rw-r--r--js/src/jit-test/tests/modules/eval-module-oom.js2
-rw-r--r--js/src/jit-test/tests/modules/import-meta-oom.js2
-rw-r--r--js/src/jit-test/tests/modules/offthread-oom.js2
-rw-r--r--js/src/jit-test/tests/parser/bug-1263355-44.js2
-rw-r--r--js/src/jit-test/tests/parser/bug-1324773-2.js2
-rw-r--r--js/src/jit-test/tests/parser/bug-1324773.js2
-rw-r--r--js/src/jit-test/tests/parser/bug-1433014.js2
-rw-r--r--js/src/jit-test/tests/parser/bug-1576865-1.js1
-rw-r--r--js/src/jit-test/tests/parser/bug-1576865-2.js1
-rw-r--r--js/src/jit-test/tests/parser/bug-1662260.js2
-rw-r--r--js/src/jit-test/tests/parser/bug-1764737.js2
-rw-r--r--js/src/jit-test/tests/parser/bug1461034.js1
-rw-r--r--js/src/jit-test/tests/parser/bug1547655.js2
-rw-r--r--js/src/jit-test/tests/parser/bug1661454.js2
-rw-r--r--js/src/jit-test/tests/parser/bug1764715.js1
-rw-r--r--js/src/jit-test/tests/parser/bug1835785.js2
-rw-r--r--js/src/jit-test/tests/parser/compile-script.js2
-rw-r--r--js/src/jit-test/tests/parser/off_thread_compile_oom.js2
-rw-r--r--js/src/jit-test/tests/parser/warning-oom.js2
-rw-r--r--js/src/jit-test/tests/profiler/bug1211962.js2
-rw-r--r--js/src/jit-test/tests/profiler/bug1231925.js2
-rw-r--r--js/src/jit-test/tests/profiler/bug1242840.js2
-rw-r--r--js/src/jit-test/tests/profiler/bug1563889.js1
-rw-r--r--js/src/jit-test/tests/promise/unhandled-rejections-oom.js2
-rw-r--r--js/src/jit-test/tests/regexp/CheckRegExpSyntax.js2
-rw-r--r--js/src/jit-test/tests/regexp/bug-1845715.js1
-rw-r--r--js/src/jit-test/tests/regexp/bug1640475.js2
-rw-r--r--js/src/jit-test/tests/regexp/bug1640479.js2
-rw-r--r--js/src/jit-test/tests/regexp/bug1794317.js2
-rw-r--r--js/src/jit-test/tests/saved-stacks/bug-1445973-quick.js2
-rw-r--r--js/src/jit-test/tests/saved-stacks/oom-in-save-stack-02.js2
-rw-r--r--js/src/jit-test/tests/saved-stacks/oom-in-save-stack.js2
-rw-r--r--js/src/jit-test/tests/self-hosting/oom-delazify.js2
-rw-r--r--js/src/jit-test/tests/self-hosting/oom-toplevel.js2
-rw-r--r--js/src/jit-test/tests/self-test/oom-test-bug1497906.js2
-rw-r--r--js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-bytelength-with-non-growable-write.js102
-rw-r--r--js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-bytelength.js67
-rw-r--r--js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-dataview-bytelength-with-non-growable-write.js103
-rw-r--r--js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-dataview-bytelength.js68
-rw-r--r--js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-bytelength-with-non-growable-write.js103
-rw-r--r--js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-bytelength.js67
-rw-r--r--js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-length-with-non-growable-write.js103
-rw-r--r--js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-length.js67
-rw-r--r--js/src/jit-test/tests/stream/bug-1513266.js2
-rw-r--r--js/src/jit-test/tests/stream/bug-1515816.js2
-rw-r--r--js/src/jit-test/tests/typedarray/construct-with-growable-sharedarraybuffer.js81
-rw-r--r--js/src/jit-test/tests/typedarray/construct-with-resizable-arraybuffer.js102
-rw-r--r--js/src/jit-test/tests/typedarray/ensure-non-inline.js12
-rw-r--r--js/src/jit-test/tests/typedarray/growable-sharedarraybuffer-bytelength.js14
-rw-r--r--js/src/jit-test/tests/typedarray/indexed-integer-exotics.js8
-rw-r--r--js/src/jit-test/tests/typedarray/oom-allocating-arraybuffer-contents.js2
-rw-r--r--js/src/jit-test/tests/typedarray/oom-allocating-copying-same-buffer-contents.js2
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-arraybuffer-bytelength.js20
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-buffer-inlined-data-moved.js53
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-bytelength-with-sab.js29
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-bytelength.js41
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-byteoffset-sab.js43
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-byteoffset.js57
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-get-elem-with-sab.js49
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-get-elem.js49
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-has-elem-with-sab.js36
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-has-elem.js36
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-intrinsic-byteOffset.js73
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-intrinsic-typedArrayLength.js75
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-intrinsic-typedArrayLengthZeroOnOutOfBounds.js75
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-length-with-sab.js29
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-length.js41
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-set-elem-with-sab.js54
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-set-elem.js54
-rw-r--r--js/src/jit-test/tests/warp/bug1665303.js2
-rw-r--r--js/src/jit-test/tests/warp/bug1667685.js2
-rw-r--r--js/src/jit-test/tests/warp/bug1668197.js1
-rw-r--r--js/src/jit-test/tests/warp/bug1871089.js13
-rw-r--r--js/src/jit-test/tests/warp/trial-inline-gc-4.js42
-rw-r--r--js/src/jit-test/tests/wasm/binary.js8
-rw-r--r--js/src/jit-test/tests/wasm/bug1858423.js2
-rw-r--r--js/src/jit-test/tests/wasm/builtin-modules/integer-gemm/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/builtin-modules/js-string/basic.js158
-rw-r--r--js/src/jit-test/tests/wasm/builtin-modules/js-string/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/builtin-modules/oom-test.js2
-rw-r--r--js/src/jit-test/tests/wasm/directiveless/bug1877358.js2
-rw-r--r--js/src/jit-test/tests/wasm/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/exceptions/bug-1751699.js2
-rw-r--r--js/src/jit-test/tests/wasm/exceptions/bug-1788213.js2
-rw-r--r--js/src/jit-test/tests/wasm/exceptions/bug-1791361.js2
-rw-r--r--js/src/jit-test/tests/wasm/exceptions/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/exceptions/oom-construct-message.js2
-rw-r--r--js/src/jit-test/tests/wasm/exceptions/oom-create-exception-data.js2
-rw-r--r--js/src/jit-test/tests/wasm/exceptions/unreachable.js2
-rw-r--r--js/src/jit-test/tests/wasm/exnref/bug1883865.js25
-rw-r--r--js/src/jit-test/tests/wasm/exnref/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/extended-const/basic.js2
-rw-r--r--js/src/jit-test/tests/wasm/extended-const/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/extended-const/disabled.js22
-rw-r--r--js/src/jit-test/tests/wasm/extended-const/pathological.js2
-rw-r--r--js/src/jit-test/tests/wasm/features.js24
-rw-r--r--js/src/jit-test/tests/wasm/function-references/as-non-null.js2
-rw-r--r--js/src/jit-test/tests/wasm/function-references/binary.js2
-rw-r--r--js/src/jit-test/tests/wasm/function-references/br-non-null.js2
-rw-r--r--js/src/jit-test/tests/wasm/function-references/br-null.js2
-rw-r--r--js/src/jit-test/tests/wasm/function-references/call_ref.js2
-rw-r--r--js/src/jit-test/tests/wasm/function-references/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/function-references/disabled.js4
-rw-r--r--js/src/jit-test/tests/wasm/function-references/nnl-test.js2
-rw-r--r--js/src/jit-test/tests/wasm/function-references/non-nullable-table.js38
-rw-r--r--js/src/jit-test/tests/wasm/function-references/non-nullable.js2
-rw-r--r--js/src/jit-test/tests/wasm/function-references/reftype-parse.js32
-rw-r--r--js/src/jit-test/tests/wasm/gc/arrays.js143
-rw-r--r--js/src/jit-test/tests/wasm/gc/binary.js16
-rw-r--r--js/src/jit-test/tests/wasm/gc/bug-1843295.js2
-rw-r--r--js/src/jit-test/tests/wasm/gc/bug-1845436.js2
-rw-r--r--js/src/jit-test/tests/wasm/gc/bug-1854007.js2
-rw-r--r--js/src/jit-test/tests/wasm/gc/bug-1879096.js65
-rw-r--r--js/src/jit-test/tests/wasm/gc/call-indirect-subtyping.js2
-rw-r--r--js/src/jit-test/tests/wasm/gc/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/gc/disabled.js2
-rw-r--r--js/src/jit-test/tests/wasm/gc/ion-and-baseline.js2
-rw-r--r--js/src/jit-test/tests/wasm/gc/limits.js69
-rw-r--r--js/src/jit-test/tests/wasm/gc/limits/array-new-fixed.js9
-rw-r--r--js/src/jit-test/tests/wasm/gc/limits/load-mod.js5
-rw-r--r--js/src/jit-test/tests/wasm/gc/limits/rec-groups-1.js6
-rw-r--r--js/src/jit-test/tests/wasm/gc/limits/rec-groups-2.js6
-rw-r--r--js/src/jit-test/tests/wasm/gc/limits/struct-fields.js11
-rw-r--r--js/src/jit-test/tests/wasm/gc/limits/subtyping-depth.js13
-rw-r--r--js/src/jit-test/tests/wasm/gc/limits/types-1.js6
-rw-r--r--js/src/jit-test/tests/wasm/gc/limits/types-2.js6
-rw-r--r--js/src/jit-test/tests/wasm/gc/limits/types-3.js6
-rw-r--r--js/src/jit-test/tests/wasm/gc/limits/types-4.js6
-rw-r--r--js/src/jit-test/tests/wasm/gc/ref.js2
-rw-r--r--js/src/jit-test/tests/wasm/gc/regress-1754701.js2
-rw-r--r--js/src/jit-test/tests/wasm/gc/regress-1884767.js13
-rw-r--r--js/src/jit-test/tests/wasm/gc/structs.js40
-rw-r--r--js/src/jit-test/tests/wasm/globals.js117
-rw-r--r--js/src/jit-test/tests/wasm/import-export.js7
-rw-r--r--js/src/jit-test/tests/wasm/memory-control/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/memory-control/memory-discard.js2
-rw-r--r--js/src/jit-test/tests/wasm/memory64/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/multi-memory/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/multi-value/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/oom/breakpoints.js2
-rw-r--r--js/src/jit-test/tests/wasm/oom/exports.js2
-rw-r--r--js/src/jit-test/tests/wasm/oom/jsapi-prototype.js2
-rw-r--r--js/src/jit-test/tests/wasm/ref-types/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/regress/bug1708124.js1
-rw-r--r--js/src/jit-test/tests/wasm/regress/bug1839065.js2
-rw-r--r--js/src/jit-test/tests/wasm/regress/bug1839142.js2
-rw-r--r--js/src/jit-test/tests/wasm/regress/bug1856733.js2
-rw-r--r--js/src/jit-test/tests/wasm/regress/bug1857829.js2
-rw-r--r--js/src/jit-test/tests/wasm/regress/bug1858982.js2
-rw-r--r--js/src/jit-test/tests/wasm/regress/bug1878673.js13
-rw-r--r--js/src/jit-test/tests/wasm/regress/bug1880770.js20
-rw-r--r--js/src/jit-test/tests/wasm/regress/oom-eval.js2
-rw-r--r--js/src/jit-test/tests/wasm/regress/oom-init.js2
-rw-r--r--js/src/jit-test/tests/wasm/regress/oom-masm-baseline.js2
-rw-r--r--js/src/jit-test/tests/wasm/regress/oom-wasm-streaming.js2
-rw-r--r--js/src/jit-test/tests/wasm/regress/oom-wasmtexttobinary-block.js2
-rw-r--r--js/src/jit-test/tests/wasm/regress/oom-wrong-argument-number-for-import-call.js2
-rw-r--r--js/src/jit-test/tests/wasm/simd/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/simd/experimental.js36
-rw-r--r--js/src/jit-test/tests/wasm/spec/exception-handling/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/spec/extended-const/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/spec/function-references/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/spec/function-references/return_call_ref.wast.js2
-rw-r--r--js/src/jit-test/tests/wasm/spec/gc/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/spec/memory64/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/spec/memory64/memory64.wast.js24
-rw-r--r--js/src/jit-test/tests/wasm/spec/multi-memory/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/spec/multi-memory/harness/harness.js9
-rw-r--r--js/src/jit-test/tests/wasm/spec/multi-memory/memory_trap1.wast.js36
-rw-r--r--js/src/jit-test/tests/wasm/spec/relaxed-simd/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/spec/spec/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/spec/spec/global.wast.js2
-rw-r--r--js/src/jit-test/tests/wasm/spec/spec/memory.wast.js18
-rw-r--r--js/src/jit-test/tests/wasm/spec/tail-call/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/tail-calls/bug1862473.js2
-rw-r--r--js/src/jit-test/tests/wasm/tail-calls/bug1865044.js2
-rw-r--r--js/src/jit-test/tests/wasm/tail-calls/bug1871605.js2
-rw-r--r--js/src/jit-test/tests/wasm/tail-calls/bug1871606.js2
-rw-r--r--js/src/jit-test/tests/wasm/tail-calls/bug1871951.js2
-rw-r--r--js/src/jit-test/tests/wasm/tail-calls/directives.txt2
-rw-r--r--js/src/jit-test/tests/wasm/tail-calls/exceptions.js2
-rw-r--r--js/src/jit-test/tests/wasm/tail-calls/gc.js2
-rw-r--r--js/src/jit-test/tests/wasm/tail-calls/litmus3.js2
-rw-r--r--js/src/jit-test/tests/wasm/tail-calls/litmus4.js2
-rw-r--r--js/src/jit-test/tests/wasm/tail-calls/litmus8.js2
-rw-r--r--js/src/jit-test/tests/wasm/tail-calls/litmus9.js2
-rw-r--r--js/src/jit-test/tests/wasm/tail-calls/return_call_ref.js2
-rw-r--r--js/src/jit-test/tests/wasm/testing/directives.txt1
-rw-r--r--js/src/jit-test/tests/wasm/testing/global-lossless-invoke.js13
-rw-r--r--js/src/jit-test/tests/xdr/bug1390856.js2
-rw-r--r--js/src/jit-test/tests/xdr/bug1427860.js2
-rw-r--r--js/src/jit-test/tests/xdr/incremental-oom.js2
-rw-r--r--js/src/jit-test/tests/xdr/module-oom.js2
-rw-r--r--js/src/jit-test/tests/xdr/stencil-oom.js2
-rw-r--r--js/src/jit/AtomicOp.h22
-rw-r--r--js/src/jit/BaselineCacheIRCompiler.cpp12
-rw-r--r--js/src/jit/CacheIR.cpp539
-rw-r--r--js/src/jit/CacheIR.h10
-rw-r--r--js/src/jit/CacheIRCompiler.cpp710
-rw-r--r--js/src/jit/CacheIRCompiler.h35
-rw-r--r--js/src/jit/CacheIRGenerator.h7
-rw-r--r--js/src/jit/CacheIROps.yaml115
-rw-r--r--js/src/jit/CacheIRReader.h3
-rw-r--r--js/src/jit/CacheIRSpewer.cpp6
-rw-r--r--js/src/jit/CacheIRWriter.h5
-rw-r--r--js/src/jit/CodeGenerator.cpp183
-rw-r--r--js/src/jit/Disassemble.cpp27
-rw-r--r--js/src/jit/ExecutableAllocator.h3
-rw-r--r--js/src/jit/GenerateAtomicOperations.py10
-rw-r--r--js/src/jit/GenerateCacheIRFiles.py8
-rw-r--r--js/src/jit/IonAnalysis.cpp17
-rw-r--r--js/src/jit/IonOptimizationLevels.h4
-rw-r--r--js/src/jit/JitFrames.cpp105
-rw-r--r--js/src/jit/JitFrames.h2
-rw-r--r--js/src/jit/JitOptions.cpp4
-rw-r--r--js/src/jit/JitOptions.h2
-rw-r--r--js/src/jit/JitRuntime.h16
-rw-r--r--js/src/jit/JitScript.cpp11
-rw-r--r--js/src/jit/JitSpewer.cpp3
-rw-r--r--js/src/jit/JitSpewer.h2
-rw-r--r--js/src/jit/JitZone.h3
-rw-r--r--js/src/jit/LIROps.yaml59
-rw-r--r--js/src/jit/Lowering.cpp139
-rw-r--r--js/src/jit/MIR.cpp85
-rw-r--r--js/src/jit/MIR.h213
-rw-r--r--js/src/jit/MIROps.yaml84
-rw-r--r--js/src/jit/MacroAssembler-inl.h63
-rw-r--r--js/src/jit/MacroAssembler.cpp255
-rw-r--r--js/src/jit/MacroAssembler.h247
-rw-r--r--js/src/jit/PcScriptCache.h88
-rw-r--r--js/src/jit/RangeAnalysis.cpp19
-rw-r--r--js/src/jit/Recover.cpp11
-rw-r--r--js/src/jit/Registers.h1
-rw-r--r--js/src/jit/VMFunctions.cpp82
-rw-r--r--js/src/jit/VMFunctions.h43
-rw-r--r--js/src/jit/WarpBuilderShared.cpp9
-rw-r--r--js/src/jit/WarpCacheIRTranspiler.cpp506
-rw-r--r--js/src/jit/arm/CodeGenerator-arm.cpp4
-rw-r--r--js/src/jit/arm/MacroAssembler-arm.cpp160
-rw-r--r--js/src/jit/arm64/CodeGenerator-arm64.cpp4
-rw-r--r--js/src/jit/arm64/MacroAssembler-arm64.cpp122
-rw-r--r--js/src/jit/loong64/Assembler-loong64.cpp6
-rw-r--r--js/src/jit/loong64/Assembler-loong64.h11
-rw-r--r--js/src/jit/loong64/CodeGenerator-loong64.cpp4
-rw-r--r--js/src/jit/loong64/MacroAssembler-loong64.cpp178
-rw-r--r--js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp4
-rw-r--r--js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp136
-rw-r--r--js/src/jit/mips32/MacroAssembler-mips32.cpp10
-rw-r--r--js/src/jit/mips64/MacroAssembler-mips64.cpp42
-rw-r--r--js/src/jit/riscv64/CodeGenerator-riscv64.cpp4
-rw-r--r--js/src/jit/riscv64/MacroAssembler-riscv64.cpp178
-rw-r--r--js/src/jit/shared/Assembler-shared.h4
-rw-r--r--js/src/jit/shared/LIR-shared.h11
-rw-r--r--js/src/jit/wasm32/CodeGenerator-wasm32.cpp1
-rw-r--r--js/src/jit/x64/CodeGenerator-x64.cpp2
-rw-r--r--js/src/jit/x64/Lowering-x64.cpp8
-rw-r--r--js/src/jit/x64/MacroAssembler-x64.cpp45
-rw-r--r--js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp6
-rw-r--r--js/src/jit/x86-shared/Lowering-x86-shared.cpp4
-rw-r--r--js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h3
-rw-r--r--js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp171
-rw-r--r--js/src/jit/x86-shared/MacroAssembler-x86-shared.h9
-rw-r--r--js/src/jit/x86/Lowering-x86.cpp4
-rw-r--r--js/src/jit/x86/MacroAssembler-x86.cpp39
-rw-r--r--js/src/jsapi-tests/testCompileScript.cpp30
-rw-r--r--js/src/jsapi-tests/testFrontendCompileStencil.cpp8
-rw-r--r--js/src/jsapi-tests/testFrontendErrors.cpp6
-rw-r--r--js/src/jsapi-tests/testStencil.cpp2
-rw-r--r--js/src/jsapi.cpp24
-rw-r--r--js/src/jsdate.cpp88
-rw-r--r--js/src/moz.build9
-rw-r--r--js/src/old-configure.in133
-rw-r--r--js/src/shell/ModuleLoader.cpp51
-rw-r--r--js/src/shell/ModuleLoader.h6
-rw-r--r--js/src/shell/ShellModuleObjectWrapper.cpp4
-rw-r--r--js/src/shell/js.cpp338
-rw-r--r--js/src/shell/jsshell.h5
-rw-r--r--js/src/tests/jstests.list13
-rw-r--r--js/src/tests/lib/tasks_adb_remote.py2
-rw-r--r--js/src/tests/non262/Date/dashed-date.js6
-rw-r--r--js/src/tests/non262/Date/parse-keywords.js7
-rw-r--r--js/src/tests/non262/Intl/ListFormat/unit-type.js5
-rw-r--r--js/src/tests/non262/String/make-normalize-generateddata-input.py2
-rw-r--r--js/src/tests/non262/argumentsLengthOpt.js87
-rw-r--r--js/src/tests/non262/extensions/typedarray-set-detach.js (renamed from js/src/tests/non262/extensions/typedarray-set-neutering.js)0
-rw-r--r--js/src/tests/non262/reflect-parse/argumentsReflect.js14
-rw-r--r--js/src/tests/shell/compression.js30
-rwxr-xr-xjs/src/tests/test262-export.py2
-rwxr-xr-xjs/src/tests/test262-update.py1
-rw-r--r--js/src/tests/test262/language/expressions/dynamic-import/import-assertions/2nd-param-assert-enumeration-enumerable.js2
-rw-r--r--js/src/tests/test262/language/expressions/dynamic-import/import-attributes/2nd-param-with-enumeration-enumerable.js2
-rw-r--r--js/src/tests/test262/language/import/import-assertions/json-extensibility-array.js2
-rw-r--r--js/src/tests/test262/language/import/import-assertions/json-extensibility-object.js2
-rw-r--r--js/src/tests/test262/language/import/import-assertions/json-idempotency.js2
-rw-r--r--js/src/tests/test262/language/import/import-assertions/json-invalid.js2
-rw-r--r--js/src/tests/test262/language/import/import-assertions/json-named-bindings.js2
-rw-r--r--js/src/tests/test262/language/import/import-assertions/json-value-array.js2
-rw-r--r--js/src/tests/test262/language/import/import-assertions/json-value-boolean.js2
-rw-r--r--js/src/tests/test262/language/import/import-assertions/json-value-null.js2
-rw-r--r--js/src/tests/test262/language/import/import-assertions/json-value-number.js2
-rw-r--r--js/src/tests/test262/language/import/import-assertions/json-value-object.js2
-rw-r--r--js/src/tests/test262/language/import/import-assertions/json-value-string.js2
-rw-r--r--js/src/tests/test262/language/import/import-assertions/json-via-namespace.js2
-rw-r--r--js/src/tests/test262/language/import/import-attributes/json-extensibility-array.js2
-rw-r--r--js/src/tests/test262/language/import/import-attributes/json-extensibility-object.js2
-rw-r--r--js/src/tests/test262/language/import/import-attributes/json-idempotency.js2
-rw-r--r--js/src/tests/test262/language/import/import-attributes/json-invalid.js2
-rw-r--r--js/src/tests/test262/language/import/import-attributes/json-named-bindings.js2
-rw-r--r--js/src/tests/test262/language/import/import-attributes/json-value-array.js2
-rw-r--r--js/src/tests/test262/language/import/import-attributes/json-value-boolean.js2
-rw-r--r--js/src/tests/test262/language/import/import-attributes/json-value-null.js2
-rw-r--r--js/src/tests/test262/language/import/import-attributes/json-value-number.js2
-rw-r--r--js/src/tests/test262/language/import/import-attributes/json-value-object.js2
-rw-r--r--js/src/tests/test262/language/import/import-attributes/json-value-string.js2
-rw-r--r--js/src/tests/test262/language/import/import-attributes/json-via-namespace.js2
-rw-r--r--js/src/util/StructuredSpewer.cpp82
-rw-r--r--js/src/util/StructuredSpewer.h5
-rwxr-xr-xjs/src/util/make_unicode.py2
-rw-r--r--js/src/vm/ArgumentsObject.h26
-rw-r--r--js/src/vm/ArrayBufferObject.cpp30
-rw-r--r--js/src/vm/ArrayBufferObject.h14
-rw-r--r--js/src/vm/ArrayBufferViewObject.cpp175
-rw-r--r--js/src/vm/ArrayBufferViewObject.h112
-rw-r--r--js/src/vm/BigIntType.h1
-rw-r--r--js/src/vm/CharacterEncoding.cpp13
-rw-r--r--js/src/vm/CommonPropertyNames.h1
-rw-r--r--js/src/vm/EnvironmentObject.cpp34
-rw-r--r--js/src/vm/EnvironmentObject.h7
-rw-r--r--js/src/vm/GlobalObject.cpp2
-rw-r--r--js/src/vm/GlobalObject.h13
-rw-r--r--js/src/vm/HelperThreadState.h21
-rw-r--r--js/src/vm/HelperThreads.cpp2
-rw-r--r--js/src/vm/Interpreter.cpp10
-rw-r--r--js/src/vm/Interpreter.h2
-rw-r--r--js/src/vm/Iteration.cpp2
-rw-r--r--js/src/vm/JSContext-inl.h46
-rw-r--r--js/src/vm/JSContext.cpp30
-rw-r--r--js/src/vm/JSContext.h10
-rw-r--r--js/src/vm/JSONParser.cpp3
-rw-r--r--js/src/vm/JSONParser.h2
-rw-r--r--js/src/vm/JSObject.cpp66
-rw-r--r--js/src/vm/JSScript.cpp6
-rw-r--r--js/src/vm/MemoryMetrics.cpp11
-rw-r--r--js/src/vm/Modules.cpp269
-rw-r--r--js/src/vm/Modules.h2
-rw-r--r--js/src/vm/NativeObject.cpp4
-rw-r--r--js/src/vm/Opcodes.h3
-rw-r--r--js/src/vm/PlainObject.cpp54
-rw-r--r--js/src/vm/PlainObject.h5
-rw-r--r--js/src/vm/PortableBaselineInterpret.cpp1053
-rw-r--r--js/src/vm/Realm.cpp10
-rw-r--r--js/src/vm/Realm.h6
-rw-r--r--js/src/vm/RegExpObject.cpp40
-rw-r--r--js/src/vm/RegExpShared.h2
-rw-r--r--js/src/vm/Runtime.h6
-rw-r--r--js/src/vm/Scope.cpp27
-rw-r--r--js/src/vm/Scope.h4
-rw-r--r--js/src/vm/SharedArrayObject.h8
-rw-r--r--js/src/vm/SharedStencil.h44
-rw-r--r--js/src/vm/Stack.cpp15
-rw-r--r--js/src/vm/StringType-inl.h4
-rw-r--r--js/src/vm/StringType.cpp10
-rw-r--r--js/src/vm/StringType.h14
-rw-r--r--js/src/vm/StructuredClone.cpp2
-rw-r--r--js/src/vm/TypedArrayObject.cpp205
-rw-r--r--js/src/vm/TypedArrayObject.h41
-rw-r--r--js/src/vm/UbiNodeCensus.cpp66
-rw-r--r--js/src/vm/Value.cpp7
-rw-r--r--js/src/vm/Watchtower.cpp13
-rw-r--r--js/src/wasm/GenerateBuiltinModules.py93
-rw-r--r--js/src/wasm/WasmBCClass.h8
-rw-r--r--js/src/wasm/WasmBCMemory.cpp8
-rw-r--r--js/src/wasm/WasmBaselineCompile.cpp140
-rw-r--r--js/src/wasm/WasmBinary.h47
-rw-r--r--js/src/wasm/WasmBuiltinModule.cpp109
-rw-r--r--js/src/wasm/WasmBuiltinModule.h61
-rw-r--r--js/src/wasm/WasmBuiltinModule.yaml226
-rw-r--r--js/src/wasm/WasmBuiltins.cpp6
-rw-r--r--js/src/wasm/WasmCode.cpp17
-rw-r--r--js/src/wasm/WasmCode.h5
-rw-r--r--js/src/wasm/WasmCodegenTypes.h2
-rw-r--r--js/src/wasm/WasmCompile.cpp51
-rw-r--r--js/src/wasm/WasmCompile.h13
-rw-r--r--js/src/wasm/WasmCompileArgs.h9
-rw-r--r--js/src/wasm/WasmFeatures.cpp22
-rw-r--r--js/src/wasm/WasmFrameIter.cpp13
-rw-r--r--js/src/wasm/WasmFrameIter.h8
-rw-r--r--js/src/wasm/WasmGcObject-inl.h3
-rw-r--r--js/src/wasm/WasmGenerator.cpp2
-rw-r--r--js/src/wasm/WasmInitExpr.cpp19
-rw-r--r--js/src/wasm/WasmInstance.cpp76
-rw-r--r--js/src/wasm/WasmInstance.h15
-rw-r--r--js/src/wasm/WasmIonCompile.cpp342
-rw-r--r--js/src/wasm/WasmIonCompile.h8
-rw-r--r--js/src/wasm/WasmJS.cpp78
-rw-r--r--js/src/wasm/WasmJS.h6
-rw-r--r--js/src/wasm/WasmModule.cpp2
-rw-r--r--js/src/wasm/WasmOpIter.cpp4
-rw-r--r--js/src/wasm/WasmOpIter.h51
-rw-r--r--js/src/wasm/WasmProcess.cpp13
-rw-r--r--js/src/wasm/WasmSerialize.cpp2
-rw-r--r--js/src/wasm/WasmStaticTypeDefs.cpp50
-rw-r--r--js/src/wasm/WasmStaticTypeDefs.h41
-rw-r--r--js/src/wasm/WasmTypeDef.h9
-rw-r--r--js/src/wasm/WasmValType.cpp99
-rw-r--r--js/src/wasm/WasmValType.h4
-rw-r--r--js/src/wasm/WasmValidate.cpp84
-rw-r--r--js/src/wasm/WasmValidate.h10
-rw-r--r--js/src/wasm/WasmValue.cpp2
-rw-r--r--js/src/wasm/WasmValue.h10
-rw-r--r--js/src/wasm/moz.build1
-rw-r--r--js/xpconnect/idl/xpccomponents.idl9
-rw-r--r--js/xpconnect/loader/ChromeScriptLoader.cpp9
-rw-r--r--js/xpconnect/loader/mozJSModuleLoader.cpp50
-rw-r--r--js/xpconnect/loader/mozJSModuleLoader.h9
-rw-r--r--js/xpconnect/loader/nsImportModule.cpp35
-rw-r--r--js/xpconnect/loader/nsImportModule.h112
-rw-r--r--js/xpconnect/src/Sandbox.cpp8
-rw-r--r--js/xpconnect/src/XPCComponents.cpp2
-rw-r--r--js/xpconnect/src/XPCConvert.cpp46
-rw-r--r--js/xpconnect/src/XPCJSContext.cpp5
-rw-r--r--js/xpconnect/src/XPCJSRuntime.cpp35
-rw-r--r--js/xpconnect/src/XPCShellImpl.cpp11
-rw-r--r--js/xpconnect/src/xpcprivate.h3
-rw-r--r--js/xpconnect/tests/browser/browser.toml3
-rw-r--r--js/xpconnect/tests/browser/browser_date_telemetry.js70
-rw-r--r--js/xpconnect/tests/browser/browser_dead_object.js11
-rw-r--r--js/xpconnect/tests/browser/browser_exception_leak.js11
-rw-r--r--js/xpconnect/tests/chrome/test_bug799348.xhtml4
-rw-r--r--js/xpconnect/tests/chrome/test_cows.xhtml2
-rw-r--r--js/xpconnect/tests/chrome/test_windowProxyDeadWrapper.html2
-rw-r--r--js/xpconnect/tests/chrome/test_xrayToJS.xhtml6
-rw-r--r--js/xpconnect/tests/components/native/moz.build1
-rw-r--r--js/xpconnect/tests/components/native/xpctest_module.cpp2
-rw-r--r--js/xpconnect/tests/components/native/xpctest_private.h11
-rw-r--r--js/xpconnect/tests/components/native/xpctest_returncode.cpp20
-rw-r--r--js/xpconnect/tests/idl/xpctest_esmreturncode.idl45
-rw-r--r--js/xpconnect/tests/idl/xpctest_utils.idl23
-rw-r--r--js/xpconnect/tests/unit/ReturnCodeChild.jsm51
-rw-r--r--js/xpconnect/tests/unit/es6module_devtoolsLoader.sys.mjs91
-rw-r--r--js/xpconnect/tests/unit/import_shared_in_worker.js10
-rw-r--r--js/xpconnect/tests/unit/lazy_shared_in_worker.js14
-rw-r--r--js/xpconnect/tests/unit/test_defineESModuleGetters_options.js18
-rw-r--r--js/xpconnect/tests/unit/test_defineESModuleGetters_options_worker.js1
-rw-r--r--js/xpconnect/tests/unit/test_import_devtools_loader.js28
-rw-r--r--js/xpconnect/tests/unit/test_import_global.js8
-rw-r--r--js/xpconnect/tests/unit/test_import_global_worker.js1
-rw-r--r--js/xpconnect/tests/unit/test_returncode.js4
-rw-r--r--js/xpconnect/tests/unit/xpcshell.toml1
852 files changed, 12788 insertions, 6466 deletions
diff --git a/js/app.mozbuild b/js/app.mozbuild
index 292107c05d..10a127ac3b 100644
--- a/js/app.mozbuild
+++ b/js/app.mozbuild
@@ -13,11 +13,6 @@ else:
"/js/src/tests",
]
-if CONFIG["JS_STANDALONE"] and CONFIG["OS_ARCH"] != "WINNT":
- DIRS += [
- "/build/unix",
- ]
-
DIRS += [
"/config/external/fdlibm",
"/config/external/nspr",
diff --git a/js/loader/ModuleLoadRequest.cpp b/js/loader/ModuleLoadRequest.cpp
index d90d41da58..7e188160fc 100644
--- a/js/loader/ModuleLoadRequest.cpp
+++ b/js/loader/ModuleLoadRequest.cpp
@@ -27,6 +27,9 @@ NS_IMPL_CYCLE_COLLECTION_CLASS(ModuleLoadRequest)
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(ModuleLoadRequest,
ScriptLoadRequest)
+ if (tmp->mWaitingParentRequest) {
+ tmp->mWaitingParentRequest->ChildModuleUnlinked();
+ }
NS_IMPL_CYCLE_COLLECTION_UNLINK(mLoader, mRootModule, mModuleScript, mImports,
mWaitingParentRequest,
mDynamicReferencingScript)
@@ -230,6 +233,18 @@ void ModuleLoadRequest::LoadFinished() {
mLoader->OnModuleLoadComplete(request);
}
+void ModuleLoadRequest::ChildModuleUnlinked() {
+ // This module was waiting for a child request, but the child request
+ // got unlinked by CC and will never complete.
+ // It also means this module itself is in the cycle, and will be
+ // unlinked or has already been unlinked, and will be collected.
+ // There's no need to finish the module request normally.
+ // Just reflect the awaiting imports count, so that the assertion in the
+ // destructor passes.
+ MOZ_ASSERT(mAwaitingImports > 0);
+ mAwaitingImports--;
+}
+
void ModuleLoadRequest::SetDynamicImport(LoadedScript* aReferencingScript,
JS::Handle<JSString*> aSpecifier,
JS::Handle<JSObject*> aPromise) {
diff --git a/js/loader/ModuleLoadRequest.h b/js/loader/ModuleLoadRequest.h
index cb33c532fc..4a2eeadf43 100644
--- a/js/loader/ModuleLoadRequest.h
+++ b/js/loader/ModuleLoadRequest.h
@@ -125,6 +125,8 @@ class ModuleLoadRequest final : public ScriptLoadRequest {
void CancelImports();
void CheckModuleDependenciesLoaded();
+ void ChildModuleUnlinked();
+
void AssertAllImportsFinished() const;
void AssertAllImportsCancelled() const;
diff --git a/js/loader/ModuleLoaderBase.cpp b/js/loader/ModuleLoaderBase.cpp
index 59e77b2d9c..228c96ad69 100644
--- a/js/loader/ModuleLoaderBase.cpp
+++ b/js/loader/ModuleLoaderBase.cpp
@@ -969,7 +969,12 @@ void ModuleLoaderBase::FinishDynamicImport(
LOG(("ScriptLoadRequest (%p): Finish dynamic import %x %d", aRequest,
unsigned(aResult), JS_IsExceptionPending(aCx)));
- MOZ_ASSERT(GetCurrentModuleLoader(aCx) == aRequest->mLoader);
+ MOZ_ASSERT_IF(NS_SUCCEEDED(aResult),
+ GetCurrentModuleLoader(aCx) == aRequest->mLoader);
+ // In the failure case, aRequest may have already been unlinked by CC.
+ MOZ_ASSERT_IF(
+ NS_FAILED(aResult),
+ GetCurrentModuleLoader(aCx) == aRequest->mLoader || !aRequest->mLoader);
// If aResult is a failed result, we don't have an EvaluationPromise. If it
// succeeded, evaluationPromise may still be null, but in this case it will
@@ -1057,7 +1062,8 @@ bool ModuleLoaderBase::HasPendingDynamicImports() const {
void ModuleLoaderBase::CancelDynamicImport(ModuleLoadRequest* aRequest,
nsresult aResult) {
- MOZ_ASSERT(aRequest->mLoader == this);
+ // aRequest may have already been unlinked by CC.
+ MOZ_ASSERT(aRequest->mLoader == this || !aRequest->mLoader);
RefPtr<ScriptLoadRequest> req = mDynamicImportRequests.Steal(aRequest);
if (!aRequest->IsCanceled()) {
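For readers unfamiliar with the MFBT macro used in the FinishDynamicImport hunk above, MOZ_ASSERT_IF(cond, expr) only asserts expr when cond holds, so the relaxed assertions tolerate a request whose mLoader has already been cleared by cycle collection. Roughly, in debug builds:

    // Informal reading of MOZ_ASSERT_IF (debug-only, compiled out in release):
    // MOZ_ASSERT_IF(cond, expr)  ~  if (cond) { MOZ_ASSERT(expr); }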
diff --git a/js/moz.configure b/js/moz.configure
index 760507eee2..cbcaf38f01 100644
--- a/js/moz.configure
+++ b/js/moz.configure
@@ -567,7 +567,7 @@ set_config("JS_MASM_VERBOSE", depends_if("--enable-masm-verbose")(lambda _: True
# FJCVTZS instruction as part of ARMv8.3-JSConv.
@depends(target)
def is_apple_silicon(target):
- return target.os == "OSX" and target.kernel == "Darwin" and target.cpu == "aarch64"
+ return target.kernel == "Darwin" and target.cpu == "aarch64"
option(
@@ -705,29 +705,6 @@ option(
help="Force disable all wasm experimental features for testing.",
)
-# Support for WebAssembly function-references.
-# ===========================
-
-
-option(
- "--disable-wasm-function-references",
- default=True,
- help="{Enable|Disable} WebAssembly function-references",
-)
-
-
-@depends("--disable-wasm-function-references", "--wasm-no-experimental")
-def wasm_function_references(value, no_experimental):
- if no_experimental:
- return
-
- if value:
- return True
-
-
-set_config("ENABLE_WASM_FUNCTION_REFERENCES", wasm_function_references)
-set_define("ENABLE_WASM_FUNCTION_REFERENCES", wasm_function_references)
-
# Support for WebAssembly tail-calls.
# ===========================
@@ -765,28 +742,14 @@ set_define("ENABLE_WASM_TAIL_CALLS", wasm_tail_calls)
# ===========================
-@depends("--disable-wasm-function-references")
-def default_wasm_gc(function_references):
- if function_references:
- return True
-
-
-option(
- "--disable-wasm-gc", default=default_wasm_gc, help="{Enable|Disable} WebAssembly GC"
-)
+option("--disable-wasm-gc", default=True, help="{Enable|Disable} WebAssembly GC")
-@depends(
- "--disable-wasm-gc", "--disable-wasm-function-references", "--wasm-no-experimental"
-)
-def wasm_gc(value, function_references, no_experimental):
- if no_experimental or not value:
+@depends("--disable-wasm-gc")
+def wasm_gc(value):
+ if not value:
return
-
- if function_references:
- return True
-
- die("--disable-wasm-gc only possible with --disable-wasm-function-references")
+ return True
set_config("ENABLE_WASM_GC", wasm_gc)
@@ -796,20 +759,14 @@ set_define("ENABLE_WASM_GC", wasm_gc)
# ==========================================
-@depends(milestone.is_nightly)
-def default_wasm_js_string_builtins(is_nightly):
- if is_nightly:
- return True
-
-
option(
- "--enable-wasm-js-string-builtins",
- default=default_wasm_js_string_builtins,
+ "--disable-wasm-js-string-builtins",
+ default=True,
help="{Enable|Disable} WebAssembly JS String Builtins",
)
-@depends("--enable-wasm-js-string-builtins", "--wasm-no-experimental")
+@depends("--disable-wasm-js-string-builtins", "--wasm-no-experimental")
def wasm_js_string_builtins(value, no_experimental):
if no_experimental or not value:
return
@@ -839,25 +796,6 @@ def enable_shared_memory(value):
set_config("ENABLE_SHARED_MEMORY", enable_shared_memory)
set_define("ENABLE_SHARED_MEMORY", enable_shared_memory)
-# Support for WebAssembly extended constant expressions
-# =====================================================
-
-
-option(
- "--disable-wasm-extended-const",
- help="{Enable|Disable} WebAssembly extended constant expressions",
-)
-
-
-@depends("--disable-wasm-extended-const")
-def wasm_extended_const(value):
- if value:
- return True
-
-
-set_config("ENABLE_WASM_EXTENDED_CONST", wasm_extended_const)
-set_define("ENABLE_WASM_EXTENDED_CONST", wasm_extended_const)
-
# Support for WebAssembly SIMD
# =====================================================
@@ -1128,25 +1066,16 @@ set_define("ENABLE_WASM_MEMORY_CONTROL", wasm_memory_control)
# =====================================
-@depends(milestone.is_nightly)
-def default_wasm_multi_memory(is_nightly):
- if is_nightly:
- return True
-
-
option(
- "--enable-wasm-multi-memory",
- default=default_wasm_multi_memory,
+ "--disable-wasm-multi-memory",
help="{Enable|Disable} WebAssembly multi-memory",
)
-@depends("--enable-wasm-multi-memory", "--wasm-no-experimental")
-def wasm_multi_memory(value, no_experimental):
- if no_experimental or not value:
- return
-
- return True
+@depends("--disable-wasm-multi-memory")
+def wasm_multi_memory(value):
+ if value:
+ return True
set_config("ENABLE_WASM_MULTI_MEMORY", wasm_multi_memory)
@@ -1410,6 +1339,16 @@ with only_when(compile_environment & depends(target.os)(lambda os: os != "WINNT"
set_define("HAVE_PTHREAD_GET_NAME_NP", check_symbol("pthread_get_name_np"))
set_define("HAVE_STRERROR", check_symbol("strerror"))
+ set_config(
+ "HAVE_LANGINFO_CODESET",
+ try_link(
+ includes=["langinfo.h"],
+ body="char* cs = nl_langinfo(CODESET);",
+ check_msg="for nl_langinfo and CODESET",
+ when=building_with_gnu_cc,
+ ),
+ )
+
@depends(check_symbol("__cxa_demangle", language="C++"), moz_debug, dmd)
def demangle_symbols(cxa_demangle, moz_debug, dmd):
# Demangle only for debug or DMD builds
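The try_link probe added to js/moz.configure above compiles and links a small program against langinfo.h; when that succeeds (under a GNU-compatible compiler, per the when= condition), HAVE_LANGINFO_CODESET is recorded in the build configuration. A stand-alone sketch of what the probe amounts to (the main() wrapper here is illustrative, not part of the actual check):

    #include <langinfo.h>

    int main() {
      // Mirrors the probe body: links only where nl_langinfo and CODESET exist.
      char* cs = nl_langinfo(CODESET);
      return cs != nullptr ? 0 : 1;
    }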
diff --git a/js/public/ContextOptions.h b/js/public/ContextOptions.h
index 25744ce9ea..0ac79c0ec7 100644
--- a/js/public/ContextOptions.h
+++ b/js/public/ContextOptions.h
@@ -27,9 +27,6 @@ class JS_PUBLIC_API ContextOptions {
wasmVerbose_(false),
wasmBaseline_(true),
wasmIon_(true),
-#define WASM_FEATURE(NAME, LOWER_NAME, STAGE, ...) wasm##NAME##_(STAGE == WasmFeatureStage::Default),
- JS_FOR_WASM_FEATURES(WASM_FEATURE)
-#undef WASM_FEATURE
testWasmAwaitTier2_(false),
disableIon_(false),
disableEvalSecurityChecks_(false),
@@ -98,15 +95,6 @@ class JS_PUBLIC_API ContextOptions {
return *this;
}
-#define WASM_FEATURE(NAME, ...) \
- bool wasm##NAME() const { return wasm##NAME##_; } \
- ContextOptions& setWasm##NAME(bool flag) { \
- wasm##NAME##_ = flag; \
- return *this; \
- }
- JS_FOR_WASM_FEATURES(WASM_FEATURE)
-#undef WASM_FEATURE
-
bool throwOnAsmJSValidationFailure() const {
return compileOptions_.throwOnAsmJSValidationFailure();
}
@@ -224,9 +212,6 @@ class JS_PUBLIC_API ContextOptions {
bool wasmVerbose_ : 1;
bool wasmBaseline_ : 1;
bool wasmIon_ : 1;
-#define WASM_FEATURE(NAME, ...) bool wasm##NAME##_ : 1;
- JS_FOR_WASM_FEATURES(WASM_FEATURE)
-#undef WASM_FEATURE
bool testWasmAwaitTier2_ : 1;
// JIT options.
diff --git a/js/public/GCAPI.h b/js/public/GCAPI.h
index b0f1325a1e..9bdaca9661 100644
--- a/js/public/GCAPI.h
+++ b/js/public/GCAPI.h
@@ -308,22 +308,21 @@ typedef enum JSGCParamKey {
JSGC_LARGE_HEAP_INCREMENTAL_LIMIT = 26,
/**
- * Attempt to run a minor GC in the idle time if the free space falls
- * below this number of bytes.
+ * Free space bytes threshold for eager nursery collection.
*
* Default: NurseryChunkUsableSize / 4
- * Pref: None
+ * Pref: javascript.options.mem.nursery_eager_collection_threshold_kb
*/
- JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION = 27,
+ JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_KB = 27,
/**
- * Attempt to run a minor GC in the idle time if the free space falls
- * below this percentage (from 0 to 99).
+ * Free space fraction threshold for eager nursery collection. This is a
+ * percentage (from 0 to 99).
*
* Default: 25
- * Pref: None
+ * Pref: javascript.options.mem.nursery_eager_collection_threshold_percent
*/
- JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT = 30,
+ JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_PERCENT = 30,
/**
* Minimum size of the generational GC nurseries.
@@ -418,9 +417,9 @@ typedef enum JSGCParamKey {
* collected in this many milliseconds.
*
* Default: 5000
- * Pref: None
+ * Pref: javascript.options.mem.nursery_eager_collection_timeout_ms
*/
- JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS = 46,
+ JSGC_NURSERY_EAGER_COLLECTION_TIMEOUT_MS = 46,
/**
* The system page size in KB.
@@ -456,6 +455,7 @@ typedef enum JSGCParamKey {
/**
* The heap size above which to use parallel marking.
*
+ * Pref: javascript.options.mem.gc_parallel_marking_threshold_mb
* Default: ParallelMarkingThresholdMB
*/
JSGC_PARALLEL_MARKING_THRESHOLD_MB = 50,
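
As a rough sketch of how an embedder might tune the renamed eager-collection parameters through JS_SetGCParameter (assuming an initialized JSContext* cx; the values below are illustrative, not the defaults):

    #include "js/GCAPI.h"

    void TuneEagerNurseryCollection(JSContext* cx) {
      // Request an eager minor GC once free nursery space drops below 256 KB...
      JS_SetGCParameter(cx, JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_KB, 256);
      // ...or below 25% of the nursery capacity.
      JS_SetGCParameter(cx, JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_PERCENT, 25);
      // Also collect eagerly if the nursery has gone 5 seconds uncollected.
      JS_SetGCParameter(cx, JSGC_NURSERY_EAGER_COLLECTION_TIMEOUT_MS, 5000);
    }
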
@@ -1287,10 +1287,26 @@ JS_GetExternalStringCallbacks(JSString* str);
namespace JS {
+/**
+ * Check whether the nursery should be eagerly collected, that is, before it
+ * is full.
+ *
+ * The idea is that this can be called when the host environment has some idle
+ * time which it can use for GC activity.
+ *
+ * Returns GCReason::NO_REASON to indicate no collection is desired.
+ */
extern JS_PUBLIC_API GCReason WantEagerMinorGC(JSRuntime* rt);
extern JS_PUBLIC_API GCReason WantEagerMajorGC(JSRuntime* rt);
+/**
+ * Check whether the nursery should be eagerly collected as per WantEagerMinorGC
+ * above, and if so run a collection.
+ *
+ * The idea is that this can be called when the host environment has some idle
+ * time which it can use for GC activity.
+ */
extern JS_PUBLIC_API void MaybeRunNurseryCollection(JSRuntime* rt,
JS::GCReason reason);
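
A minimal sketch of an embedder's idle-time hook built on these two entry points; everything other than the JS::* calls declared above is hypothetical:

    void OnIdleTick(JSRuntime* rt) {
      JS::GCReason reason = JS::WantEagerMinorGC(rt);
      if (reason != JS::GCReason::NO_REASON) {
        JS::MaybeRunNurseryCollection(rt, reason);
      }
    }
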
diff --git a/js/public/HeapAPI.h b/js/public/HeapAPI.h
index 3dfe00bd0d..26cca9e1c3 100644
--- a/js/public/HeapAPI.h
+++ b/js/public/HeapAPI.h
@@ -535,9 +535,11 @@ static MOZ_ALWAYS_INLINE TenuredChunkBase* GetCellChunkBase(
return chunk;
}
-static MOZ_ALWAYS_INLINE JS::Zone* GetTenuredGCThingZone(const uintptr_t addr) {
- MOZ_ASSERT(addr);
- const uintptr_t zone_addr = (addr & ~ArenaMask) | ArenaZoneOffset;
+static MOZ_ALWAYS_INLINE JS::Zone* GetTenuredGCThingZone(const void* ptr) {
+ // This takes a void* because the compiler can't see type relationships in
+ // this header. |ptr| must be a pointer to a tenured GC thing.
+ MOZ_ASSERT(ptr);
+ const uintptr_t zone_addr = (uintptr_t(ptr) & ~ArenaMask) | ArenaZoneOffset;
return *reinterpret_cast<JS::Zone**>(zone_addr);
}
@@ -631,7 +633,7 @@ MOZ_ALWAYS_INLINE bool IsCellPointerValid(const void* ptr) {
auto* cell = reinterpret_cast<const Cell*>(ptr);
if (!IsInsideNursery(cell)) {
- return detail::GetTenuredGCThingZone(addr) != nullptr;
+ return detail::GetTenuredGCThingZone(cell) != nullptr;
}
return true;
@@ -649,16 +651,13 @@ MOZ_ALWAYS_INLINE bool IsCellPointerValidOrNull(const void* cell) {
namespace JS {
-static MOZ_ALWAYS_INLINE Zone* GetTenuredGCThingZone(GCCellPtr thing) {
- MOZ_ASSERT(!js::gc::IsInsideNursery(thing.asCell()));
- return js::gc::detail::GetTenuredGCThingZone(thing.unsafeAsUIntPtr());
-}
+extern JS_PUBLIC_API Zone* GetTenuredGCThingZone(GCCellPtr thing);
extern JS_PUBLIC_API Zone* GetNurseryCellZone(js::gc::Cell* cell);
static MOZ_ALWAYS_INLINE Zone* GetGCThingZone(GCCellPtr thing) {
if (!js::gc::IsInsideNursery(thing.asCell())) {
- return js::gc::detail::GetTenuredGCThingZone(thing.unsafeAsUIntPtr());
+ return js::gc::detail::GetTenuredGCThingZone(thing.asCell());
}
return GetNurseryCellZone(thing.asCell());
@@ -666,9 +665,9 @@ static MOZ_ALWAYS_INLINE Zone* GetGCThingZone(GCCellPtr thing) {
static MOZ_ALWAYS_INLINE Zone* GetStringZone(JSString* str) {
if (!js::gc::IsInsideNursery(str)) {
- return js::gc::detail::GetTenuredGCThingZone(
- reinterpret_cast<uintptr_t>(str));
+ return js::gc::detail::GetTenuredGCThingZone(str);
}
+
return GetNurseryCellZone(reinterpret_cast<js::gc::Cell*>(str));
}
@@ -767,7 +766,7 @@ static MOZ_ALWAYS_INLINE void ExposeGCThingToActiveJS(JS::GCCellPtr thing) {
// GC things owned by other runtimes are always black.
MOZ_ASSERT(!thing.mayBeOwnedByOtherRuntime());
- auto* zone = JS::shadow::Zone::from(JS::GetTenuredGCThingZone(thing));
+ auto* zone = JS::shadow::Zone::from(detail::GetTenuredGCThingZone(cell));
if (zone->needsIncrementalBarrier()) {
PerformIncrementalReadBarrier(thing);
} else if (!zone->isGCPreparing() && detail::NonBlackCellIsMarkedGray(cell)) {
@@ -785,8 +784,8 @@ static MOZ_ALWAYS_INLINE void IncrementalReadBarrier(JS::GCCellPtr thing) {
return;
}
- auto* zone = JS::shadow::Zone::from(JS::GetTenuredGCThingZone(thing));
auto* cell = reinterpret_cast<TenuredCell*>(thing.asCell());
+ auto* zone = JS::shadow::Zone::from(detail::GetTenuredGCThingZone(cell));
if (zone->needsIncrementalBarrier() &&
!detail::TenuredCellIsMarkedBlack(cell)) {
// GC things owned by other runtimes are always black.
@@ -807,8 +806,7 @@ static MOZ_ALWAYS_INLINE bool EdgeNeedsSweepUnbarriered(JSObject** objp) {
return false;
}
- auto zone =
- JS::shadow::Zone::from(detail::GetTenuredGCThingZone(uintptr_t(*objp)));
+ auto zone = JS::shadow::Zone::from(detail::GetTenuredGCThingZone(*objp));
if (!zone->isGCSweepingOrCompacting()) {
return false;
}
diff --git a/js/public/MemoryMetrics.h b/js/public/MemoryMetrics.h
index f30844d8aa..305ab1feb0 100644
--- a/js/public/MemoryMetrics.h
+++ b/js/public/MemoryMetrics.h
@@ -642,6 +642,7 @@ struct ZoneStats {
MACRO(Other, MallocHeap, scopesMallocHeap) \
MACRO(Other, GCHeapUsed, regExpSharedsGCHeap) \
MACRO(Other, MallocHeap, regExpSharedsMallocHeap) \
+ MACRO(Other, MallocHeap, zoneObject) \
MACRO(Other, MallocHeap, regexpZone) \
MACRO(Other, MallocHeap, jitZone) \
MACRO(Other, MallocHeap, cacheIRStubs) \
diff --git a/js/public/Modules.h b/js/public/Modules.h
index 580962235f..2e7192e120 100644
--- a/js/public/Modules.h
+++ b/js/public/Modules.h
@@ -36,6 +36,8 @@ union Utf8Unit;
namespace JS {
+enum class ModuleType : uint32_t { Unknown = 0, JavaScript, JSON };
+
/**
* The HostResolveImportedModule hook.
*
@@ -170,6 +172,14 @@ extern JS_PUBLIC_API JSObject* CompileModule(
SourceText<mozilla::Utf8Unit>& srcBuf);
/**
+ * Parse the given source buffer as a JSON module in the scope of the current
+ * global of cx and return a synthetic module record.
+ */
+extern JS_PUBLIC_API JSObject* CompileJsonModule(
+ JSContext* cx, const ReadOnlyCompileOptions& options,
+ SourceText<char16_t>& srcBuf);
+
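
A sketch of how an embedder might call the new entry point; the helper name is made up, and cx plus the compile options are assumed to be set up by the caller:

    #include <string>                 // std::char_traits
    #include "js/CompileOptions.h"
    #include "js/Modules.h"
    #include "js/SourceText.h"

    static JSObject* CompileInlineJsonModule(
        JSContext* cx, const JS::ReadOnlyCompileOptions& options) {
      static const char16_t src[] = u"{\"answer\": 42}";
      JS::SourceText<char16_t> srcBuf;
      if (!srcBuf.init(cx, src, std::char_traits<char16_t>::length(src),
                       JS::SourceOwnership::Borrowed)) {
        return nullptr;
      }
      // Returns a synthetic module record object, or nullptr on failure.
      return JS::CompileJsonModule(cx, options, srcBuf);
    }
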
+/**
* Set a private value associated with a source text module record.
*/
extern JS_PUBLIC_API void SetModulePrivate(JSObject* module,
diff --git a/js/public/ProfilingFrameIterator.h b/js/public/ProfilingFrameIterator.h
index 886ed806e9..bbad2b1244 100644
--- a/js/public/ProfilingFrameIterator.h
+++ b/js/public/ProfilingFrameIterator.h
@@ -14,6 +14,7 @@
#include "jstypes.h"
#include "js/GCAnnotations.h"
+#include "js/ProfilingCategory.h"
#include "js/TypeDecls.h"
namespace js {
@@ -141,7 +142,9 @@ class MOZ_NON_PARAM JS_PUBLIC_API ProfilingFrameIterator {
Frame_BaselineInterpreter,
Frame_Baseline,
Frame_Ion,
- Frame_Wasm
+ Frame_WasmBaseline,
+ Frame_WasmIon,
+ Frame_WasmOther,
};
struct Frame {
@@ -166,6 +169,23 @@ class MOZ_NON_PARAM JS_PUBLIC_API ProfilingFrameIterator {
MOZ_ASSERT(kind == Frame_BaselineInterpreter);
return interpreterPC_;
}
+ ProfilingCategoryPair profilingCategory() const {
+ switch (kind) {
+ case FrameKind::Frame_BaselineInterpreter:
+ return JS::ProfilingCategoryPair::JS_BaselineInterpret;
+ case FrameKind::Frame_Baseline:
+ return JS::ProfilingCategoryPair::JS_Baseline;
+ case FrameKind::Frame_Ion:
+ return JS::ProfilingCategoryPair::JS_IonMonkey;
+ case FrameKind::Frame_WasmBaseline:
+ return JS::ProfilingCategoryPair::JS_WasmBaseline;
+ case FrameKind::Frame_WasmIon:
+ return JS::ProfilingCategoryPair::JS_WasmIon;
+ case FrameKind::Frame_WasmOther:
+ return JS::ProfilingCategoryPair::JS_WasmOther;
+ }
+ MOZ_CRASH();
+ }
} JS_HAZ_GC_INVALIDATED;
bool isWasm() const;
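
A sketch of how a profiler consumer might use the new per-frame category; the iterator is assumed to be live, and extractStack's signature is recalled from memory, so treat the whole helper as illustrative:

    // Hypothetical sampling helper; |iter| is an active JS::ProfilingFrameIterator.
    void LabelFrames(JS::ProfilingFrameIterator& iter) {
      JS::ProfilingFrameIterator::Frame frames[16];
      uint32_t count = iter.extractStack(frames, 0, 16);
      for (uint32_t i = 0; i < count; i++) {
        JS::ProfilingCategoryPair category = frames[i].profilingCategory();
        // Hand |category| to the profiler's label machinery here.
        (void)category;
      }
    }
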
diff --git a/js/public/RegExpFlags.h b/js/public/RegExpFlags.h
index 36a2e76c29..e4663786fc 100644
--- a/js/public/RegExpFlags.h
+++ b/js/public/RegExpFlags.h
@@ -12,6 +12,7 @@
#include "mozilla/Assertions.h" // MOZ_ASSERT
#include "mozilla/Attributes.h" // MOZ_IMPLICIT
+#include <ostream> // ostream
#include <stdint.h> // uint8_t
namespace JS {
@@ -129,6 +130,15 @@ class RegExpFlags {
explicit operator bool() const { return flags_ != 0; }
Flag value() const { return flags_; }
+ constexpr operator Flag() const { return flags_; }
+
+ void set(Flag flags, bool value) {
+ if (value) {
+ flags_ |= flags;
+ } else {
+ flags_ &= ~flags;
+ }
+ }
};
inline RegExpFlags& operator&=(RegExpFlags& flags, RegExpFlags::Flag flag) {
@@ -158,6 +168,39 @@ inline RegExpFlags operator|(const RegExpFlags& lhs, const RegExpFlags& rhs) {
return result;
}
+inline bool MaybeParseRegExpFlag(char c, RegExpFlags::Flag* flag) {
+ switch (c) {
+ case 'd':
+ *flag = RegExpFlag::HasIndices;
+ return true;
+ case 'g':
+ *flag = RegExpFlag::Global;
+ return true;
+ case 'i':
+ *flag = RegExpFlag::IgnoreCase;
+ return true;
+ case 'm':
+ *flag = RegExpFlag::Multiline;
+ return true;
+ case 's':
+ *flag = RegExpFlag::DotAll;
+ return true;
+ case 'u':
+ *flag = RegExpFlag::Unicode;
+ return true;
+ case 'v':
+ *flag = RegExpFlag::UnicodeSets;
+ return true;
+ case 'y':
+ *flag = RegExpFlag::Sticky;
+ return true;
+ default:
+ return false;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, RegExpFlags flags);
+
} // namespace JS
#endif // js_RegExpFlags_h
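
A sketch of parsing a whole flag string with the new helpers; JS::RegExpFlag::NoFlags is assumed to be the zero value, as elsewhere in this header:

    // Returns false on the first unrecognized flag character.
    static bool ParseRegExpFlagString(const char* str, JS::RegExpFlags* result) {
      JS::RegExpFlags flags(JS::RegExpFlag::NoFlags);
      for (const char* p = str; *p; p++) {
        JS::RegExpFlags::Flag flag;
        if (!JS::MaybeParseRegExpFlag(*p, &flag)) {
          return false;
        }
        flags.set(flag, true);
      }
      *result = flags;
      return true;
    }
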
diff --git a/js/public/RootingAPI.h b/js/public/RootingAPI.h
index 471c72dc42..e35a2c5bc8 100644
--- a/js/public/RootingAPI.h
+++ b/js/public/RootingAPI.h
@@ -981,12 +981,12 @@ enum class AutoGCRooterKind : uint8_t {
Limit
};
-using RootedListHeads =
- mozilla::EnumeratedArray<RootKind, RootKind::Limit, js::StackRootedBase*>;
+using RootedListHeads = mozilla::EnumeratedArray<RootKind, js::StackRootedBase*,
+ size_t(RootKind::Limit)>;
using AutoRooterListHeads =
- mozilla::EnumeratedArray<AutoGCRooterKind, AutoGCRooterKind::Limit,
- AutoGCRooter*>;
+ mozilla::EnumeratedArray<AutoGCRooterKind, AutoGCRooter*,
+ size_t(AutoGCRooterKind::Limit)>;
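
For reference, a minimal sketch of the updated mozilla::EnumeratedArray parameter order used above (enum first, element type second, explicit size_t size last); the Color enum is made up:

    #include <stddef.h>
    #include <stdint.h>
    #include "mozilla/EnumeratedArray.h"

    enum class Color : uint8_t { Red, Green, Blue, Limit };
    using ColorCounts = mozilla::EnumeratedArray<Color, size_t, size_t(Color::Limit)>;

    void CountRed(ColorCounts& counts) {
      counts[Color::Red] += 1;  // Indexed directly by the enum value.
    }
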
// Superclass of JSContext which can be used for rooting data in use by the
// current thread but that does not provide all the functions of a JSContext.
diff --git a/js/public/Stack.h b/js/public/Stack.h
index 6f01b2b728..7384e92d39 100644
--- a/js/public/Stack.h
+++ b/js/public/Stack.h
@@ -140,6 +140,7 @@ struct JS_PUBLIC_API FirstSubsumedFrame {
// unnecessarily.
FirstSubsumedFrame(const FirstSubsumedFrame&) = delete;
FirstSubsumedFrame& operator=(const FirstSubsumedFrame&) = delete;
+ FirstSubsumedFrame& operator=(FirstSubsumedFrame&&) = delete;
FirstSubsumedFrame(FirstSubsumedFrame&& rhs)
: principals(rhs.principals), ignoreSelfHosted(rhs.ignoreSelfHosted) {
@@ -147,11 +148,6 @@ struct JS_PUBLIC_API FirstSubsumedFrame {
rhs.principals = nullptr;
}
- FirstSubsumedFrame& operator=(FirstSubsumedFrame&& rhs) {
- new (this) FirstSubsumedFrame(std::move(rhs));
- return *this;
- }
-
~FirstSubsumedFrame() {
if (principals) {
JS_DropPrincipals(cx, principals);
diff --git a/js/public/UbiNodeCensus.h b/js/public/UbiNodeCensus.h
index 4086300580..9ac470725c 100644
--- a/js/public/UbiNodeCensus.h
+++ b/js/public/UbiNodeCensus.h
@@ -7,6 +7,7 @@
#ifndef js_UbiNodeCensus_h
#define js_UbiNodeCensus_h
+#include "js/GCVector.h"
#include "js/UbiNode.h"
#include "js/UbiNodeBreadthFirst.h"
@@ -222,8 +223,9 @@ using CensusTraversal = BreadthFirst<CensusHandler>;
// Parse the breakdown language (as described in
// js/src/doc/Debugger/Debugger.Memory.md) into a CountTypePtr. A null pointer
// is returned on error and is reported to the cx.
-JS_PUBLIC_API CountTypePtr ParseBreakdown(JSContext* cx,
- HandleValue breakdownValue);
+JS_PUBLIC_API CountTypePtr
+ParseBreakdown(JSContext* cx, HandleValue breakdownValue,
+ MutableHandle<JS::GCVector<JSLinearString*>> seen);
} // namespace ubi
} // namespace JS
diff --git a/js/public/Value.h b/js/public/Value.h
index 59f71da4f1..98f0f9273b 100644
--- a/js/public/Value.h
+++ b/js/public/Value.h
@@ -383,6 +383,9 @@ enum JSWhyMagic {
/** arguments object can't be created because environment is dead. */
JS_MISSING_ARGUMENTS,
+ /** exception value thrown when interrupting irregexp */
+ JS_INTERRUPT_REGEXP,
+
/** for local use */
JS_GENERIC_MAGIC,
@@ -645,7 +648,20 @@ class alignas(8) Value {
}
#endif
+ void changeGCThingPayload(js::gc::Cell* cell) {
+ MOZ_ASSERT(js::gc::IsCellPointerValid(cell));
+#ifdef DEBUG
+ assertTraceKindMatches(cell);
+#endif
+ asBits_ = bitsFromTagAndPayload(toTag(), PayloadType(cell));
+ MOZ_ASSERT(toGCThing() == cell);
+ }
+
private:
+#ifdef DEBUG
+ void assertTraceKindMatches(js::gc::Cell* cell) const;
+#endif
+
void setObjectNoCheck(JSObject* obj) {
asBits_ = bitsFromTagAndPayload(JSVAL_TAG_OBJECT, PayloadType(obj));
}
diff --git a/js/public/WasmFeatures.h b/js/public/WasmFeatures.h
index f2089f7fba..7e30a748d2 100644
--- a/js/public/WasmFeatures.h
+++ b/js/public/WasmFeatures.h
@@ -13,22 +13,6 @@
// generate most of the feature gating code in a centralized manner. See
// 'Adding a feature' below for the exact steps needed to add a new feature.
//
-// Each feature is either `DEFAULT`, `TENTATIVE`, or `EXPERIMENTAL`:
-//
-// Default features are enabled by default in ContextOptions and in the
-// JS-shell, and are given a `--no-wasm-FEATURE` shell flag to disable. The
-// `--wasm-FEATURE` flag is rejected.
-//
-// Tentative features are like Default features, but the `--wasm-FEATURE` flag
-// is silently ignored.
-//
-// Experimental features are disabled by default in ContextOptions and in the
-// JS-shell, and are given a `--wasm-FEATURE` shell flag to enable. The
-// `--no-wasm-FEATURE` flag is silently ignored.
-//
-// The browser pref is `javascript.options.wasm-FEATURE` for default, tentative,
-// and experimental features alike.
-//
// # Adding a feature
//
// 1. Add a configure switch for the feature in js/moz.configure
@@ -44,12 +28,10 @@
// e. flag predicate: Expression used to predicate enablement of feature
// flag. Useful for disabling a feature when dependent feature is not
// enabled or if we are fuzzing.
-// f. shell flag: The stem of the JS-shell flag. Will be expanded to
-// --no-wasm-FEATURE or --wasm-FEATURE as explained above.
-// g. preference name: The stem of the browser preference. Will be expanded
+// f. preference name: The stem of the browser preference. Will be expanded
// to `javascript.options.wasm-FEATURE`.
// 4. Add the preference to module/libpref/init/StaticPrefList.yaml
-// a. Use conditionally compiled flag
+// a. Set `set_spidermonkey_pref: startup`
// b. Set value to 'true' for default features, @IS_NIGHTLY_BUILD@ for
// tentative features, and 'false' for experimental features.
// 5. [fuzzing] Add the feature to gluesmith/src/lib.rs, if wasm-smith has
@@ -60,16 +42,6 @@
#else
# define WASM_RELAXED_SIMD_ENABLED 0
#endif
-#ifdef ENABLE_WASM_EXTENDED_CONST
-# define WASM_EXTENDED_CONST_ENABLED 1
-#else
-# define WASM_EXTENDED_CONST_ENABLED 0
-#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
-# define WASM_FUNCTION_REFERENCES_ENABLED 1
-#else
-# define WASM_FUNCTION_REFERENCES_ENABLED 0
-#endif
#ifdef ENABLE_WASM_GC
# define WASM_GC_ENABLED 1
#else
@@ -106,168 +78,98 @@
# define WASM_JS_STRING_BUILTINS_ENABLED 0
#endif
-enum class WasmFeatureStage {
- Experimental = 0,
- Tentative,
- Default,
-};
-
// clang-format off
#define JS_FOR_WASM_FEATURES(FEATURE) \
FEATURE( \
- /* capitalized name */ ExtendedConst, \
- /* lower case name */ extendedConst, \
- /* stage */ WasmFeatureStage::Tentative, \
- /* compile predicate */ WASM_EXTENDED_CONST_ENABLED, \
- /* compiler predicate */ true, \
- /* flag predicate */ true, \
- /* flag force enable */ false, \
- /* flag fuzz enable */ true, \
- /* shell flag */ "extended-const", \
- /* preference name */ "extended_const") \
- FEATURE( \
- /* capitalized name */ Exceptions, \
- /* lower case name */ exceptions, \
- /* stage */ WasmFeatureStage::Default, \
- /* compile predicate */ true, \
- /* compiler predicate */ AnyCompilerAvailable(cx), \
- /* flag predicate */ true, \
- /* flag force enable */ WasmExnRefFlag(cx), \
- /* flag fuzz enable */ true, \
- /* shell flag */ "exceptions", \
- /* preference name */ "exceptions") \
- FEATURE( \
/* capitalized name */ ExnRef, \
/* lower case name */ exnref, \
- /* stage */ WasmFeatureStage::Experimental, \
/* compile predicate */ true, \
/* compiler predicate */ AnyCompilerAvailable(cx), \
/* flag predicate */ true, \
/* flag force enable */ false, \
/* flag fuzz enable */ true, \
- /* shell flag */ "exnref", \
- /* preference name */ "exnref ") \
- FEATURE( \
- /* capitalized name */ FunctionReferences, \
- /* lower case name */ functionReferences, \
- /* stage */ WasmFeatureStage::Tentative, \
- /* compile predicate */ WASM_FUNCTION_REFERENCES_ENABLED, \
- /* compiler predicate */ AnyCompilerAvailable(cx), \
- /* flag predicate */ true, \
- /* flag force enable */ WasmGcFlag(cx), \
- /* flag fuzz enable */ false, \
- /* shell flag */ "function-references", \
- /* preference name */ "function_references") \
+ /* preference name */ exnref) \
FEATURE( \
/* capitalized name */ Gc, \
/* lower case name */ gc, \
- /* stage */ WasmFeatureStage::Tentative, \
/* compile predicate */ WASM_GC_ENABLED, \
/* compiler predicate */ AnyCompilerAvailable(cx), \
/* flag predicate */ true, \
/* flag force enable */ false, \
/* flag fuzz enable */ false, \
- /* shell flag */ "gc", \
- /* preference name */ "gc") \
+ /* preference name */ gc) \
FEATURE( \
/* capitalized name */ JSStringBuiltins, \
/* lower case name */ jsStringBuiltins, \
- /* stage */ WasmFeatureStage::Experimental, \
/* compile predicate */ WASM_JS_STRING_BUILTINS_ENABLED, \
/* compiler predicate */ AnyCompilerAvailable(cx), \
/* flag predicate */ true, \
/* flag force enable */ false, \
/* flag fuzz enable */ true, \
- /* shell flag */ "js-string-builtins", \
- /* preference name */ "js_string_builtins") \
+ /* preference name */ js_string_builtins) \
FEATURE( \
/* capitalized name */ RelaxedSimd, \
/* lower case name */ v128Relaxed, \
- /* stage */ WasmFeatureStage::Tentative, \
/* compile predicate */ WASM_RELAXED_SIMD_ENABLED, \
/* compiler predicate */ AnyCompilerAvailable(cx), \
/* flag predicate */ js::jit::JitSupportsWasmSimd(), \
/* flag force enable */ false, \
/* flag fuzz enable */ true, \
- /* shell flag */ "relaxed-simd", \
- /* preference name */ "relaxed_simd") \
+ /* preference name */ relaxed_simd) \
FEATURE( \
/* capitalized name */ Memory64, \
/* lower case name */ memory64, \
- /* stage */ WasmFeatureStage::Tentative, \
/* compile predicate */ WASM_MEMORY64_ENABLED, \
/* compiler predicate */ AnyCompilerAvailable(cx), \
/* flag predicate */ true, \
/* flag force enable */ false, \
/* flag fuzz enable */ true, \
- /* shell flag */ "memory64", \
- /* preference name */ "memory64") \
+ /* preference name */ memory64) \
FEATURE( \
/* capitalized name */ MemoryControl, \
/* lower case name */ memoryControl, \
- /* stage */ WasmFeatureStage::Experimental, \
/* compile predicate */ WASM_MEMORY_CONTROL_ENABLED, \
/* compiler predicate */ AnyCompilerAvailable(cx), \
/* flag predicate */ true, \
/* flag force enable */ false, \
/* flag fuzz enable */ false, \
- /* shell flag */ "memory-control", \
- /* preference name */ "memory_control") \
+ /* preference name */ memory_control) \
FEATURE( \
/* capitalized name */ MultiMemory, \
/* lower case name */ multiMemory, \
- /* stage */ WasmFeatureStage::Experimental, \
/* compile predicate */ WASM_MULTI_MEMORY_ENABLED, \
/* compiler predicate */ AnyCompilerAvailable(cx), \
/* flag predicate */ true, \
/* flag force enable */ false, \
- /* flag fuzz enable */ false, \
- /* shell flag */ "multi-memory", \
- /* preference name */ "multi_memory") \
+ /* flag fuzz enable */ true, \
+ /* preference name */ multi_memory) \
FEATURE( \
/* capitalized name */ TailCalls, \
/* lower case name */ tailCalls, \
- /* stage */ WasmFeatureStage::Tentative, \
/* compile predicate */ WASM_TAIL_CALLS_ENABLED, \
/* compiler predicate */ AnyCompilerAvailable(cx), \
/* flag predicate */ true, \
/* flag force enable */ false, \
/* flag fuzz enable */ true, \
- /* shell flag */ "tail-calls", \
- /* preference name */ "tail_calls") \
+ /* preference name */ tail_calls) \
FEATURE( \
/* capitalized name */ MozIntGemm, \
/* lower case name */ mozIntGemm, \
- /* stage */ WasmFeatureStage::Experimental, \
/* compile predicate */ WASM_MOZ_INTGEMM_ENABLED, \
/* compiler predicate */ AnyCompilerAvailable(cx), \
/* flag predicate */ IsSimdPrivilegedContext(cx), \
/* flag force enable */ false, \
/* flag fuzz enable */ false, \
- /* shell flag */ "moz-intgemm", \
- /* preference name */ "moz_intgemm") \
+ /* preference name */ moz_intgemm) \
FEATURE( \
/* capitalized name */ TestSerialization, \
/* lower case name */ testSerialization, \
- /* stage */ WasmFeatureStage::Experimental, \
/* compile predicate */ 1, \
/* compiler predicate */ IonAvailable(cx), \
/* flag predicate */ true, \
/* flag force enable */ false, \
/* flag fuzz enable */ false, \
- /* shell flag */ "test-serialization", \
- /* preference name */ "test-serialization") \
- FEATURE( \
- /* capitalized name */ TestMetadata, \
- /* lower case name */ testMetadata, \
- /* stage */ WasmFeatureStage::Experimental, \
- /* compile predicate */ 1, \
- /* compiler predicate */ AnyCompilerAvailable(cx), \
- /* flag predicate */ true, \
- /* flag force enable */ false, \
- /* flag fuzz enable */ false, \
- /* shell flag */ "test-metadata", \
- /* preference name */ "test_metadata")
+ /* preference name */ test_serialization)
// clang-format on
diff --git a/js/public/experimental/CompileScript.h b/js/public/experimental/CompileScript.h
index 308a5848d9..ebb87e6227 100644
--- a/js/public/experimental/CompileScript.h
+++ b/js/public/experimental/CompileScript.h
@@ -105,71 +105,21 @@ JS_PUBLIC_API const JSErrorReport* GetFrontendWarningAt(
JS::FrontendContext* fc, size_t index,
const JS::ReadOnlyCompileOptions& options);
-// Temporary storage used during compiling and preparing to instantiate a
-// Stencil.
-//
-// Off-thread consumers can allocate this instance off main thread, and pass it
-// back to the main thread, in order to reduce the main thread allocation.
-struct CompilationStorage {
- private:
- // Owned CompilationInput.
- //
- // This uses raw pointer instead of UniquePtr because CompilationInput
- // is opaque.
- JS_HAZ_NON_GC_POINTER js::frontend::CompilationInput* input_ = nullptr;
- bool isBorrowed_ = false;
-
- public:
- CompilationStorage() = default;
- explicit CompilationStorage(js::frontend::CompilationInput* input)
- : input_(input), isBorrowed_(true) {}
- CompilationStorage(CompilationStorage&& other)
- : input_(other.input_), isBorrowed_(other.isBorrowed_) {
- other.input_ = nullptr;
- }
-
- ~CompilationStorage();
-
- private:
- CompilationStorage(const CompilationStorage& other) = delete;
- void operator=(const CompilationStorage& aOther) = delete;
-
- public:
- bool hasInput() { return !!input_; }
-
- // Internal function that initializes the CompilationInput. It should only be
- // called once.
- bool allocateInput(FrontendContext* fc,
- const JS::ReadOnlyCompileOptions& options);
-
- js::frontend::CompilationInput& getInput() {
- MOZ_ASSERT(hasInput());
- return *input_;
- }
-
- // Size of dynamic data. Note that GC data is counted by GC and not here.
- size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
-
- void trace(JSTracer* trc);
-};
-
extern JS_PUBLIC_API already_AddRefed<JS::Stencil> CompileGlobalScriptToStencil(
JS::FrontendContext* fc, const JS::ReadOnlyCompileOptions& options,
- JS::SourceText<mozilla::Utf8Unit>& srcBuf,
- JS::CompilationStorage& compileStorage);
+ JS::SourceText<mozilla::Utf8Unit>& srcBuf);
extern JS_PUBLIC_API already_AddRefed<JS::Stencil> CompileGlobalScriptToStencil(
JS::FrontendContext* fc, const JS::ReadOnlyCompileOptions& options,
- JS::SourceText<char16_t>& srcBuf, JS::CompilationStorage& compileStorage);
+ JS::SourceText<char16_t>& srcBuf);
extern JS_PUBLIC_API already_AddRefed<JS::Stencil> CompileModuleScriptToStencil(
JS::FrontendContext* fc, const JS::ReadOnlyCompileOptions& options,
- JS::SourceText<mozilla::Utf8Unit>& srcBuf,
- JS::CompilationStorage& compileStorage);
+ JS::SourceText<mozilla::Utf8Unit>& srcBuf);
extern JS_PUBLIC_API already_AddRefed<JS::Stencil> CompileModuleScriptToStencil(
JS::FrontendContext* fc, const JS::ReadOnlyCompileOptions& options,
- JS::SourceText<char16_t>& srcBuf, JS::CompilationStorage& compileStorage);
+ JS::SourceText<char16_t>& srcBuf);
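
A sketch of calling the simplified entry point from a helper thread; FrontendContext creation, source setup, and error reporting are elided, and the helper name is made up:

    #include "mozilla/RefPtr.h"
    #include "js/experimental/CompileScript.h"

    RefPtr<JS::Stencil> CompileScriptOffMainThread(
        JS::FrontendContext* fc, const JS::ReadOnlyCompileOptions& options,
        JS::SourceText<mozilla::Utf8Unit>& srcBuf) {
      // No JS::CompilationStorage is needed any more; the stencil owns what it
      // needs and can be handed back to the main thread for instantiation.
      RefPtr<JS::Stencil> stencil =
          JS::CompileGlobalScriptToStencil(fc, options, srcBuf);
      return stencil;  // Null on failure; query |fc| for errors and warnings.
    }
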
extern JS_PUBLIC_API bool PrepareForInstantiate(
JS::FrontendContext* fc, JS::Stencil& stencil,
diff --git a/js/public/experimental/JSStencil.h b/js/public/experimental/JSStencil.h
index 8a79687898..780c3e1c62 100644
--- a/js/public/experimental/JSStencil.h
+++ b/js/public/experimental/JSStencil.h
@@ -47,8 +47,6 @@ struct PreallocatedCompilationGCOutput;
namespace JS {
-struct CompilationStorage;
-
using Stencil = js::frontend::CompilationStencil;
using FrontendContext = js::FrontendContext;
@@ -56,7 +54,7 @@ using FrontendContext = js::FrontendContext;
//
// Off-thread APIs can allocate this instance off main thread, and pass it back
// to the main thread, in order to reduce the main thread allocation.
-struct InstantiationStorage {
+struct JS_PUBLIC_API InstantiationStorage {
private:
// Owned CompilationGCOutput.
//
diff --git a/js/public/friend/ErrorNumbers.msg b/js/public/friend/ErrorNumbers.msg
index 5daa625ff9..e75e7b973c 100644
--- a/js/public/friend/ErrorNumbers.msg
+++ b/js/public/friend/ErrorNumbers.msg
@@ -154,7 +154,6 @@ MSG_DEF(JSMSG_CANT_DECLARE_GLOBAL_BINDING, 2, JSEXN_TYPEERR, "cannot declare glo
// Date
MSG_DEF(JSMSG_INVALID_DATE, 0, JSEXN_RANGEERR, "invalid date")
MSG_DEF(JSMSG_BAD_TOISOSTRING_PROP, 0, JSEXN_TYPEERR, "toISOString property is not callable")
-MSG_DEF(JSMSG_DEPRECATED_LATE_WEEKDAY, 0, JSEXN_WARN, "day of week after day of month in date format is deprecated")
// String
MSG_DEF(JSMSG_BAD_URI, 0, JSEXN_URIERR, "malformed URI sequence")
@@ -580,6 +579,7 @@ MSG_DEF(JSMSG_QUERY_LINE_WITHOUT_URL, 0, JSEXN_TYPEERR, "findScripts query objec
MSG_DEF(JSMSG_DEBUG_CANT_SET_OPT_ENV, 1, JSEXN_REFERENCEERR, "can't set '{0}' in an optimized-out environment")
MSG_DEF(JSMSG_DEBUG_INVISIBLE_COMPARTMENT, 0, JSEXN_TYPEERR, "object in compartment marked as invisible to Debugger")
MSG_DEF(JSMSG_DEBUG_CENSUS_BREAKDOWN, 1, JSEXN_TYPEERR, "unrecognized 'by' value in takeCensus breakdown: {0}")
+MSG_DEF(JSMSG_DEBUG_CENSUS_BREAKDOWN_NESTED, 1, JSEXN_TYPEERR, "takeCensus breakdown 'by' value nested within itself: {0}")
MSG_DEF(JSMSG_DEBUG_PROMISE_NOT_RESOLVED, 0, JSEXN_TYPEERR, "Promise hasn't been resolved")
MSG_DEF(JSMSG_DEBUG_PROMISE_NOT_FULFILLED, 0, JSEXN_TYPEERR, "Promise hasn't been fulfilled")
MSG_DEF(JSMSG_DEBUG_PROMISE_NOT_REJECTED, 0, JSEXN_TYPEERR, "Promise hasn't been rejected")
@@ -722,6 +722,7 @@ MSG_DEF(JSMSG_MISSING_EXPORT, 1, JSEXN_SYNTAXERR, "local binding f
MSG_DEF(JSMSG_BAD_MODULE_STATUS, 1, JSEXN_INTERNALERR, "module record has unexpected status: {0}")
MSG_DEF(JSMSG_DYNAMIC_IMPORT_FAILED, 1, JSEXN_TYPEERR, "error loading dynamically imported module: {0}")
MSG_DEF(JSMSG_DYNAMIC_IMPORT_NOT_SUPPORTED, 0, JSEXN_TYPEERR, "Dynamic import not supported in this context")
+MSG_DEF(JSMSG_BAD_MODULE_TYPE, 0, JSEXN_TYPEERR, "invalid module type")
// Import maps
MSG_DEF(JSMSG_IMPORT_MAPS_PARSE_FAILED, 1, JSEXN_SYNTAXERR, "Failed to parse import map: Invalid JSON format. {0}")
diff --git a/js/public/friend/UsageStatistics.h b/js/public/friend/UsageStatistics.h
index 27fea9b1a8..c098548ad9 100644
--- a/js/public/friend/UsageStatistics.h
+++ b/js/public/friend/UsageStatistics.h
@@ -91,7 +91,7 @@ extern JS_PUBLIC_API void JS_SetAccumulateTelemetryCallback(
* fixed member of the mozilla::UseCounter enum by the callback.
*/
-enum class JSUseCounter { ASMJS, WASM, WASM_LEGACY_EXCEPTIONS, LATE_WEEKDAY };
+enum class JSUseCounter { ASMJS, WASM, WASM_LEGACY_EXCEPTIONS };
using JSSetUseCounterCallback = void (*)(JSObject*, JSUseCounter);
diff --git a/js/src/aclocal.m4 b/js/src/aclocal.m4
index 6f641abda9..c9ad56b61e 100644
--- a/js/src/aclocal.m4
+++ b/js/src/aclocal.m4
@@ -6,7 +6,6 @@ dnl
builtin(include, ../../build/autoconf/hooks.m4)dnl
builtin(include, ../../build/autoconf/config.status.m4)dnl
builtin(include, ../../build/autoconf/toolchain.m4)dnl
-builtin(include, ../../build/autoconf/codeset.m4)dnl
builtin(include, ../../build/autoconf/altoptions.m4)dnl
builtin(include, ../../build/autoconf/mozprog.m4)dnl
builtin(include, ../../build/autoconf/mozheader.m4)dnl
diff --git a/js/src/builtin/.eslintrc.js b/js/src/builtin/.eslintrc.js
index 89beca7d4c..76aad2f6fd 100644
--- a/js/src/builtin/.eslintrc.js
+++ b/js/src/builtin/.eslintrc.js
@@ -121,6 +121,14 @@ module.exports = {
message: "'const' declarations are disallowed to avoid TDZ checks, use 'var' instead",
},
],
+ // Method signatures are important in builtins, so disable unused-argument errors.
+ "no-unused-vars": [
+ "error",
+ {
+ args: "none",
+ vars: "local",
+ },
+ ],
},
globals: {
diff --git a/js/src/builtin/DataViewObject.cpp b/js/src/builtin/DataViewObject.cpp
index 1f7be86a70..425fdce51d 100644
--- a/js/src/builtin/DataViewObject.cpp
+++ b/js/src/builtin/DataViewObject.cpp
@@ -65,150 +65,27 @@ DataViewObject* DataViewObject::create(
}
ResizableDataViewObject* ResizableDataViewObject::create(
- JSContext* cx, size_t byteOffset, size_t byteLength, bool autoLength,
+ JSContext* cx, size_t byteOffset, size_t byteLength, AutoLength autoLength,
Handle<ArrayBufferObjectMaybeShared*> arrayBuffer, HandleObject proto) {
MOZ_ASSERT(arrayBuffer->isResizable());
MOZ_ASSERT(!arrayBuffer->isDetached());
- MOZ_ASSERT(!autoLength || byteLength == 0,
+ MOZ_ASSERT(autoLength == AutoLength::No || byteLength == 0,
"byte length is zero for 'auto' length views");
auto* obj = NewObjectWithClassProto<ResizableDataViewObject>(cx, proto);
- if (!obj || !obj->init(cx, arrayBuffer, byteOffset, byteLength,
- /* bytesPerElement = */ 1)) {
+ if (!obj || !obj->initResizable(cx, arrayBuffer, byteOffset, byteLength,
+ /* bytesPerElement = */ 1, autoLength)) {
return nullptr;
}
- obj->setFixedSlot(AUTO_LENGTH_SLOT, BooleanValue(autoLength));
-
return obj;
}
-/**
- * GetViewByteLength ( viewRecord )
- *
- * GetViewByteLength can be rewritten into the following spec steps when
- * inlining the calls to MakeDataViewWithBufferWitnessRecord and
- * IsViewOutOfBounds.
- *
- * 1. Let buffer be view.[[ViewedArrayBuffer]].
- * 2. If IsDetachedBuffer(buffer) is true, then
- * a. Return out-of-bounds.
- * 3. If IsFixedLengthArrayBuffer(buffer) is true,
- * a. Return view.[[ByteLength]].
- * 4. Let bufferByteLength be ArrayBufferByteLength(buffer, order).
- * 5. Let byteOffsetStart be view.[[ByteOffset]].
- * 6. If byteOffsetStart > bufferByteLength, then
- * a. Return out-of-bounds.
- * 7. If view.[[ByteLength]] is auto, then
- * a. Return bufferByteLength - byteOffsetStart.
- * 8. Let viewByteLength be view.[[ByteLength]].
- * 9. Let byteOffsetEnd be byteOffsetStart + viewByteLength.
- * 10. If byteOffsetEnd > bufferByteLength, then
- * a. Return out-of-bounds.
- * 11. Return viewByteLength.
- *
- * The additional call to IsFixedLengthArrayBuffer is an optimization to skip
- * unnecessary validation which doesn't apply for fixed length data-views.
- *
- * https://tc39.es/ecma262/#sec-getviewbytelength
- * https://tc39.es/ecma262/#sec-makedataviewwithbufferwitnessrecord
- * https://tc39.es/ecma262/#sec-isviewoutofbounds
- */
-mozilla::Maybe<size_t> DataViewObject::byteLength() {
- if (MOZ_UNLIKELY(hasDetachedBuffer())) {
- return mozilla::Nothing{};
- }
-
- if (MOZ_LIKELY(is<FixedLengthDataViewObject>())) {
- size_t viewByteLength = rawByteLength();
- return mozilla::Some(viewByteLength);
- }
-
- auto* buffer = bufferEither();
- MOZ_ASSERT(buffer->isResizable());
-
- size_t bufferByteLength = buffer->byteLength();
- size_t byteOffsetStart = ArrayBufferViewObject::byteOffset();
- if (byteOffsetStart > bufferByteLength) {
- return mozilla::Nothing{};
- }
-
- if (as<ResizableDataViewObject>().isAutoLength()) {
- return mozilla::Some(bufferByteLength - byteOffsetStart);
- }
-
- size_t viewByteLength = rawByteLength();
- size_t byteOffsetEnd = byteOffsetStart + viewByteLength;
- if (byteOffsetEnd > bufferByteLength) {
- return mozilla::Nothing{};
- }
- return mozilla::Some(viewByteLength);
-}
-
-/**
- * IsViewOutOfBounds ( viewRecord )
- *
- * IsViewOutOfBounds can be rewritten into the following spec steps when
- * inlining the call to MakeDataViewWithBufferWitnessRecord.
- *
- * 1. Let buffer be obj.[[ViewedArrayBuffer]].
- * 2. If IsDetachedBuffer(buffer) is true, then
- * a. Return true.
- * 3. If IsFixedLengthArrayBuffer(buffer) is true, then
- * a. Return false.
- * 4. Let byteLength be ArrayBufferByteLength(buffer, order).
- * 5. Let byteOffsetStart be view.[[ByteOffset]].
- * 6. If byteOffsetStart > bufferByteLength, then
- * a. Return true.
- * 7. If view.[[ByteLength]] is auto, then
- * a. Return false.
- * 8. Let byteOffsetEnd be byteOffsetStart + view.[[ByteLength]].
- * 9. If byteOffsetEnd > bufferByteLength, then
- * a. Return true.
- * 10. Return false.
- *
- * The additional call to IsFixedLengthArrayBuffer is an optimization to skip
- * unnecessary validation which doesn't apply for fixed length data-views.
- *
- * https://tc39.es/ecma262/#sec-makedataviewwithbufferwitnessrecord
- * https://tc39.es/ecma262/#sec-isviewoutofbounds
- */
-mozilla::Maybe<size_t> DataViewObject::byteOffset() {
- if (MOZ_UNLIKELY(hasDetachedBuffer())) {
- return mozilla::Nothing{};
- }
-
- size_t byteOffsetStart = ArrayBufferViewObject::byteOffset();
-
- if (MOZ_LIKELY(is<FixedLengthDataViewObject>())) {
- return mozilla::Some(byteOffsetStart);
- }
-
- auto* buffer = bufferEither();
- MOZ_ASSERT(buffer->isResizable());
-
- size_t bufferByteLength = buffer->byteLength();
- if (byteOffsetStart > bufferByteLength) {
- return mozilla::Nothing{};
- }
-
- if (as<ResizableDataViewObject>().isAutoLength()) {
- return mozilla::Some(byteOffsetStart);
- }
-
- size_t viewByteLength = rawByteLength();
- size_t byteOffsetEnd = byteOffsetStart + viewByteLength;
- if (byteOffsetEnd > bufferByteLength) {
- return mozilla::Nothing{};
- }
- return mozilla::Some(byteOffsetStart);
-}
-
// ES2017 draft rev 931261ecef9b047b14daacf82884134da48dfe0f
// 24.3.2.1 DataView (extracted part of the main algorithm)
bool DataViewObject::getAndCheckConstructorArgs(
JSContext* cx, HandleObject bufobj, const CallArgs& args,
- size_t* byteOffsetPtr, size_t* byteLengthPtr, bool* autoLengthPtr) {
+ size_t* byteOffsetPtr, size_t* byteLengthPtr, AutoLength* autoLengthPtr) {
// Step 3.
if (!bufobj->is<ArrayBufferObjectMaybeShared>()) {
JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
@@ -243,10 +120,10 @@ bool DataViewObject::getAndCheckConstructorArgs(
MOZ_ASSERT(offset <= ArrayBufferObject::ByteLengthLimit);
uint64_t viewByteLength = 0;
- bool autoLength = false;
+ auto autoLength = AutoLength::No;
if (!args.hasDefined(2)) {
if (buffer->isResizable()) {
- autoLength = true;
+ autoLength = AutoLength::Yes;
} else {
// Step 8.a
viewByteLength = bufferByteLength - offset;
@@ -305,7 +182,7 @@ bool DataViewObject::constructSameCompartment(JSContext* cx,
size_t byteOffset = 0;
size_t byteLength = 0;
- bool autoLength = false;
+ auto autoLength = AutoLength::No;
if (!getAndCheckConstructorArgs(cx, bufobj, args, &byteOffset, &byteLength,
&autoLength)) {
return false;
@@ -365,7 +242,7 @@ bool DataViewObject::constructWrapped(JSContext* cx, HandleObject bufobj,
// NB: This entails the IsArrayBuffer check
size_t byteOffset = 0;
size_t byteLength = 0;
- bool autoLength = false;
+ auto autoLength = AutoLength::No;
if (!getAndCheckConstructorArgs(cx, unwrapped, args, &byteOffset, &byteLength,
&autoLength)) {
return false;
diff --git a/js/src/builtin/DataViewObject.h b/js/src/builtin/DataViewObject.h
index a17b3e1174..db134a5696 100644
--- a/js/src/builtin/DataViewObject.h
+++ b/js/src/builtin/DataViewObject.h
@@ -48,7 +48,7 @@ class DataViewObject : public ArrayBufferViewObject {
const CallArgs& args,
size_t* byteOffsetPtr,
size_t* byteLengthPtr,
- bool* autoLengthPtr);
+ AutoLength* autoLengthPtr);
static bool constructSameCompartment(JSContext* cx, HandleObject bufobj,
const CallArgs& args);
static bool constructWrapped(JSContext* cx, HandleObject bufobj,
@@ -58,16 +58,16 @@ class DataViewObject : public ArrayBufferViewObject {
JSContext* cx, size_t byteOffset, size_t byteLength,
Handle<ArrayBufferObjectMaybeShared*> arrayBuffer, HandleObject proto);
- protected:
- size_t rawByteLength() const {
- return size_t(getFixedSlot(LENGTH_SLOT).toPrivate());
- }
-
public:
static const JSClass protoClass_;
- mozilla::Maybe<size_t> byteLength();
- mozilla::Maybe<size_t> byteOffset();
+ /**
+ * Return the current byteLength, or |Nothing| if the DataView is detached or
+ * out-of-bounds.
+ */
+ mozilla::Maybe<size_t> byteLength() {
+ return ArrayBufferViewObject::length();
+ }
template <typename NativeType>
static bool offsetIsInBounds(uint64_t offset, size_t byteLength) {
@@ -173,19 +173,11 @@ class FixedLengthDataViewObject : public DataViewObject {
public:
static const JSClass class_;
- size_t byteOffset() const { return ArrayBufferViewObject::byteOffset(); }
-
- size_t byteLength() const { return rawByteLength(); }
-
- bool offsetIsInBounds(uint32_t byteSize, uint64_t offset) const {
- return DataViewObject::offsetIsInBounds(byteSize, offset, byteLength());
+ size_t byteOffset() const {
+ return ArrayBufferViewObject::byteOffsetSlotValue();
}
- template <typename NativeType>
- NativeType read(uint64_t offset, bool isLittleEndian) {
- return DataViewObject::read<NativeType>(offset, byteLength(),
- isLittleEndian);
- }
+ size_t byteLength() const { return ArrayBufferViewObject::lengthSlotValue(); }
};
/**
@@ -195,19 +187,14 @@ class ResizableDataViewObject : public DataViewObject {
friend class DataViewObject;
static ResizableDataViewObject* create(
- JSContext* cx, size_t byteOffset, size_t byteLength, bool autoLength,
- Handle<ArrayBufferObjectMaybeShared*> arrayBuffer, HandleObject proto);
+ JSContext* cx, size_t byteOffset, size_t byteLength,
+ AutoLength autoLength, Handle<ArrayBufferObjectMaybeShared*> arrayBuffer,
+ HandleObject proto);
public:
- static const uint8_t AUTO_LENGTH_SLOT = DataViewObject::RESERVED_SLOTS;
-
- static const uint8_t RESERVED_SLOTS = DataViewObject::RESERVED_SLOTS + 1;
+ static const uint8_t RESERVED_SLOTS = RESIZABLE_RESERVED_SLOTS;
static const JSClass class_;
-
- bool isAutoLength() const {
- return getFixedSlot(AUTO_LENGTH_SLOT).toBoolean();
- }
};
// For structured cloning.
diff --git a/js/src/builtin/ModuleObject.cpp b/js/src/builtin/ModuleObject.cpp
index 0be9aec17b..b9db9bf02d 100644
--- a/js/src/builtin/ModuleObject.cpp
+++ b/js/src/builtin/ModuleObject.cpp
@@ -20,7 +20,7 @@
#include "gc/Tracer.h"
#include "js/ColumnNumber.h" // JS::ColumnNumberOneOrigin, JS::LimitedColumnNumberOneOrigin
#include "js/friend/ErrorMessages.h" // JSMSG_*
-#include "js/Modules.h" // JS::GetModulePrivate, JS::ModuleDynamicImportHook
+#include "js/Modules.h" // JS::GetModulePrivate, JS::ModuleDynamicImportHook, JS::ModuleType
#include "vm/EqualityOperations.h" // js::SameValue
#include "vm/Interpreter.h" // Execute, Lambda, ReportRuntimeLexicalError
#include "vm/ModuleBuilder.h" // js::ModuleBuilder
@@ -31,6 +31,7 @@
#include "builtin/HandlerFunction-inl.h" // js::ExtraValueFromHandler, js::NewHandler{,WithExtraValue}, js::TargetFromHandler
#include "gc/GCContext-inl.h"
+#include "vm/EnvironmentObject-inl.h" // EnvironmentObject::setAliasedBinding
#include "vm/JSObject-inl.h"
#include "vm/JSScript-inl.h"
#include "vm/List-inl.h"
@@ -184,8 +185,8 @@ ResolvedBindingObject* ResolvedBindingObject::create(
DEFINE_ATOM_OR_NULL_ACCESSOR_METHOD(ModuleRequestObject, specifier,
SpecifierSlot)
-ArrayObject* ModuleRequestObject::assertions() const {
- JSObject* obj = getReservedSlot(AssertionSlot).toObjectOrNull();
+ArrayObject* ModuleRequestObject::attributes() const {
+ JSObject* obj = getReservedSlot(AttributesSlot).toObjectOrNull();
if (!obj) {
return nullptr;
}
@@ -193,6 +194,52 @@ ArrayObject* ModuleRequestObject::assertions() const {
return &obj->as<ArrayObject>();
}
+bool ModuleRequestObject::hasAttributes() const {
+ return !getReservedSlot(ModuleRequestObject::AttributesSlot)
+ .isNullOrUndefined();
+}
+
+/* static */
+bool ModuleRequestObject::getModuleType(
+ JSContext* cx, const Handle<ModuleRequestObject*> moduleRequest,
+ JS::ModuleType& moduleType) {
+ if (!moduleRequest->hasAttributes()) {
+ moduleType = JS::ModuleType::JavaScript;
+ return true;
+ }
+
+ Rooted<ArrayObject*> attributesArray(cx, moduleRequest->attributes());
+ RootedObject attributeObject(cx);
+ RootedId typeId(cx, NameToId(cx->names().type));
+ RootedValue value(cx);
+
+ uint32_t numberOfAttributes = attributesArray->length();
+ for (uint32_t i = 0; i < numberOfAttributes; i++) {
+ attributeObject = &attributesArray->getDenseElement(i).toObject();
+
+ if (!GetProperty(cx, attributeObject, attributeObject, typeId, &value)) {
+ continue;
+ }
+
+ int32_t isJsonString;
+ if (!js::CompareStrings(cx, cx->names().json, value.toString(),
+ &isJsonString)) {
+ return false;
+ }
+
+ if (isJsonString == 0) {
+ moduleType = JS::ModuleType::JSON;
+ return true;
+ }
+
+ moduleType = JS::ModuleType::Unknown;
+ return true;
+ }
+
+ moduleType = JS::ModuleType::JavaScript;
+ return true;
+}
+
/* static */
bool ModuleRequestObject::isInstance(HandleValue value) {
return value.isObject() && value.toObject().is<ModuleRequestObject>();
@@ -201,7 +248,7 @@ bool ModuleRequestObject::isInstance(HandleValue value) {
/* static */
ModuleRequestObject* ModuleRequestObject::create(
JSContext* cx, Handle<JSAtom*> specifier,
- Handle<ArrayObject*> maybeAssertions) {
+ Handle<ArrayObject*> maybeAttributes) {
ModuleRequestObject* self =
NewObjectWithGivenProto<ModuleRequestObject>(cx, nullptr);
if (!self) {
@@ -209,7 +256,7 @@ ModuleRequestObject* ModuleRequestObject::create(
}
self->initReservedSlot(SpecifierSlot, StringOrNullValue(specifier));
- self->initReservedSlot(AssertionSlot, ObjectOrNullValue(maybeAssertions));
+ self->initReservedSlot(AttributesSlot, ObjectOrNullValue(maybeAttributes));
return self;
}
@@ -618,6 +665,21 @@ void ModuleNamespaceObject::ProxyHandler::finalize(JS::GCContext* gcx,
}
///////////////////////////////////////////////////////////////////////////
+// SyntheticModuleFields
+
+// The fields of a synthetic module record, as described in:
+// https://tc39.es/proposal-json-modules/#sec-synthetic-module-records
+class js::SyntheticModuleFields {
+ public:
+ ExportNameVector exportNames;
+
+ public:
+ void trace(JSTracer* trc);
+};
+
+void SyntheticModuleFields::trace(JSTracer* trc) { exportNames.trace(trc); }
+
+///////////////////////////////////////////////////////////////////////////
// CyclicModuleFields
// The fields of a cyclic module record, as described in:
@@ -857,6 +919,10 @@ Span<const ExportEntry> ModuleObject::starExportEntries() const {
return cyclicModuleFields()->starExportEntries();
}
+const ExportNameVector& ModuleObject::syntheticExportNames() const {
+ return syntheticModuleFields()->exportNames;
+}
+
void ModuleObject::initFunctionDeclarations(
UniquePtr<FunctionDeclarationVector> decls) {
cyclicModuleFields()->functionDeclarations = std::move(decls);
@@ -883,12 +949,39 @@ ModuleObject* ModuleObject::create(JSContext* cx) {
}
/* static */
+ModuleObject* ModuleObject::createSynthetic(
+ JSContext* cx, MutableHandle<ExportNameVector> exportNames) {
+ Rooted<UniquePtr<SyntheticModuleFields>> syntheticFields(cx);
+ syntheticFields = cx->make_unique<SyntheticModuleFields>();
+ if (!syntheticFields) {
+ return nullptr;
+ }
+
+ Rooted<ModuleObject*> self(
+ cx, NewObjectWithGivenProto<ModuleObject>(cx, nullptr));
+ if (!self) {
+ return nullptr;
+ }
+
+ InitReservedSlot(self, SyntheticModuleFieldsSlot, syntheticFields.release(),
+ MemoryUse::ModuleSyntheticFields);
+
+ self->syntheticModuleFields()->exportNames = std::move(exportNames.get());
+
+ return self;
+}
+
+/* static */
void ModuleObject::finalize(JS::GCContext* gcx, JSObject* obj) {
ModuleObject* self = &obj->as<ModuleObject>();
if (self->hasCyclicModuleFields()) {
gcx->delete_(obj, self->cyclicModuleFields(),
MemoryUse::ModuleCyclicFields);
}
+ if (self->hasSyntheticModuleFields()) {
+ gcx->delete_(obj, self->syntheticModuleFields(),
+ MemoryUse::ModuleSyntheticFields);
+ }
}
ModuleEnvironmentObject& ModuleObject::initialEnvironment() const {
@@ -960,6 +1053,7 @@ void ModuleObject::setAsyncEvaluating() {
void ModuleObject::initScriptSlots(HandleScript script) {
MOZ_ASSERT(script);
MOZ_ASSERT(script->sourceObject());
+ MOZ_ASSERT(script->filename());
initReservedSlot(ScriptSlot, PrivateGCThingValue(script));
cyclicModuleFields()->scriptSourceObject = script->sourceObject();
}
@@ -1021,10 +1115,12 @@ static inline void AssertValidModuleStatus(ModuleStatus status) {
}
ModuleStatus ModuleObject::status() const {
- // TODO: When implementing synthetic module records it may be convenient to
- // make this method always return a ModuleStatus::Evaluated for such a module
- // so we can assert a module's status without checking which kind it is, even
- // though synthetic modules don't have this field according to the spec.
+ // For synthetic modules, always return `ModuleStatus::Evaluated` so we can
+ // assert a module's status without checking which kind it is, even though
+ // synthetic modules don't have this field according to the spec.
+ if (hasSyntheticModuleFields()) {
+ return ModuleStatus::Evaluated;
+ }
ModuleStatus status = cyclicModuleFields()->status;
AssertValidModuleStatus(status);
@@ -1161,6 +1257,22 @@ ModuleObject* ModuleObject::getCycleRoot() const {
return cyclicModuleFields()->cycleRoot;
}
+bool ModuleObject::hasSyntheticModuleFields() const {
+ bool result = !getReservedSlot(SyntheticModuleFieldsSlot).isUndefined();
+ MOZ_ASSERT_IF(result, !hasCyclicModuleFields());
+ return result;
+}
+
+SyntheticModuleFields* ModuleObject::syntheticModuleFields() {
+ MOZ_ASSERT(!hasCyclicModuleFields());
+ void* ptr = getReservedSlot(SyntheticModuleFieldsSlot).toPrivate();
+ MOZ_ASSERT(ptr);
+ return static_cast<SyntheticModuleFields*>(ptr);
+}
+const SyntheticModuleFields* ModuleObject::syntheticModuleFields() const {
+ return const_cast<ModuleObject*>(this)->syntheticModuleFields();
+}
+
bool ModuleObject::hasTopLevelCapability() const {
return cyclicModuleFields()->topLevelCapability;
}
@@ -1206,6 +1318,9 @@ void ModuleObject::trace(JSTracer* trc, JSObject* obj) {
if (module.hasCyclicModuleFields()) {
module.cyclicModuleFields()->trace(trc);
}
+ if (module.hasSyntheticModuleFields()) {
+ module.syntheticModuleFields()->trace(trc);
+ }
}
/* static */
@@ -1328,6 +1443,27 @@ bool ModuleObject::createEnvironment(JSContext* cx,
return true;
}
+/*static*/
+bool ModuleObject::createSyntheticEnvironment(JSContext* cx,
+ Handle<ModuleObject*> self,
+ Handle<GCVector<Value>> values) {
+ Rooted<ModuleEnvironmentObject*> env(
+ cx, ModuleEnvironmentObject::createSynthetic(cx, self));
+ if (!env) {
+ return false;
+ }
+
+ MOZ_ASSERT(env->shape()->propMapLength() == values.length());
+
+ for (uint32_t i = 0; i < values.length(); i++) {
+ env->setAliasedBinding(env->firstSyntheticValueSlot() + i, values[i]);
+ }
+
+ self->setInitialEnvironment(env);
+
+ return true;
+}
+
///////////////////////////////////////////////////////////////////////////
// ModuleBuilder
@@ -2542,10 +2678,11 @@ static bool OnResolvedDynamicModule(JSContext* cx, unsigned argc, Value* vp) {
return RejectPromiseWithPendingError(cx, promise);
}
- MOZ_ASSERT(module->getCycleRoot()
- ->topLevelCapability()
- ->as<PromiseObject>()
- .state() == JS::PromiseState::Fulfilled);
+ MOZ_ASSERT_IF(module->hasCyclicModuleFields(),
+ module->getCycleRoot()
+ ->topLevelCapability()
+ ->as<PromiseObject>()
+ .state() == JS::PromiseState::Fulfilled);
RootedObject ns(cx, GetOrCreateModuleNamespace(cx, module));
if (!ns) {
diff --git a/js/src/builtin/ModuleObject.h b/js/src/builtin/ModuleObject.h
index 015cb42a5f..d39d65b18c 100644
--- a/js/src/builtin/ModuleObject.h
+++ b/js/src/builtin/ModuleObject.h
@@ -43,6 +43,7 @@ namespace js {
class ArrayObject;
class CyclicModuleFields;
+class SyntheticModuleFields;
class ListObject;
class ModuleEnvironmentObject;
class ModuleObject;
@@ -51,16 +52,20 @@ class ScriptSourceObject;
class ModuleRequestObject : public NativeObject {
public:
- enum { SpecifierSlot = 0, AssertionSlot, SlotCount };
+ enum { SpecifierSlot = 0, AttributesSlot, SlotCount };
static const JSClass class_;
static bool isInstance(HandleValue value);
[[nodiscard]] static ModuleRequestObject* create(
JSContext* cx, Handle<JSAtom*> specifier,
- Handle<ArrayObject*> maybeAssertions);
+ Handle<ArrayObject*> maybeAttributes);
JSAtom* specifier() const;
- ArrayObject* assertions() const;
+ ArrayObject* attributes() const;
+ bool hasAttributes() const;
+ static bool getModuleType(JSContext* cx,
+ const Handle<ModuleRequestObject*> moduleRequest,
+ JS::ModuleType& moduleType);
};
using ModuleRequestVector =
@@ -309,6 +314,10 @@ constexpr uint32_t ASYNC_EVALUATING_POST_ORDER_INIT = 1;
// Value that the field is set to after being cleared.
constexpr uint32_t ASYNC_EVALUATING_POST_ORDER_CLEARED = 0;
+// Currently, the ModuleObject class is used to represent both the Source Text
+// Module Record and the Synthetic Module Record. Ideally this should be
+// refactored to follow the same hierarchy as in the spec.
+// TODO: See Bug 1880519.
class ModuleObject : public NativeObject {
public:
// Module fields including those for AbstractModuleRecords described by:
@@ -318,6 +327,8 @@ class ModuleObject : public NativeObject {
EnvironmentSlot,
NamespaceSlot,
CyclicModuleFieldsSlot,
+ // `SyntheticModuleFields` if a synthetic module. Otherwise `undefined`.
+ SyntheticModuleFieldsSlot,
SlotCount
};
@@ -327,6 +338,9 @@ class ModuleObject : public NativeObject {
static ModuleObject* create(JSContext* cx);
+ static ModuleObject* createSynthetic(
+ JSContext* cx, MutableHandle<ExportNameVector> exportNames);
+
// Initialize the slots on this object that are dependent on the script.
void initScriptSlots(HandleScript script);
@@ -364,6 +378,8 @@ class ModuleObject : public NativeObject {
mozilla::Span<const ExportEntry> localExportEntries() const;
mozilla::Span<const ExportEntry> indirectExportEntries() const;
mozilla::Span<const ExportEntry> starExportEntries() const;
+ const ExportNameVector& syntheticExportNames() const;
+
IndirectBindingMap& importBindings();
void setStatus(ModuleStatus newStatus);
@@ -390,6 +406,8 @@ class ModuleObject : public NativeObject {
void clearAsyncEvaluatingPostOrder();
void setCycleRoot(ModuleObject* cycleRoot);
ModuleObject* getCycleRoot() const;
+ bool hasCyclicModuleFields() const;
+ bool hasSyntheticModuleFields() const;
static void onTopLevelEvaluationFinished(ModuleObject* module);
@@ -413,6 +431,9 @@ class ModuleObject : public NativeObject {
MutableHandle<UniquePtr<ExportNameVector>> exports);
static bool createEnvironment(JSContext* cx, Handle<ModuleObject*> self);
+ static bool createSyntheticEnvironment(JSContext* cx,
+ Handle<ModuleObject*> self,
+ Handle<GCVector<Value>> values);
void initAsyncSlots(JSContext* cx, bool hasTopLevelAwait,
Handle<ListObject*> asyncParentModules);
@@ -423,9 +444,11 @@ class ModuleObject : public NativeObject {
static void trace(JSTracer* trc, JSObject* obj);
static void finalize(JS::GCContext* gcx, JSObject* obj);
- bool hasCyclicModuleFields() const;
CyclicModuleFields* cyclicModuleFields();
const CyclicModuleFields* cyclicModuleFields() const;
+
+ SyntheticModuleFields* syntheticModuleFields();
+ const SyntheticModuleFields* syntheticModuleFields() const;
};
JSObject* GetOrCreateModuleMetaObject(JSContext* cx, HandleObject module);
diff --git a/js/src/builtin/ReflectParse.cpp b/js/src/builtin/ReflectParse.cpp
index 62eac477ad..dde953143e 100644
--- a/js/src/builtin/ReflectParse.cpp
+++ b/js/src/builtin/ReflectParse.cpp
@@ -2885,7 +2885,8 @@ bool ASTSerializer::expression(ParseNode* pn, MutableHandleValue dst) {
}
case ParseNodeKind::DotExpr:
- case ParseNodeKind::OptionalDotExpr: {
+ case ParseNodeKind::OptionalDotExpr:
+ case ParseNodeKind::ArgumentsLength: {
PropertyAccessBase* prop = &pn->as<PropertyAccessBase>();
MOZ_ASSERT(prop->pn_pos.encloses(prop->expression().pn_pos));
diff --git a/js/src/builtin/String.cpp b/js/src/builtin/String.cpp
index 16e92b554c..1da2270cfb 100644
--- a/js/src/builtin/String.cpp
+++ b/js/src/builtin/String.cpp
@@ -566,15 +566,10 @@ static inline void CopyChars(CharT* to, const JSLinearString* from,
MOZ_ASSERT(begin + length <= from->length());
JS::AutoCheckCannotGC nogc;
- if constexpr (std::is_same_v<CharT, Latin1Char>) {
- MOZ_ASSERT(from->hasLatin1Chars());
+ if (from->hasLatin1Chars()) {
CopyChars(to, from->latin1Chars(nogc) + begin, length);
} else {
- if (from->hasLatin1Chars()) {
- CopyChars(to, from->latin1Chars(nogc) + begin, length);
- } else {
- CopyChars(to, from->twoByteChars(nogc) + begin, length);
- }
+ CopyChars(to, from->twoByteChars(nogc) + begin, length);
}
}
diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctions.cpp
index f762f28f3e..498fa1746d 100644
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -1595,17 +1595,20 @@ static bool WasmLosslessInvoke(JSContext* cx, unsigned argc, Value* vp) {
JS_ReportErrorASCII(cx, "not enough arguments");
return false;
}
- if (!args.get(0).isObject()) {
+ if (!args.get(0).isObject() || !args.get(0).toObject().is<JSFunction>()) {
JS_ReportErrorASCII(cx, "argument is not an object");
return false;
}
- RootedFunction func(cx, args[0].toObject().maybeUnwrapIf<JSFunction>());
+ RootedFunction func(cx, &args[0].toObject().as<JSFunction>());
if (!func || !wasm::IsWasmExportedFunction(func)) {
JS_ReportErrorASCII(cx, "argument is not an exported wasm function");
return false;
}
+ // Switch to the function's realm
+ AutoRealm ar(cx, func);
+
// Get the instance and funcIndex for calling the function
wasm::Instance& instance = wasm::ExportedFunctionToInstance(func);
uint32_t funcIndex = wasm::ExportedFunctionToFuncIndex(func);
@@ -2042,6 +2045,95 @@ static bool WasmDisassemble(JSContext* cx, unsigned argc, Value* vp) {
return false;
}
+static bool ToIonDumpContents(JSContext* cx, HandleValue value,
+ wasm::IonDumpContents* contents) {
+ RootedString option(cx, JS::ToString(cx, value));
+
+ if (!option) {
+ return false;
+ }
+
+ bool isEqual = false;
+ if (!JS_StringEqualsLiteral(cx, option, "mir", &isEqual) || isEqual) {
+ *contents = wasm::IonDumpContents::UnoptimizedMIR;
+ return isEqual;
+ } else if (!JS_StringEqualsLiteral(cx, option, "unopt-mir", &isEqual) ||
+ isEqual) {
+ *contents = wasm::IonDumpContents::UnoptimizedMIR;
+ return isEqual;
+ } else if (!JS_StringEqualsLiteral(cx, option, "opt-mir", &isEqual) ||
+ isEqual) {
+ *contents = wasm::IonDumpContents::OptimizedMIR;
+ return isEqual;
+ } else if (!JS_StringEqualsLiteral(cx, option, "lir", &isEqual) || isEqual) {
+ *contents = wasm::IonDumpContents::LIR;
+ return isEqual;
+ } else {
+ return false;
+ }
+}
+
+static bool WasmDumpIon(JSContext* cx, unsigned argc, Value* vp) {
+ if (!wasm::HasSupport(cx)) {
+ JS_ReportErrorASCII(cx, "wasm support unavailable");
+ return false;
+ }
+
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ args.rval().set(UndefinedValue());
+
+ SharedMem<uint8_t*> dataPointer;
+ size_t byteLength;
+ if (!args.get(0).isObject() || !IsBufferSource(args.get(0).toObjectOrNull(),
+ &dataPointer, &byteLength)) {
+ JS_ReportErrorASCII(cx, "argument is not a buffer source");
+ return false;
+ }
+
+ uint32_t targetFuncIndex;
+ if (!ToUint32(cx, args.get(1), &targetFuncIndex)) {
+ JS_ReportErrorASCII(cx, "argument is not a func index");
+ return false;
+ }
+
+ wasm::IonDumpContents contents = wasm::IonDumpContents::Default;
+ if (args.length() > 2 && !ToIonDumpContents(cx, args.get(2), &contents)) {
+ JS_ReportErrorASCII(cx, "argument is not a valid dump contents");
+ return false;
+ }
+
+ wasm::MutableBytes bytecode = cx->new_<wasm::ShareableBytes>();
+ if (!bytecode) {
+ return false;
+ }
+ if (!bytecode->append(dataPointer.unwrap(), byteLength)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ UniqueChars error;
+ JSSprinter out(cx);
+ if (!out.init()) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (!wasm::DumpIonFunctionInModule(*bytecode, targetFuncIndex, contents, out,
+ &error)) {
+ if (error) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_COMPILE_ERROR, error.get());
+ return false;
+ }
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ args.rval().set(StringValue(out.release(cx)));
+ return true;
+}
+
enum class Flag { Tier2Complete, Deserialized };
static bool WasmReturnFlag(JSContext* cx, unsigned argc, Value* vp, Flag flag) {
@@ -2073,7 +2165,6 @@ static bool WasmReturnFlag(JSContext* cx, unsigned argc, Value* vp, Flag flag) {
return true;
}
-#if defined(DEBUG)
static bool wasmMetadataAnalysis(JSContext* cx, unsigned argc, Value* vp) {
CallArgs args = CallArgsFromVp(argc, vp);
@@ -2082,10 +2173,6 @@ static bool wasmMetadataAnalysis(JSContext* cx, unsigned argc, Value* vp) {
return false;
}
- if (!cx->options().wasmTestMetadata()) {
- return false;
- }
-
if (args[0].toObject().is<WasmModuleObject>()) {
HashMap<const char*, uint32_t, mozilla::CStringHasher, SystemAllocPolicy>
hashmap = args[0]
@@ -2096,6 +2183,7 @@ static bool wasmMetadataAnalysis(JSContext* cx, unsigned argc, Value* vp) {
.metadataAnalysis(cx);
if (hashmap.empty()) {
JS_ReportErrorASCII(cx, "Metadata analysis has failed");
+ return false;
}
// metadataAnalysis returned a map of {key, value} with various statistics
@@ -2107,16 +2195,21 @@ static bool wasmMetadataAnalysis(JSContext* cx, unsigned argc, Value* vp) {
auto value = iter.get().value();
JSString* string = JS_NewStringCopyZ(cx, key);
+ if (!string) {
+ return false;
+ }
+
if (!props.append(
IdValuePair(NameToId(string->asLinear().toPropertyName(cx)),
NumberValue(value)))) {
- ReportOutOfMemory(cx);
return false;
}
}
- JSObject* results =
- NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ JSObject* results = NewPlainObjectWithUniqueNames(cx, props);
+ if (!results) {
+ return false;
+ }
args.rval().setObject(*results);
return true;
@@ -2127,7 +2220,6 @@ static bool wasmMetadataAnalysis(JSContext* cx, unsigned argc, Value* vp) {
return false;
}
-#endif
static bool WasmHasTier2CompilationCompleted(JSContext* cx, unsigned argc,
Value* vp) {
@@ -4523,7 +4615,9 @@ static bool ReadGeckoProfilingStack(JSContext* cx, unsigned argc, Value* vp) {
case JS::ProfilingFrameIterator::Frame_Ion:
frameKindStr = "ion";
break;
- case JS::ProfilingFrameIterator::Frame_Wasm:
+ case JS::ProfilingFrameIterator::Frame_WasmBaseline:
+ case JS::ProfilingFrameIterator::Frame_WasmIon:
+ case JS::ProfilingFrameIterator::Frame_WasmOther:
frameKindStr = "wasm";
break;
default:
@@ -5264,15 +5358,17 @@ class CustomSerializableObject : public NativeObject {
static ActivityLog* getThreadLog() {
if (!self.initialized() || !self.get()) {
self.infallibleInit();
+ AutoEnterOOMUnsafeRegion oomUnsafe;
self.set(js_new<ActivityLog>());
- MOZ_RELEASE_ASSERT(self.get());
+ if (!self.get()) {
+ oomUnsafe.crash("allocating activity log");
+ }
if (!TlsContext.get()->runtime()->atExit(
[](void* vpData) {
auto* log = static_cast<ActivityLog*>(vpData);
js_delete(log);
},
self.get())) {
- AutoEnterOOMUnsafeRegion oomUnsafe;
oomUnsafe.crash("atExit");
}
}
@@ -7199,6 +7295,8 @@ static bool CompileToStencil(JSContext* cx, uint32_t argc, Value* vp) {
}
CompileOptions options(cx);
+ options.setFile("<compileToStencil>");
+
RootedString displayURL(cx);
RootedString sourceMapURL(cx);
UniqueChars fileNameBytes;
@@ -7230,13 +7328,10 @@ static bool CompileToStencil(JSContext* cx, uint32_t argc, Value* vp) {
AutoReportFrontendContext fc(cx);
RefPtr<JS::Stencil> stencil;
- JS::CompilationStorage compileStorage;
if (isModule) {
- stencil =
- JS::CompileModuleScriptToStencil(&fc, options, srcBuf, compileStorage);
+ stencil = JS::CompileModuleScriptToStencil(&fc, options, srcBuf);
} else {
- stencil =
- JS::CompileGlobalScriptToStencil(&fc, options, srcBuf, compileStorage);
+ stencil = JS::CompileGlobalScriptToStencil(&fc, options, srcBuf);
}
if (!stencil) {
return false;
@@ -7368,6 +7463,8 @@ static bool CompileToStencilXDR(JSContext* cx, uint32_t argc, Value* vp) {
}
CompileOptions options(cx);
+ options.setFile("<compileToStencilXDR>");
+
RootedString displayURL(cx);
RootedString sourceMapURL(cx);
UniqueChars fileNameBytes;
@@ -9750,6 +9847,15 @@ JS_FOR_WASM_FEATURES(WASM_FEATURE)
" ImportJitExit - wasm-to-jitted-JS stubs\n"
" all - all kinds, including obscure ones\n"),
+ JS_FN_HELP("wasmDumpIon", WasmDumpIon, 2, 0,
+"wasmDumpIon(bytecode, funcIndex, [, contents])\n",
+"wasmDumpIon(bytecode, funcIndex, [, contents])"
+" Returns a dump of compiling a function in the specified module with Ion."
+" The `contents` flag controls what is dumped. one of:"
+" `mir` | `unopt-mir`: Unoptimized MIR (the default)"
+" `opt-mir`: Optimized MIR"
+" `lir`: LIR"),
+
JS_FN_HELP("wasmHasTier2CompilationCompleted", WasmHasTier2CompilationCompleted, 1, 0,
"wasmHasTier2CompilationCompleted(module)",
" Returns a boolean indicating whether a given module has finished compiled code for tier2. \n"
@@ -9948,11 +10054,9 @@ JS_FOR_WASM_FEATURES(WASM_FEATURE)
" element's edge is the node of the i+1'th array element; the destination of\n"
" the last array element is implicitly |target|.\n"),
-#if defined(DEBUG)
JS_FN_HELP("wasmMetadataAnalysis", wasmMetadataAnalysis, 1, 0,
"wasmMetadataAnalysis(wasmObject)",
" Prints an analysis of the size of metadata on this wasm object.\n"),
-#endif
#if defined(DEBUG) || defined(JS_JITSPEW)
JS_FN_HELP("dumpObject", DumpObject, 1, 0,
diff --git a/js/src/builtin/intl/NumberFormat.js b/js/src/builtin/intl/NumberFormat.js
index be3b74a8ac..6dc77fb639 100644
--- a/js/src/builtin/intl/NumberFormat.js
+++ b/js/src/builtin/intl/NumberFormat.js
@@ -190,7 +190,7 @@ function UnwrapNumberFormat(nf) {
*
* Applies digit options used for number formatting onto the intl object.
*
- * ES2024 Intl draft rev 74ca7099f103d143431b2ea422ae640c6f43e3e6
+ * ES2024 Intl draft rev a1db4567870dbe505121a4255f1210338757190a
*/
function SetNumberFormatDigitOptions(
lazyData,
@@ -216,15 +216,6 @@ function SetNumberFormatDigitOptions(
lazyData.minimumIntegerDigits = mnid;
// Step 7.
- var roundingPriority = GetOption(
- options,
- "roundingPriority",
- "string",
- ["auto", "morePrecision", "lessPrecision"],
- "auto"
- );
-
- // Step 8.
var roundingIncrement = GetNumberOption(
options,
"roundingIncrement",
@@ -233,7 +224,7 @@ function SetNumberFormatDigitOptions(
1
);
- // Step 9.
+ // Step 8.
switch (roundingIncrement) {
case 1:
case 2:
@@ -259,7 +250,7 @@ function SetNumberFormatDigitOptions(
);
}
- // Step 10.
+ // Step 9.
var roundingMode = GetOption(
options,
"roundingMode",
@@ -278,6 +269,15 @@ function SetNumberFormatDigitOptions(
"halfExpand"
);
+ // Step 10.
+ var roundingPriority = GetOption(
+ options,
+ "roundingPriority",
+ "string",
+ ["auto", "morePrecision", "lessPrecision"],
+ "auto"
+ );
+
// Step 11.
var trailingZeroDisplay = GetOption(
options,
@@ -303,52 +303,52 @@ function SetNumberFormatDigitOptions(
// Step 16.
lazyData.trailingZeroDisplay = trailingZeroDisplay;
- // Steps 17-18.
+ // Step 17.
var hasSignificantDigits = mnsd !== undefined || mxsd !== undefined;
- // Step 19-20.
+  // Step 18.
var hasFractionDigits = mnfd !== undefined || mxfd !== undefined;
- // Steps 21 and 23.a.
+ // Steps 19 and 21.a.
var needSignificantDigits =
roundingPriority !== "auto" || hasSignificantDigits;
- // Steps 22 and 23.b.i.
+ // Steps 20 and 21.b.i.
var needFractionalDigits =
roundingPriority !== "auto" ||
!(hasSignificantDigits || (!hasFractionDigits && notation === "compact"));
- // Step 24.
+ // Step 22.
if (needSignificantDigits) {
- // Step 24.a.
+ // Step 22.a.
if (hasSignificantDigits) {
- // Step 24.a.i.
+ // Step 22.a.i.
mnsd = DefaultNumberOption(mnsd, 1, 21, 1);
lazyData.minimumSignificantDigits = mnsd;
- // Step 24.a.ii.
+ // Step 22.a.ii.
mxsd = DefaultNumberOption(mxsd, mnsd, 21, 21);
lazyData.maximumSignificantDigits = mxsd;
} else {
- // Step 24.b.i.
+ // Step 22.b.i.
lazyData.minimumSignificantDigits = 1;
- // Step 24.b.ii.
+ // Step 22.b.ii.
lazyData.maximumSignificantDigits = 21;
}
}
- // Step 25.
+ // Step 23.
if (needFractionalDigits) {
- // Step 25.a.
+ // Step 23.a.
if (hasFractionDigits) {
- // Step 25.a.i.
+ // Step 23.a.i.
mnfd = DefaultNumberOption(mnfd, 0, 100, undefined);
- // Step 25.a.ii.
+ // Step 23.a.ii.
mxfd = DefaultNumberOption(mxfd, 0, 100, undefined);
- // Step 25.a.iii.
+ // Step 23.a.iii.
if (mnfd === undefined) {
assert(
mxfd !== undefined,
@@ -357,31 +357,31 @@ function SetNumberFormatDigitOptions(
mnfd = std_Math_min(mnfdDefault, mxfd);
}
- // Step 25.a.iv.
+ // Step 23.a.iv.
else if (mxfd === undefined) {
mxfd = std_Math_max(mxfdDefault, mnfd);
}
- // Step 25.a.v.
+ // Step 23.a.v.
else if (mnfd > mxfd) {
ThrowRangeError(JSMSG_INVALID_DIGITS_VALUE, mxfd);
}
- // Step 25.a.vi.
+ // Step 23.a.vi.
lazyData.minimumFractionDigits = mnfd;
- // Step 25.a.vii.
+ // Step 23.a.vii.
lazyData.maximumFractionDigits = mxfd;
} else {
- // Step 25.b.i.
+ // Step 23.b.i.
lazyData.minimumFractionDigits = mnfdDefault;
- // Step 25.b.ii.
+ // Step 23.b.ii.
lazyData.maximumFractionDigits = mxfdDefault;
}
}
- // Steps 26-30.
+ // Steps 24-28.
if (!needSignificantDigits && !needFractionalDigits) {
assert(!hasSignificantDigits, "bad significant digits in fallback case");
assert(
@@ -393,23 +393,23 @@ function SetNumberFormatDigitOptions(
`bad notation in fallback case: ${notation}`
);
- // Steps 26.a-e.
+ // Steps 24.a-f.
lazyData.minimumFractionDigits = 0;
lazyData.maximumFractionDigits = 0;
lazyData.minimumSignificantDigits = 1;
lazyData.maximumSignificantDigits = 2;
lazyData.roundingPriority = "morePrecision";
} else {
- // Steps 27-30.
+ // Steps 25-28.
//
// Our implementation stores |roundingPriority| instead of using
// [[RoundingType]].
lazyData.roundingPriority = roundingPriority;
}
- // Step 31.
+ // Step 29.
if (roundingIncrement !== 1) {
- // Step 31.a.
+ // Step 29.a.
//
// [[RoundingType]] is `fractionDigits` if |roundingPriority| is equal to
// "auto" and |hasSignificantDigits| is false.
@@ -428,7 +428,7 @@ function SetNumberFormatDigitOptions(
);
}
- // Step 31.b.
+ // Step 29.b.
//
// Minimum and maximum fraction digits must be equal.
if (
@@ -1128,7 +1128,7 @@ function Intl_NumberFormat_formatRangeToParts(start, end) {
*
* Returns the resolved options for a NumberFormat object.
*
- * ES2024 Intl draft rev 74ca7099f103d143431b2ea422ae640c6f43e3e6
+ * ES2024 Intl draft rev a1db4567870dbe505121a4255f1210338757190a
*/
function Intl_NumberFormat_resolvedOptions() {
// Steps 1-3.
@@ -1244,20 +1244,15 @@ function Intl_NumberFormat_resolvedOptions() {
}
DefineDataProperty(result, "signDisplay", internals.signDisplay);
- DefineDataProperty(result, "roundingMode", internals.roundingMode);
DefineDataProperty(result, "roundingIncrement", internals.roundingIncrement);
+ DefineDataProperty(result, "roundingMode", internals.roundingMode);
+ DefineDataProperty(result, "roundingPriority", internals.roundingPriority);
DefineDataProperty(
result,
"trailingZeroDisplay",
internals.trailingZeroDisplay
);
- // Steps 6-8.
- //
- // Our implementation doesn't use [[RoundingType]], but instead directly
- // stores the computed `roundingPriority` value.
- DefineDataProperty(result, "roundingPriority", internals.roundingPriority);
-
- // Step 9.
+ // Step 6.
return result;
}
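A hedged sketch of the observable effect of the reordering above (property names are per the spec; the exact key list depends on the options and locale):

// resolvedOptions now reports the rounding options in spec order:
// roundingIncrement, roundingMode, roundingPriority, trailingZeroDisplay.
let nf = new Intl.NumberFormat("en");
let keys = Object.keys(nf.resolvedOptions());
print(keys.slice(keys.indexOf("roundingIncrement")).join(", "));
// expected tail: roundingIncrement, roundingMode, roundingPriority,
// trailingZeroDisplay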
diff --git a/js/src/builtin/intl/PluralRules.js b/js/src/builtin/intl/PluralRules.js
index 1dbf6656df..260fdbd568 100644
--- a/js/src/builtin/intl/PluralRules.js
+++ b/js/src/builtin/intl/PluralRules.js
@@ -339,7 +339,7 @@ function Intl_PluralRules_selectRange(start, end) {
*
* Returns the resolved options for a PluralRules object.
*
- * ES2024 Intl draft rev 74ca7099f103d143431b2ea422ae640c6f43e3e6
+ * ES2024 Intl draft rev a1db4567870dbe505121a4255f1210338757190a
*/
function Intl_PluralRules_resolvedOptions() {
// Step 1.
@@ -359,7 +359,20 @@ function Intl_PluralRules_resolvedOptions() {
var internals = getPluralRulesInternals(pluralRules);
- // Steps 3-4.
+ // Step 4.
+ var internalsPluralCategories = internals.pluralCategories;
+ if (internalsPluralCategories === null) {
+ internalsPluralCategories = intl_GetPluralCategories(pluralRules);
+ internals.pluralCategories = internalsPluralCategories;
+ }
+
+ // Step 5.b.
+ var pluralCategories = [];
+ for (var i = 0; i < internalsPluralCategories.length; i++) {
+ DefineDataProperty(pluralCategories, i, internalsPluralCategories[i]);
+ }
+
+ // Steps 3 and 5.
var result = {
locale: internals.locale,
type: internals.type,
@@ -406,35 +419,16 @@ function Intl_PluralRules_resolvedOptions() {
);
}
- DefineDataProperty(result, "roundingMode", internals.roundingMode);
+ DefineDataProperty(result, "pluralCategories", pluralCategories);
DefineDataProperty(result, "roundingIncrement", internals.roundingIncrement);
+ DefineDataProperty(result, "roundingMode", internals.roundingMode);
+ DefineDataProperty(result, "roundingPriority", internals.roundingPriority);
DefineDataProperty(
result,
"trailingZeroDisplay",
internals.trailingZeroDisplay
);
- // Step 5.
- var internalsPluralCategories = internals.pluralCategories;
- if (internalsPluralCategories === null) {
- internalsPluralCategories = intl_GetPluralCategories(pluralRules);
- internals.pluralCategories = internalsPluralCategories;
- }
-
- var pluralCategories = [];
- for (var i = 0; i < internalsPluralCategories.length; i++) {
- DefineDataProperty(pluralCategories, i, internalsPluralCategories[i]);
- }
-
// Step 6.
- DefineDataProperty(result, "pluralCategories", pluralCategories);
-
- // Steps 7-9.
- //
- // Our implementation doesn't use [[RoundingType]], but instead directly
- // stores the computed `roundingPriority` value.
- DefineDataProperty(result, "roundingPriority", internals.roundingPriority);
-
- // Step 10.
return result;
}
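Similarly for PluralRules, a hedged sketch of the new property order (the categories come from ICU, so the values shown are illustrative for English):

// pluralCategories is now materialized before the rounding options and
// defined ahead of them in resolvedOptions.
let pr = new Intl.PluralRules("en");
let opts = pr.resolvedOptions();
print(opts.pluralCategories.join(","));  // e.g. "one,other"
let keys = Object.keys(opts);
print(keys.indexOf("pluralCategories") < keys.indexOf("roundingIncrement")); // true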
diff --git a/js/src/builtin/intl/make_intl_data.py b/js/src/builtin/intl/make_intl_data.py
index 7042c0a005..a8357445c4 100755
--- a/js/src/builtin/intl/make_intl_data.py
+++ b/js/src/builtin/intl/make_intl_data.py
@@ -2213,7 +2213,7 @@ def listIANAFiles(tzdataDir):
def readIANAFiles(tzdataDir, files):
"""Read all IANA time zone files from the given iterable."""
- nameSyntax = "[\w/+\-]+"
+ nameSyntax = r"[\w/+\-]+"
pZone = re.compile(r"Zone\s+(?P<name>%s)\s+.*" % nameSyntax)
pLink = re.compile(
r"Link\s+(?P<target>%s)\s+(?P<name>%s)(?:\s+#.*)?" % (nameSyntax, nameSyntax)
@@ -2310,7 +2310,7 @@ def readICUResourceFile(filename):
maybeMultiComments = r"(?:/\*[^*]*\*/)*"
maybeSingleComment = r"(?://.*)?"
lineStart = "^%s" % maybeMultiComments
- lineEnd = "%s\s*%s$" % (maybeMultiComments, maybeSingleComment)
+ lineEnd = r"%s\s*%s$" % (maybeMultiComments, maybeSingleComment)
return re.compile(r"\s*".join(chain([lineStart], args, [lineEnd])))
tableName = r'(?P<quote>"?)(?P<name>.+?)(?P=quote)'
@@ -2554,7 +2554,7 @@ def icuTzDataVersion(icuTzDir):
zoneinfo = os.path.join(icuTzDir, "zoneinfo64.txt")
if not os.path.isfile(zoneinfo):
raise RuntimeError("file not found: %s" % zoneinfo)
- version = searchInFile("^//\s+tz version:\s+([0-9]{4}[a-z])$", zoneinfo)
+ version = searchInFile(r"^//\s+tz version:\s+([0-9]{4}[a-z])$", zoneinfo)
if version is None:
raise RuntimeError(
"%s does not contain a valid tzdata version string" % zoneinfo
@@ -3711,7 +3711,7 @@ const allUnits = {};
""".format(
all_units_array
)
- + """
+ + r"""
// Test only sanctioned unit identifiers are allowed.
for (const typeAndUnit of allUnits) {
diff --git a/js/src/builtin/temporal/PlainDate.cpp b/js/src/builtin/temporal/PlainDate.cpp
index 759456c9cc..a4ad0e418f 100644
--- a/js/src/builtin/temporal/PlainDate.cpp
+++ b/js/src/builtin/temporal/PlainDate.cpp
@@ -2295,8 +2295,7 @@ static bool PlainDate_getISOFields(JSContext* cx, const CallArgs& args) {
}
// Step 8.
- auto* obj =
- NewPlainObjectWithUniqueNames(cx, fields.begin(), fields.length());
+ auto* obj = NewPlainObjectWithUniqueNames(cx, fields);
if (!obj) {
return false;
}
diff --git a/js/src/builtin/temporal/PlainDateTime.cpp b/js/src/builtin/temporal/PlainDateTime.cpp
index 8861b484bd..8f137cfe43 100644
--- a/js/src/builtin/temporal/PlainDateTime.cpp
+++ b/js/src/builtin/temporal/PlainDateTime.cpp
@@ -2507,8 +2507,7 @@ static bool PlainDateTime_getISOFields(JSContext* cx, const CallArgs& args) {
}
// Step 14.
- auto* obj =
- NewPlainObjectWithUniqueNames(cx, fields.begin(), fields.length());
+ auto* obj = NewPlainObjectWithUniqueNames(cx, fields);
if (!obj) {
return false;
}
diff --git a/js/src/builtin/temporal/PlainMonthDay.cpp b/js/src/builtin/temporal/PlainMonthDay.cpp
index f97b7ad68c..0896100a3f 100644
--- a/js/src/builtin/temporal/PlainMonthDay.cpp
+++ b/js/src/builtin/temporal/PlainMonthDay.cpp
@@ -903,8 +903,7 @@ static bool PlainMonthDay_getISOFields(JSContext* cx, const CallArgs& args) {
}
// Step 8.
- auto* obj =
- NewPlainObjectWithUniqueNames(cx, fields.begin(), fields.length());
+ auto* obj = NewPlainObjectWithUniqueNames(cx, fields);
if (!obj) {
return false;
}
diff --git a/js/src/builtin/temporal/PlainTime.cpp b/js/src/builtin/temporal/PlainTime.cpp
index 9501b5853b..bf35b9d93e 100644
--- a/js/src/builtin/temporal/PlainTime.cpp
+++ b/js/src/builtin/temporal/PlainTime.cpp
@@ -2432,8 +2432,7 @@ static bool PlainTime_getISOFields(JSContext* cx, const CallArgs& args) {
}
// Step 10.
- auto* obj =
- NewPlainObjectWithUniqueNames(cx, fields.begin(), fields.length());
+ auto* obj = NewPlainObjectWithUniqueNames(cx, fields);
if (!obj) {
return false;
}
diff --git a/js/src/builtin/temporal/PlainYearMonth.cpp b/js/src/builtin/temporal/PlainYearMonth.cpp
index a4e2f8f9e4..b95efd3179 100644
--- a/js/src/builtin/temporal/PlainYearMonth.cpp
+++ b/js/src/builtin/temporal/PlainYearMonth.cpp
@@ -1534,8 +1534,7 @@ static bool PlainYearMonth_getISOFields(JSContext* cx, const CallArgs& args) {
}
// Step 8.
- auto* obj =
- NewPlainObjectWithUniqueNames(cx, fields.begin(), fields.length());
+ auto* obj = NewPlainObjectWithUniqueNames(cx, fields);
if (!obj) {
return false;
}
diff --git a/js/src/builtin/temporal/ZonedDateTime.cpp b/js/src/builtin/temporal/ZonedDateTime.cpp
index 690ff223b1..92842a9626 100644
--- a/js/src/builtin/temporal/ZonedDateTime.cpp
+++ b/js/src/builtin/temporal/ZonedDateTime.cpp
@@ -3958,8 +3958,7 @@ static bool ZonedDateTime_getISOFields(JSContext* cx, const CallArgs& args) {
}
// Step 22.
- auto* obj =
- NewPlainObjectWithUniqueNames(cx, fields.begin(), fields.length());
+ auto* obj = NewPlainObjectWithUniqueNames(cx, fields);
if (!obj) {
return false;
}
diff --git a/js/src/debugger/Object.cpp b/js/src/debugger/Object.cpp
index c5a4f1f6dc..17528b0fd9 100644
--- a/js/src/debugger/Object.cpp
+++ b/js/src/debugger/Object.cpp
@@ -209,6 +209,7 @@ struct MOZ_STACK_CLASS DebuggerObject::CallData {
bool createSource();
bool makeDebuggeeValueMethod();
bool isSameNativeMethod();
+ bool isSameNativeWithJitInfoMethod();
bool isNativeGetterWithJitInfo();
bool unsafeDereferenceMethod();
bool unwrapMethod();
@@ -1338,7 +1339,18 @@ bool DebuggerObject::CallData::isSameNativeMethod() {
return false;
}
- return DebuggerObject::isSameNative(cx, object, args[0], args.rval());
+ return DebuggerObject::isSameNative(cx, object, args[0], CheckJitInfo::No,
+ args.rval());
+}
+
+bool DebuggerObject::CallData::isSameNativeWithJitInfoMethod() {
+ if (!args.requireAtLeast(
+ cx, "Debugger.Object.prototype.isSameNativeWithJitInfo", 1)) {
+ return false;
+ }
+
+ return DebuggerObject::isSameNative(cx, object, args[0], CheckJitInfo::Yes,
+ args.rval());
}
bool DebuggerObject::CallData::isNativeGetterWithJitInfo() {
@@ -1424,6 +1436,11 @@ struct DebuggerObject::PromiseReactionRecordBuilder
// so we ignore it.
return true;
}
+ if (!unwrappedGenerator->realm()->isDebuggee()) {
+    // The caller can keep a reference to the debugger object even after
+    // the realm is removed from the debuggees. Do nothing in that case.
+ return true;
+ }
return dbg->getFrame(cx, unwrappedGenerator, &frame) && push(cx, frame);
}
@@ -1535,6 +1552,7 @@ const JSFunctionSpec DebuggerObject::methods_[] = {
JS_DEBUG_FN("createSource", createSource, 1),
JS_DEBUG_FN("makeDebuggeeValue", makeDebuggeeValueMethod, 1),
JS_DEBUG_FN("isSameNative", isSameNativeMethod, 1),
+ JS_DEBUG_FN("isSameNativeWithJitInfo", isSameNativeWithJitInfoMethod, 1),
JS_DEBUG_FN("isNativeGetterWithJitInfo", isNativeGetterWithJitInfo, 1),
JS_DEBUG_FN("unsafeDereference", unsafeDereferenceMethod, 0),
JS_DEBUG_FN("unwrap", unwrapMethod, 0),
@@ -2576,9 +2594,36 @@ static JSAtom* MaybeGetSelfHostedFunctionName(const Value& v) {
return GetClonedSelfHostedFunctionName(fun);
}
+static bool IsSameNative(JSFunction* a, JSFunction* b,
+ DebuggerObject::CheckJitInfo checkJitInfo) {
+ if (a->native() != b->native()) {
+ return false;
+ }
+
+ if (checkJitInfo == DebuggerObject::CheckJitInfo::No) {
+ return true;
+ }
+
+  // Both functions should agree on whether they have JitInfo.
+
+ if (a->hasJitInfo() != b->hasJitInfo()) {
+ return false;
+ }
+
+ if (!a->hasJitInfo()) {
+ return true;
+ }
+
+ if (a->jitInfo() == b->jitInfo()) {
+ return true;
+ }
+
+ return false;
+}
+
/* static */
bool DebuggerObject::isSameNative(JSContext* cx, Handle<DebuggerObject*> object,
- HandleValue value,
+ HandleValue value, CheckJitInfo checkJitInfo,
MutableHandleValue result) {
RootedValue referentValue(cx, ObjectValue(*object->referent()));
@@ -2602,7 +2647,8 @@ bool DebuggerObject::isSameNative(JSContext* cx, Handle<DebuggerObject*> object,
RootedFunction referentFun(cx, EnsureNativeFunction(referentValue));
- result.setBoolean(referentFun && referentFun->native() == fun->native());
+ result.setBoolean(referentFun &&
+ IsSameNative(referentFun, fun, checkJitInfo));
return true;
}
diff --git a/js/src/debugger/Object.h b/js/src/debugger/Object.h
index 5141bd3133..15d2800e76 100644
--- a/js/src/debugger/Object.h
+++ b/js/src/debugger/Object.h
@@ -145,9 +145,11 @@ class DebuggerObject : public NativeObject {
Handle<DebuggerObject*> object,
HandleValue value,
MutableHandleValue result);
+ enum class CheckJitInfo { No, Yes };
[[nodiscard]] static bool isSameNative(JSContext* cx,
Handle<DebuggerObject*> object,
HandleValue value,
+ CheckJitInfo checkJitInfo,
MutableHandleValue result);
[[nodiscard]] static bool isNativeGetterWithJitInfo(
JSContext* cx, Handle<DebuggerObject*> object, MutableHandleValue result);
diff --git a/js/src/devtools/automation/autospider.py b/js/src/devtools/automation/autospider.py
index a49d2fd505..5e427fd3a5 100755
--- a/js/src/devtools/automation/autospider.py
+++ b/js/src/devtools/automation/autospider.py
@@ -217,9 +217,40 @@ def ensure_dir_exists(
with open(os.path.join(DIR.scripts, "variants", args.variant)) as fh:
variant = json.load(fh)
+# Some of the variants request a particular word size (eg ARM simulators).
+word_bits = variant.get("bits")
+
+# On Linux and Windows, we build 32- and 64-bit versions on a 64 bit
+# host, so the caller has to specify what is desired.
+if word_bits is None and args.platform:
+ platform_arch = args.platform.split("-")[0]
+ if platform_arch in ("win32", "linux"):
+ word_bits = 32
+ elif platform_arch in ("win64", "linux64"):
+ word_bits = 64
+
+# Fall back to the word size of the host.
+if word_bits is None:
+ word_bits = 64 if platform.architecture()[0] == "64bit" else 32
+
+# Need a platform name to use as a key in variant files.
+if args.platform:
+ variant_platform = args.platform.split("-")[0]
+elif platform.system() == "Windows":
+ variant_platform = "win64" if word_bits == 64 else "win32"
+elif platform.system() == "Linux":
+ variant_platform = "linux64" if word_bits == 64 else "linux"
+elif platform.system() == "Darwin":
+ variant_platform = "macosx64"
+else:
+ variant_platform = "other"
+
CONFIGURE_ARGS = variant["configure-args"]
-compiler = variant.get("compiler")
+if variant_platform in ("win32", "win64"):
+ compiler = "clang-cl"
+else:
+ compiler = variant.get("compiler")
if compiler != "gcc" and "clang-plugin" not in CONFIGURE_ARGS:
CONFIGURE_ARGS += " --enable-clang-plugin"
@@ -254,34 +285,6 @@ opt = variant.get("nspr")
if opt is None or opt:
CONFIGURE_ARGS += " --enable-nspr-build"
-# Some of the variants request a particular word size (eg ARM simulators).
-word_bits = variant.get("bits")
-
-# On Linux and Windows, we build 32- and 64-bit versions on a 64 bit
-# host, so the caller has to specify what is desired.
-if word_bits is None and args.platform:
- platform_arch = args.platform.split("-")[0]
- if platform_arch in ("win32", "linux"):
- word_bits = 32
- elif platform_arch in ("win64", "linux64"):
- word_bits = 64
-
-# Fall back to the word size of the host.
-if word_bits is None:
- word_bits = 64 if platform.architecture()[0] == "64bit" else 32
-
-# Need a platform name to use as a key in variant files.
-if args.platform:
- variant_platform = args.platform.split("-")[0]
-elif platform.system() == "Windows":
- variant_platform = "win64" if word_bits == 64 else "win32"
-elif platform.system() == "Linux":
- variant_platform = "linux64" if word_bits == 64 else "linux"
-elif platform.system() == "Darwin":
- variant_platform = "macosx64"
-else:
- variant_platform = "other"
-
env["LD_LIBRARY_PATH"] = ":".join(
d
for d in [
@@ -437,7 +440,7 @@ CONFIGURE_ARGS += " --prefix={OBJDIR}/dist".format(OBJDIR=quote(OBJDIR))
# Generate a mozconfig.
with open(mozconfig, "wt") as fh:
if AUTOMATION and platform.system() == "Windows":
- fh.write('. "$topsrcdir/build/%s/mozconfig.vs-latest"\n' % variant_platform)
+ fh.write('. "$topsrcdir/build/mozconfig.clang-cl"\n')
fh.write("ac_add_options --enable-project=js\n")
fh.write("ac_add_options " + CONFIGURE_ARGS + "\n")
fh.write("mk_add_options MOZ_OBJDIR=" + quote(OBJDIR) + "\n")
@@ -665,7 +668,7 @@ if use_minidump:
[
mach,
"python",
- "virtualenv=build",
+ "--virtualenv=build",
os.path.join(DIR.source, "testing/mozbase/mozcrash/mozcrash/mozcrash.py"),
os.getenv("TMPDIR", "/tmp"),
os.path.join(OBJDIR, "dist/crashreporter-symbols"),
diff --git a/js/src/devtools/automation/variants/pbl-debug b/js/src/devtools/automation/variants/pbl-debug
new file mode 100644
index 0000000000..984589a4f0
--- /dev/null
+++ b/js/src/devtools/automation/variants/pbl-debug
@@ -0,0 +1,7 @@
+{
+ "configure-args": "--enable-portable-baseline-interp --enable-portable-baseline-interp-force",
+ "debug": true,
+ "env": {
+ "JSTESTS_EXTRA_ARGS": "--jitflags=debug"
+ }
+}
diff --git a/js/src/devtools/gc-ubench/harness.js b/js/src/devtools/gc-ubench/harness.js
index db7fa06d63..124baa17a5 100644
--- a/js/src/devtools/gc-ubench/harness.js
+++ b/js/src/devtools/gc-ubench/harness.js
@@ -254,14 +254,19 @@ var AllocationLoadManager = class {
var gLoadMgr = undefined;
function format_with_units(n, label, shortlabel, kbase) {
+ function format(n, prefix, unit) {
+ let s = Number.isInteger(n) ? n.toString() : n.toFixed(2);
+ return `${s}${prefix}${unit}`;
+ }
+
if (n < kbase * 4) {
return `${n} ${label}`;
} else if (n < kbase ** 2 * 4) {
- return `${(n / kbase).toFixed(2)}K${shortlabel}`;
+ return format(n / kbase, 'K', shortlabel);
} else if (n < kbase ** 3 * 4) {
- return `${(n / kbase ** 2).toFixed(2)}M${shortlabel}`;
+ return format(n / kbase ** 2, 'M', shortlabel);
}
- return `${(n / kbase ** 3).toFixed(2)}G${shortlabel}`;
+ return format(n / kbase ** 3, 'G', shortlabel);
}
function format_bytes(bytes) {
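For reference, a small sketch of what the refactored helper now prints, assuming harness.js is loaded; the inputs are chosen here for illustration:

// Integer multiples of the base drop the trailing ".00".
print(format_with_units(1024, "bytes", "B", 1024));      // "1024 bytes"
print(format_with_units(8 * 1024, "bytes", "B", 1024));  // "8KB"
print(format_with_units(9000, "bytes", "B", 1024));      // "8.79KB"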
diff --git a/js/src/devtools/gc-ubench/index.html b/js/src/devtools/gc-ubench/index.html
index 4abce385f2..4efbb75def 100644
--- a/js/src/devtools/gc-ubench/index.html
+++ b/js/src/devtools/gc-ubench/index.html
@@ -15,61 +15,59 @@
<!-- List of garbage-creating test loads -->
<script src="test_list.js"></script>
-
- <!-- Collect all test loads into a `tests` Map -->
- <script>
- var tests = new Map();
- foreach_test_file(path => import("./" + path));
- </script>
-
</head>
<body onload="onload()" onunload="onunload()">
-<canvas id="graph" width="1080" height="400" style="padding-left:10px"></canvas>
-<canvas id="memgraph" width="1080" height="400" style="padding-left:10px"></canvas>
+<canvas id="graph" width="980" height="320" style="padding-left:10px"></canvas>
+<canvas id="memgraph" width="980" height="320" style="padding-left:10px"></canvas>
<div id="memgraph-disabled" style="display: none"><i>No performance.mozMemory object available. If running Firefox, set dom.enable_memory_stats to True to see heap size info.</i></div>
<hr>
-<div id='track-sizes-div'>
- Show heap size graph: <input id='track-sizes' type='checkbox' onclick="trackHeapSizes(this.checked)">
-</div>
+<form>
+ <div id='track-sizes-div'>
+ <input id='track-sizes' type='checkbox' onclick="trackHeapSizes(this.checked)">
+ <label for='track-sizes'>Show heap size graph</label>
+ </div>
-<div>
- Update display:
- <input type="checkbox" id="do-graph" onchange="onUpdateDisplayChanged()" checked></input>
-</div>
+ <div>
+ <input type="checkbox" id="do-graph" onchange="onUpdateDisplayChanged()" checked></input>
+ <label for='do-graph'>Update display</label>
+ </div>
-<div>
- Run allocation load
- <input type="checkbox" id="do-load" onchange="onDoLoadChange()" checked></input>
-</div>
+ <div>
+ <input type="checkbox" id="do-load" onchange="onDoLoadChange()" checked></input>
+ <label for='do-load'>Run allocation load</label>
+ </div>
-<div>
- Allocation load:
+ <div>
+ <label for='test-selection'>Allocation load:</label>
<select id="test-selection" required onchange="onLoadChange()"></select>
<span id="load-running">(init)</span>
-</div>
+ </div>
-<div>
- &nbsp;&nbsp;&nbsp;&nbsp;Garbage items per frame:
- <input type="text" id="garbage-per-frame" size="5" value="8K"
+ <div>
+ <label for='garbage-per-frame'>&nbsp;&nbsp;&nbsp;&nbsp;Garbage items per frame:</label>
+ <input type="text" id="garbage-per-frame" size="8" value="8K"
onchange="garbage_per_frame_changed()"></input>
-</div>
-<div>
- &nbsp;&nbsp;&nbsp;&nbsp;Garbage piles:
- <input type="text" id="garbage-piles" size="5" value="8"
+ </div>
+
+ <div>
+ <label for='garbage-piles'>&nbsp;&nbsp;&nbsp;&nbsp;Garbage piles:</label>
+ <input type="text" id="garbage-piles" size="8" value="8"
onchange="garbage_piles_changed()"></input>
-</div>
+ </div>
+</form>
<hr>
-<div>
- Duration: <input type="text" id="test-duration" size="3" value="8" onchange="duration_changed()"></input>s
- <input type="button" id="test-one" value="Run Test" onclick="run_one_test()"></input>
- <input type="button" id="test-all" value="Run All Tests" onclick="run_all_tests()"></input>
-</div>
+<form>
+ <label for='test-duration'>Duration:</label>
+ <input type="text" id="test-duration" size="3" value="8" onchange="duration_changed()"></input>s
+ <input type="button" id="test-one" value="Run Test" onclick="run_one_test()"></input>
+ <input type="button" id="test-all" value="Run All Tests" onclick="run_all_tests()"></input>
+</form>
<div>
&nbsp;&nbsp;&nbsp;&nbsp;Time remaining: <span id="test-progress">(not running)</span>
diff --git a/js/src/devtools/gc-ubench/ui.js b/js/src/devtools/gc-ubench/ui.js
index 4905f97904..4ff84481c9 100644
--- a/js/src/devtools/gc-ubench/ui.js
+++ b/js/src/devtools/gc-ubench/ui.js
@@ -10,6 +10,8 @@ var stroke = {
var numSamples = 500;
+var tests = new Map();
+
var gHistogram = new Map(); // {ms: count}
var gHistory = new FrameHistory(numSamples);
var gPerf = new PerfTracker();
@@ -51,9 +53,15 @@ var Firefox = class extends Host {
get gcBytes() {
return gMemory.zone.gcBytes;
}
+ get mallocBytes() {
+ return gMemory.zone.mallocBytes;
+ }
get gcAllocTrigger() {
return gMemory.zone.gcAllocTrigger;
}
+ get mallocTrigger() {
+ return gMemory.zone.mallocTriggerBytes;
+ }
features = {
haveMemorySizes: 'gcBytes' in gMemory,
@@ -85,22 +93,32 @@ function parse_units(v) {
}
var Graph = class {
- constructor(ctx) {
- this.ctx = ctx;
+ constructor(canvas) {
+ this.ctx = canvas.getContext('2d');
+
+ // Adjust scale for high-DPI displays.
+ this.scale = window.devicePixelRatio || 1;
+ let rect = canvas.getBoundingClientRect();
+ canvas.width = Math.floor(rect.width * this.scale);
+ canvas.height = Math.floor(rect.height * this.scale);
+    canvas.style.width = `${rect.width}px`;
+    canvas.style.height = `${rect.height}px`;
+
+ // Record canvas size to draw into.
+ this.width = canvas.width;
+ this.height = canvas.height;
- var { height } = ctx.canvas;
this.layout = {
- xAxisLabel_Y: height - 20,
+ xAxisLabel_Y: this.height - 20 * this.scale,
};
}
xpos(index) {
- return index * 2;
+ return (index / numSamples) * (this.width - 100 * this.scale);
}
clear() {
- const { width, height } = this.ctx.canvas;
- this.ctx.clearRect(0, 0, width, height);
+ this.ctx.clearRect(0, 0, this.width, this.height);
}
drawScale(delay) {
@@ -117,20 +135,22 @@ var Graph = class {
drawAxisLabels(x_label, y_label) {
const ctx = this.ctx;
- const { width, height } = ctx.canvas;
- ctx.fillText(x_label, width / 2, this.layout.xAxisLabel_Y);
+ ctx.font = `${10 * this.scale}px sans-serif`;
+
+ ctx.fillText(x_label, this.width / 2, this.layout.xAxisLabel_Y);
ctx.save();
ctx.rotate(Math.PI / 2);
- var start = height / 2 - ctx.measureText(y_label).width / 2;
- ctx.fillText(y_label, start, -width + 20);
+ var start = this.height / 2 - ctx.measureText(y_label).width / 2;
+ ctx.fillText(y_label, start, -this.width + 20 * this.scale);
ctx.restore();
}
drawFrame() {
const ctx = this.ctx;
- const { width, height } = ctx.canvas;
+ const width = this.width;
+ const height = this.height;
// Draw frame to show size
ctx.strokeStyle = "rgb(0,0,0)";
@@ -148,22 +168,17 @@ var Graph = class {
var LatencyGraph = class extends Graph {
constructor(ctx) {
super(ctx);
- console.log(this.ctx);
}
ypos(delay) {
- const { height } = this.ctx.canvas;
-
- const r = height + 100 - Math.log(delay) * 64;
- if (r < 5) {
- return 5;
- }
- return r;
+ return this.height + this.scale * (100 - Math.log(delay) * 64);
}
drawHBar(delay, label, color = "rgb(0,0,0)", label_offset = 0) {
const ctx = this.ctx;
+ let y = this.ypos(delay);
+
ctx.fillStyle = color;
ctx.strokeStyle = color;
ctx.fillText(
@@ -277,20 +292,19 @@ var LatencyGraph = class extends Graph {
var MemoryGraph = class extends Graph {
constructor(ctx) {
super(ctx);
- this.worstEver = this.bestEver = gHost.gcBytes;
- this.limit = Math.max(this.worstEver, gHost.gcAllocTrigger);
+ this.range = 1;
}
ypos(size) {
- const { height } = this.ctx.canvas;
-
- const range = this.limit - this.bestEver;
- const percent = (size - this.bestEver) / range;
+ const percent = size / this.range;
+ return (1 - percent) * this.height * 0.9 + this.scale * 20;
+ }
- return (1 - percent) * height * 0.9 + 20;
+ drawHBarForBytes(size, name, color) {
+    this.drawHBar(size, `${format_bytes(size)} ${name}`, color);
}
- drawHBar(size, label, color = "rgb(150,150,150)") {
+ drawHBar(size, label, color) {
const ctx = this.ctx;
const y = this.ypos(size);
@@ -313,50 +327,48 @@ var MemoryGraph = class extends Graph {
this.clear();
this.drawFrame();
- var worst = 0,
- worstpos = 0;
+ let gcMaxPos = 0;
+ let mallocMaxPos = 0;
+ let gcMax = 0;
+ let mallocMax = 0;
for (let i = 0; i < numSamples; i++) {
- if (gHistory.gcBytes[i] >= worst) {
- worst = gHistory.gcBytes[i];
- worstpos = i;
+ if (gHistory.gcBytes[i] >= gcMax) {
+ gcMax = gHistory.gcBytes[i];
+ gcMaxPos = i;
}
- if (gHistory.gcBytes[i] < this.bestEver) {
- this.bestEver = gHistory.gcBytes[i];
+ if (gHistory.mallocBytes[i] >= mallocMax) {
+ mallocMax = gHistory.mallocBytes[i];
+ mallocMaxPos = i;
}
}
- if (this.worstEver < worst) {
- this.worstEver = worst;
- this.limit = Math.max(this.worstEver, gHost.gcAllocTrigger);
- }
+ this.range = Math.max(gcMax, mallocMax, gHost.gcAllocTrigger, gHost.mallocTrigger);
- this.drawHBar(
- this.bestEver,
- `${format_bytes(this.bestEver)} min`,
- "#00cf61"
- );
- this.drawHBar(
- this.worstEver,
- `${format_bytes(this.worstEver)} max`,
- "#cc1111"
- );
- this.drawHBar(
- gHost.gcAllocTrigger,
- `${format_bytes(gHost.gcAllocTrigger)} trigger`,
- "#cc11cc"
- );
+ this.drawHBarForBytes(gcMax, "GC max", "#00cf61");
+ this.drawHBarForBytes(mallocMax, "Malloc max", "#cc1111");
+ this.drawHBarForBytes(gHost.gcAllocTrigger, "GC trigger", "#cc11cc");
+ this.drawHBarForBytes(gHost.mallocTrigger, "Malloc trigger", "#cc11cc");
ctx.fillStyle = "rgb(255,0,0)";
- if (worst) {
+
+ if (gcMax !== 0) {
ctx.fillText(
- format_bytes(worst),
- this.xpos(worstpos) - 10,
- this.ypos(worst) - 14
+ format_bytes(gcMax),
+ this.xpos(gcMaxPos) - 10,
+ this.ypos(gcMax) - 14
+ );
+ }
+ if (mallocMax !== 0) {
+ ctx.fillText(
+ format_bytes(mallocMax),
+ this.xpos(mallocMaxPos) - 10,
+ this.ypos(mallocMax) - 14
);
}
+ const where = sampleIndex % numSamples;
+
ctx.beginPath();
- var where = sampleIndex % numSamples;
ctx.arc(
this.xpos(where),
this.ypos(gHistory.gcBytes[where]),
@@ -366,13 +378,40 @@ var MemoryGraph = class extends Graph {
true
);
ctx.fill();
+ ctx.beginPath();
+ ctx.arc(
+ this.xpos(where),
+ this.ypos(gHistory.mallocBytes[where]),
+ 5,
+ 0,
+ Math.PI * 2,
+ true
+ );
+ ctx.fill();
+
+ ctx.beginPath();
+ for (let i = 0; i < numSamples; i++) {
+ let x = this.xpos(i);
+ let y = this.ypos(gHistory.gcBytes[i]);
+ if (i == (sampleIndex + 1) % numSamples) {
+ ctx.moveTo(x, y);
+ } else {
+ ctx.lineTo(x, y);
+ }
+ if (i == where) {
+ ctx.stroke();
+ }
+ }
+ ctx.stroke();
ctx.beginPath();
for (let i = 0; i < numSamples; i++) {
+ let x = this.xpos(i);
+ let y = this.ypos(gHistory.mallocBytes[i]);
if (i == (sampleIndex + 1) % numSamples) {
- ctx.moveTo(this.xpos(i), this.ypos(gHistory.gcBytes[i]));
+ ctx.moveTo(x, y);
} else {
- ctx.lineTo(this.xpos(i), this.ypos(gHistory.gcBytes[i]));
+ ctx.lineTo(x, y);
}
if (i == where) {
ctx.stroke();
@@ -380,6 +419,8 @@ var MemoryGraph = class extends Graph {
}
ctx.stroke();
+ ctx.fillStyle = "rgb(0,0,0)";
+
this.drawAxisLabels("Time", "Heap Memory Usage");
}
};
@@ -466,10 +507,17 @@ function reset_draw_state() {
}
function onunload() {
- gLoadMgr.deactivateLoad();
+ if (gLoadMgr) {
+ gLoadMgr.deactivateLoad();
+ }
}
-function onload() {
+async function onload() {
+ // Collect all test loads into the `tests` Map.
+ let imports = [];
+ foreach_test_file(path => imports.push(import("./" + path)));
+ await Promise.all(imports);
+
// The order of `tests` is currently based on their asynchronous load
// order, rather than the listed order. Rearrange by extracting the test
// names from their filenames, which is kind of gross.
@@ -517,7 +565,7 @@ function onload() {
// Acquire our canvas.
var canvas = document.getElementById("graph");
- latencyGraph = new LatencyGraph(canvas.getContext("2d"));
+ latencyGraph = new LatencyGraph(canvas);
if (!gHost.features.haveMemorySizes) {
document.getElementById("memgraph-disabled").style.display = "block";
@@ -676,7 +724,7 @@ function garbage_per_frame_changed() {
return;
}
if (gLoadMgr.load_running()) {
- gLoadMgr.change_garbagePerFrame = value;
+ gLoadMgr.change_garbagePerFrame(value);
console.log(
`Updated garbage-per-frame to ${
gLoadMgr.activeLoad().garbagePerFrame
@@ -692,7 +740,7 @@ function trackHeapSizes(track) {
if (enabled.trackingSizes) {
canvas.style.display = "block";
- memoryGraph = new MemoryGraph(canvas.getContext("2d"));
+ memoryGraph = new MemoryGraph(canvas);
} else {
canvas.style.display = "none";
memoryGraph = null;
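A minimal sketch of the updated construction pattern, assuming the canvases from index.html are present in the page:

// Graphs now take the canvas element itself; the constructor scales the
// backing store by devicePixelRatio and records the drawing size.
let latencyGraph = new LatencyGraph(document.getElementById("graph"));
let memoryGraph = new MemoryGraph(document.getElementById("memgraph"));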
diff --git a/js/src/doc/Debugger/Debugger.Memory.md b/js/src/doc/Debugger/Debugger.Memory.md
index c20354ef3c..5e8c594433 100644
--- a/js/src/doc/Debugger/Debugger.Memory.md
+++ b/js/src/doc/Debugger/Debugger.Memory.md
@@ -305,7 +305,7 @@ which produces a result like this:
In general, a `breakdown` value has one of the following forms:
-* <code>{ by: "count", count:<i>count<i>, bytes:<i>bytes</i> }</code>
+* <code>{ by: "count", count:<i>count</i>, bytes:<i>bytes</i> }</code>
The trivial categorization: none whatsoever. Simply tally up the items
visited. If <i>count</i> is true, count the number of items visited; if
@@ -409,6 +409,16 @@ In general, a `breakdown` value has one of the following forms:
breakdown value produces. All breakdown values are optional, and default
to `{ type: "count" }`.
+* `{ by: "filename", then:breakdown, noFilename:noFilenameBreakdown }`
+
+ For scripts only, group by the filename of the script.
+
+ Further categorize all of the scripts from each distinct filename
+ using breakdown.
+
+ Scripts that lack a filename are counted using noFilenameBreakdown.
+ These appear in the result `Map` under the key string `"noFilename"`.
+
* `{ by: "internalType", then: breakdown }`
Group items by the names given their types internally by SpiderMonkey.
@@ -441,6 +451,9 @@ In general, a `breakdown` value has one of the following forms:
To simplify breakdown values, all `then` and `other` properties are optional.
If omitted, they are treated as if they were `{ type: "count" }`.
+Breakdown groupings cannot be nested within themselves. This would not be
+useful, and forbidding this prevents infinite recursion.
+
If the `options` argument has no `breakdown` property, `takeCensus` defaults
to the following:
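Building on the `by: "filename"` grouping documented above, a hedged sketch of a census request; `dbg` is assumed to be an existing Debugger with at least one debuggee global:

// Group scripts by filename, counting scripts per file; scripts with no
// filename land under the "noFilename" key.
let census = dbg.memory.takeCensus({
  breakdown: {
    by: "coarseType",
    scripts: { by: "filename", then: { by: "count" } },
  },
});
print(census.scripts instanceof Map);  // filenames map to count records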
diff --git a/js/src/doc/Debugger/Debugger.Object.md b/js/src/doc/Debugger/Debugger.Object.md
index c6f8b90a49..d81bffdaec 100644
--- a/js/src/doc/Debugger/Debugger.Object.md
+++ b/js/src/doc/Debugger/Debugger.Object.md
@@ -469,6 +469,15 @@ by code in <i>d</i>'s compartment.
If <i>value</i> is a native function in the debugger's compartment, return
whether the referent is a native function for the same C++ native.
+### `isSameNativeWithJitInfo(value)`
+If <i>value</i> is a native function in the debugger's compartment, return
+whether the referent is a native function for the same C++ native with the
+same JSJitInfo pointer value.
+
+This can be used to distinguish functions that share a native function
+implementation but use different JSJitInfo pointers to define the underlying
+functionality.
+
### `isNativeGetterWithJitInfo()`
Return whether the referent is a native getter function with JSJitInfo.
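A sketch of how the two predicates differ in use; `fnDO` and `candidate` are assumed bindings (a Debugger.Object whose referent is a debuggee native function, and a native function from the debugger's compartment), not part of the patch:

// isSameNative only compares the C++ native entry point; the new method
// additionally requires both functions to carry the same JSJitInfo pointer.
print(fnDO.isSameNative(candidate));
print(fnDO.isSameNativeWithJitInfo(candidate));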
diff --git a/js/src/frontend/BytecodeEmitter.cpp b/js/src/frontend/BytecodeEmitter.cpp
index df44743768..2759fa5924 100644
--- a/js/src/frontend/BytecodeEmitter.cpp
+++ b/js/src/frontend/BytecodeEmitter.cpp
@@ -925,6 +925,7 @@ restart:
// Watch out for getters!
case ParseNodeKind::OptionalDotExpr:
case ParseNodeKind::DotExpr:
+ case ParseNodeKind::ArgumentsLength:
MOZ_ASSERT(pn->is<BinaryNode>());
*answer = true;
return true;
@@ -2601,6 +2602,7 @@ bool BytecodeEmitter::emitDestructuringLHSRef(ParseNode* target,
*emitted = 0;
break;
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr: {
PropertyAccess* prop = &target->as<PropertyAccess>();
bool isSuper = prop->isSuper();
@@ -2760,6 +2762,7 @@ bool BytecodeEmitter::emitSetOrInitializeDestructuring(
break;
}
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr: {
// The reference is already pushed by emitDestructuringLHSRef.
// [stack] # if Super
@@ -4367,6 +4370,7 @@ bool BytecodeEmitter::emitAssignmentOrInit(ParseNodeKind kind, ParseNode* lhs,
: NameOpEmitter::Kind::SimpleAssignment);
break;
}
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr: {
PropertyAccess* prop = &lhs->as<PropertyAccess>();
bool isSuper = prop->isSuper();
@@ -4466,6 +4470,7 @@ bool BytecodeEmitter::emitAssignmentOrInit(ParseNodeKind kind, ParseNode* lhs,
if (isCompound) {
MOZ_ASSERT(rhs);
switch (lhs->getKind()) {
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr: {
PropertyAccess* prop = &lhs->as<PropertyAccess>();
if (!poe->emitGet(prop->key().atom())) {
@@ -4512,6 +4517,7 @@ bool BytecodeEmitter::emitAssignmentOrInit(ParseNodeKind kind, ParseNode* lhs,
}
offset += noe->emittedBindOp();
break;
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr:
if (!poe->prepareForRhs()) {
// [stack] # if Simple Assignment with Super
@@ -4579,6 +4585,7 @@ bool BytecodeEmitter::emitAssignmentOrInit(ParseNodeKind kind, ParseNode* lhs,
}
break;
}
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr: {
PropertyAccess* prop = &lhs->as<PropertyAccess>();
if (!poe->emitAssignment(prop->key().atom())) {
@@ -4666,7 +4673,7 @@ bool BytecodeEmitter::emitShortCircuitAssignment(AssignmentNode* node) {
numPushed = noe->emittedBindOp();
break;
}
-
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr: {
PropertyAccess* prop = &lhs->as<PropertyAccess>();
bool isSuper = prop->isSuper();
@@ -4802,7 +4809,7 @@ bool BytecodeEmitter::emitShortCircuitAssignment(AssignmentNode* node) {
}
break;
}
-
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr: {
PropertyAccess* prop = &lhs->as<PropertyAccess>();
@@ -7326,6 +7333,7 @@ bool BytecodeEmitter::emitDeleteOptionalChain(UnaryNode* deleteNode) {
break;
}
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr:
case ParseNodeKind::OptionalDotExpr: {
auto* propExpr = &kid->as<PropertyAccessBase>();
@@ -7978,6 +7986,7 @@ bool BytecodeEmitter::emitOptionalCalleeAndThis(ParseNode* callee,
}
break;
}
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr: {
MOZ_ASSERT(emitterMode != BytecodeEmitter::SelfHosting);
PropertyAccess* prop = &callee->as<PropertyAccess>();
@@ -8076,6 +8085,7 @@ bool BytecodeEmitter::emitCalleeAndThis(ParseNode* callee, CallNode* maybeCall,
}
break;
}
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr: {
MOZ_ASSERT(emitterMode != BytecodeEmitter::SelfHosting);
PropertyAccess* prop = &callee->as<PropertyAccess>();
@@ -8197,6 +8207,7 @@ ParseNode* BytecodeEmitter::getCoordNode(ParseNode* callNode,
coordNode = argsList;
switch (calleeNode->getKind()) {
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr:
// Use the position of a property access identifier.
//
@@ -8658,6 +8669,7 @@ bool BytecodeEmitter::emitOptionalTree(
}
break;
}
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr: {
PropertyAccess* prop = &pn->as<PropertyAccess>();
bool isSuper = prop->isSuper();
@@ -9015,6 +9027,7 @@ bool BytecodeEmitter::emitSequenceExpr(ListNode* node, ValueUsage valueUsage) {
MOZ_NEVER_INLINE bool BytecodeEmitter::emitIncOrDec(UnaryNode* incDec,
ValueUsage valueUsage) {
switch (incDec->kid()->getKind()) {
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr:
return emitPropIncDec(incDec, valueUsage);
case ParseNodeKind::ElemExpr:
@@ -9859,6 +9872,12 @@ static bool NeedsPrivateBrand(ParseNode* member) {
!member->as<ClassMethod>().isStatic();
}
+#ifdef ENABLE_DECORATORS
+static bool HasDecorators(ParseNode* member) {
+ return member->is<ClassMethod>() && member->as<ClassMethod>().decorators();
+}
+#endif
+
mozilla::Maybe<MemberInitializers> BytecodeEmitter::setupMemberInitializers(
ListNode* classMembers, FieldPlacement placement) {
bool isStatic = placement == FieldPlacement::Static;
@@ -9866,6 +9885,9 @@ mozilla::Maybe<MemberInitializers> BytecodeEmitter::setupMemberInitializers(
size_t numFields = 0;
size_t numPrivateInitializers = 0;
bool hasPrivateBrand = false;
+#ifdef ENABLE_DECORATORS
+ bool hasDecorators = false;
+#endif
for (ParseNode* member : classMembers->contents()) {
if (NeedsFieldInitializer(member, isStatic)) {
numFields++;
@@ -9875,6 +9897,11 @@ mozilla::Maybe<MemberInitializers> BytecodeEmitter::setupMemberInitializers(
} else if (NeedsPrivateBrand(member)) {
hasPrivateBrand = true;
}
+#ifdef ENABLE_DECORATORS
+ if (!hasDecorators && HasDecorators(member)) {
+ hasDecorators = true;
+ }
+#endif
}
// If there are more initializers than can be represented, return invalid.
@@ -9882,8 +9909,11 @@ mozilla::Maybe<MemberInitializers> BytecodeEmitter::setupMemberInitializers(
MemberInitializers::MaxInitializers) {
return Nothing();
}
- return Some(
- MemberInitializers(hasPrivateBrand, numFields + numPrivateInitializers));
+ return Some(MemberInitializers(hasPrivateBrand,
+#ifdef ENABLE_DECORATORS
+ hasDecorators,
+#endif
+ numFields + numPrivateInitializers));
}
// Purpose of .fieldKeys:
@@ -10691,119 +10721,122 @@ bool BytecodeEmitter::emitInitializeInstanceMembers(
}
}
#ifdef ENABLE_DECORATORS
- // Decorators Proposal
- // https://arai-a.github.io/ecma262-compare/?pr=2417&id=sec-initializeinstanceelements
- // 4. For each element e of elements, do
- // 4.a. If elementRecord.[[Kind]] is field or accessor, then
- // 4.a.i. Perform ? InitializeFieldOrAccessor(O, elementRecord).
- //
+ if (memberInitializers.hasDecorators) {
+ // Decorators Proposal
+ // https://arai-a.github.io/ecma262-compare/?pr=2417&id=sec-initializeinstanceelements
+ // 4. For each element e of elements, do
+ // 4.a. If elementRecord.[[Kind]] is field or accessor, then
+ // 4.a.i. Perform ? InitializeFieldOrAccessor(O, elementRecord).
+ //
- // TODO: (See Bug 1817993) At the moment, we're applying the initialization
- // logic in two steps. The pre-decorator initialization code runs, stores
- // the initial value, and then we retrieve it here and apply the
- // initializers added by decorators. We should unify these two steps.
- if (!emitGetName(TaggedParserAtomIndex::WellKnown::dot_initializers_())) {
- // [stack] ARRAY
- return false;
- }
+ // TODO: (See Bug 1817993) At the moment, we're applying the
+ // initialization logic in two steps. The pre-decorator initialization
+ // code runs, stores the initial value, and then we retrieve it here and
+ // apply the initializers added by decorators. We should unify these two
+ // steps.
+ if (!emitGetName(TaggedParserAtomIndex::WellKnown::dot_initializers_())) {
+ // [stack] ARRAY
+ return false;
+ }
- if (!emit1(JSOp::Dup)) {
- // [stack] ARRAY ARRAY
- return false;
- }
+ if (!emit1(JSOp::Dup)) {
+ // [stack] ARRAY ARRAY
+ return false;
+ }
- if (!emitAtomOp(JSOp::GetProp,
- TaggedParserAtomIndex::WellKnown::length())) {
- // [stack] ARRAY LENGTH
- return false;
- }
+ if (!emitAtomOp(JSOp::GetProp,
+ TaggedParserAtomIndex::WellKnown::length())) {
+ // [stack] ARRAY LENGTH
+ return false;
+ }
- if (!emitNumberOp(static_cast<double>(numInitializers))) {
- // [stack] ARRAY LENGTH INDEX
- return false;
- }
+ if (!emitNumberOp(static_cast<double>(numInitializers))) {
+ // [stack] ARRAY LENGTH INDEX
+ return false;
+ }
- InternalWhileEmitter wh(this);
- // At this point, we have no context to determine offsets in the
- // code for this while statement. Ideally, it would correspond to
- // the field we're initializing.
- if (!wh.emitCond()) {
- // [stack] ARRAY LENGTH INDEX
- return false;
- }
+ InternalWhileEmitter wh(this);
+ // At this point, we have no context to determine offsets in the
+ // code for this while statement. Ideally, it would correspond to
+ // the field we're initializing.
+ if (!wh.emitCond()) {
+ // [stack] ARRAY LENGTH INDEX
+ return false;
+ }
- if (!emit1(JSOp::Dup)) {
- // [stack] ARRAY LENGTH INDEX INDEX
- return false;
- }
+ if (!emit1(JSOp::Dup)) {
+ // [stack] ARRAY LENGTH INDEX INDEX
+ return false;
+ }
- if (!emitDupAt(2)) {
- // [stack] ARRAY LENGTH INDEX INDEX LENGTH
- return false;
- }
+ if (!emitDupAt(2)) {
+ // [stack] ARRAY LENGTH INDEX INDEX LENGTH
+ return false;
+ }
- if (!emit1(JSOp::Lt)) {
- // [stack] ARRAY LENGTH INDEX BOOL
- return false;
- }
+ if (!emit1(JSOp::Lt)) {
+ // [stack] ARRAY LENGTH INDEX BOOL
+ return false;
+ }
- if (!wh.emitBody()) {
- // [stack] ARRAY LENGTH INDEX
- return false;
- }
+ if (!wh.emitBody()) {
+ // [stack] ARRAY LENGTH INDEX
+ return false;
+ }
- if (!emitDupAt(2)) {
- // [stack] ARRAY LENGTH INDEX ARRAY
- return false;
- }
+ if (!emitDupAt(2)) {
+ // [stack] ARRAY LENGTH INDEX ARRAY
+ return false;
+ }
- if (!emitDupAt(1)) {
- // [stack] ARRAY LENGTH INDEX ARRAY INDEX
- return false;
- }
+ if (!emitDupAt(1)) {
+ // [stack] ARRAY LENGTH INDEX ARRAY INDEX
+ return false;
+ }
- // Retrieve initializers for this field
- if (!emit1(JSOp::GetElem)) {
- // [stack] ARRAY LENGTH INDEX INITIALIZERS
- return false;
- }
+ // Retrieve initializers for this field
+ if (!emit1(JSOp::GetElem)) {
+ // [stack] ARRAY LENGTH INDEX INITIALIZERS
+ return false;
+ }
- // This is guaranteed to run after super(), so we don't need TDZ checks.
- if (!emitGetName(TaggedParserAtomIndex::WellKnown::dot_this_())) {
- // [stack] ARRAY LENGTH INDEX INITIALIZERS THIS
- return false;
- }
+ // This is guaranteed to run after super(), so we don't need TDZ checks.
+ if (!emitGetName(TaggedParserAtomIndex::WellKnown::dot_this_())) {
+ // [stack] ARRAY LENGTH INDEX INITIALIZERS THIS
+ return false;
+ }
- if (!emit1(JSOp::Swap)) {
- // [stack] ARRAY LENGTH INDEX THIS INITIALIZERS
- return false;
- }
+ if (!emit1(JSOp::Swap)) {
+ // [stack] ARRAY LENGTH INDEX THIS INITIALIZERS
+ return false;
+ }
- DecoratorEmitter de(this);
- if (!de.emitInitializeFieldOrAccessor()) {
- // [stack] ARRAY LENGTH INDEX
- return false;
- }
+ DecoratorEmitter de(this);
+ if (!de.emitInitializeFieldOrAccessor()) {
+ // [stack] ARRAY LENGTH INDEX
+ return false;
+ }
- if (!emit1(JSOp::Inc)) {
- // [stack] ARRAY LENGTH INDEX
- return false;
- }
+ if (!emit1(JSOp::Inc)) {
+ // [stack] ARRAY LENGTH INDEX
+ return false;
+ }
- if (!wh.emitEnd()) {
- // [stack] ARRAY LENGTH INDEX
- return false;
- }
+ if (!wh.emitEnd()) {
+ // [stack] ARRAY LENGTH INDEX
+ return false;
+ }
- if (!emitPopN(3)) {
- // [stack]
- return false;
- }
- // 5. Return unused.
+ if (!emitPopN(3)) {
+ // [stack]
+ return false;
+ }
+ // 5. Return unused.
- if (!de.emitCallExtraInitializers(TaggedParserAtomIndex::WellKnown::
- dot_instanceExtraInitializers_())) {
- return false;
+ if (!de.emitCallExtraInitializers(TaggedParserAtomIndex::WellKnown::
+ dot_instanceExtraInitializers_())) {
+ return false;
+ }
}
#endif
}
@@ -12502,6 +12535,32 @@ bool BytecodeEmitter::emitTree(
break;
}
+ case ParseNodeKind::ArgumentsLength: {
+ if (sc->isFunctionBox() &&
+ sc->asFunctionBox()->isEligibleForArgumentsLength() &&
+ !sc->asFunctionBox()->needsArgsObj()) {
+ if (!emit1(JSOp::ArgumentsLength)) {
+ return false;
+ }
+ } else {
+ PropOpEmitter poe(this, PropOpEmitter::Kind::Get,
+ PropOpEmitter::ObjKind::Other);
+ if (!poe.prepareForObj()) {
+ return false;
+ }
+
+ NameOpEmitter noe(this, TaggedParserAtomIndex::WellKnown::arguments(),
+ NameOpEmitter::Kind::Get);
+ if (!noe.emitGet()) {
+ return false;
+ }
+ if (!poe.emitGet(TaggedParserAtomIndex::WellKnown::length())) {
+ return false;
+ }
+ }
+ break;
+ }
+
case ParseNodeKind::ElemExpr: {
PropertyByValue* elem = &pn->as<PropertyByValue>();
bool isSuper = elem->isSuper();
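A short JS sketch of the behavior the new ArgumentsLength node targets; note the bytecode claim is hedged, since the dedicated op is only used when the function is eligible and does not otherwise need an arguments object:

// In eligible functions, `arguments.length` can now be emitted as the
// dedicated ArgumentsLength op instead of materializing `arguments`.
function argCount() {
  return arguments.length;
}
print(argCount(1, 2, 3));  // 3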
diff --git a/js/src/frontend/CompileScript.cpp b/js/src/frontend/CompileScript.cpp
index 925b8201a2..b561d7d124 100644
--- a/js/src/frontend/CompileScript.cpp
+++ b/js/src/frontend/CompileScript.cpp
@@ -87,73 +87,46 @@ JS_PUBLIC_API const JSErrorReport* JS::GetFrontendWarningAt(
return &fc->warnings()[index];
}
-JS::CompilationStorage::~CompilationStorage() {
- if (input_ && !isBorrowed_) {
- js_delete(input_);
- input_ = nullptr;
- }
-}
-
-size_t JS::CompilationStorage::sizeOfIncludingThis(
- mozilla::MallocSizeOf mallocSizeOf) const {
- size_t sizeOfCompilationInput =
- input_ ? input_->sizeOfExcludingThis(mallocSizeOf) : 0;
- return mallocSizeOf(this) + sizeOfCompilationInput;
-}
-
-bool JS::CompilationStorage::allocateInput(
- FrontendContext* fc, const JS::ReadOnlyCompileOptions& options) {
- MOZ_ASSERT(!input_);
- input_ = fc->getAllocator()->new_<frontend::CompilationInput>(options);
- return !!input_;
-}
-
-void JS::CompilationStorage::trace(JSTracer* trc) {
- if (input_) {
- input_->trace(trc);
- }
-}
-
template <typename CharT>
static already_AddRefed<JS::Stencil> CompileGlobalScriptToStencilImpl(
JS::FrontendContext* fc, const JS::ReadOnlyCompileOptions& options,
- JS::SourceText<CharT>& srcBuf, JS::CompilationStorage& compilationStorage) {
+ JS::SourceText<CharT>& srcBuf) {
ScopeKind scopeKind =
options.nonSyntacticScope ? ScopeKind::NonSyntactic : ScopeKind::Global;
JS::SourceText<CharT> data(std::move(srcBuf));
- compilationStorage.allocateInput(fc, options);
- if (!compilationStorage.hasInput()) {
- return nullptr;
- }
+ frontend::CompilationInput compilationInput(options);
frontend::NoScopeBindingCache scopeCache;
LifoAlloc tempLifoAlloc(JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
- RefPtr<frontend::CompilationStencil> stencil_ =
- frontend::CompileGlobalScriptToStencil(nullptr, fc, tempLifoAlloc,
- compilationStorage.getInput(),
- &scopeCache, data, scopeKind);
+ RefPtr<JS::Stencil> stencil_ = frontend::CompileGlobalScriptToStencil(
+ nullptr, fc, tempLifoAlloc, compilationInput, &scopeCache, data,
+ scopeKind);
+ // CompilationInput initialized with CompileGlobalScriptToStencil only
+ // references information from the JS::Stencil context and the
+ // ref-counted ScriptSource, which are both GC-free.
+ JS_HAZ_VALUE_IS_GC_SAFE(compilationInput);
return stencil_.forget();
}
template <typename CharT>
static already_AddRefed<JS::Stencil> CompileModuleScriptToStencilImpl(
JS::FrontendContext* fc, const JS::ReadOnlyCompileOptions& optionsInput,
- JS::SourceText<CharT>& srcBuf, JS::CompilationStorage& compilationStorage) {
+ JS::SourceText<CharT>& srcBuf) {
JS::CompileOptions options(nullptr, optionsInput);
options.setModule();
- compilationStorage.allocateInput(fc, options);
- if (!compilationStorage.hasInput()) {
- return nullptr;
- }
+ frontend::CompilationInput compilationInput(options);
NoScopeBindingCache scopeCache;
js::LifoAlloc tempLifoAlloc(JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
- RefPtr<JS::Stencil> stencil =
- ParseModuleToStencil(nullptr, fc, tempLifoAlloc,
- compilationStorage.getInput(), &scopeCache, srcBuf);
+ RefPtr<JS::Stencil> stencil = ParseModuleToStencil(
+ nullptr, fc, tempLifoAlloc, compilationInput, &scopeCache, srcBuf);
+ // CompilationInput initialized with ParseModuleToStencil only
+ // references information from the JS::Stencil context and the
+ // ref-counted ScriptSource, which are both GC-free.
+ JS_HAZ_VALUE_IS_GC_SAFE(compilationInput);
if (!stencil) {
return nullptr;
}
@@ -164,42 +137,38 @@ static already_AddRefed<JS::Stencil> CompileModuleScriptToStencilImpl(
already_AddRefed<JS::Stencil> JS::CompileGlobalScriptToStencil(
JS::FrontendContext* fc, const JS::ReadOnlyCompileOptions& options,
- JS::SourceText<mozilla::Utf8Unit>& srcBuf,
- JS::CompilationStorage& compileStorage) {
+ JS::SourceText<mozilla::Utf8Unit>& srcBuf) {
#ifdef DEBUG
fc->assertNativeStackLimitThread();
#endif
- return CompileGlobalScriptToStencilImpl(fc, options, srcBuf, compileStorage);
+ return CompileGlobalScriptToStencilImpl(fc, options, srcBuf);
}
already_AddRefed<JS::Stencil> JS::CompileGlobalScriptToStencil(
JS::FrontendContext* fc, const JS::ReadOnlyCompileOptions& options,
- JS::SourceText<char16_t>& srcBuf, JS::CompilationStorage& compileStorage) {
+ JS::SourceText<char16_t>& srcBuf) {
#ifdef DEBUG
fc->assertNativeStackLimitThread();
#endif
- return CompileGlobalScriptToStencilImpl(fc, options, srcBuf, compileStorage);
+ return CompileGlobalScriptToStencilImpl(fc, options, srcBuf);
}
already_AddRefed<JS::Stencil> JS::CompileModuleScriptToStencil(
JS::FrontendContext* fc, const JS::ReadOnlyCompileOptions& optionsInput,
- JS::SourceText<mozilla::Utf8Unit>& srcBuf,
- JS::CompilationStorage& compileStorage) {
+ JS::SourceText<mozilla::Utf8Unit>& srcBuf) {
#ifdef DEBUG
fc->assertNativeStackLimitThread();
#endif
- return CompileModuleScriptToStencilImpl(fc, optionsInput, srcBuf,
- compileStorage);
+ return CompileModuleScriptToStencilImpl(fc, optionsInput, srcBuf);
}
already_AddRefed<JS::Stencil> JS::CompileModuleScriptToStencil(
JS::FrontendContext* fc, const JS::ReadOnlyCompileOptions& optionsInput,
- JS::SourceText<char16_t>& srcBuf, JS::CompilationStorage& compileStorage) {
+ JS::SourceText<char16_t>& srcBuf) {
#ifdef DEBUG
fc->assertNativeStackLimitThread();
#endif
- return CompileModuleScriptToStencilImpl(fc, optionsInput, srcBuf,
- compileStorage);
+ return CompileModuleScriptToStencilImpl(fc, optionsInput, srcBuf);
}
bool JS::PrepareForInstantiate(JS::FrontendContext* fc, JS::Stencil& stencil,
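A hedged call-site sketch of the simplified API shown in this hunk: JS::CompilationStorage is no longer threaded through, since the CompilationInput now lives on the stack inside the implementation. The wrapper function and its error handling are illustrative only.

#include "js/SourceText.h"
#include "js/experimental/CompileScript.h"

static already_AddRefed<JS::Stencil> CompileSnippet(
    JS::FrontendContext* fc, const JS::ReadOnlyCompileOptions& options,
    JS::SourceText<mozilla::Utf8Unit>& srcBuf) {
  // No JS::CompilationStorage argument any more; a null return still means
  // failure, reported through the FrontendContext.
  return JS::CompileGlobalScriptToStencil(fc, options, srcBuf);
}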
diff --git a/js/src/frontend/FoldConstants.cpp b/js/src/frontend/FoldConstants.cpp
index 8a434418b1..b72c6d4726 100644
--- a/js/src/frontend/FoldConstants.cpp
+++ b/js/src/frontend/FoldConstants.cpp
@@ -407,6 +407,7 @@ restart:
case ParseNodeKind::ObjectExpr:
case ParseNodeKind::PropertyNameExpr:
case ParseNodeKind::DotExpr:
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::ElemExpr:
case ParseNodeKind::Arguments:
case ParseNodeKind::CallExpr:
diff --git a/js/src/frontend/FullParseHandler.h b/js/src/frontend/FullParseHandler.h
index 384d16b7d8..9209ba4d67 100644
--- a/js/src/frontend/FullParseHandler.h
+++ b/js/src/frontend/FullParseHandler.h
@@ -103,7 +103,8 @@ class FullParseHandler {
bool isPropertyOrPrivateMemberAccess(Node node) {
return node->isKind(ParseNodeKind::DotExpr) ||
node->isKind(ParseNodeKind::ElemExpr) ||
- node->isKind(ParseNodeKind::PrivateMemberExpr);
+ node->isKind(ParseNodeKind::PrivateMemberExpr) ||
+ node->isKind(ParseNodeKind::ArgumentsLength);
}
bool isOptionalPropertyOrPrivateMemberAccess(Node node) {
@@ -887,6 +888,11 @@ class FullParseHandler {
key->pn_pos.end);
}
+ ArgumentsLengthResult newArgumentsLength(Node expr, NameNodeType key) {
+ return newResult<ArgumentsLength>(expr, key, expr->pn_pos.begin,
+ key->pn_pos.end);
+ }
+
PropertyByValueResult newPropertyByValue(Node lhs, Node index, uint32_t end) {
return newResult<PropertyByValue>(lhs, index, lhs->pn_pos.begin, end);
}
@@ -1137,6 +1143,12 @@ class FullParseHandler {
TaggedParserAtomIndex::WellKnown::arguments();
}
+ bool isLengthName(Node node) {
+ return node->isKind(ParseNodeKind::PropertyNameExpr) &&
+ node->as<NameNode>().atom() ==
+ TaggedParserAtomIndex::WellKnown::length();
+ }
+
bool isEvalName(Node node) {
return node->isKind(ParseNodeKind::Name) &&
node->as<NameNode>().atom() ==
@@ -1150,6 +1162,10 @@ class FullParseHandler {
TaggedParserAtomIndex::WellKnown::async();
}
+ bool isArgumentsLength(Node node) {
+ return node->isKind(ParseNodeKind::ArgumentsLength);
+ }
+
bool isPrivateName(Node node) {
return node->isKind(ParseNodeKind::PrivateName);
}
diff --git a/js/src/frontend/NameFunctions.cpp b/js/src/frontend/NameFunctions.cpp
index 0ad8e55758..46b5bb074c 100644
--- a/js/src/frontend/NameFunctions.cpp
+++ b/js/src/frontend/NameFunctions.cpp
@@ -92,6 +92,7 @@ class NameResolver : public ParseNodeVisitor<NameResolver> {
*/
bool nameExpression(ParseNode* n, bool* foundName) {
switch (n->getKind()) {
+ case ParseNodeKind::ArgumentsLength:
case ParseNodeKind::DotExpr: {
PropertyAccess* prop = &n->as<PropertyAccess>();
if (!nameExpression(&prop->expression(), foundName)) {
diff --git a/js/src/frontend/ParseContext.cpp b/js/src/frontend/ParseContext.cpp
index ececac705b..622c467822 100644
--- a/js/src/frontend/ParseContext.cpp
+++ b/js/src/frontend/ParseContext.cpp
@@ -593,6 +593,14 @@ bool ParseContext::hasUsedName(const UsedNameTracker& usedNames,
return false;
}
+bool ParseContext::hasClosedOverName(const UsedNameTracker& usedNames,
+ TaggedParserAtomIndex name) {
+ if (auto p = usedNames.lookup(name)) {
+ return p->value().isClosedOver(scriptId());
+ }
+ return false;
+}
+
bool ParseContext::hasUsedFunctionSpecialName(const UsedNameTracker& usedNames,
TaggedParserAtomIndex name) {
MOZ_ASSERT(name == TaggedParserAtomIndex::WellKnown::arguments() ||
@@ -602,6 +610,13 @@ bool ParseContext::hasUsedFunctionSpecialName(const UsedNameTracker& usedNames,
functionBox()->bindingsAccessedDynamically();
}
+bool ParseContext::hasClosedOverFunctionSpecialName(
+ const UsedNameTracker& usedNames, TaggedParserAtomIndex name) {
+ MOZ_ASSERT(name == TaggedParserAtomIndex::WellKnown::arguments());
+ return hasClosedOverName(usedNames, name) ||
+ functionBox()->bindingsAccessedDynamically();
+}
+
bool ParseContext::declareFunctionThis(const UsedNameTracker& usedNames,
bool canSkipLazyClosedOverBindings) {
// The asm.js validator does all its own symbol-table management so, as an
@@ -644,17 +659,41 @@ bool ParseContext::declareFunctionArgumentsObject(
ParseContext::Scope& funScope = functionScope();
ParseContext::Scope& _varScope = varScope();
- bool usesArguments = false;
bool hasExtraBodyVarScope = &funScope != &_varScope;
// Time to implement the odd semantics of 'arguments'.
auto argumentsName = TaggedParserAtomIndex::WellKnown::arguments();
- bool tryDeclareArguments;
+ bool tryDeclareArguments = false;
+ bool needsArgsObject = false;
+
+ // When delazifying, simply defer to the function box.
if (canSkipLazyClosedOverBindings) {
tryDeclareArguments = funbox->shouldDeclareArguments();
+ needsArgsObject = funbox->needsArgsObj();
} else {
- tryDeclareArguments = hasUsedFunctionSpecialName(usedNames, argumentsName);
+ // These values cannot be computed when delazifying, which is why the
+ // delazification path above relies on the function box flags instead.
+ bool bindingClosedOver =
+ hasClosedOverFunctionSpecialName(usedNames, argumentsName);
+ bool bindingUsedOnlyHere =
+ hasUsedFunctionSpecialName(usedNames, argumentsName) &&
+ !bindingClosedOver;
+
+ // Declare arguments if there's a closed-over consumer of the binding, or if
+ // there is a non-length use and we will reference the binding during
+ // bytecode emission.
+ tryDeclareArguments =
+ !funbox->isEligibleForArgumentsLength() || bindingClosedOver;
+ // If we have a use and the binding isn't closed over, then we will do
+ // bytecode emission with the arguments intrinsic.
+ if (bindingUsedOnlyHere && funbox->isEligibleForArgumentsLength()) {
+ // If we're using the intrinsic we should not be declaring the binding.
+ MOZ_ASSERT(!tryDeclareArguments);
+ funbox->setUsesArgumentsIntrinsics();
+ } else if (tryDeclareArguments) {
+ needsArgsObject = true;
+ }
}
// ES 9.2.12 steps 19 and 20 say formal parameters, lexical bindings,
@@ -670,9 +709,19 @@ bool ParseContext::declareFunctionArgumentsObject(
DeclaredNamePtr p = _varScope.lookupDeclaredName(argumentsName);
if (p && p->value()->kind() == DeclarationKind::Var) {
if (hasExtraBodyVarScope) {
+ // While there is a binding in the var scope, we should declare
+ // the binding in the function scope.
tryDeclareArguments = true;
} else {
- usesArguments = true;
+ // A binding already exists in the function scope (varScope and
+ // functionScope are the same here), so arguments is used.
+ if (needsArgsObject) {
+ funbox->setNeedsArgsObj();
+ }
+
+ // There is no point in continuing below: we already have a declaration of
+ // arguments in the function scope.
+ return true;
}
}
@@ -685,17 +734,11 @@ bool ParseContext::declareFunctionArgumentsObject(
return false;
}
funbox->setShouldDeclareArguments();
- usesArguments = true;
- } else if (hasExtraBodyVarScope) {
- // Formal parameters shadow the arguments object.
- return true;
+ if (needsArgsObject) {
+ funbox->setNeedsArgsObj();
+ }
}
}
-
- if (usesArguments) {
- funbox->setNeedsArgsObj();
- }
-
return true;
}
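The eager-parse branch above can be summarised as a small decision table. A standalone restatement with the three booleans mirroring the locals in the hunk; the struct and helper are illustrative, not parser API:

struct ArgumentsDecision {
  bool tryDeclareArguments;
  bool needsArgsObject;
  bool usesArgumentsIntrinsics;
};

static ArgumentsDecision decideArguments(bool eligibleForArgumentsLength,
                                         bool bindingClosedOver,
                                         bool bindingUsedOnlyHere) {
  if (bindingUsedOnlyHere && eligibleForArgumentsLength) {
    // Only local arguments.length uses: skip the binding and rely on the
    // JSOp::ArgumentsLength intrinsic.
    return {false, false, true};
  }
  // A closed-over consumer or any ineligible use forces a real binding and
  // an arguments object.
  bool declare = !eligibleForArgumentsLength || bindingClosedOver;
  return {declare, declare, false};
}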
diff --git a/js/src/frontend/ParseContext.h b/js/src/frontend/ParseContext.h
index 8124073bf9..796b776d85 100644
--- a/js/src/frontend/ParseContext.h
+++ b/js/src/frontend/ParseContext.h
@@ -661,8 +661,12 @@ class ParseContext : public Nestable<ParseContext> {
bool hasUsedName(const UsedNameTracker& usedNames,
TaggedParserAtomIndex name);
+ bool hasClosedOverName(const UsedNameTracker& usedNames,
+ TaggedParserAtomIndex name);
bool hasUsedFunctionSpecialName(const UsedNameTracker& usedNames,
TaggedParserAtomIndex name);
+ bool hasClosedOverFunctionSpecialName(const UsedNameTracker& usedNames,
+ TaggedParserAtomIndex name);
bool declareFunctionThis(const UsedNameTracker& usedNames,
bool canSkipLazyClosedOverBindings);
@@ -673,6 +677,13 @@ class ParseContext : public Nestable<ParseContext> {
bool declareDotGeneratorName();
bool declareTopLevelDotGeneratorName();
+ // Used to determine if we have non-length uses of the arguments binding.
+ // This works by incrementing this counter each time we encounter the
+ // arguments name, and decrementing each time it is combined into
+ // arguments.length; as a result, if this is non-zero at the end of parsing,
+ // we have identified a non-length use of the arguments binding.
+ size_t numberOfArgumentsNames = 0;
+
private:
[[nodiscard]] bool isVarRedeclaredInInnermostScope(
TaggedParserAtomIndex name, ParserBase* parser, DeclarationKind kind,
diff --git a/js/src/frontend/ParseNode.h b/js/src/frontend/ParseNode.h
index 61c009c6e4..a6747897d6 100644
--- a/js/src/frontend/ParseNode.h
+++ b/js/src/frontend/ParseNode.h
@@ -75,6 +75,7 @@ class FunctionBox;
F(PostDecrementExpr, UnaryNode) \
F(PropertyNameExpr, NameNode) \
F(DotExpr, PropertyAccess) \
+ F(ArgumentsLength, ArgumentsLength) \
F(ElemExpr, PropertyByValue) \
F(PrivateMemberExpr, PrivateMemberAccess) \
F(OptionalDotExpr, OptionalPropertyAccess) \
@@ -616,6 +617,7 @@ inline bool IsTypeofKind(ParseNodeKind kind) {
MACRO(ClassNames) \
MACRO(ForNode) \
MACRO(PropertyAccess) \
+ MACRO(ArgumentsLength) \
MACRO(OptionalPropertyAccess) \
MACRO(PropertyByValue) \
MACRO(OptionalPropertyByValue) \
@@ -2014,7 +2016,8 @@ class PropertyAccessBase : public BinaryNode {
static bool test(const ParseNode& node) {
bool match = node.isKind(ParseNodeKind::DotExpr) ||
- node.isKind(ParseNodeKind::OptionalDotExpr);
+ node.isKind(ParseNodeKind::OptionalDotExpr) ||
+ node.isKind(ParseNodeKind::ArgumentsLength);
MOZ_ASSERT_IF(match, node.is<BinaryNode>());
MOZ_ASSERT_IF(match, node.as<BinaryNode>().right()->isKind(
ParseNodeKind::PropertyNameExpr));
@@ -2042,7 +2045,8 @@ class PropertyAccess : public PropertyAccessBase {
}
static bool test(const ParseNode& node) {
- bool match = node.isKind(ParseNodeKind::DotExpr);
+ bool match = node.isKind(ParseNodeKind::DotExpr) ||
+ node.isKind(ParseNodeKind::ArgumentsLength);
MOZ_ASSERT_IF(match, node.is<PropertyAccessBase>());
return match;
}
@@ -2051,6 +2055,26 @@ class PropertyAccess : public PropertyAccessBase {
// ParseNodeKind::SuperBase cannot result from any expression syntax.
return expression().isKind(ParseNodeKind::SuperBase);
}
+
+ protected:
+ using PropertyAccessBase::PropertyAccessBase;
+};
+
+class ArgumentsLength : public PropertyAccess {
+ public:
+ ArgumentsLength(ParseNode* lhs, NameNode* name, uint32_t begin, uint32_t end)
+ : PropertyAccess(ParseNodeKind::ArgumentsLength, lhs, name, begin, end) {
+ MOZ_ASSERT(lhs);
+ MOZ_ASSERT(name);
+ }
+
+ static bool test(const ParseNode& node) {
+ bool match = node.isKind(ParseNodeKind::ArgumentsLength);
+ MOZ_ASSERT_IF(match, node.is<PropertyAccessBase>());
+ return match;
+ }
+
+ bool isSuper() const { return false; }
};
class OptionalPropertyAccess : public PropertyAccessBase {
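The new ArgumentsLength class deliberately also passes the PropertyAccess and PropertyAccessBase tests above, so existing property-access code keeps treating it as a dot expression; only code that cares about the fast path checks the exact kind. A hedged example (the helper is illustrative):

#include "frontend/ParseNode.h"

static bool isArgumentsDotLength(const js::frontend::ParseNode& pn) {
  // True only for ArgumentsLength nodes; pn.is<PropertyAccess>() would also
  // be true for them, by design.
  return pn.is<js::frontend::ArgumentsLength>();
}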
diff --git a/js/src/frontend/Parser.cpp b/js/src/frontend/Parser.cpp
index bcd6c30c02..5cb47f2425 100644
--- a/js/src/frontend/Parser.cpp
+++ b/js/src/frontend/Parser.cpp
@@ -2472,6 +2472,11 @@ GeneralParser<ParseHandler, Unit>::functionBody(InHandling inHandling,
}
}
+ if (pc_->numberOfArgumentsNames > 0 || kind == FunctionSyntaxKind::Arrow) {
+ MOZ_ASSERT(pc_->isFunctionBox());
+ pc_->sc()->setIneligibleForArgumentsLength();
+ }
+
// Declare the 'arguments', 'this', and 'new.target' bindings if necessary
// before finishing up the scope so these special bindings get marked as
// closed over if necessary. Arrow functions don't have these bindings.
@@ -6570,6 +6575,8 @@ bool GeneralParser<ParseHandler, Unit>::forHeadStart(
return false;
}
}
+ } else if (handler_.isArgumentsLength(*forInitialPart)) {
+ pc_->sc()->setIneligibleForArgumentsLength();
} else if (handler_.isPropertyOrPrivateMemberAccess(*forInitialPart)) {
// Permitted: no additional testing/fixup needed.
} else if (handler_.isFunctionCall(*forInitialPart)) {
@@ -7917,7 +7924,12 @@ bool GeneralParser<ParseHandler, Unit>::finishClassConstructor(
bool hasPrivateBrand = classInitializedMembers.hasPrivateBrand();
if (hasPrivateBrand || numMemberInitializers > 0) {
// Now that we have full set of initializers, update the constructor.
- MemberInitializers initializers(hasPrivateBrand, numMemberInitializers);
+ MemberInitializers initializers(
+ hasPrivateBrand,
+#ifdef ENABLE_DECORATORS
+ classInitializedMembers.hasInstanceDecorators,
+#endif
+ numMemberInitializers);
ctorbox->setMemberInitializers(initializers);
// Field initialization need access to `this`.
@@ -10220,6 +10232,8 @@ typename ParseHandler::NodeResult GeneralParser<ParseHandler, Unit>::assignExpr(
return errorResult();
}
}
+ } else if (handler_.isArgumentsLength(lhs)) {
+ pc_->sc()->setIneligibleForArgumentsLength();
} else if (handler_.isPropertyOrPrivateMemberAccess(lhs)) {
// Permitted: no additional testing/fixup needed.
} else if (handler_.isFunctionCall(lhs)) {
@@ -10280,6 +10294,8 @@ bool GeneralParser<ParseHandler, Unit>::checkIncDecOperand(
return false;
}
}
+ } else if (handler_.isArgumentsLength(operand)) {
+ pc_->sc()->setIneligibleForArgumentsLength();
} else if (handler_.isPropertyOrPrivateMemberAccess(operand)) {
// Permitted: no additional testing/fixup needed.
} else if (handler_.isFunctionCall(operand)) {
@@ -10898,6 +10914,9 @@ template <class ParseHandler>
inline typename ParseHandler::NameNodeResult
PerHandlerParser<ParseHandler>::newName(TaggedParserAtomIndex name,
TokenPos pos) {
+ if (name == TaggedParserAtomIndex::WellKnown::arguments()) {
+ this->pc_->numberOfArgumentsNames++;
+ }
return handler_.newName(name, pos);
}
@@ -10926,6 +10945,13 @@ GeneralParser<ParseHandler, Unit>::memberPropertyAccess(
MOZ_ASSERT(!handler_.isSuperBase(lhs));
return handler_.newOptionalPropertyAccess(lhs, name);
}
+
+ if (handler_.isArgumentsName(lhs) && handler_.isLengthName(name)) {
+ MOZ_ASSERT(pc_->numberOfArgumentsNames > 0);
+ pc_->numberOfArgumentsNames--;
+ return handler_.newArgumentsLength(lhs, name);
+ }
+
return handler_.newPropertyAccess(lhs, name);
}
@@ -11484,6 +11510,10 @@ void GeneralParser<ParseHandler, Unit>::checkDestructuringAssignmentName(
return;
}
+ if (handler_.isArgumentsLength(name)) {
+ pc_->sc()->setIneligibleForArgumentsLength();
+ }
+
if (pc_->sc()->strict()) {
if (handler_.isArgumentsName(name)) {
if (pc_->sc()->strict()) {
@@ -12143,6 +12173,10 @@ GeneralParser<ParseHandler, Unit>::objectLiteral(YieldHandling yieldHandling,
}
}
+ if (handler_.isArgumentsLength(lhs)) {
+ pc_->sc()->setIneligibleForArgumentsLength();
+ }
+
Node rhs;
MOZ_TRY_VAR(rhs,
assignExpr(InAllowed, yieldHandling, TripledotProhibited));
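Taken together, the parser hunks above mark a script ineligible for the arguments.length fast path whenever the binding could be observed some other way. A hypothetical summary helper; the parameter names are illustrative and do not exist in the parser:

#include "frontend/SharedContext.h"

// Mirrors the setIneligibleForArgumentsLength() call sites above: arrow
// functions, any non-length use of `arguments`, and any write-like use of
// arguments.length (assignment target, ++/--, destructuring, for-head).
static void noteArgumentsLengthIneligibility(js::frontend::SharedContext* sc,
                                             bool isArrowFunction,
                                             size_t numberOfArgumentsNames,
                                             bool argumentsLengthIsWriteTarget) {
  if (isArrowFunction || numberOfArgumentsNames > 0 ||
      argumentsLengthIsWriteTarget) {
    sc->setIneligibleForArgumentsLength();
  }
}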
diff --git a/js/src/frontend/SharedContext.cpp b/js/src/frontend/SharedContext.cpp
index 7fa3b724fb..488e3bd384 100644
--- a/js/src/frontend/SharedContext.cpp
+++ b/js/src/frontend/SharedContext.cpp
@@ -45,7 +45,8 @@ SharedContext::SharedContext(FrontendContext* fc, Kind kind,
inClass_(false),
localStrict(false),
hasExplicitUseStrict_(false),
- isScriptExtraFieldCopiedToStencil(false) {
+ isScriptExtraFieldCopiedToStencil(false),
+ eligibleForArgumentsLength(true) {
// Compute the script kind "input" flags.
if (kind == Kind::FunctionBox) {
setFlag(ImmutableFlags::IsFunction);
diff --git a/js/src/frontend/SharedContext.h b/js/src/frontend/SharedContext.h
index 12f9a6ed12..ab0606527e 100644
--- a/js/src/frontend/SharedContext.h
+++ b/js/src/frontend/SharedContext.h
@@ -180,6 +180,10 @@ class SharedContext {
// FunctionBox::copyUpdated* methods.
bool isScriptExtraFieldCopiedToStencil : 1;
+ // Indicates this shared context is eligible to use JSOp::ArgumentsLength
+ // when emitting the ArgumentsLength parse node.
+ bool eligibleForArgumentsLength : 1;
+
// End of fields.
enum class Kind : uint8_t { FunctionBox, Global, Eval, Module };
@@ -273,6 +277,11 @@ class SharedContext {
return retVal;
}
+ bool isEligibleForArgumentsLength() {
+ return eligibleForArgumentsLength && !bindingsAccessedDynamically();
+ }
+ void setIneligibleForArgumentsLength() { eligibleForArgumentsLength = false; }
+
void copyScriptExtraFields(ScriptStencilExtra& scriptExtra);
};
diff --git a/js/src/frontend/Stencil.cpp b/js/src/frontend/Stencil.cpp
index 7c6eba4c5a..30d1588415 100644
--- a/js/src/frontend/Stencil.cpp
+++ b/js/src/frontend/Stencil.cpp
@@ -575,7 +575,7 @@ void ScopeContext::cacheEnclosingScope(const InputScope& enclosingScope) {
}
bool hasEnv = si.hasSyntacticEnvironment();
- auto setCacthAll = [&](NameLocation loc) {
+ auto setCatchAll = [&](NameLocation loc) {
return si.scope().match([&](auto& scope_ref) {
using BindingMapPtr = decltype(scopeCache->createCacheFor(scope_ref));
BindingMapPtr bindingMapPtr = scopeCache->createCacheFor(scope_ref);
@@ -604,7 +604,7 @@ void ScopeContext::cacheEnclosingScope(const InputScope& enclosingScope) {
case ScopeKind::Function:
if (hasEnv) {
if (si.scope().funHasExtensibleScope()) {
- setCacthAll(NameLocation::Dynamic());
+ setCatchAll(NameLocation::Dynamic());
return;
}
@@ -733,21 +733,21 @@ void ScopeContext::cacheEnclosingScope(const InputScope& enclosingScope) {
if (!hasEnv) {
ScopeKind kind = si.scope().enclosing().kind();
if (kind == ScopeKind::Global || kind == ScopeKind::NonSyntactic) {
- setCacthAll(NameLocation::Global(BindingKind::Var));
+ setCatchAll(NameLocation::Global(BindingKind::Var));
return;
}
}
- setCacthAll(NameLocation::Dynamic());
+ setCatchAll(NameLocation::Dynamic());
return;
case ScopeKind::Global:
- setCacthAll(NameLocation::Global(BindingKind::Var));
+ setCatchAll(NameLocation::Global(BindingKind::Var));
return;
case ScopeKind::With:
case ScopeKind::NonSyntactic:
- setCacthAll(NameLocation::Dynamic());
+ setCatchAll(NameLocation::Dynamic());
return;
case ScopeKind::WasmInstance:
diff --git a/js/src/frontend/SyntaxParseHandler.h b/js/src/frontend/SyntaxParseHandler.h
index aa06eaa246..fa63b1e9d3 100644
--- a/js/src/frontend/SyntaxParseHandler.h
+++ b/js/src/frontend/SyntaxParseHandler.h
@@ -57,8 +57,9 @@ enum SyntaxParseHandlerNode {
// casing.
NodeName,
- // Nodes representing the names "arguments" and "eval".
+ // Nodes representing the names "arguments", "length" and "eval".
NodeArgumentsName,
+ NodeLengthName,
NodeEvalName,
// Node representing the "async" name, which may actually be a
@@ -77,6 +78,10 @@ enum SyntaxParseHandlerNode {
NodePrivateMemberAccess,
NodeOptionalPrivateMemberAccess,
+ // Node representing the compound arguments.length expression;
+ // used only for property access, not assignment.
+ NodeArgumentsLength,
+
// Destructuring target patterns can't be parenthesized: |([a]) = [3];|
// must be a syntax error. (We can't use NodeGeneric instead of these
// because that would trigger invalid-left-hand-side ReferenceError
@@ -164,7 +169,7 @@ class SyntaxParseHandler {
bool isPropertyOrPrivateMemberAccess(Node node) {
return node == NodeDottedProperty || node == NodeElement ||
- node == NodePrivateMemberAccess;
+ node == NodePrivateMemberAccess || node == NodeArgumentsLength;
}
bool isOptionalPropertyOrPrivateMemberAccess(Node node) {
@@ -572,6 +577,9 @@ class SyntaxParseHandler {
NameNodeResult newPropertyName(TaggedParserAtomIndex name,
const TokenPos& pos) {
lastAtom = name;
+ if (name == TaggedParserAtomIndex::WellKnown::length()) {
+ return NodeLengthName;
+ }
return NodeGeneric;
}
@@ -579,6 +587,10 @@ class SyntaxParseHandler {
return NodeDottedProperty;
}
+ PropertyAccessResult newArgumentsLength(Node expr, NameNodeType key) {
+ return NodeArgumentsLength;
+ }
+
PropertyAccessResult newOptionalPropertyAccess(Node expr, NameNodeType key) {
return NodeOptionalDottedProperty;
}
@@ -777,13 +789,17 @@ class SyntaxParseHandler {
bool isName(Node node) {
return node == NodeName || node == NodeArgumentsName ||
- node == NodeEvalName || node == NodePotentialAsyncKeyword;
+ node == NodeLengthName || node == NodeEvalName ||
+ node == NodePotentialAsyncKeyword;
}
bool isArgumentsName(Node node) { return node == NodeArgumentsName; }
+ bool isLengthName(Node node) { return node == NodeLengthName; }
bool isEvalName(Node node) { return node == NodeEvalName; }
bool isAsyncKeyword(Node node) { return node == NodePotentialAsyncKeyword; }
+ bool isArgumentsLength(Node node) { return node == NodeArgumentsLength; }
+
bool isPrivateName(Node node) { return node == NodePrivateName; }
bool isPrivateMemberAccess(Node node) {
return node == NodePrivateMemberAccess;
@@ -795,7 +811,8 @@ class SyntaxParseHandler {
// |this|. It's not really eligible for the funapply/funcall
// optimizations as they're currently implemented (assuming a single
// value is used for both retrieval and |this|).
- if (node != NodeDottedProperty && node != NodeOptionalDottedProperty) {
+ if (node != NodeDottedProperty && node != NodeOptionalDottedProperty &&
+ node != NodeArgumentsLength) {
return TaggedParserAtomIndex::null();
}
return lastAtom;
diff --git a/js/src/frontend/UsedNameTracker.h b/js/src/frontend/UsedNameTracker.h
index 2a52208128..f118d6101b 100644
--- a/js/src/frontend/UsedNameTracker.h
+++ b/js/src/frontend/UsedNameTracker.h
@@ -160,6 +160,10 @@ class UsedNameTracker {
return !uses_.empty() && uses_.back().scriptId >= scriptId;
}
+ bool isClosedOver(uint32_t scriptId) const {
+ return !uses_.empty() && uses_.back().scriptId > scriptId;
+ }
+
// To allow disambiguating public and private symbols
bool isPublic() { return visibility_ == NameVisibility::Public; }
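isClosedOver differs from isUsedInScript only in the strict comparison: a use counts as closing over the name only if it comes from a script nested strictly inside the one asking. A minimal sketch with concrete scriptId values; the free functions are illustrative:

#include <cstdint>

static bool isUsedInScriptSketch(uint32_t lastUseScriptId, uint32_t scriptId) {
  return lastUseScriptId >= scriptId;  // used in this script or an inner one
}

static bool isClosedOverSketch(uint32_t lastUseScriptId, uint32_t scriptId) {
  return lastUseScriptId > scriptId;   // used only by a strictly inner script
}

// With the innermost recorded use at scriptId 3:
//   isUsedInScriptSketch(3, 3) -> true,  isClosedOverSketch(3, 3) -> false
//   isClosedOverSketch(3, 2)   -> true   (script 2 sees the name closed over)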
diff --git a/js/src/frontend/align_stack_comment.py b/js/src/frontend/align_stack_comment.py
index 28d5d8cf7f..6e279a90c6 100755
--- a/js/src/frontend/align_stack_comment.py
+++ b/js/src/frontend/align_stack_comment.py
@@ -22,7 +22,7 @@ ALIGNMENT_COLUMN = 20
# The maximum column for comment
MAX_CHARS_PER_LINE = 80
-stack_comment_pat = re.compile("^( *//) *(\[stack\].*)$")
+stack_comment_pat = re.compile(r"^( *//) *(\[stack\].*)$")
def align_stack_comment(path):
diff --git a/js/src/fuzz-tests/testWasm.cpp b/js/src/fuzz-tests/testWasm.cpp
index 719c38174a..d7ba0511b4 100644
--- a/js/src/fuzz-tests/testWasm.cpp
+++ b/js/src/fuzz-tests/testWasm.cpp
@@ -9,6 +9,7 @@
#include "fuzz-tests/tests.h"
#include "js/CallAndConstruct.h"
+#include "js/Prefs.h"
#include "js/PropertyAndElement.h" // JS_Enumerate, JS_GetProperty, JS_GetPropertyById, JS_HasProperty, JS_SetProperty
#include "vm/GlobalObject.h"
#include "vm/Interpreter.h"
@@ -40,13 +41,11 @@ static int testWasmInit(int* argc, char*** argv) {
MOZ_CRASH("Wasm is not supported");
}
- JS::ContextOptionsRef(gCx)
-#define WASM_FEATURE(NAME, LOWER_NAME, STAGE, COMPILE_PRED, COMPILER_PRED, \
- FLAG_PRED, FLAG_FORCE_ON, FLAG_FUZZ_ON, SHELL, PREF) \
- .setWasm##NAME(FLAG_FUZZ_ON)
- JS_FOR_WASM_FEATURES(WASM_FEATURE)
+#define WASM_FEATURE(NAME, LOWER_NAME, COMPILE_PRED, COMPILER_PRED, FLAG_PRED, \
+ FLAG_FORCE_ON, FLAG_FUZZ_ON, PREF) \
+ JS::Prefs::setAtStartup_wasm_##PREF(FLAG_FUZZ_ON);
+ JS_FOR_WASM_FEATURES(WASM_FEATURE)
#undef WASM_FEATURE
- ;
if (!GlobalObject::getOrCreateConstructor(gCx, JSProto_WebAssembly)) {
MOZ_CRASH("Failed to initialize wasm engine");
diff --git a/js/src/gc/AllocKind.h b/js/src/gc/AllocKind.h
index cb3d063f89..f73352e557 100644
--- a/js/src/gc/AllocKind.h
+++ b/js/src/gc/AllocKind.h
@@ -197,13 +197,14 @@ constexpr auto SomeAllocKinds(AllocKind first = AllocKind::FIRST,
// with each index corresponding to a particular alloc kind.
template <typename ValueType>
using AllAllocKindArray =
- mozilla::EnumeratedArray<AllocKind, AllocKind::LIMIT, ValueType>;
+ mozilla::EnumeratedArray<AllocKind, ValueType, size_t(AllocKind::LIMIT)>;
// ObjectAllocKindArray<ValueType> gives an enumerated array of ValueTypes,
// with each index corresponding to a particular object alloc kind.
template <typename ValueType>
using ObjectAllocKindArray =
- mozilla::EnumeratedArray<AllocKind, AllocKind::OBJECT_LIMIT, ValueType>;
+ mozilla::EnumeratedArray<AllocKind, ValueType,
+ size_t(AllocKind::OBJECT_LIMIT)>;
/*
* Map from C++ type to alloc kind for non-object types. JSObject does not have
diff --git a/js/src/gc/GC.cpp b/js/src/gc/GC.cpp
index 7ec63a571d..c01dfe3660 100644
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -930,6 +930,8 @@ void GCRuntime::finish() {
}
#endif
+ releaseMarkingThreads();
+
#ifdef JS_GC_ZEAL
// Free memory associated with GC verification.
finishVerifier();
@@ -1064,9 +1066,8 @@ bool GCRuntime::setParameter(JSGCParamKey key, uint32_t value,
compactingEnabled = value != 0;
break;
case JSGC_PARALLEL_MARKING_ENABLED:
- // Not supported on workers.
- parallelMarkingEnabled = rt->isMainRuntime() && value != 0;
- return initOrDisableParallelMarking();
+ setParallelMarkingEnabled(value != 0);
+ break;
case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
for (auto& marker : markers) {
marker->incrementalWeakMapMarkingEnabled = value != 0;
@@ -1151,8 +1152,7 @@ void GCRuntime::resetParameter(JSGCParamKey key, AutoLockGC& lock) {
compactingEnabled = TuningDefaults::CompactingEnabled;
break;
case JSGC_PARALLEL_MARKING_ENABLED:
- parallelMarkingEnabled = TuningDefaults::ParallelMarkingEnabled;
- initOrDisableParallelMarking();
+ setParallelMarkingEnabled(TuningDefaults::ParallelMarkingEnabled);
break;
case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
for (auto& marker : markers) {
@@ -1350,16 +1350,56 @@ void GCRuntime::assertNoMarkingWork() const {
}
#endif
+bool GCRuntime::setParallelMarkingEnabled(bool enabled) {
+ if (enabled == parallelMarkingEnabled) {
+ return true;
+ }
+
+ parallelMarkingEnabled = enabled;
+ return initOrDisableParallelMarking();
+}
+
bool GCRuntime::initOrDisableParallelMarking() {
- // Attempt to initialize parallel marking state or disable it on failure.
+ // Attempt to initialize parallel marking state or disable it on failure. This
+ // is called when parallel marking is enabled or disabled.
MOZ_ASSERT(markers.length() != 0);
- if (!updateMarkersVector()) {
- parallelMarkingEnabled = false;
+ if (updateMarkersVector()) {
+ return true;
+ }
+
+ // Failed to initialize parallel marking so disable it instead.
+ MOZ_ASSERT(parallelMarkingEnabled);
+ parallelMarkingEnabled = false;
+ MOZ_ALWAYS_TRUE(updateMarkersVector());
+ return false;
+}
+
+void GCRuntime::releaseMarkingThreads() {
+ MOZ_ALWAYS_TRUE(reserveMarkingThreads(0));
+}
+
+bool GCRuntime::reserveMarkingThreads(size_t newCount) {
+ if (reservedMarkingThreads == newCount) {
+ return true;
+ }
+
+ // Update the helper thread system's global count by subtracting this
+ // runtime's current contribution |reservedMarkingThreads| and adding the new
+ // contribution |newCount|.
+
+ AutoLockHelperThreadState lock;
+ auto& globalCount = HelperThreadState().gcParallelMarkingThreads;
+ MOZ_ASSERT(globalCount >= reservedMarkingThreads);
+ size_t newGlobalCount = globalCount - reservedMarkingThreads + newCount;
+ if (newGlobalCount > HelperThreadState().threadCount) {
+ // Not enough total threads.
return false;
}
+ globalCount = newGlobalCount;
+ reservedMarkingThreads = newCount;
return true;
}
@@ -1378,6 +1418,16 @@ bool GCRuntime::updateMarkersVector() {
// concurrently, otherwise one thread can deadlock waiting on another.
size_t targetCount = std::min(markingWorkerCount(), getMaxParallelThreads());
+ if (rt->isMainRuntime()) {
+ // For the main runtime, reserve helper threads as long as parallel marking
+ // is enabled. Worker runtimes may not mark in parallel if there are
+ // insufficient threads available at the time.
+ size_t threadsToReserve = targetCount > 1 ? targetCount : 0;
+ if (!reserveMarkingThreads(threadsToReserve)) {
+ return false;
+ }
+ }
+
if (markers.length() > targetCount) {
return markers.resize(targetCount);
}
@@ -2870,7 +2920,7 @@ void GCRuntime::beginMarkPhase(AutoGCSession& session) {
stats().measureInitialHeapSize();
useParallelMarking = SingleThreadedMarking;
- if (canMarkInParallel() && initParallelMarkers()) {
+ if (canMarkInParallel() && initParallelMarking()) {
useParallelMarking = AllowParallelMarking;
}
@@ -2989,9 +3039,19 @@ inline bool GCRuntime::canMarkInParallel() const {
tunables.parallelMarkingThresholdBytes();
}
-bool GCRuntime::initParallelMarkers() {
+bool GCRuntime::initParallelMarking() {
+ // This is called at the start of collection.
+
MOZ_ASSERT(canMarkInParallel());
+ // Reserve helper threads for worker runtimes; these are released at the
+ // end of sweeping. If there are not enough helper threads because other
+ // runtimes are marking in parallel, then parallel marking will not be used.
+ if (!rt->isMainRuntime() && !reserveMarkingThreads(markers.length())) {
+ return false;
+ }
+
// Allocate stack for parallel markers. The first marker always has stack
// allocated. Other markers have their stack freed in
// GCRuntime::finishCollection.
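The reservation bookkeeping introduced above replaces this runtime's previous contribution with the new one before checking against the total helper-thread count. A minimal standalone sketch; names are local to the example and the real code also holds the helper-thread lock:

#include <cstddef>

static bool ReserveMarkingThreadsSketch(size_t& globalCount, size_t& reserved,
                                        size_t newCount, size_t threadCount) {
  // Subtract the old contribution and add the new one, as in
  // GCRuntime::reserveMarkingThreads above.
  size_t newGlobalCount = globalCount - reserved + newCount;
  if (newGlobalCount > threadCount) {
    return false;  // not enough helper threads overall
  }
  globalCount = newGlobalCount;
  reserved = newCount;
  return true;
}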
diff --git a/js/src/gc/GC.h b/js/src/gc/GC.h
index 3b7dec3201..4e4634d804 100644
--- a/js/src/gc/GC.h
+++ b/js/src/gc/GC.h
@@ -69,12 +69,12 @@ class TenuredChunk;
_("parallelMarkingEnabled", JSGC_PARALLEL_MARKING_ENABLED, true) \
_("parallelMarkingThresholdMB", JSGC_PARALLEL_MARKING_THRESHOLD_MB, true) \
_("minLastDitchGCPeriod", JSGC_MIN_LAST_DITCH_GC_PERIOD, true) \
- _("nurseryFreeThresholdForIdleCollection", \
- JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION, true) \
- _("nurseryFreeThresholdForIdleCollectionPercent", \
- JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT, true) \
- _("nurseryTimeoutForIdleCollectionMS", \
- JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS, true) \
+ _("nurseryEagerCollectionThresholdKB", \
+ JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_KB, true) \
+ _("nurseryEagerCollectionThresholdPercent", \
+ JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_PERCENT, true) \
+ _("nurseryEagerCollectionTimeoutMS", \
+ JSGC_NURSERY_EAGER_COLLECTION_TIMEOUT_MS, true) \
_("zoneAllocDelayKB", JSGC_ZONE_ALLOC_DELAY_KB, true) \
_("mallocThresholdBase", JSGC_MALLOC_THRESHOLD_BASE, true) \
_("urgentThreshold", JSGC_URGENT_THRESHOLD_MB, true) \
diff --git a/js/src/gc/GCAPI.cpp b/js/src/gc/GCAPI.cpp
index ab6c3c297a..293bfce80d 100644
--- a/js/src/gc/GCAPI.cpp
+++ b/js/src/gc/GCAPI.cpp
@@ -817,11 +817,17 @@ JS_PUBLIC_API void js::gc::SetPerformanceHint(JSContext* cx,
AutoSelectGCHeap::AutoSelectGCHeap(JSContext* cx,
size_t allowedNurseryCollections)
: cx_(cx), allowedNurseryCollections_(allowedNurseryCollections) {
- JS::AddGCNurseryCollectionCallback(cx, &NurseryCollectionCallback, this);
+ if (!JS::AddGCNurseryCollectionCallback(cx, &NurseryCollectionCallback,
+ this)) {
+ cx_ = nullptr;
+ }
}
AutoSelectGCHeap::~AutoSelectGCHeap() {
- JS::RemoveGCNurseryCollectionCallback(cx_, &NurseryCollectionCallback, this);
+ if (cx_) {
+ JS::RemoveGCNurseryCollectionCallback(cx_, &NurseryCollectionCallback,
+ this);
+ }
}
/* static */
diff --git a/js/src/gc/GCEnum.h b/js/src/gc/GCEnum.h
index 6b1a00f4db..d60cfaea76 100644
--- a/js/src/gc/GCEnum.h
+++ b/js/src/gc/GCEnum.h
@@ -120,6 +120,7 @@ enum class GCAbortReason {
_(PropMapTable) \
_(ModuleBindingMap) \
_(ModuleCyclicFields) \
+ _(ModuleSyntheticFields) \
_(ModuleExports) \
_(BaselineScript) \
_(IonScript) \
diff --git a/js/src/gc/GCMarker.h b/js/src/gc/GCMarker.h
index 2d47349794..9d34d0a0dc 100644
--- a/js/src/gc/GCMarker.h
+++ b/js/src/gc/GCMarker.h
@@ -156,11 +156,10 @@ class MarkStack {
MarkStack();
~MarkStack();
- explicit MarkStack(const MarkStack& other);
- MarkStack& operator=(const MarkStack& other);
+ MarkStack(const MarkStack& other) = delete;
+ MarkStack& operator=(const MarkStack& other) = delete;
- MarkStack(MarkStack&& other) noexcept;
- MarkStack& operator=(MarkStack&& other) noexcept;
+ void swap(MarkStack& other);
// The unit for MarkStack::capacity() is mark stack words.
size_t capacity() { return stack().length(); }
diff --git a/js/src/gc/GCRuntime.h b/js/src/gc/GCRuntime.h
index a7198f5bbc..c9f660b4d7 100644
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -640,6 +640,7 @@ class GCRuntime {
const AutoLockHelperThreadState& lock);
// Parallel marking.
+ bool setParallelMarkingEnabled(bool enabled);
bool initOrDisableParallelMarking();
[[nodiscard]] bool updateMarkersVector();
size_t markingWorkerCount() const;
@@ -799,9 +800,12 @@ class GCRuntime {
ParallelMarking allowParallelMarking = SingleThreadedMarking,
ShouldReportMarkTime reportTime = ReportMarkTime);
bool canMarkInParallel() const;
- bool initParallelMarkers();
+ bool initParallelMarking();
void finishParallelMarkers();
+ bool reserveMarkingThreads(size_t count);
+ void releaseMarkingThreads();
+
bool hasMarkingWork(MarkColor color) const;
void drainMarkStack();
@@ -1120,6 +1124,13 @@ class GCRuntime {
/* Incremented on every GC slice. */
MainThreadData<uint64_t> sliceNumber;
+ /*
+ * This runtime's current contribution to the global number of helper threads
+ * 'reserved' for parallel marking. Does not affect other uses of helper
+ * threads.
+ */
+ MainThreadData<size_t> reservedMarkingThreads;
+
/* Whether the currently running GC can finish in multiple slices. */
MainThreadOrGCTaskData<bool> isIncremental;
diff --git a/js/src/gc/Marking.cpp b/js/src/gc/Marking.cpp
index 78fcc3dedc..6b8742c980 100644
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -1006,7 +1006,6 @@ void js::gc::PerformIncrementalPreWriteBarrier(TenuredCell* cell) {
// runtime for cells in atoms zone.
Zone* zone = cell->zoneFromAnyThread();
- MOZ_ASSERT(zone->needsIncrementalBarrier());
MOZ_ASSERT(cell);
if (cell->isMarkedBlack()) {
@@ -1023,6 +1022,7 @@ void js::gc::PerformIncrementalPreWriteBarrier(TenuredCell* cell) {
return;
}
+ MOZ_ASSERT(zone->needsIncrementalBarrier());
MOZ_ASSERT(CurrentThreadIsMainThread());
MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
@@ -1809,29 +1809,15 @@ MarkStack::MarkStack() { MOZ_ASSERT(isEmpty()); }
MarkStack::~MarkStack() { MOZ_ASSERT(isEmpty()); }
-MarkStack::MarkStack(const MarkStack& other) {
- MOZ_CRASH("Compiler requires this but doesn't call it");
-}
-
-MarkStack& MarkStack::operator=(const MarkStack& other) {
- new (this) MarkStack(other);
- return *this;
-}
-
-MarkStack::MarkStack(MarkStack&& other) noexcept
- : stack_(std::move(other.stack_.ref())),
- topIndex_(other.topIndex_.ref())
+void MarkStack::swap(MarkStack& other) {
+ std::swap(stack_, other.stack_);
+ std::swap(topIndex_, other.topIndex_);
#ifdef JS_GC_ZEAL
- ,
- maxCapacity_(other.maxCapacity_)
+ std::swap(maxCapacity_, other.maxCapacity_);
+#endif
+#ifdef DEBUG
+ std::swap(elementsRangesAreValid, other.elementsRangesAreValid);
#endif
-{
- other.topIndex_ = 0;
-}
-
-MarkStack& MarkStack::operator=(MarkStack&& other) noexcept {
- new (this) MarkStack(std::move(other));
- return *this;
}
bool MarkStack::init() { return resetStackCapacity(); }
@@ -2186,7 +2172,7 @@ void GCMarker::setMarkColor(gc::MarkColor newColor) {
// Switch stacks. We only need to do this if there are any stack entries (as
// empty stacks are interchangeable) or to switch back to the original stack.
if (!isDrained() || haveSwappedStacks) {
- std::swap(stack, otherStack);
+ stack.swap(otherStack);
haveSwappedStacks = !haveSwappedStacks;
}
}
diff --git a/js/src/gc/MaybeRooted.h b/js/src/gc/MaybeRooted.h
index fbeb0c553c..6b38172472 100644
--- a/js/src/gc/MaybeRooted.h
+++ b/js/src/gc/MaybeRooted.h
@@ -35,7 +35,7 @@ class MOZ_RAII FakeRooted : public RootedOperations<T, FakeRooted<T>> {
explicit FakeRooted(JSContext* cx)
: ptr(JS::SafelyInitialized<T>::create()) {}
- FakeRooted(JSContext* cx, T initial) : ptr(initial) {}
+ FakeRooted(JSContext* cx, const T& initial) : ptr(initial) {}
FakeRooted(const FakeRooted&) = delete;
@@ -44,6 +44,8 @@ class MOZ_RAII FakeRooted : public RootedOperations<T, FakeRooted<T>> {
DECLARE_NONPOINTER_ACCESSOR_METHODS(ptr);
DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(ptr);
+ operator JS::Handle<T>() { return JS::Handle<T>::fromMarkedLocation(&ptr); }
+
private:
T ptr;
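A hedged sketch of what the new conversion operator enables for code templated over MaybeRooted: a FakeRooted can now be passed directly where a JS::Handle is expected. The call site below is illustrative.

#include "gc/MaybeRooted.h"
#include "js/Value.h"

static void TakesHandle(JS::Handle<JS::Value>) {}

static void Example(JSContext* cx) {
  js::FakeRooted<JS::Value> fake(cx);
  TakesHandle(fake);  // implicit conversion via Handle::fromMarkedLocation
}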
diff --git a/js/src/gc/Nursery.cpp b/js/src/gc/Nursery.cpp
index a78db5cc9a..660daa8d4c 100644
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -1058,7 +1058,7 @@ TimeStamp js::Nursery::lastCollectionEndTime() const {
return previousGC.endTime;
}
-bool js::Nursery::shouldCollect() const {
+bool js::Nursery::wantEagerCollection() const {
if (!isEnabled()) {
return false;
}
@@ -1071,8 +1071,7 @@ bool js::Nursery::shouldCollect() const {
return true;
}
- // Eagerly collect the nursery in idle time if it's nearly full.
- if (isNearlyFull()) {
+ if (freeSpaceIsBelowEagerThreshold()) {
return true;
}
@@ -1081,32 +1080,27 @@ bool js::Nursery::shouldCollect() const {
return isUnderused();
}
-inline bool js::Nursery::isNearlyFull() const {
- bool belowBytesThreshold =
- freeSpace() < tunables().nurseryFreeThresholdForIdleCollection();
- bool belowFractionThreshold =
- double(freeSpace()) / double(capacity()) <
- tunables().nurseryFreeThresholdForIdleCollectionFraction();
-
- // We want to use belowBytesThreshold when the nursery is sufficiently large,
- // and belowFractionThreshold when it's small.
- //
- // When the nursery is small then belowBytesThreshold is a lower threshold
- // (triggered earlier) than belowFractionThreshold. So if the fraction
- // threshold is true, the bytes one will be true also. The opposite is true
- // when the nursery is large.
- //
- // Therefore, by the time we cross the threshold we care about, we've already
- // crossed the other one, and we can boolean AND to use either condition
- // without encoding any "is the nursery big/small" test/threshold. The point
- // at which they cross is when the nursery is: BytesThreshold /
- // FractionThreshold large.
- //
- // With defaults that's:
+inline bool js::Nursery::freeSpaceIsBelowEagerThreshold() const {
+ // The threshold is specified in terms of free space so that it doesn't depend
+ // on the size of the nursery.
//
- // 1MB = 256KB / 0.25
+ // There are two thresholds: an absolute free-bytes threshold and a free-space
+ // fraction threshold. Both are used so that we don't collect too eagerly for
+ // small nurseries (or even all the time, if the nursery size is less than the
+ // free-bytes threshold) or too eagerly for large nurseries (where a fractional
+ // threshold alone may leave a significant amount of the nursery unused).
//
- return belowBytesThreshold && belowFractionThreshold;
+ // Since the aim is to make this less eager, we require both thresholds to
+ // be met.
+
+ size_t freeBytes = freeSpace();
+ double freeFraction = double(freeBytes) / double(capacity());
+
+ size_t bytesThreshold = tunables().nurseryEagerCollectionThresholdBytes();
+ double fractionThreshold =
+ tunables().nurseryEagerCollectionThresholdPercent();
+
+ return freeBytes < bytesThreshold && freeFraction < fractionThreshold;
}
inline bool js::Nursery::isUnderused() const {
@@ -1124,7 +1118,7 @@ inline bool js::Nursery::isUnderused() const {
// simplest.
TimeDuration timeSinceLastCollection =
TimeStamp::NowLoRes() - previousGC.endTime;
- return timeSinceLastCollection > tunables().nurseryTimeoutForIdleCollection();
+ return timeSinceLastCollection > tunables().nurseryEagerCollectionTimeout();
}
void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) {
@@ -1874,7 +1868,7 @@ size_t js::Nursery::targetSize(JS::GCOptions options, JS::GCReason reason) {
// If the nursery is completely unused then minimise it.
if (hasRecentGrowthData && previousGC.nurseryUsedBytes == 0 &&
now - lastCollectionEndTime() >
- tunables().nurseryTimeoutForIdleCollection() &&
+ tunables().nurseryEagerCollectionTimeout() &&
!js::SupportDifferentialTesting()) {
clearRecentGrowthData();
return 0;
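A self-contained sketch of the dual-threshold test implemented above; the hard-coded values mirror the defaults declared in Scheduling.h later in this patch (ChunkSize / 4 and 25%) and are illustrative only:

#include <cstddef>

static bool FreeSpaceIsBelowEagerThresholdSketch(size_t freeBytes,
                                                 size_t capacityBytes) {
  const size_t bytesThreshold = 256 * 1024;  // illustrative: ChunkSize / 4
  const double fractionThreshold = 0.25;     // illustrative: 25%
  double freeFraction = double(freeBytes) / double(capacityBytes);
  // Both conditions must hold, so small nurseries are not collected all the
  // time and large nurseries are not collected while mostly unused.
  return freeBytes < bytesThreshold && freeFraction < fractionThreshold;
}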
diff --git a/js/src/gc/Nursery.h b/js/src/gc/Nursery.h
index 2bab1623b0..0d7b607ff8 100644
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -328,9 +328,7 @@ class Nursery {
}
JS::GCReason minorGCTriggerReason() const { return minorGCTriggerReason_; }
- bool shouldCollect() const;
- bool isNearlyFull() const;
- bool isUnderused() const;
+ bool wantEagerCollection() const;
bool enableProfiling() const { return enableProfiling_; }
@@ -383,12 +381,11 @@ class Nursery {
KeyCount
};
- using ProfileTimes =
- mozilla::EnumeratedArray<ProfileKey, ProfileKey::KeyCount,
- mozilla::TimeStamp>;
+ using ProfileTimes = mozilla::EnumeratedArray<ProfileKey, mozilla::TimeStamp,
+ size_t(ProfileKey::KeyCount)>;
using ProfileDurations =
- mozilla::EnumeratedArray<ProfileKey, ProfileKey::KeyCount,
- mozilla::TimeDuration>;
+ mozilla::EnumeratedArray<ProfileKey, mozilla::TimeDuration,
+ size_t(ProfileKey::KeyCount)>;
// Calculate the promotion rate of the most recent minor GC.
// The valid_for_tenuring parameter is used to return whether this
@@ -445,6 +442,9 @@ class Nursery {
[[nodiscard]] bool moveToNextChunk();
+ bool freeSpaceIsBelowEagerThreshold() const;
+ bool isUnderused() const;
+
struct CollectionResult {
size_t tenuredBytes;
size_t tenuredCells;
diff --git a/js/src/gc/Scheduling.h b/js/src/gc/Scheduling.h
index 09a9f834eb..cbaeb1f353 100644
--- a/js/src/gc/Scheduling.h
+++ b/js/src/gc/Scheduling.h
@@ -447,23 +447,26 @@
NoCheck, 16 * 1024 * 1024) \
\
/* \
- * JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION \
- * JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_FRACTION \
- * JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS \
+ * JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_KB \
+ * JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_PERCENT \
+ * JSGC_NURSERY_EAGER_COLLECTION_TIMEOUT_MS \
* \
- * Attempt to run a minor GC in the idle time if the free space falls below \
- * this threshold or if it hasn't been collected for too long. The absolute \
- * threshold is used when the nursery is large and the percentage when it is \
- * small. See Nursery::shouldCollect(). \
+ * JS::MaybeRunNurseryCollection will run a minor GC if the free space falls \
+ * below a threshold or if it hasn't been collected for too long. \
+ * \
+ * To avoid making this too eager, two thresholds must be met. The free \
+ * space must fall below a size threshold and the fraction of free space \
+ * remaining must also fall below a threshold. \
+ * \
+ * See Nursery::wantEagerCollection() for more details. \
*/ \
- _(JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION, size_t, \
- nurseryFreeThresholdForIdleCollection, ConvertSize, NoCheck, \
- ChunkSize / 4) \
- _(JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT, double, \
- nurseryFreeThresholdForIdleCollectionFraction, ConvertTimes100, \
+ _(JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_KB, size_t, \
+ nurseryEagerCollectionThresholdBytes, ConvertKB, NoCheck, ChunkSize / 4) \
+ _(JSGC_NURSERY_EAGER_COLLECTION_THRESHOLD_PERCENT, double, \
+ nurseryEagerCollectionThresholdPercent, ConvertTimes100, \
CheckNonZeroUnitRange, 0.25) \
- _(JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS, mozilla::TimeDuration, \
- nurseryTimeoutForIdleCollection, ConvertMillis, NoCheck, \
+ _(JSGC_NURSERY_EAGER_COLLECTION_TIMEOUT_MS, mozilla::TimeDuration, \
+ nurseryEagerCollectionTimeout, ConvertMillis, NoCheck, \
mozilla::TimeDuration::FromSeconds(5)) \
\
/* \
diff --git a/js/src/gc/StableCellHasher-inl.h b/js/src/gc/StableCellHasher-inl.h
index b4054342c0..af0caaad89 100644
--- a/js/src/gc/StableCellHasher-inl.h
+++ b/js/src/gc/StableCellHasher-inl.h
@@ -137,7 +137,7 @@ inline void TransferUniqueId(Cell* tgt, Cell* src) {
MOZ_ASSERT(src->zone() == tgt->zone());
Zone* zone = tgt->zone();
- MOZ_ASSERT(!zone->uniqueIds().has(tgt));
+ MOZ_ASSERT_IF(zone->uniqueIds().has(src), !zone->uniqueIds().has(tgt));
zone->uniqueIds().rekeyIfMoved(src, tgt);
}
diff --git a/js/src/gc/Statistics.cpp b/js/src/gc/Statistics.cpp
index e50e7500cf..c12d44db97 100644
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -154,11 +154,11 @@ struct PhaseInfo {
};
// A table of PhaseInfo indexed by Phase.
-using PhaseTable = EnumeratedArray<Phase, Phase::LIMIT, PhaseInfo>;
+using PhaseTable = EnumeratedArray<Phase, PhaseInfo, size_t(Phase::LIMIT)>;
// A table of PhaseKindInfo indexed by PhaseKind.
using PhaseKindTable =
- EnumeratedArray<PhaseKind, PhaseKind::LIMIT, PhaseKindInfo>;
+ EnumeratedArray<PhaseKind, PhaseKindInfo, size_t(PhaseKind::LIMIT)>;
#include "gc/StatsPhasesGenerated.inc"
@@ -595,7 +595,7 @@ UniqueChars Statistics::formatDetailedTotals() const {
void Statistics::formatJsonSlice(size_t sliceNum, JSONPrinter& json) const {
/*
* We number each of the slice properties to keep the code in
- * GCTelemetry.jsm in sync. See MAX_SLICE_KEYS.
+ * GCTelemetry.sys.mjs in sync. See MAX_SLICE_KEYS.
*/
json.beginObject();
formatJsonSliceDescription(sliceNum, slices_[sliceNum], json); // # 1-11
diff --git a/js/src/gc/Statistics.h b/js/src/gc/Statistics.h
index f03bc2ea38..bc6d7bf5dd 100644
--- a/js/src/gc/Statistics.h
+++ b/js/src/gc/Statistics.h
@@ -136,19 +136,19 @@ struct Statistics {
template <typename T, size_t Length>
using Array = mozilla::Array<T, Length>;
- template <typename IndexType, IndexType SizeAsEnumValue, typename ValueType>
+ template <typename IndexType, typename ValueType, IndexType SizeAsEnumValue>
using EnumeratedArray =
- mozilla::EnumeratedArray<IndexType, SizeAsEnumValue, ValueType>;
+ mozilla::EnumeratedArray<IndexType, ValueType, size_t(SizeAsEnumValue)>;
using TimeDuration = mozilla::TimeDuration;
using TimeStamp = mozilla::TimeStamp;
// Create types for tables of times, by phase and phase kind.
- using PhaseTimes = EnumeratedArray<Phase, Phase::LIMIT, TimeDuration>;
+ using PhaseTimes = EnumeratedArray<Phase, TimeDuration, Phase::LIMIT>;
using PhaseKindTimes =
- EnumeratedArray<PhaseKind, PhaseKind::LIMIT, TimeDuration>;
+ EnumeratedArray<PhaseKind, TimeDuration, PhaseKind::LIMIT>;
- using PhaseTimeStamps = EnumeratedArray<Phase, Phase::LIMIT, TimeStamp>;
+ using PhaseTimeStamps = EnumeratedArray<Phase, TimeStamp, Phase::LIMIT>;
[[nodiscard]] static bool initialize();
@@ -370,12 +370,12 @@ struct Statistics {
TimeDuration totalGCTime_;
/* Number of events of this type for this GC. */
- EnumeratedArray<Count, COUNT_LIMIT,
- mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire>>
+ EnumeratedArray<Count, mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire>,
+ COUNT_LIMIT>
counts;
/* Other GC statistics. */
- EnumeratedArray<Stat, STAT_LIMIT, uint32_t> stats;
+ EnumeratedArray<Stat, uint32_t, STAT_LIMIT> stats;
/*
* These events cannot be kept in the above array, we need to take their
@@ -440,7 +440,7 @@ struct Statistics {
};
using ProfileDurations =
- EnumeratedArray<ProfileKey, ProfileKey::KeyCount, TimeDuration>;
+ EnumeratedArray<ProfileKey, TimeDuration, ProfileKey::KeyCount>;
bool enableProfiling_ = false;
bool profileWorkers_ = false;
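Several hunks in this file and elsewhere in the patch are mechanical updates for the new mozilla::EnumeratedArray parameter order (index type, value type, then the size as a size_t). A hedged before/after sketch using an invented enum:

#include <cstdint>
#include "mozilla/EnumeratedArray.h"
#include "mozilla/TimeStamp.h"

enum class ExamplePhase : uint8_t { Mark, Sweep, Limit };

// Old order:  EnumeratedArray<ExamplePhase, ExamplePhase::Limit, TimeDuration>
// New order, as used throughout this patch:
using ExamplePhaseTimes =
    mozilla::EnumeratedArray<ExamplePhase, mozilla::TimeDuration,
                             size_t(ExamplePhase::Limit)>;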
diff --git a/js/src/gc/Sweeping.cpp b/js/src/gc/Sweeping.cpp
index 3686695978..123b2c9650 100644
--- a/js/src/gc/Sweeping.cpp
+++ b/js/src/gc/Sweeping.cpp
@@ -2394,6 +2394,14 @@ void GCRuntime::endSweepPhase(bool destroyingRuntime) {
MOZ_ASSERT_IF(destroyingRuntime, !useBackgroundThreads);
+ // Release parallel marking threads for worker runtimes now that we've
+ // finished marking. The main thread keeps the reservation as long as
+ // parallel marking is enabled.
+ if (!rt->isMainRuntime()) {
+ MOZ_ASSERT_IF(useParallelMarking, reservedMarkingThreads != 0);
+ releaseMarkingThreads();
+ }
+
{
gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::DESTROY);
diff --git a/js/src/gc/Tenuring.cpp b/js/src/gc/Tenuring.cpp
index 84526e2109..a9506cfa14 100644
--- a/js/src/gc/Tenuring.cpp
+++ b/js/src/gc/Tenuring.cpp
@@ -74,6 +74,14 @@ void TenuringTracer::onObjectEdge(JSObject** objp, const char* name) {
return;
}
+ onNonForwardedNurseryObjectEdge(objp);
+}
+
+void TenuringTracer::onNonForwardedNurseryObjectEdge(JSObject** objp) {
+ JSObject* obj = *objp;
+ MOZ_ASSERT(IsInsideNursery(obj));
+ MOZ_ASSERT(!obj->isForwarded());
+
UpdateAllocSiteOnTenure(obj);
// Take a fast path for tenuring a plain object which is by far the most
@@ -98,6 +106,14 @@ void TenuringTracer::onStringEdge(JSString** strp, const char* name) {
return;
}
+ onNonForwardedNurseryStringEdge(strp);
+}
+
+void TenuringTracer::onNonForwardedNurseryStringEdge(JSString** strp) {
+ JSString* str = *strp;
+ MOZ_ASSERT(IsInsideNursery(str));
+ MOZ_ASSERT(!str->isForwarded());
+
UpdateAllocSiteOnTenure(str);
*strp = moveToTenured(str);
@@ -115,6 +131,14 @@ void TenuringTracer::onBigIntEdge(JS::BigInt** bip, const char* name) {
return;
}
+ onNonForwardedNurseryBigIntEdge(bip);
+}
+
+void TenuringTracer::onNonForwardedNurseryBigIntEdge(JS::BigInt** bip) {
+ JS::BigInt* bi = *bip;
+ MOZ_ASSERT(IsInsideNursery(bi));
+ MOZ_ASSERT(!bi->isForwarded());
+
UpdateAllocSiteOnTenure(bi);
*bip = moveToTenured(bi);
@@ -137,37 +161,52 @@ void TenuringTracer::traverse(JS::Value* thingp) {
Value value = *thingp;
CheckTracedThing(this, value);
+ if (!value.isGCThing()) {
+ return;
+ }
+
+ Cell* cell = value.toGCThing();
+ if (!IsInsideNursery(cell)) {
+ return;
+ }
+
+ if (cell->isForwarded()) {
+ const gc::RelocationOverlay* overlay =
+ gc::RelocationOverlay::fromCell(cell);
+ thingp->changeGCThingPayload(overlay->forwardingAddress());
+ return;
+ }
+
// We only care about a few kinds of GC thing here and this generates much
// tighter code than using MapGCThingTyped.
- Value post;
if (value.isObject()) {
JSObject* obj = &value.toObject();
- onObjectEdge(&obj, "value");
- post = JS::ObjectValue(*obj);
+ onNonForwardedNurseryObjectEdge(&obj);
+ MOZ_ASSERT(obj != &value.toObject());
+ *thingp = JS::ObjectValue(*obj);
+ return;
}
#ifdef ENABLE_RECORD_TUPLE
- else if (value.isExtendedPrimitive()) {
+ if (value.isExtendedPrimitive()) {
JSObject* obj = &value.toExtendedPrimitive();
- onObjectEdge(&obj, "value");
- post = JS::ExtendedPrimitiveValue(*obj);
+ onNonForwardedNurseryObjectEdge(&obj);
+ MOZ_ASSERT(obj != &value.toExtendedPrimitive());
+ *thingp = JS::ExtendedPrimitiveValue(*obj);
+ return;
}
#endif
- else if (value.isString()) {
+ if (value.isString()) {
JSString* str = value.toString();
- onStringEdge(&str, "value");
- post = JS::StringValue(str);
- } else if (value.isBigInt()) {
- JS::BigInt* bi = value.toBigInt();
- onBigIntEdge(&bi, "value");
- post = JS::BigIntValue(bi);
- } else {
- MOZ_ASSERT_IF(value.isGCThing(), !IsInsideNursery(value.toGCThing()));
+ onNonForwardedNurseryStringEdge(&str);
+ MOZ_ASSERT(str != value.toString());
+ *thingp = JS::StringValue(str);
return;
}
-
- if (post != value) {
- *thingp = post;
- }
+ MOZ_ASSERT(value.isBigInt());
+ JS::BigInt* bi = value.toBigInt();
+ onNonForwardedNurseryBigIntEdge(&bi);
+ MOZ_ASSERT(bi != value.toBigInt());
+ *thingp = JS::BigIntValue(bi);
}
void TenuringTracer::traverse(wasm::AnyRef* thingp) {
diff --git a/js/src/gc/Tenuring.h b/js/src/gc/Tenuring.h
index 560d98d178..3eca5f4bc3 100644
--- a/js/src/gc/Tenuring.h
+++ b/js/src/gc/Tenuring.h
@@ -92,6 +92,10 @@ class TenuringTracer final : public JSTracer {
void traceBigInt(JS::BigInt* bi);
private:
+ MOZ_ALWAYS_INLINE void onNonForwardedNurseryObjectEdge(JSObject** objp);
+ MOZ_ALWAYS_INLINE void onNonForwardedNurseryStringEdge(JSString** strp);
+ MOZ_ALWAYS_INLINE void onNonForwardedNurseryBigIntEdge(JS::BigInt** bip);
+
// A dependent string's chars need to be relocated if the base it is using
// chars from has been deduplicated.
template <typename CharT>
diff --git a/js/src/gc/Zone.cpp b/js/src/gc/Zone.cpp
index e2c67aee7b..d0586d5d56 100644
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -632,11 +632,13 @@ void Zone::purgeAtomCache() {
}
void Zone::addSizeOfIncludingThis(
- mozilla::MallocSizeOf mallocSizeOf, JS::CodeSizes* code, size_t* regexpZone,
- size_t* jitZone, size_t* cacheIRStubs, size_t* uniqueIdMap,
- size_t* initialPropMapTable, size_t* shapeTables, size_t* atomsMarkBitmaps,
- size_t* compartmentObjects, size_t* crossCompartmentWrappersTables,
- size_t* compartmentsPrivateData, size_t* scriptCountsMapArg) {
+ mozilla::MallocSizeOf mallocSizeOf, size_t* zoneObject, JS::CodeSizes* code,
+ size_t* regexpZone, size_t* jitZone, size_t* cacheIRStubs,
+ size_t* uniqueIdMap, size_t* initialPropMapTable, size_t* shapeTables,
+ size_t* atomsMarkBitmaps, size_t* compartmentObjects,
+ size_t* crossCompartmentWrappersTables, size_t* compartmentsPrivateData,
+ size_t* scriptCountsMapArg) {
+ *zoneObject += mallocSizeOf(this);
*regexpZone += regExps().sizeOfIncludingThis(mallocSizeOf);
if (jitZone_) {
jitZone_->addSizeOfIncludingThis(mallocSizeOf, code, jitZone, cacheIRStubs);
diff --git a/js/src/gc/Zone.h b/js/src/gc/Zone.h
index 457e586cea..fd91de8626 100644
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -375,15 +375,13 @@ class Zone : public js::ZoneAllocator, public js::gc::GraphNodeBase<JS::Zone> {
bool registerObjectWithWeakPointers(JSObject* obj);
void sweepObjectsWithWeakPointers(JSTracer* trc);
- void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
- JS::CodeSizes* code, size_t* regexpZone,
- size_t* jitZone, size_t* cacheIRStubs,
- size_t* uniqueIdMap, size_t* initialPropMapTable,
- size_t* shapeTables, size_t* atomsMarkBitmaps,
- size_t* compartmentObjects,
- size_t* crossCompartmentWrappersTables,
- size_t* compartmentsPrivateData,
- size_t* scriptCountsMapArg);
+ void addSizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf, size_t* zoneObject,
+ JS::CodeSizes* code, size_t* regexpZone, size_t* jitZone,
+ size_t* cacheIRStubs, size_t* uniqueIdMap, size_t* initialPropMapTable,
+ size_t* shapeTables, size_t* atomsMarkBitmaps, size_t* compartmentObjects,
+ size_t* crossCompartmentWrappersTables, size_t* compartmentsPrivateData,
+ size_t* scriptCountsMapArg);
// Iterate over all cells in the zone. See the definition of ZoneCellIter
// in gc/GC-inl.h for the possible arguments and documentation.
diff --git a/js/src/gdb/mozilla/prettyprinters.py b/js/src/gdb/mozilla/prettyprinters.py
index b533d1f1f5..12847cae7c 100644
--- a/js/src/gdb/mozilla/prettyprinters.py
+++ b/js/src/gdb/mozilla/prettyprinters.py
@@ -271,7 +271,7 @@ def implemented_types(t):
yield t2
-template_regexp = re.compile("([\w_:]+)<")
+template_regexp = re.compile(r"([\w_:]+)<")
def is_struct_or_union(t):
diff --git a/js/src/gdb/run-tests.py b/js/src/gdb/run-tests.py
index cb9501e904..063a550b17 100644
--- a/js/src/gdb/run-tests.py
+++ b/js/src/gdb/run-tests.py
@@ -40,9 +40,9 @@ def _relpath(path, start=None):
os.path.relpath = _relpath
# Characters that need to be escaped when used in shell words.
-shell_need_escapes = re.compile("[^\w\d%+,-./:=@'\"]", re.DOTALL)
+shell_need_escapes = re.compile("[^\\w\\d%+,-./:=@'\"]", re.DOTALL)
# Characters that need to be escaped within double-quoted strings.
-shell_dquote_escapes = re.compile('[^\w\d%+,-./:=@"]', re.DOTALL)
+shell_dquote_escapes = re.compile('[^\\w\\d%+,-./:=@"]', re.DOTALL)
def make_shell_cmd(l):
diff --git a/js/src/gdb/tests/test-ExecutableAllocator.py b/js/src/gdb/tests/test-ExecutableAllocator.py
index bec2dda623..6a3ae4aae1 100644
--- a/js/src/gdb/tests/test-ExecutableAllocator.py
+++ b/js/src/gdb/tests/test-ExecutableAllocator.py
@@ -13,10 +13,10 @@ run_fragment("ExecutableAllocator.onepool")
reExecPool = "ExecutablePool [a-f0-9]{8,}-[a-f0-9]{8,}"
assert_regexp_pretty("pool", reExecPool)
-assert_regexp_pretty("execAlloc", "ExecutableAllocator\(\[" + reExecPool + "\]\)")
+assert_regexp_pretty("execAlloc", r"ExecutableAllocator\(\[" + reExecPool + r"\]\)")
run_fragment("ExecutableAllocator.twopools")
assert_regexp_pretty(
- "execAlloc", "ExecutableAllocator\(\[" + reExecPool + ", " + reExecPool + "\]\)"
+ "execAlloc", r"ExecutableAllocator\(\[" + reExecPool + ", " + reExecPool + r"\]\)"
)
diff --git a/js/src/intgemm/IntegerGemmIntrinsic.cpp b/js/src/intgemm/IntegerGemmIntrinsic.cpp
index aebb0f5f02..0e79ccad36 100644
--- a/js/src/intgemm/IntegerGemmIntrinsic.cpp
+++ b/js/src/intgemm/IntegerGemmIntrinsic.cpp
@@ -22,10 +22,9 @@
#if defined(USE_AVX512BW)
# if defined(USE_AVX512VNNI)
-# define SUPPORTED_ARCHS \
- xsimd::arch_list<xsimd::avx512vnni<xsimd::avx512bw>, \
- xsimd::avx512bw, xsimd::avx2, \
- xsimd::ssse3, xsimd::sse2>
+# define SUPPORTED_ARCHS \
+ xsimd::arch_list<xsimd::avx512vnni<xsimd::avx512bw>, xsimd::avx512bw, \
+ xsimd::avx2, xsimd::ssse3, xsimd::sse2>
# elif defined(USE_AVXVNNI)
# define SUPPORTED_ARCHS \
xsimd::arch_list<xsimd::avx512bw, xsimd::avxvnni, xsimd::avx2, \
@@ -45,7 +44,12 @@
#elif defined(USE_SSE2)
# define SUPPORTED_ARCHS xsimd::arch_list<xsimd::sse2>
#elif defined(USE_NEON) and defined(XSIMD_WITH_NEON64)
-# define SUPPORTED_ARCHS xsimd::arch_list<xsimd::neon64>
+# if defined(USE_NEON_I8MM)
+# define SUPPORTED_ARCHS \
+ xsimd::arch_list<xsimd::i8mm<xsimd::neon64>, xsimd::neon64>
+# else
+# define SUPPORTED_ARCHS xsimd::arch_list<xsimd::neon64>
+# endif
#else
# error no supported architecture
#endif
diff --git a/js/src/intgemm/moz.build b/js/src/intgemm/moz.build
index 9249938cbb..af4450b4e6 100644
--- a/js/src/intgemm/moz.build
+++ b/js/src/intgemm/moz.build
@@ -64,6 +64,12 @@ if CONFIG["TARGET_CPU"] == "aarch64":
"NEON_FLAGS"
]
+ DEFINES["USE_NEON_I8MM"] = True
+ SOURCES += ["/third_party/gemmology/kernels/GemmologyEngineNeon64I8mm.cpp"]
+ SOURCES[
+ "/third_party/gemmology/kernels/GemmologyEngineNeon64I8mm.cpp"
+ ].flags += CONFIG["NEON_I8MM_FLAGS"]
+
SOURCES += [
"IntegerGemmIntrinsic.cpp",
]
diff --git a/js/src/irregexp/RegExpAPI.cpp b/js/src/irregexp/RegExpAPI.cpp
index f1ba1fbc4b..39a6f8ccc9 100644
--- a/js/src/irregexp/RegExpAPI.cpp
+++ b/js/src/irregexp/RegExpAPI.cpp
@@ -632,7 +632,7 @@ enum class AssembleResult {
// RegExpShared.
ByteArray bytecode =
v8::internal::ByteArray::cast(*result.code).takeOwnership(cx->isolate);
- uint32_t length = bytecode->length;
+ uint32_t length = bytecode->length();
re->setByteCode(bytecode.release(), isLatin1);
js::AddCellMemory(re, length, MemoryUse::RegExpSharedBytecode);
}
@@ -773,7 +773,7 @@ bool CompilePattern(JSContext* cx, MutableHandleRegExpShared re,
bool isLatin1 = input->hasLatin1Chars();
SampleCharacters(input, compiler);
- data.node = compiler.PreprocessRegExp(&data, flags, isLatin1);
+ data.node = compiler.PreprocessRegExp(&data, isLatin1);
data.error = AnalyzeRegExp(cx->isolate, isLatin1, flags, data.node);
if (data.error != RegExpError::kNone) {
MOZ_ASSERT(data.error == RegExpError::kAnalysisStackOverflow);
diff --git a/js/src/irregexp/RegExpNativeMacroAssembler.cpp b/js/src/irregexp/RegExpNativeMacroAssembler.cpp
index 2a8b1749c2..99cfc31bfc 100644
--- a/js/src/irregexp/RegExpNativeMacroAssembler.cpp
+++ b/js/src/irregexp/RegExpNativeMacroAssembler.cpp
@@ -247,8 +247,8 @@ void SMRegExpMacroAssembler::CheckCharacterNotInRange(base::uc16 from,
bool SMRegExpMacroAssembler::IsCharacterInRangeArray(uint32_t c,
ByteArrayData* ranges) {
js::AutoUnsafeCallWithABI unsafe;
- MOZ_ASSERT(ranges->length % sizeof(uint16_t) == 0);
- uint32_t length = ranges->length / sizeof(uint16_t);
+ MOZ_ASSERT(ranges->length() % sizeof(uint16_t) == 0);
+ uint32_t length = ranges->length() / sizeof(uint16_t);
MOZ_ASSERT(length > 0);
// Fast paths.
diff --git a/js/src/irregexp/RegExpShim.cpp b/js/src/irregexp/RegExpShim.cpp
index 2b2c3cd4a0..da388e0057 100644
--- a/js/src/irregexp/RegExpShim.cpp
+++ b/js/src/irregexp/RegExpShim.cpp
@@ -227,13 +227,13 @@ Handle<ByteArray> Isolate::NewByteArray(int length, AllocationType alloc) {
js::AutoEnterOOMUnsafeRegion oomUnsafe;
- size_t alloc_size = sizeof(uint32_t) + length;
+ size_t alloc_size = sizeof(ByteArrayData) + length;
ByteArrayData* data =
static_cast<ByteArrayData*>(allocatePseudoHandle(alloc_size));
if (!data) {
oomUnsafe.crash("Irregexp NewByteArray");
}
- data->length = length;
+ new (data) ByteArrayData(length);
return Handle<ByteArray>(JS::PrivateValue(data), this);
}
@@ -261,7 +261,7 @@ Handle<FixedIntegerArray<T>> Isolate::NewFixedIntegerArray(uint32_t length) {
if (!data) {
oomUnsafe.crash("Irregexp NewFixedIntegerArray");
}
- data->length = rawLength;
+ new (data) ByteArrayData(rawLength);
return Handle<FixedIntegerArray<T>>(JS::PrivateValue(data), this);
}
diff --git a/js/src/irregexp/RegExpShim.h b/js/src/irregexp/RegExpShim.h
index 3f85413421..4d32c84920 100644
--- a/js/src/irregexp/RegExpShim.h
+++ b/js/src/irregexp/RegExpShim.h
@@ -586,15 +586,6 @@ class Object {
// IsCharacterInRangeArray in regexp-macro-assembler.cc.
Object(uintptr_t raw) : asBits_(raw) { MOZ_CRASH("unused"); }
- // Used in regexp-interpreter.cc to check the return value of
- // isolate->stack_guard()->HandleInterrupts(). We want to handle
- // interrupts in the caller, so we always return false from
- // HandleInterrupts and true here.
- inline bool IsException(Isolate*) const {
- MOZ_ASSERT(!value().toBoolean());
- return true;
- }
-
JS::Value value() const { return JS::Value::fromRawBits(asBits_); }
inline static Object cast(Object object) { return object; }
@@ -604,6 +595,14 @@ class Object {
uint64_t asBits_;
} JS_HAZ_GC_POINTER;
+// Used in regexp-interpreter.cc to check the return value of
+// isolate->stack_guard()->HandleInterrupts(). We want to handle
+// interrupts in the caller, so we return a magic value from
+// HandleInterrupts and check for it here.
+inline bool IsException(Object obj, Isolate*) {
+ return obj.value().isMagic(JS_INTERRUPT_REGEXP);
+}
+
class Smi : public Object {
public:
static Smi FromInt(int32_t value) {
@@ -626,6 +625,27 @@ class HeapObject : public Object {
}
};
+// V8's values use low-bit tagging. If the LSB is 0, it's a small
+// integer. If the LSB is 1, it's a pointer to some GC thing. In V8,
+// this wrapper class is used to represent a pointer that has the low
+// bit set, or a small integer that has been shifted left by one
+// bit. We don't use the same tagging system, so all we need is a
+// transparent wrapper that automatically converts to/from the wrapped
+// type.
+template <typename T>
+class Tagged {
+ public:
+ Tagged() {}
+ MOZ_IMPLICIT Tagged(const T& value) : value_(value) {}
+ MOZ_IMPLICIT Tagged(T&& value) : value_(std::move(value)) {}
+
+ T* operator->() { return &value_; }
+ constexpr operator T() const { return value_; }
+
+ private:
+ T value_;
+};
+
// A fixed-size array with Objects (aka Values) as element types.
// Implemented using the dense elements of an ArrayObject.
// Used for named captures.
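The Tagged<T> shim above only has to be source-compatible with imported V8 code that spells Tagged<Foo> and dereferences it with ->; it performs no actual pointer tagging. A small self-contained sketch of how such a transparent wrapper behaves (hypothetical types, not part of this patch):

#include <cassert>
#include <utility>

template <typename T>
class Tagged {
 public:
  Tagged() = default;
  Tagged(const T& value) : value_(value) {}        // implicit on purpose
  Tagged(T&& value) : value_(std::move(value)) {}

  T* operator->() { return &value_; }              // V8-style handle syntax
  constexpr operator T() const { return value_; }  // converts back silently

 private:
  T value_{};
};

struct Length {
  int length() const { return 3; }
};

int main() {
  Tagged<Length> t = Length{};  // implicit wrap
  assert(t->length() == 3);     // operator-> forwards to the wrapped value
  Length plain = t;             // implicit unwrap
  (void)plain;
  return 0;
}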
@@ -668,13 +688,13 @@ T* ByteArrayData::typedData() {
template <typename T>
T ByteArrayData::getTyped(uint32_t index) {
- MOZ_ASSERT(index < length / sizeof(T));
+ MOZ_ASSERT(index < length() / sizeof(T));
return typedData<T>()[index];
}
template <typename T>
void ByteArrayData::setTyped(uint32_t index, T value) {
- MOZ_ASSERT(index < length / sizeof(T));
+ MOZ_ASSERT(index < length() / sizeof(T));
typedData<T>()[index] = value;
}
@@ -684,6 +704,7 @@ class ByteArray : public HeapObject {
ByteArrayData* inner() const {
return static_cast<ByteArrayData*>(value().toPrivate());
}
+ friend bool IsByteArray(Object obj);
public:
PseudoHandle<ByteArrayData> takeOwnership(Isolate* isolate);
@@ -692,8 +713,8 @@ class ByteArray : public HeapObject {
uint8_t get(uint32_t index) { return inner()->get(index); }
void set(uint32_t index, uint8_t val) { inner()->set(index, val); }
- uint32_t length() const { return inner()->length; }
- uint8_t* GetDataStartAddress() { return inner()->data(); }
+ uint32_t length() const { return inner()->length(); }
+ uint8_t* begin() { return inner()->data(); }
static ByteArray cast(Object object) {
ByteArray b;
@@ -701,11 +722,17 @@ class ByteArray : public HeapObject {
return b;
}
- bool IsByteArray() const { return true; }
-
friend class SMRegExpMacroAssembler;
};
+// This is only used in assertions. In debug builds, we put a magic value
+// in the header of each ByteArrayData, and assert here that it matches.
+inline bool IsByteArray(Object obj) {
+ MOZ_ASSERT(ByteArray::cast(obj).inner()->magic() ==
+ ByteArrayData::ExpectedMagic);
+ return true;
+}
+
// This is a convenience class used in V8 for treating a ByteArray as an array
// of fixed-size integers. This version supports integral types up to 32 bits.
template <typename T>
@@ -1030,6 +1057,7 @@ class JSRegExp : public HeapObject {
};
using RegExpFlags = JS::RegExpFlags;
+using RegExpFlag = JS::RegExpFlags::Flag;
inline bool IsUnicode(RegExpFlags flags) { return flags.unicode(); }
inline bool IsGlobal(RegExpFlags flags) { return flags.global(); }
@@ -1042,6 +1070,22 @@ inline bool IsEitherUnicode(RegExpFlags flags) {
return flags.unicode() || flags.unicodeSets();
}
+inline base::Optional<RegExpFlag> TryRegExpFlagFromChar(char c) {
+ RegExpFlag flag;
+
+ // The parser only calls this after verifying that it's a supported flag.
+ MOZ_ALWAYS_TRUE(JS::MaybeParseRegExpFlag(c, &flag));
+
+ return base::Optional(flag);
+}
+
+inline bool operator==(const RegExpFlags& lhs, const int& rhs) {
+ return lhs.value() == rhs;
+}
+inline bool operator!=(const RegExpFlags& lhs, const int& rhs) {
+ return !(lhs == rhs);
+}
+
class Histogram {
public:
inline void AddSample(int sample) {}
@@ -1126,9 +1170,11 @@ class Isolate {
// This is called from inside no-GC code. V8 runs the interrupt
// inside the no-GC code and then "manually relocates unhandlified
- // references" afterwards. We just return false and let the caller
- // handle interrupts.
- Object HandleInterrupts() { return Object(JS::BooleanValue(false)); }
+ // references" afterwards. We just return a magic value and let the
+ // caller handle interrupts.
+ Object HandleInterrupts() {
+ return Object(JS::MagicValue(JS_INTERRUPT_REGEXP));
+ }
JSContext* cx() const { return cx_; }
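HandleInterrupts now reports a pending interrupt by returning a dedicated magic value that the free IsException() recognizes, instead of a boolean. The shape of that sentinel-return pattern, reduced to plain enums (hypothetical names, not the JS::Value machinery):

#include <cassert>

enum class Signal { None, Interrupt };

struct Result {
  Signal signal = Signal::None;
};

// Caller-side check, analogous to IsException(result, isolate) above.
bool IsException(const Result& r) { return r.signal == Signal::Interrupt; }

// The callee returns the sentinel rather than handling the interrupt itself,
// leaving the caller free to decide what to do with it.
Result HandleInterrupts(bool interrupt_requested) {
  return Result{interrupt_requested ? Signal::Interrupt : Signal::None};
}

int main() {
  assert(!IsException(HandleInterrupts(false)));
  assert(IsException(HandleInterrupts(true)));
  return 0;
}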
diff --git a/js/src/irregexp/RegExpTypes.h b/js/src/irregexp/RegExpTypes.h
index e2a619689c..620fac4ed5 100644
--- a/js/src/irregexp/RegExpTypes.h
+++ b/js/src/irregexp/RegExpTypes.h
@@ -21,15 +21,17 @@ namespace internal {
class ByteArrayData {
public:
- uint32_t length;
+ ByteArrayData(uint32_t length) : length_(length) {}
+
+ uint32_t length() { return length_; };
uint8_t* data();
uint8_t get(uint32_t index) {
- MOZ_ASSERT(index < length);
+ MOZ_ASSERT(index < length());
return data()[index];
}
void set(uint32_t index, uint8_t val) {
- MOZ_ASSERT(index < length);
+ MOZ_ASSERT(index < length());
data()[index] = val;
}
@@ -39,9 +41,19 @@ class ByteArrayData {
template <typename T>
void setTyped(uint32_t index, T value);
+#ifdef DEBUG
+ const static uint32_t ExpectedMagic = 0x12344321;
+ uint32_t magic() const { return magic_; }
+
+ private:
+ uint32_t magic_ = ExpectedMagic;
+#endif
+
private:
template <typename T>
T* typedData();
+
+ uint32_t length_;
};
class Isolate;
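The DEBUG-only magic_ field added to ByteArrayData is a canary: IsByteArray() in RegExpShim.h asserts that the header of the type-erased pointer still carries the expected constant, catching cases where something other than a ByteArrayData is handed around. A standalone sketch of the same canary idea (hypothetical names):

#include <cassert>
#include <cstdint>

class Blob {
 public:
  explicit Blob(uint32_t length) : length_(length) {}
  uint32_t length() const { return length_; }

#ifdef DEBUG
  static constexpr uint32_t ExpectedMagic = 0x12344321;
  uint32_t magic() const { return magic_; }

 private:
  uint32_t magic_ = ExpectedMagic;  // written once by construction
#endif

 private:
  uint32_t length_;
};

// Debug-only sanity check before trusting a type-erased pointer.
bool LooksLikeBlob(const void* p) {
#ifdef DEBUG
  return static_cast<const Blob*>(p)->magic() == Blob::ExpectedMagic;
#else
  (void)p;
  return true;
#endif
}

int main() {
  Blob b(16);
  assert(LooksLikeBlob(&b));
  return 0;
}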
diff --git a/js/src/irregexp/imported/gen-regexp-special-case.cc b/js/src/irregexp/imported/gen-regexp-special-case.cc
index 8f6557ed30..0875568250 100644
--- a/js/src/irregexp/imported/gen-regexp-special-case.cc
+++ b/js/src/irregexp/imported/gen-regexp-special-case.cc
@@ -8,7 +8,6 @@
#include <sstream>
#include "irregexp/imported/special-case.h"
-#include "unicode/usetiter.h"
namespace v8 {
namespace internal {
@@ -126,52 +125,6 @@ void PrintSpecial(std::ofstream& out) {
PrintSet(out, "SpecialAddSet", special_add);
}
-void PrintUnicodeSpecial(std::ofstream& out) {
- icu::UnicodeSet non_simple_folding;
- icu::UnicodeSet current;
- UErrorCode status = U_ZERO_ERROR;
- // Look at all characters except white spaces.
- icu::UnicodeSet interestingCP(u"[^[:White_Space:]]", status);
- CHECK_EQ(status, U_ZERO_ERROR);
- icu::UnicodeSetIterator iter(interestingCP);
- while (iter.next()) {
- UChar32 c = iter.getCodepoint();
- current.set(c, c);
- current.closeOver(USET_CASE_INSENSITIVE).removeAllStrings();
- CHECK(!current.isBogus());
- // Remove characters from the closeover that have a simple case folding.
- icu::UnicodeSet toRemove;
- icu::UnicodeSetIterator closeOverIter(current);
- while (closeOverIter.next()) {
- UChar32 closeOverChar = closeOverIter.getCodepoint();
- UChar32 closeOverSCF = u_foldCase(closeOverChar, U_FOLD_CASE_DEFAULT);
- if (closeOverChar != closeOverSCF) {
- toRemove.add(closeOverChar);
- }
- }
- CHECK(!toRemove.isBogus());
- current.removeAll(toRemove);
-
- // The current character and its simple case folding are also always OK.
- UChar32 scf = u_foldCase(c, U_FOLD_CASE_DEFAULT);
- current.remove(c);
- current.remove(scf);
-
- // If there are any characters remaining, they were added due to full case
- // foldings and shouldn't match the current charcter according to the spec.
- if (!current.isEmpty()) {
- // Ensure that the character doesn't have a simple case folding.
- // Otherwise the current approach of simply removing the character from
- // the set before calling closeOver won't work.
- CHECK_EQ(c, scf);
- non_simple_folding.add(c);
- }
- }
- CHECK(!non_simple_folding.isBogus());
-
- PrintSet(out, "UnicodeNonSimpleCloseOverSet", non_simple_folding);
-}
-
void WriteHeader(const char* header_filename) {
std::ofstream out(header_filename);
out << std::hex << std::setfill('0') << std::setw(4);
@@ -192,7 +145,6 @@ void WriteHeader(const char* header_filename) {
<< "namespace internal {\n\n";
PrintSpecial(out);
- PrintUnicodeSpecial(out);
out << "\n"
<< "} // namespace internal\n"
diff --git a/js/src/irregexp/imported/regexp-ast.cc b/js/src/irregexp/imported/regexp-ast.cc
index 63eeb5c05d..34946bd80c 100644
--- a/js/src/irregexp/imported/regexp-ast.cc
+++ b/js/src/irregexp/imported/regexp-ast.cc
@@ -307,7 +307,7 @@ void* RegExpUnparser::VisitCapture(RegExpCapture* that, void* data) {
}
void* RegExpUnparser::VisitGroup(RegExpGroup* that, void* data) {
- os_ << "(?: ";
+ os_ << "(?" << that->flags() << ": ";
that->body()->Accept(this, data);
os_ << ")";
return nullptr;
@@ -325,7 +325,11 @@ void* RegExpUnparser::VisitLookaround(RegExpLookaround* that, void* data) {
void* RegExpUnparser::VisitBackReference(RegExpBackReference* that,
void* data) {
- os_ << "(<- " << that->index() << ")";
+ os_ << "(<- " << that->captures()->first()->index();
+ for (int i = 1; i < that->captures()->length(); ++i) {
+ os_ << "," << that->captures()->at(i)->index();
+ }
+ os_ << ")";
return nullptr;
}
@@ -406,10 +410,17 @@ RegExpClassSetExpression::RegExpClassSetExpression(
may_contain_strings_(may_contain_strings),
operands_(operands) {
DCHECK_NOT_NULL(operands);
- DCHECK_IMPLIES(is_negated_, !may_contain_strings_);
- max_match_ = 0;
- for (auto op : *operands) {
- max_match_ = std::max(max_match_, op->max_match());
+ if (is_negated) {
+ DCHECK(!may_contain_strings_);
+ // We don't know anything about max matches for negated classes.
+ // As there are no strings involved, assume that we can match a unicode
+ // character (2 code points).
+ max_match_ = 2;
+ } else {
+ max_match_ = 0;
+ for (auto op : *operands) {
+ max_match_ = std::max(max_match_, op->max_match());
+ }
}
}
diff --git a/js/src/irregexp/imported/regexp-ast.h b/js/src/irregexp/imported/regexp-ast.h
index af90b1dda3..b2b88515d3 100644
--- a/js/src/irregexp/imported/regexp-ast.h
+++ b/js/src/irregexp/imported/regexp-ast.h
@@ -130,12 +130,6 @@ class CharacterRange {
static void AddUnicodeCaseEquivalents(ZoneList<CharacterRange>* ranges,
Zone* zone);
-#ifdef V8_INTL_SUPPORT
- // Creates the closeOver of the given UnicodeSet, removing all
- // characters/strings that can't be derived via simple case folding.
- static void UnicodeSimpleCloseOver(icu::UnicodeSet& set);
-#endif // V8_INTL_SUPPORT
-
bool Contains(base::uc32 i) const { return from_ <= i && i <= to_; }
base::uc32 from() const { return from_; }
base::uc32 to() const { return to_; }
@@ -311,9 +305,12 @@ class RegExpClassRanges final : public RegExpTree {
// the specified ranges.
// CONTAINS_SPLIT_SURROGATE: The character class contains part of a split
// surrogate and should not be unicode-desugared (crbug.com/641091).
+ // IS_CASE_FOLDED: If case folding is required (/i), it was already
+ // performed on individual ranges and should not be applied again.
enum Flag {
NEGATED = 1 << 0,
CONTAINS_SPLIT_SURROGATE = 1 << 1,
+ IS_CASE_FOLDED = 1 << 2,
};
using ClassRangesFlags = base::Flags<Flag>;
@@ -356,6 +353,9 @@ class RegExpClassRanges final : public RegExpTree {
bool contains_split_surrogate() const {
return (class_ranges_flags_ & CONTAINS_SPLIT_SURROGATE) != 0;
}
+ bool is_case_folded() const {
+ return (class_ranges_flags_ & IS_CASE_FOLDED) != 0;
+ }
private:
CharacterSet set_;
@@ -626,8 +626,9 @@ class RegExpCapture final : public RegExpTree {
class RegExpGroup final : public RegExpTree {
public:
- explicit RegExpGroup(RegExpTree* body)
+ explicit RegExpGroup(RegExpTree* body, RegExpFlags flags)
: body_(body),
+ flags_(flags),
min_match_(body->min_match()),
max_match_(body->max_match()) {}
@@ -639,9 +640,11 @@ class RegExpGroup final : public RegExpTree {
int max_match() override { return max_match_; }
Interval CaptureRegisters() override { return body_->CaptureRegisters(); }
RegExpTree* body() const { return body_; }
+ RegExpFlags flags() const { return flags_; }
private:
RegExpTree* body_;
+ const RegExpFlags flags_;
int min_match_;
int max_match_;
};
@@ -651,12 +654,13 @@ class RegExpLookaround final : public RegExpTree {
enum Type { LOOKAHEAD, LOOKBEHIND };
RegExpLookaround(RegExpTree* body, bool is_positive, int capture_count,
- int capture_from, Type type)
+ int capture_from, Type type, int index)
: body_(body),
is_positive_(is_positive),
capture_count_(capture_count),
capture_from_(capture_from),
- type_(type) {}
+ type_(type),
+ index_(index) {}
DECL_BOILERPLATE(Lookaround);
@@ -669,6 +673,7 @@ class RegExpLookaround final : public RegExpTree {
int capture_count() const { return capture_count_; }
int capture_from() const { return capture_from_; }
Type type() const { return type_; }
+ int index() const { return index_; }
class Builder {
public:
@@ -692,14 +697,17 @@ class RegExpLookaround final : public RegExpTree {
int capture_count_;
int capture_from_;
Type type_;
+ int index_;
};
class RegExpBackReference final : public RegExpTree {
public:
- explicit RegExpBackReference(RegExpFlags flags) : flags_(flags) {}
- RegExpBackReference(RegExpCapture* capture, RegExpFlags flags)
- : capture_(capture), flags_(flags) {}
+ explicit RegExpBackReference(Zone* zone) : captures_(1, zone) {}
+ explicit RegExpBackReference(RegExpCapture* capture, Zone* zone)
+ : captures_(1, zone) {
+ captures_.Add(capture, zone);
+ }
DECL_BOILERPLATE(BackReference);
@@ -707,16 +715,16 @@ class RegExpBackReference final : public RegExpTree {
// The back reference may be recursive, e.g. /(\2)(\1)/. To avoid infinite
// recursion, we give up. Ignorance is bliss.
int max_match() override { return kInfinity; }
- int index() const { return capture_->index(); }
- RegExpCapture* capture() const { return capture_; }
- void set_capture(RegExpCapture* capture) { capture_ = capture; }
+ const ZoneList<RegExpCapture*>* captures() const { return &captures_; }
+ void add_capture(RegExpCapture* capture, Zone* zone) {
+ captures_.Add(capture, zone);
+ }
const ZoneVector<base::uc16>* name() const { return name_; }
void set_name(const ZoneVector<base::uc16>* name) { name_ = name; }
private:
- RegExpCapture* capture_ = nullptr;
+ ZoneList<RegExpCapture*> captures_;
const ZoneVector<base::uc16>* name_ = nullptr;
- const RegExpFlags flags_;
};
diff --git a/js/src/irregexp/imported/regexp-bytecode-generator.cc b/js/src/irregexp/imported/regexp-bytecode-generator.cc
index c83e10a598..251ed1cda5 100644
--- a/js/src/irregexp/imported/regexp-bytecode-generator.cc
+++ b/js/src/irregexp/imported/regexp-bytecode-generator.cc
@@ -383,7 +383,7 @@ Handle<HeapObject> RegExpBytecodeGenerator::GetCode(Handle<String> source) {
isolate_, zone(), source, buffer_.data(), length(), jump_edges_);
} else {
array = isolate_->factory()->NewByteArray(length());
- Copy(array->GetDataStartAddress());
+ Copy(array->begin());
}
return array;
diff --git a/js/src/irregexp/imported/regexp-bytecode-peephole.cc b/js/src/irregexp/imported/regexp-bytecode-peephole.cc
index ec8dcf1108..0ef0bab702 100644
--- a/js/src/irregexp/imported/regexp-bytecode-peephole.cc
+++ b/js/src/irregexp/imported/regexp-bytecode-peephole.cc
@@ -1012,13 +1012,13 @@ Handle<ByteArray> RegExpBytecodePeepholeOptimization::OptimizeBytecode(
RegExpBytecodePeephole peephole(zone, length, jump_edges);
bool did_optimize = peephole.OptimizeBytecode(bytecode, length);
Handle<ByteArray> array = isolate->factory()->NewByteArray(peephole.Length());
- peephole.CopyOptimizedBytecode(array->GetDataStartAddress());
+ peephole.CopyOptimizedBytecode(array->begin());
if (did_optimize && v8_flags.trace_regexp_peephole_optimization) {
PrintF("Original Bytecode:\n");
RegExpBytecodeDisassemble(bytecode, length, source->ToCString().get());
PrintF("Optimized Bytecode:\n");
- RegExpBytecodeDisassemble(array->GetDataStartAddress(), peephole.Length(),
+ RegExpBytecodeDisassemble(array->begin(), peephole.Length(),
source->ToCString().get());
}
diff --git a/js/src/irregexp/imported/regexp-compiler-tonode.cc b/js/src/irregexp/imported/regexp-compiler-tonode.cc
index f5087bdb08..b1340123d8 100644
--- a/js/src/irregexp/imported/regexp-compiler-tonode.cc
+++ b/js/src/irregexp/imported/regexp-compiler-tonode.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "irregexp/imported/regexp-compiler.h"
-
#include "irregexp/imported/regexp.h"
#ifdef V8_INTL_SUPPORT
@@ -418,27 +417,6 @@ RegExpNode* UnanchoredAdvance(RegExpCompiler* compiler,
} // namespace
-#ifdef V8_INTL_SUPPORT
-// static
-void CharacterRange::UnicodeSimpleCloseOver(icu::UnicodeSet& set) {
- // Remove characters for which closeOver() adds full-case-folding equivalents
- // because we should work only with simple case folding mappings.
- icu::UnicodeSet non_simple = icu::UnicodeSet(set);
- non_simple.retainAll(RegExpCaseFolding::UnicodeNonSimpleCloseOverSet());
- set.removeAll(non_simple);
-
- set.closeOver(USET_CASE_INSENSITIVE);
- // Full case folding maps single characters to multiple characters.
- // Those are represented as strings in the set. Remove them so that
- // we end up with only simple and common case mappings.
- set.removeAllStrings();
-
- // Add characters that have non-simple case foldings again (they match
- // themselves).
- set.addAll(non_simple);
-}
-#endif // V8_INTL_SUPPORT
-
// static
void CharacterRange::AddUnicodeCaseEquivalents(ZoneList<CharacterRange>* ranges,
Zone* zone) {
@@ -460,8 +438,7 @@ void CharacterRange::AddUnicodeCaseEquivalents(ZoneList<CharacterRange>* ranges,
}
// Clear the ranges list without freeing the backing store.
ranges->Rewind(0);
-
- UnicodeSimpleCloseOver(set);
+ set.closeOver(USET_SIMPLE_CASE_INSENSITIVE);
for (int i = 0; i < set.getRangeCount(); i++) {
ranges->Add(Range(set.getRangeStart(i), set.getRangeEnd(i)), zone);
}
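The deleted UnicodeSimpleCloseOver helper existed because closeOver(USET_CASE_INSENSITIVE) also pulls in full case foldings, which then had to be filtered back out; newer ICU exposes simple-case-only closure directly. A hedged sketch of the replacement call, assuming an ICU recent enough (73 or later) to define USET_SIMPLE_CASE_INSENSITIVE:

#include <unicode/uniset.h>
#include <unicode/uset.h>

#include <cassert>

int main() {
  // Close a one-character set over simple (single code point) case foldings.
  icu::UnicodeSet set;
  set.add('a');
  set.closeOver(USET_SIMPLE_CASE_INSENSITIVE);
  assert(set.contains('A'));
  // Unlike USET_CASE_INSENSITIVE, this option should not introduce the
  // multi-character strings produced by full case folding, so no
  // removeAllStrings() cleanup pass is needed afterwards.
  return 0;
}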
@@ -476,7 +453,9 @@ RegExpNode* RegExpClassRanges::ToNode(RegExpCompiler* compiler,
Zone* const zone = compiler->zone();
ZoneList<CharacterRange>* ranges = this->ranges(zone);
- if (NeedsUnicodeCaseEquivalents(compiler->flags())) {
+ const bool needs_case_folding =
+ NeedsUnicodeCaseEquivalents(compiler->flags()) && !is_case_folded();
+ if (needs_case_folding) {
CharacterRange::AddUnicodeCaseEquivalents(ranges, zone);
}
@@ -487,8 +466,7 @@ RegExpNode* RegExpClassRanges::ToNode(RegExpCompiler* compiler,
if (is_negated()) {
// With /v, character classes are never negated.
- // TODO(v8:11935): Change permalink once proposal is in stage 4.
- // https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#sec-compileatom
+ // https://tc39.es/ecma262/#sec-compileatom
// Atom :: CharacterClass
// 4. Assert: cc.[[Invert]] is false.
// Instead the complement is created when evaluating the class set.
@@ -561,7 +539,12 @@ RegExpNode* RegExpClassSetOperand::ToNode(RegExpCompiler* compiler,
}
}
if (!ranges()->is_empty()) {
- alternatives->Add(zone->template New<RegExpClassRanges>(zone, ranges()),
+ // In unicode sets mode case folding has to be done at precise locations
+ // (e.g. before building complements).
+ // It is therefore the parsers responsibility to case fold (sub-) ranges
+ // before creating ClassSetOperands.
+ alternatives->Add(zone->template New<RegExpClassRanges>(
+ zone, ranges(), RegExpClassRanges::IS_CASE_FOLDED),
zone);
}
if (empty_string != nullptr) {
@@ -1034,9 +1017,8 @@ namespace {
// \B to (?<=\w)(?=\w)|(?<=\W)(?=\W)
RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
RegExpNode* on_success,
- RegExpAssertion::Type type,
- RegExpFlags flags) {
- CHECK(NeedsUnicodeCaseEquivalents(flags));
+ RegExpAssertion::Type type) {
+ CHECK(NeedsUnicodeCaseEquivalents(compiler->flags()));
Zone* zone = compiler->zone();
ZoneList<CharacterRange>* word_range =
zone->New<ZoneList<CharacterRange>>(2, zone);
@@ -1080,14 +1062,13 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
return AssertionNode::AtStart(on_success);
case Type::BOUNDARY:
return NeedsUnicodeCaseEquivalents(compiler->flags())
- ? BoundaryAssertionAsLookaround(
- compiler, on_success, Type::BOUNDARY, compiler->flags())
+ ? BoundaryAssertionAsLookaround(compiler, on_success,
+ Type::BOUNDARY)
: AssertionNode::AtBoundary(on_success);
case Type::NON_BOUNDARY:
return NeedsUnicodeCaseEquivalents(compiler->flags())
? BoundaryAssertionAsLookaround(compiler, on_success,
- Type::NON_BOUNDARY,
- compiler->flags())
+ Type::NON_BOUNDARY)
: AssertionNode::AtNonBoundary(on_success);
case Type::END_OF_INPUT:
return AssertionNode::AtEnd(on_success);
@@ -1130,10 +1111,17 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
RegExpNode* RegExpBackReference::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return compiler->zone()->New<BackReferenceNode>(
- RegExpCapture::StartRegister(index()),
- RegExpCapture::EndRegister(index()), flags_, compiler->read_backward(),
- on_success);
+ RegExpNode* backref_node = on_success;
+ // Only one of the captures in the list can actually match. Since
+ // back-references to unmatched captures are treated as empty, we can simply
+ // create back-references to all possible captures.
+ for (auto capture : *captures()) {
+ backref_node = compiler->zone()->New<BackReferenceNode>(
+ RegExpCapture::StartRegister(capture->index()),
+ RegExpCapture::EndRegister(capture->index()), compiler->read_backward(),
+ backref_node);
+ }
+ return backref_node;
}
RegExpNode* RegExpEmpty::ToNode(RegExpCompiler* compiler,
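The rewritten RegExpBackReference::ToNode folds over the capture list, wrapping the successor once per capture; since at most one of the listed captures can have matched (the others read back as empty), checking all of them is safe. This appears aimed at duplicate named groups such as /(?<x>a)|(?<x>b)\k<x>/. A small sketch of the chain-building step with a toy node type (hypothetical, not the real RegExpNode hierarchy):

#include <cassert>
#include <memory>
#include <vector>

// Each link checks one capture; the original on_success sits at the tail.
struct Node {
  int capture_index;
  std::shared_ptr<Node> next;
};

std::shared_ptr<Node> BuildBackRefChain(const std::vector<int>& captures,
                                        std::shared_ptr<Node> on_success) {
  std::shared_ptr<Node> node = std::move(on_success);
  for (int index : captures) {
    node = std::make_shared<Node>(Node{index, node});  // wrap the chain so far
  }
  return node;
}

int main() {
  auto chain = BuildBackRefChain({1, 2}, nullptr);
  assert(chain->capture_index == 2);       // last-added capture is outermost
  assert(chain->next->capture_index == 1); // earlier captures sit inside
  return 0;
}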
@@ -1141,9 +1129,40 @@ RegExpNode* RegExpEmpty::ToNode(RegExpCompiler* compiler,
return on_success;
}
+namespace {
+
+class V8_NODISCARD ModifiersScope {
+ public:
+ ModifiersScope(RegExpCompiler* compiler, RegExpFlags flags)
+ : compiler_(compiler), previous_flags_(compiler->flags()) {
+ compiler->set_flags(flags);
+ }
+ ~ModifiersScope() { compiler_->set_flags(previous_flags_); }
+
+ private:
+ RegExpCompiler* compiler_;
+ const RegExpFlags previous_flags_;
+};
+
+} // namespace
+
RegExpNode* RegExpGroup::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return body_->ToNode(compiler, on_success);
+ // If no flags are modified, simply convert and return the body.
+ if (flags() == compiler->flags()) {
+ return body_->ToNode(compiler, on_success);
+ }
+ // Reset flags for successor node.
+ const RegExpFlags old_flags = compiler->flags();
+ on_success = ActionNode::ModifyFlags(old_flags, on_success);
+
+ // Convert body using modifier.
+ ModifiersScope modifiers_scope(compiler, flags());
+ RegExpNode* body = body_->ToNode(compiler, on_success);
+
+ // Wrap body into modifier node.
+ RegExpNode* modified_body = ActionNode::ModifyFlags(flags(), body);
+ return modified_body;
}
RegExpLookaround::Builder::Builder(bool is_positive, RegExpNode* on_success,
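ModifiersScope above is a plain RAII guard: it installs the group's flags on the compiler for the duration of converting a (?flags:...) body and restores the previous flags on scope exit, so nested groups compose without manual cleanup. A minimal standalone version of the same save-and-restore guard (hypothetical names):

#include <cassert>
#include <cstdint>

using Flags = uint32_t;
constexpr Flags kNone = 0;
constexpr Flags kIgnoreCase = 1u << 0;

struct Compiler {
  Flags flags = kNone;
};

class FlagsScope {
 public:
  FlagsScope(Compiler* compiler, Flags flags)
      : compiler_(compiler), previous_(compiler->flags) {
    compiler_->flags = flags;
  }
  ~FlagsScope() { compiler_->flags = previous_; }  // restore on scope exit

 private:
  Compiler* compiler_;
  const Flags previous_;
};

int main() {
  Compiler c;
  {
    FlagsScope scope(&c, kIgnoreCase);  // e.g. while emitting a (?i:...) body
    assert(c.flags == kIgnoreCase);
  }
  assert(c.flags == kNone);  // the outer flags are back in force
  return 0;
}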
diff --git a/js/src/irregexp/imported/regexp-compiler.cc b/js/src/irregexp/imported/regexp-compiler.cc
index 514975d8ed..73dfe1d2ad 100644
--- a/js/src/irregexp/imported/regexp-compiler.cc
+++ b/js/src/irregexp/imported/regexp-compiler.cc
@@ -707,6 +707,13 @@ ActionNode* ActionNode::EmptyMatchCheck(int start_register,
return result;
}
+ActionNode* ActionNode::ModifyFlags(RegExpFlags flags, RegExpNode* on_success) {
+ ActionNode* result =
+ on_success->zone()->New<ActionNode>(MODIFY_FLAGS, on_success);
+ result->data_.u_modify_flags.flags = flags;
+ return result;
+}
+
#define DEFINE_ACCEPT(Type) \
void Type##Node::Accept(NodeVisitor* visitor) { visitor->Visit##Type(this); }
FOR_EACH_NODE_TYPE(DEFINE_ACCEPT)
@@ -1377,6 +1384,9 @@ void ActionNode::GetQuickCheckDetails(QuickCheckDetails* details,
on_success()->GetQuickCheckDetailsFromLoopEntry(details, compiler,
filled_in, not_at_start);
} else {
+ if (action_type() == MODIFY_FLAGS) {
+ compiler->set_flags(flags());
+ }
on_success()->GetQuickCheckDetails(details, compiler, filled_in,
not_at_start);
}
@@ -2867,7 +2877,7 @@ int BoyerMooreLookahead::GetSkipTable(int min_lookahead, int max_lookahead,
const int kSkipArrayEntry = 0;
const int kDontSkipArrayEntry = 1;
- std::memset(boolean_skip_table->GetDataStartAddress(), kSkipArrayEntry,
+ std::memset(boolean_skip_table->begin(), kSkipArrayEntry,
boolean_skip_table->length());
for (int i = max_lookahead; i >= min_lookahead; i--) {
@@ -3454,6 +3464,11 @@ void ActionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
assembler->Backtrack();
return;
}
+ case MODIFY_FLAGS: {
+ compiler->set_flags(flags());
+ on_success()->Emit(compiler, trace);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -3473,8 +3488,8 @@ void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
RecursionCheck rc(compiler);
DCHECK_EQ(start_reg_ + 1, end_reg_);
- if (IsIgnoreCase(flags_)) {
- bool unicode = IsEitherUnicode(flags_);
+ if (IsIgnoreCase(compiler->flags())) {
+ bool unicode = IsEitherUnicode(compiler->flags());
assembler->CheckNotBackReferenceIgnoreCase(start_reg_, read_backward(),
unicode, trace->backtrack());
} else {
@@ -3485,7 +3500,7 @@ void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
if (read_backward()) trace->set_at_start(Trace::UNKNOWN);
// Check that the back reference does not end inside a surrogate pair.
- if (IsEitherUnicode(flags_) && !compiler->one_byte()) {
+ if (IsEitherUnicode(compiler->flags()) && !compiler->one_byte()) {
assembler->CheckNotInSurrogatePair(trace->cp_offset(), trace->backtrack());
}
on_success()->Emit(compiler, trace);
@@ -3707,7 +3722,7 @@ class Analysis : public NodeVisitor {
} while (false)
void VisitText(TextNode* that) override {
- that->MakeCaseIndependent(isolate(), is_one_byte_, flags_);
+ that->MakeCaseIndependent(isolate(), is_one_byte_, flags());
EnsureAnalyzed(that->on_success());
if (has_failed()) return;
that->CalculateOffsets();
@@ -3715,6 +3730,9 @@ class Analysis : public NodeVisitor {
}
void VisitAction(ActionNode* that) override {
+ if (that->action_type() == ActionNode::MODIFY_FLAGS) {
+ set_flags(that->flags());
+ }
EnsureAnalyzed(that->on_success());
if (has_failed()) return;
STATIC_FOR_EACH(Propagators::VisitAction(that));
@@ -3773,9 +3791,12 @@ class Analysis : public NodeVisitor {
#undef STATIC_FOR_EACH
private:
+ RegExpFlags flags() const { return flags_; }
+ void set_flags(RegExpFlags flags) { flags_ = flags; }
+
Isolate* isolate_;
const bool is_one_byte_;
- const RegExpFlags flags_;
+ RegExpFlags flags_;
RegExpError error_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
@@ -3903,13 +3924,12 @@ RegExpNode* RegExpCompiler::OptionallyStepBackToLeadSurrogate(
}
RegExpNode* RegExpCompiler::PreprocessRegExp(RegExpCompileData* data,
- RegExpFlags flags,
bool is_one_byte) {
// Wrap the body of the regexp in capture #0.
RegExpNode* captured_body =
RegExpCapture::ToNode(data->tree, 0, this, accept());
RegExpNode* node = captured_body;
- if (!data->tree->IsAnchoredAtStart() && !IsSticky(flags)) {
+ if (!data->tree->IsAnchoredAtStart() && !IsSticky(flags())) {
// Add a .*? at the beginning, outside the body capture, unless
// this expression is anchored at the beginning or sticky.
RegExpNode* loop_node = RegExpQuantifier::ToNode(
@@ -3931,13 +3951,14 @@ RegExpNode* RegExpCompiler::PreprocessRegExp(RegExpCompileData* data,
}
}
if (is_one_byte) {
- node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, flags);
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, flags());
// Do it again to propagate the new nodes to places where they were not
// put because they had not been calculated yet.
if (node != nullptr) {
- node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, flags);
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, flags());
}
- } else if (IsEitherUnicode(flags) && (IsGlobal(flags) || IsSticky(flags))) {
+ } else if (IsEitherUnicode(flags()) &&
+ (IsGlobal(flags()) || IsSticky(flags()))) {
node = OptionallyStepBackToLeadSurrogate(node);
}
diff --git a/js/src/irregexp/imported/regexp-compiler.h b/js/src/irregexp/imported/regexp-compiler.h
index 91dd43ab8a..7a369430bb 100644
--- a/js/src/irregexp/imported/regexp-compiler.h
+++ b/js/src/irregexp/imported/regexp-compiler.h
@@ -501,8 +501,7 @@ class RegExpCompiler {
// - Inserting the implicit .* before/after the regexp if necessary.
// - If the input is a one-byte string, filtering out nodes that can't match.
// - Fixing up regexp matches that start within a surrogate pair.
- RegExpNode* PreprocessRegExp(RegExpCompileData* data, RegExpFlags flags,
- bool is_one_byte);
+ RegExpNode* PreprocessRegExp(RegExpCompileData* data, bool is_one_byte);
// If the regexp matching starts within a surrogate pair, step back to the
// lead surrogate and start matching from there.
@@ -527,7 +526,8 @@ class RegExpCompiler {
inline void IncrementRecursionDepth() { recursion_depth_++; }
inline void DecrementRecursionDepth() { recursion_depth_--; }
- RegExpFlags flags() const { return flags_; }
+ inline RegExpFlags flags() const { return flags_; }
+ inline void set_flags(RegExpFlags flags) { flags_ = flags; }
void SetRegExpTooBig() { reg_exp_too_big_ = true; }
@@ -571,7 +571,7 @@ class RegExpCompiler {
int unicode_lookaround_position_register_;
ZoneVector<RegExpNode*>* work_list_;
int recursion_depth_;
- const RegExpFlags flags_;
+ RegExpFlags flags_;
RegExpMacroAssembler* macro_assembler_;
bool one_byte_;
bool reg_exp_too_big_;
diff --git a/js/src/irregexp/imported/regexp-dotprinter.cc b/js/src/irregexp/imported/regexp-dotprinter.cc
index 6746992a0a..cd0ca5dea8 100644
--- a/js/src/irregexp/imported/regexp-dotprinter.cc
+++ b/js/src/irregexp/imported/regexp-dotprinter.cc
@@ -231,6 +231,10 @@ void DotPrinterImpl::VisitAction(ActionNode* that) {
<< "\", shape=septagon";
break;
}
+ case ActionNode::MODIFY_FLAGS: {
+ os_ << "label=\"flags $" << that->flags() << "\", shape=septagon";
+ break;
+ }
}
os_ << "];\n";
PrintAttributes(that);
diff --git a/js/src/irregexp/imported/regexp-interpreter.cc b/js/src/irregexp/imported/regexp-interpreter.cc
index 43c8a4a5a4..2de1b12968 100644
--- a/js/src/irregexp/imported/regexp-interpreter.cc
+++ b/js/src/irregexp/imported/regexp-interpreter.cc
@@ -88,8 +88,7 @@ int32_t Load32Aligned(const uint8_t* pc) {
return *reinterpret_cast<const int32_t*>(pc);
}
-// TODO(jgruber): Rename to Load16AlignedUnsigned.
-uint32_t Load16Aligned(const uint8_t* pc) {
+uint32_t Load16AlignedUnsigned(const uint8_t* pc) {
DCHECK_EQ(0, reinterpret_cast<intptr_t>(pc) & 1);
return *reinterpret_cast<const uint16_t*>(pc);
}
@@ -221,17 +220,17 @@ IrregexpInterpreter::Result MaybeThrowStackOverflow(
template <typename Char>
void UpdateCodeAndSubjectReferences(
Isolate* isolate, Handle<ByteArray> code_array,
- Handle<String> subject_string, ByteArray* code_array_out,
+ Handle<String> subject_string, Tagged<ByteArray>* code_array_out,
const uint8_t** code_base_out, const uint8_t** pc_out,
- String* subject_string_out,
+ Tagged<String>* subject_string_out,
base::Vector<const Char>* subject_string_vector_out) {
DisallowGarbageCollection no_gc;
- if (*code_base_out != code_array->GetDataStartAddress()) {
+ if (*code_base_out != code_array->begin()) {
*code_array_out = *code_array;
const intptr_t pc_offset = *pc_out - *code_base_out;
DCHECK_GT(pc_offset, 0);
- *code_base_out = code_array->GetDataStartAddress();
+ *code_base_out = code_array->begin();
*pc_out = *code_base_out + pc_offset;
}
@@ -244,8 +243,9 @@ void UpdateCodeAndSubjectReferences(
// necessary.
template <typename Char>
IrregexpInterpreter::Result HandleInterrupts(
- Isolate* isolate, RegExp::CallOrigin call_origin, ByteArray* code_array_out,
- String* subject_string_out, const uint8_t** code_base_out,
+ Isolate* isolate, RegExp::CallOrigin call_origin,
+ Tagged<ByteArray>* code_array_out, Tagged<String>* subject_string_out,
+ const uint8_t** code_base_out,
base::Vector<const Char>* subject_string_vector_out,
const uint8_t** pc_out) {
DisallowGarbageCollection no_gc;
@@ -276,12 +276,12 @@ IrregexpInterpreter::Result HandleInterrupts(
} else if (check.InterruptRequested()) {
const bool was_one_byte =
String::IsOneByteRepresentationUnderneath(*subject_string_out);
- Object result;
+ Tagged<Object> result;
{
AllowGarbageCollection yes_gc;
result = isolate->stack_guard()->HandleInterrupts();
}
- if (result.IsException(isolate)) {
+ if (IsException(result, isolate)) {
return IrregexpInterpreter::EXCEPTION;
}
@@ -375,10 +375,10 @@ bool IndexIsInBounds(int index, int length) {
template <typename Char>
IrregexpInterpreter::Result RawMatch(
- Isolate* isolate, ByteArray code_array, String subject_string,
- base::Vector<const Char> subject, int* output_registers,
- int output_register_count, int total_register_count, int current,
- uint32_t current_char, RegExp::CallOrigin call_origin,
+ Isolate* isolate, Tagged<ByteArray> code_array,
+ Tagged<String> subject_string, base::Vector<const Char> subject,
+ int* output_registers, int output_register_count, int total_register_count,
+ int current, uint32_t current_char, RegExp::CallOrigin call_origin,
const uint32_t backtrack_limit) {
DisallowGarbageCollection no_gc;
@@ -430,7 +430,7 @@ IrregexpInterpreter::Result RawMatch(
#endif // V8_USE_COMPUTED_GOTO
- const uint8_t* pc = code_array.GetDataStartAddress();
+ const uint8_t* pc = code_array->begin();
const uint8_t* code_base = pc;
InterpreterRegisters registers(total_register_count, output_registers,
@@ -702,8 +702,8 @@ IrregexpInterpreter::Result RawMatch(
}
BYTECODE(MINUS_AND_CHECK_NOT_CHAR) {
uint32_t c = LoadPacked24Unsigned(insn);
- uint32_t minus = Load16Aligned(pc + 4);
- uint32_t mask = Load16Aligned(pc + 6);
+ uint32_t minus = Load16AlignedUnsigned(pc + 4);
+ uint32_t mask = Load16AlignedUnsigned(pc + 6);
if (c != ((current_char - minus) & mask)) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
} else {
@@ -712,8 +712,8 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(CHECK_CHAR_IN_RANGE) {
- uint32_t from = Load16Aligned(pc + 4);
- uint32_t to = Load16Aligned(pc + 6);
+ uint32_t from = Load16AlignedUnsigned(pc + 4);
+ uint32_t to = Load16AlignedUnsigned(pc + 6);
if (from <= current_char && current_char <= to) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
} else {
@@ -722,8 +722,8 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(CHECK_CHAR_NOT_IN_RANGE) {
- uint32_t from = Load16Aligned(pc + 4);
- uint32_t to = Load16Aligned(pc + 6);
+ uint32_t from = Load16AlignedUnsigned(pc + 4);
+ uint32_t to = Load16AlignedUnsigned(pc + 6);
if (from > current_char || current_char > to) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
} else {
@@ -914,7 +914,7 @@ IrregexpInterpreter::Result RawMatch(
BYTECODE(SKIP_UNTIL_CHAR) {
int32_t load_offset = LoadPacked24Signed(insn);
int32_t advance = Load16AlignedSigned(pc + 4);
- uint32_t c = Load16Aligned(pc + 6);
+ uint32_t c = Load16AlignedUnsigned(pc + 6);
while (IndexIsInBounds(current + load_offset, subject.length())) {
current_char = subject[current + load_offset];
if (c == current_char) {
@@ -929,7 +929,7 @@ IrregexpInterpreter::Result RawMatch(
BYTECODE(SKIP_UNTIL_CHAR_AND) {
int32_t load_offset = LoadPacked24Signed(insn);
int32_t advance = Load16AlignedSigned(pc + 4);
- uint16_t c = Load16Aligned(pc + 6);
+ uint16_t c = Load16AlignedUnsigned(pc + 6);
uint32_t mask = Load32Aligned(pc + 8);
int32_t maximum_offset = Load32Aligned(pc + 12);
while (static_cast<uintptr_t>(current + maximum_offset) <=
@@ -947,7 +947,7 @@ IrregexpInterpreter::Result RawMatch(
BYTECODE(SKIP_UNTIL_CHAR_POS_CHECKED) {
int32_t load_offset = LoadPacked24Signed(insn);
int32_t advance = Load16AlignedSigned(pc + 4);
- uint16_t c = Load16Aligned(pc + 6);
+ uint16_t c = Load16AlignedUnsigned(pc + 6);
int32_t maximum_offset = Load32Aligned(pc + 8);
while (static_cast<uintptr_t>(current + maximum_offset) <=
static_cast<uintptr_t>(subject.length())) {
@@ -979,7 +979,7 @@ IrregexpInterpreter::Result RawMatch(
BYTECODE(SKIP_UNTIL_GT_OR_NOT_BIT_IN_TABLE) {
int32_t load_offset = LoadPacked24Signed(insn);
int32_t advance = Load16AlignedSigned(pc + 4);
- uint16_t limit = Load16Aligned(pc + 6);
+ uint16_t limit = Load16AlignedUnsigned(pc + 6);
const uint8_t* table = pc + 8;
while (IndexIsInBounds(current + load_offset, subject.length())) {
current_char = subject[current + load_offset];
@@ -999,8 +999,8 @@ IrregexpInterpreter::Result RawMatch(
BYTECODE(SKIP_UNTIL_CHAR_OR_CHAR) {
int32_t load_offset = LoadPacked24Signed(insn);
int32_t advance = Load32Aligned(pc + 4);
- uint16_t c = Load16Aligned(pc + 8);
- uint16_t c2 = Load16Aligned(pc + 10);
+ uint16_t c = Load16AlignedUnsigned(pc + 8);
+ uint16_t c2 = Load16AlignedUnsigned(pc + 10);
while (IndexIsInBounds(current + load_offset, subject.length())) {
current_char = subject[current + load_offset];
// The two if-statements below are split up intentionally, as combining
@@ -1047,29 +1047,29 @@ IrregexpInterpreter::Result RawMatch(
// static
IrregexpInterpreter::Result IrregexpInterpreter::Match(
- Isolate* isolate, JSRegExp regexp, String subject_string,
+ Isolate* isolate, Tagged<JSRegExp> regexp, Tagged<String> subject_string,
int* output_registers, int output_register_count, int start_position,
RegExp::CallOrigin call_origin) {
- if (v8_flags.regexp_tier_up) regexp.TierUpTick();
+ if (v8_flags.regexp_tier_up) regexp->TierUpTick();
bool is_one_byte = String::IsOneByteRepresentationUnderneath(subject_string);
- ByteArray code_array = ByteArray::cast(regexp.bytecode(is_one_byte));
- int total_register_count = regexp.max_register_count();
+ Tagged<ByteArray> code_array = ByteArray::cast(regexp->bytecode(is_one_byte));
+ int total_register_count = regexp->max_register_count();
return MatchInternal(isolate, code_array, subject_string, output_registers,
output_register_count, total_register_count,
- start_position, call_origin, regexp.backtrack_limit());
+ start_position, call_origin, regexp->backtrack_limit());
}
IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
- Isolate* isolate, ByteArray code_array, String subject_string,
- int* output_registers, int output_register_count, int total_register_count,
- int start_position, RegExp::CallOrigin call_origin,
- uint32_t backtrack_limit) {
- DCHECK(subject_string.IsFlat());
+ Isolate* isolate, Tagged<ByteArray> code_array,
+ Tagged<String> subject_string, int* output_registers,
+ int output_register_count, int total_register_count, int start_position,
+ RegExp::CallOrigin call_origin, uint32_t backtrack_limit) {
+ DCHECK(subject_string->IsFlat());
// TODO(chromium:1262676): Remove this CHECK once fixed.
- CHECK(code_array.IsByteArray());
+ CHECK(IsByteArray(code_array));
// Note: Heap allocation *is* allowed in two situations if calling from
// Runtime:
@@ -1080,7 +1080,7 @@ IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
DisallowGarbageCollection no_gc;
base::uc16 previous_char = '\n';
- String::FlatContent subject_content = subject_string.GetFlatContent(no_gc);
+ String::FlatContent subject_content = subject_string->GetFlatContent(no_gc);
// Because interrupts can result in GC and string content relocation, the
// checksum verification in FlatContent may fail even though this code is
// safe. See (2) above.
@@ -1122,10 +1122,10 @@ IrregexpInterpreter::Result IrregexpInterpreter::MatchForCallFromJs(
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
- String subject_string = String::cast(Object(subject));
- JSRegExp regexp_obj = JSRegExp::cast(Object(regexp));
+ Tagged<String> subject_string = String::cast(Tagged<Object>(subject));
+ Tagged<JSRegExp> regexp_obj = JSRegExp::cast(Tagged<Object>(regexp));
- if (regexp_obj.MarkedForTierUp()) {
+ if (regexp_obj->MarkedForTierUp()) {
// Returning RETRY will re-enter through runtime, where actual recompilation
// for tier-up takes place.
return IrregexpInterpreter::RETRY;
diff --git a/js/src/irregexp/imported/regexp-interpreter.h b/js/src/irregexp/imported/regexp-interpreter.h
index bc55be2b8c..825916291f 100644
--- a/js/src/irregexp/imported/regexp-interpreter.h
+++ b/js/src/irregexp/imported/regexp-interpreter.h
@@ -49,17 +49,18 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
RegExp::CallOrigin call_origin,
Isolate* isolate, Address regexp);
- static Result MatchInternal(Isolate* isolate, ByteArray code_array,
- String subject_string, int* output_registers,
- int output_register_count,
+ static Result MatchInternal(Isolate* isolate, Tagged<ByteArray> code_array,
+ Tagged<String> subject_string,
+ int* output_registers, int output_register_count,
int total_register_count, int start_position,
RegExp::CallOrigin call_origin,
uint32_t backtrack_limit);
private:
- static Result Match(Isolate* isolate, JSRegExp regexp, String subject_string,
- int* output_registers, int output_register_count,
- int start_position, RegExp::CallOrigin call_origin);
+ static Result Match(Isolate* isolate, Tagged<JSRegExp> regexp,
+ Tagged<String> subject_string, int* output_registers,
+ int output_register_count, int start_position,
+ RegExp::CallOrigin call_origin);
};
} // namespace internal
diff --git a/js/src/irregexp/imported/regexp-macro-assembler.cc b/js/src/irregexp/imported/regexp-macro-assembler.cc
index b4d99bf775..b99c08424e 100644
--- a/js/src/irregexp/imported/regexp-macro-assembler.cc
+++ b/js/src/irregexp/imported/regexp-macro-assembler.cc
@@ -182,24 +182,25 @@ uint32_t RegExpMacroAssembler::IsCharacterInRangeArray(uint32_t current_char,
static constexpr uint32_t kTrue = 1;
static constexpr uint32_t kFalse = 0;
- FixedUInt16Array ranges = FixedUInt16Array::cast(Object(raw_byte_array));
- DCHECK_GE(ranges.length(), 1);
+ Tagged<FixedUInt16Array> ranges =
+ FixedUInt16Array::cast(Tagged<Object>(raw_byte_array));
+ DCHECK_GE(ranges->length(), 1);
// Shortcut for fully out of range chars.
- if (current_char < ranges.get(0)) return kFalse;
- if (current_char >= ranges.get(ranges.length() - 1)) {
+ if (current_char < ranges->get(0)) return kFalse;
+ if (current_char >= ranges->get(ranges->length() - 1)) {
// The last range may be open-ended.
- return (ranges.length() % 2) == 0 ? kFalse : kTrue;
+ return (ranges->length() % 2) == 0 ? kFalse : kTrue;
}
// Binary search for the matching range. `ranges` is encoded as
// [from0, to0, from1, to1, ..., fromN, toN], or
// [from0, to0, from1, to1, ..., fromN] (open-ended last interval).
- int mid, lower = 0, upper = ranges.length();
+ int mid, lower = 0, upper = ranges->length();
do {
mid = lower + (upper - lower) / 2;
- const base::uc16 elem = ranges.get(mid);
+ const base::uc16 elem = ranges->get(mid);
if (current_char < elem) {
upper = mid;
} else if (current_char > elem) {
@@ -210,7 +211,7 @@ uint32_t RegExpMacroAssembler::IsCharacterInRangeArray(uint32_t current_char,
}
} while (lower < upper);
- const bool current_char_ge_last_elem = current_char >= ranges.get(mid);
+ const bool current_char_ge_last_elem = current_char >= ranges->get(mid);
const int current_range_start_index =
current_char_ge_last_elem ? mid : mid - 1;
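The range table consulted here is a sorted flat array [from0, to0, from1, to1, ...]; an odd length means the final interval is open-ended, and membership falls out of where a binary search for the character lands relative to the from/to boundaries. A self-contained sketch of that lookup on a plain vector (hypothetical helper, not the assembler's ABI):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// ranges = [from0, to0, from1, to1, ...]; "to" bounds are exclusive and an
// odd length means the final interval [fromN, +inf) is open-ended.
// Intervals are sorted and disjoint.
bool CharInRanges(uint32_t c, const std::vector<uint16_t>& ranges) {
  assert(!ranges.empty());
  if (c < ranges.front()) return false;
  if (c >= ranges.back()) {
    // Past the last boundary: a match only if that boundary opens an interval.
    return ranges.size() % 2 != 0;
  }
  // Count boundaries <= c; an odd count means c sits inside some [from, to).
  std::size_t lo = 0, hi = ranges.size();
  while (lo < hi) {
    std::size_t mid = lo + (hi - lo) / 2;
    if (ranges[mid] <= c) {
      lo = mid + 1;
    } else {
      hi = mid;
    }
  }
  return lo % 2 != 0;
}

int main() {
  // Matches 'a'..'f' plus everything from 0x100 up (open-ended last interval).
  std::vector<uint16_t> ranges = {'a', 'f' + 1, 0x100};
  assert(CharInRanges('b', ranges));
  assert(!CharInRanges('z', ranges));
  assert(CharInRanges(0x2000, ranges));
  return 0;
}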
@@ -277,15 +278,16 @@ bool NativeRegExpMacroAssembler::CanReadUnaligned() const {
// static
int NativeRegExpMacroAssembler::CheckStackGuardState(
Isolate* isolate, int start_index, RegExp::CallOrigin call_origin,
- Address* return_address, InstructionStream re_code, Address* subject,
- const uint8_t** input_start, const uint8_t** input_end) {
+ Address* return_address, Tagged<InstructionStream> re_code,
+ Address* subject, const uint8_t** input_start, const uint8_t** input_end,
+ uintptr_t gap) {
DisallowGarbageCollection no_gc;
Address old_pc = PointerAuthentication::AuthenticatePC(return_address, 0);
- DCHECK_LE(re_code.instruction_start(), old_pc);
- DCHECK_LE(old_pc, re_code.code(kAcquireLoad).instruction_end());
+ DCHECK_LE(re_code->instruction_start(), old_pc);
+ DCHECK_LE(old_pc, re_code->code(kAcquireLoad)->instruction_end());
StackLimitCheck check(isolate);
- bool js_has_overflowed = check.JsHasOverflowed();
+ bool js_has_overflowed = check.JsHasOverflowed(gap);
if (call_origin == RegExp::CallOrigin::kFromJs) {
// Direct calls from JavaScript can be interrupted in two ways:
@@ -310,7 +312,8 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
// Prepare for possible GC.
HandleScope handles(isolate);
Handle<InstructionStream> code_handle(re_code, isolate);
- Handle<String> subject_handle(String::cast(Object(*subject)), isolate);
+ Handle<String> subject_handle(String::cast(Tagged<Object>(*subject)),
+ isolate);
bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject_handle);
int return_value = 0;
@@ -322,8 +325,8 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
return_value = EXCEPTION;
} else if (check.InterruptRequested()) {
AllowGarbageCollection yes_gc;
- Object result = isolate->stack_guard()->HandleInterrupts();
- if (result.IsException(isolate)) return_value = EXCEPTION;
+ Tagged<Object> result = isolate->stack_guard()->HandleInterrupts();
+ if (IsException(result, isolate)) return_value = EXCEPTION;
}
// We are not using operator == here because it does a slow DCHECK
@@ -371,34 +374,34 @@ int NativeRegExpMacroAssembler::Match(Handle<JSRegExp> regexp,
// DisallowGarbageCollection, since regexps might be preempted, and another
// thread might do allocation anyway.
- String subject_ptr = *subject;
+ Tagged<String> subject_ptr = *subject;
// Character offsets into string.
int start_offset = previous_index;
- int char_length = subject_ptr.length() - start_offset;
+ int char_length = subject_ptr->length() - start_offset;
int slice_offset = 0;
// The string has been flattened, so if it is a cons string it contains the
// full string in the first part.
if (StringShape(subject_ptr).IsCons()) {
- DCHECK_EQ(0, ConsString::cast(subject_ptr).second().length());
- subject_ptr = ConsString::cast(subject_ptr).first();
+ DCHECK_EQ(0, ConsString::cast(subject_ptr)->second()->length());
+ subject_ptr = ConsString::cast(subject_ptr)->first();
} else if (StringShape(subject_ptr).IsSliced()) {
- SlicedString slice = SlicedString::cast(subject_ptr);
- subject_ptr = slice.parent();
- slice_offset = slice.offset();
+ Tagged<SlicedString> slice = SlicedString::cast(subject_ptr);
+ subject_ptr = slice->parent();
+ slice_offset = slice->offset();
}
if (StringShape(subject_ptr).IsThin()) {
- subject_ptr = ThinString::cast(subject_ptr).actual();
+ subject_ptr = ThinString::cast(subject_ptr)->actual();
}
// Ensure that an underlying string has the same representation.
- bool is_one_byte = subject_ptr.IsOneByteRepresentation();
- DCHECK(subject_ptr.IsExternalString() || subject_ptr.IsSeqString());
+ bool is_one_byte = subject_ptr->IsOneByteRepresentation();
+ DCHECK(IsExternalString(subject_ptr) || IsSeqString(subject_ptr));
// String is now either Sequential or External
int char_size_shift = is_one_byte ? 0 : 1;
DisallowGarbageCollection no_gc;
const uint8_t* input_start =
- subject_ptr.AddressOfCharacterAt(start_offset + slice_offset, no_gc);
+ subject_ptr->AddressOfCharacterAt(start_offset + slice_offset, no_gc);
int byte_length = char_length << char_size_shift;
const uint8_t* input_end = input_start + byte_length;
return Execute(*subject, start_offset, input_start, input_end, offsets_vector,
@@ -407,9 +410,9 @@ int NativeRegExpMacroAssembler::Match(Handle<JSRegExp> regexp,
// static
int NativeRegExpMacroAssembler::ExecuteForTesting(
- String input, int start_offset, const uint8_t* input_start,
+ Tagged<String> input, int start_offset, const uint8_t* input_start,
const uint8_t* input_end, int* output, int output_size, Isolate* isolate,
- JSRegExp regexp) {
+ Tagged<JSRegExp> regexp) {
return Execute(input, start_offset, input_start, input_end, output,
output_size, isolate, regexp);
}
@@ -419,13 +422,14 @@ int NativeRegExpMacroAssembler::ExecuteForTesting(
// the signature of the interpreter. We should get rid of JS objects passed to
// internal methods.
int NativeRegExpMacroAssembler::Execute(
- String input, // This needs to be the unpacked (sliced, cons) string.
+ Tagged<String>
+ input, // This needs to be the unpacked (sliced, cons) string.
int start_offset, const uint8_t* input_start, const uint8_t* input_end,
- int* output, int output_size, Isolate* isolate, JSRegExp regexp) {
+ int* output, int output_size, Isolate* isolate, Tagged<JSRegExp> regexp) {
RegExpStackScope stack_scope(isolate);
bool is_one_byte = String::IsOneByteRepresentationUnderneath(input);
- Code code = Code::cast(regexp.code(is_one_byte));
+ Tagged<Code> code = Code::cast(regexp->code(isolate, is_one_byte));
RegExp::CallOrigin call_origin = RegExp::CallOrigin::kFromRuntime;
using RegexpMatcherSig =
@@ -439,7 +443,7 @@ int NativeRegExpMacroAssembler::Execute(
output, output_size, call_origin, isolate, regexp.ptr());
DCHECK_GE(result, SMALLEST_REGEXP_RESULT);
- if (result == EXCEPTION && !isolate->has_pending_exception()) {
+ if (result == EXCEPTION && !isolate->has_exception()) {
// We detected a stack overflow (on the backtrack stack) in RegExp code,
// but haven't created the exception yet. Additionally, we allow heap
// allocation because even though it invalidates {input_start} and
diff --git a/js/src/irregexp/imported/regexp-macro-assembler.h b/js/src/irregexp/imported/regexp-macro-assembler.h
index af7e4f5297..6863adbaff 100644
--- a/js/src/irregexp/imported/regexp-macro-assembler.h
+++ b/js/src/irregexp/imported/regexp-macro-assembler.h
@@ -301,12 +301,10 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
int* offsets_vector, int offsets_vector_length,
int previous_index, Isolate* isolate);
- V8_EXPORT_PRIVATE static int ExecuteForTesting(String input, int start_offset,
- const uint8_t* input_start,
- const uint8_t* input_end,
- int* output, int output_size,
- Isolate* isolate,
- JSRegExp regexp);
+ V8_EXPORT_PRIVATE static int ExecuteForTesting(
+ Tagged<String> input, int start_offset, const uint8_t* input_start,
+ const uint8_t* input_end, int* output, int output_size, Isolate* isolate,
+ Tagged<JSRegExp> regexp);
bool CanReadUnaligned() const override;
@@ -330,9 +328,9 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
static int CheckStackGuardState(Isolate* isolate, int start_index,
RegExp::CallOrigin call_origin,
Address* return_address,
- InstructionStream re_code, Address* subject,
- const uint8_t** input_start,
- const uint8_t** input_end);
+ Tagged<InstructionStream> re_code,
+ Address* subject, const uint8_t** input_start,
+ const uint8_t** input_end, uintptr_t gap);
static Address word_character_map_address() {
return reinterpret_cast<Address>(&word_character_map[0]);
@@ -348,9 +346,10 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
private:
// Returns a {Result} sentinel, or the number of successful matches.
- static int Execute(String input, int start_offset, const uint8_t* input_start,
- const uint8_t* input_end, int* output, int output_size,
- Isolate* isolate, JSRegExp regexp);
+ static int Execute(Tagged<String> input, int start_offset,
+ const uint8_t* input_start, const uint8_t* input_end,
+ int* output, int output_size, Isolate* isolate,
+ Tagged<JSRegExp> regexp);
ZoneUnorderedMap<uint32_t, Handle<FixedUInt16Array>> range_array_cache_;
};
diff --git a/js/src/irregexp/imported/regexp-nodes.h b/js/src/irregexp/imported/regexp-nodes.h
index 9407f1c5ec..f3d7e6c58f 100644
--- a/js/src/irregexp/imported/regexp-nodes.h
+++ b/js/src/irregexp/imported/regexp-nodes.h
@@ -318,7 +318,8 @@ class ActionNode : public SeqRegExpNode {
BEGIN_NEGATIVE_SUBMATCH,
POSITIVE_SUBMATCH_SUCCESS,
EMPTY_MATCH_CHECK,
- CLEAR_CAPTURES
+ CLEAR_CAPTURES,
+ MODIFY_FLAGS
};
static ActionNode* SetRegisterForLoop(int reg, int val,
RegExpNode* on_success);
@@ -341,6 +342,7 @@ class ActionNode : public SeqRegExpNode {
int repetition_register,
int repetition_limit,
RegExpNode* on_success);
+ static ActionNode* ModifyFlags(RegExpFlags flags, RegExpNode* on_success);
void Accept(NodeVisitor* visitor) override;
void Emit(RegExpCompiler* compiler, Trace* trace) override;
void GetQuickCheckDetails(QuickCheckDetails* details,
@@ -353,6 +355,10 @@ class ActionNode : public SeqRegExpNode {
int GreedyLoopTextLength() override {
return kNodeIsTooComplexForGreedyLoops;
}
+ RegExpFlags flags() {
+ DCHECK_EQ(action_type(), MODIFY_FLAGS);
+ return RegExpFlags{data_.u_modify_flags.flags};
+ }
private:
union {
@@ -382,9 +388,13 @@ class ActionNode : public SeqRegExpNode {
int range_from;
int range_to;
} u_clear_captures;
+ struct {
+ int flags;
+ } u_modify_flags;
} data_;
ActionNode(ActionType action_type, RegExpNode* on_success)
: SeqRegExpNode(on_success), action_type_(action_type) {}
+
ActionType action_type_;
friend class DotPrinterImpl;
friend Zone;
@@ -499,12 +509,11 @@ class AssertionNode : public SeqRegExpNode {
class BackReferenceNode : public SeqRegExpNode {
public:
- BackReferenceNode(int start_reg, int end_reg, RegExpFlags flags,
- bool read_backward, RegExpNode* on_success)
+ BackReferenceNode(int start_reg, int end_reg, bool read_backward,
+ RegExpNode* on_success)
: SeqRegExpNode(on_success),
start_reg_(start_reg),
end_reg_(end_reg),
- flags_(flags),
read_backward_(read_backward) {}
void Accept(NodeVisitor* visitor) override;
int start_register() { return start_reg_; }
@@ -522,7 +531,6 @@ class BackReferenceNode : public SeqRegExpNode {
private:
int start_reg_;
int end_reg_;
- RegExpFlags flags_;
bool read_backward_;
};
diff --git a/js/src/irregexp/imported/regexp-parser.cc b/js/src/irregexp/imported/regexp-parser.cc
index ea2a6c6d7a..965fc567b7 100644
--- a/js/src/irregexp/imported/regexp-parser.cc
+++ b/js/src/irregexp/imported/regexp-parser.cc
@@ -13,7 +13,7 @@
#include "unicode/unistr.h"
#include "unicode/usetiter.h"
#include "unicode/utf16.h" // For U16_NEXT
-#endif // V8_INTL_SUPPORT
+#endif // V8_INTL_SUPPORT
namespace v8 {
namespace internal {
@@ -67,8 +67,7 @@ class RegExpTextBuilder {
bool ignore_case() const { return IsIgnoreCase(flags_); }
bool IsUnicodeMode() const {
// Either /v or /u enable UnicodeMode
- // TODO(v8:11935): Change permalink once proposal is in stage 4.
- // https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#sec-parsepattern
+ // https://tc39.es/ecma262/#sec-parsepattern
return IsUnicode(flags_) || IsUnicodeSets(flags_);
}
Zone* zone() const { return zone_; }
@@ -264,7 +263,7 @@ RegExpTree* RegExpTextBuilder::PopLastAtom() {
characters_ = nullptr;
atom = zone()->New<RegExpAtom>(char_vector);
return atom;
- } else if (text_.size() > 0) {
+ } else if (!text_.empty()) {
atom = text_.back();
text_.pop_back();
return atom;
@@ -315,8 +314,7 @@ class RegExpBuilder {
void FlushTerms();
bool IsUnicodeMode() const {
// Either /v or /u enable UnicodeMode
- // TODO(v8:11935): Change permalink once proposal is in stage 4.
- // https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#sec-parsepattern
+ // https://tc39.es/ecma262/#sec-parsepattern
return IsUnicode(flags_) || IsUnicodeSets(flags_);
}
Zone* zone() const { return zone_; }
@@ -354,7 +352,12 @@ class RegExpParserState : public ZoneObject {
group_type_(group_type),
lookaround_type_(lookaround_type),
disjunction_capture_index_(disjunction_capture_index),
- capture_name_(capture_name) {}
+ capture_name_(capture_name) {
+ if (previous_state != nullptr) {
+ non_participating_capture_group_interval_ =
+ previous_state->non_participating_capture_group_interval();
+ }
+ }
// Parser state of containing expression, if any.
RegExpParserState* previous_state() const { return previous_state_; }
bool IsSubexpression() { return previous_state_ != nullptr; }
@@ -371,6 +374,9 @@ class RegExpParserState : public ZoneObject {
// The name of the current sub-expression, if group_type is CAPTURE. Only
// used for named captures.
const ZoneVector<base::uc16>* capture_name() const { return capture_name_; }
+ std::pair<int, int> non_participating_capture_group_interval() const {
+ return non_participating_capture_group_interval_;
+ }
bool IsNamedCapture() const { return capture_name_ != nullptr; }
@@ -398,6 +404,18 @@ class RegExpParserState : public ZoneObject {
return false;
}
+ void NewAlternative(int captures_started) {
+ if (non_participating_capture_group_interval().second != 0) {
+ // Extend the non-participating interval.
+ non_participating_capture_group_interval_.second = captures_started;
+ } else {
+ // Create new non-participating interval from the start of the current
+ // enclosing group to all captures created within that group so far.
+ non_participating_capture_group_interval_ =
+ std::make_pair(capture_index(), captures_started);
+ }
+ }
+
private:
// Linked list implementation of stack of states.
RegExpParserState* const previous_state_;
@@ -411,6 +429,11 @@ class RegExpParserState : public ZoneObject {
const int disjunction_capture_index_;
// Stored capture name (if any).
const ZoneVector<base::uc16>* const capture_name_;
+ // Interval of (named) capture indices ]from, to] that are not participating
+ // in the current state (i.e. they cannot match).
+ // Capture indices are not participating if they were created in a different
+ // alternative.
+ std::pair<int, int> non_participating_capture_group_interval_;
};
template <class CharT>
@@ -463,17 +486,22 @@ class RegExpParserImpl final {
RegExpTree* ParseClassSetOperand(const RegExpBuilder* builder,
ClassSetOperandType* type_out,
ZoneList<CharacterRange>* ranges,
- CharacterClassStrings* strings);
+ CharacterClassStrings* strings,
+ base::uc32* character);
base::uc32 ParseClassSetCharacter();
// Parses and returns a single escaped character.
base::uc32 ParseCharacterEscape(InClassEscapeState in_class_escape_state,
bool* is_escaped_unicode_character);
+ void AddMaybeSimpleCaseFoldedRange(ZoneList<CharacterRange>* ranges,
+ CharacterRange new_range);
+
RegExpTree* ParseClassUnion(const RegExpBuilder* builder, bool is_negated,
RegExpTree* first_operand,
ClassSetOperandType first_operand_type,
ZoneList<CharacterRange>* ranges,
- CharacterClassStrings* strings);
+ CharacterClassStrings* strings,
+ base::uc32 first_character);
RegExpTree* ParseClassIntersection(const RegExpBuilder* builder,
bool is_negated, RegExpTree* first_operand,
ClassSetOperandType first_operand_type);
@@ -504,11 +532,10 @@ class RegExpParserImpl final {
int captures_started() const { return captures_started_; }
int position() const { return next_pos_ - 1; }
bool failed() const { return failed_; }
- RegExpFlags flags() const { return top_level_flags_; }
+ RegExpFlags flags() const { return flags_; }
bool IsUnicodeMode() const {
// Either /v or /u enable UnicodeMode
- // TODO(v8:11935): Change permalink once proposal is in stage 4.
- // https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#sec-parsepattern
+ // https://tc39.es/ecma262/#sec-parsepattern
return IsUnicode(flags()) || IsUnicodeSets(flags()) || force_unicode_;
}
bool unicode_sets() const { return IsUnicodeSets(flags()); }
@@ -528,7 +555,7 @@ class RegExpParserImpl final {
// Creates a new named capture at the specified index. Must be called exactly
// once for each named capture. Fails if a capture with the same name is
// encountered.
- bool CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name, int index);
+ bool CreateNamedCaptureAtIndex(const RegExpParserState* state, int index);
// Parses the name of a capture group (?<name>pattern). The name must adhere
// to IdentifierName in the ECMAScript standard.
@@ -543,7 +570,7 @@ class RegExpParserImpl final {
// to avoid complicating cases in which references comes before the capture.
void PatchNamedBackReferences();
- ZoneVector<RegExpCapture*>* GetNamedCaptures() const;
+ ZoneVector<RegExpCapture*>* GetNamedCaptures();
// Returns true iff the pattern contains named captures. May call
// ScanForCaptures to look ahead at the remaining pattern.
@@ -593,16 +620,20 @@ class RegExpParserImpl final {
RegExpError error_ = RegExpError::kNone;
int error_pos_ = 0;
ZoneList<RegExpCapture*>* captures_;
- ZoneSet<RegExpCapture*, RegExpCaptureNameLess>* named_captures_;
+ // Maps capture names to a list of capture indices with this name.
+ ZoneMap<RegExpCapture*, ZoneList<int>*, RegExpCaptureNameLess>*
+ named_captures_;
ZoneList<RegExpBackReference*>* named_back_references_;
+ ZoneList<CharacterRange>* temp_ranges_;
const CharT* const input_;
const int input_length_;
base::uc32 current_;
- const RegExpFlags top_level_flags_;
+ RegExpFlags flags_;
bool force_unicode_ = false; // Force parser to act as if unicode were set.
int next_pos_;
int captures_started_;
int capture_count_; // Only valid after we have scanned for captures.
+ int lookaround_count_; // Only valid after we have scanned for lookbehinds.
bool has_more_;
bool simple_;
bool contains_anchor_;
@@ -625,10 +656,11 @@ RegExpParserImpl<CharT>::RegExpParserImpl(
input_(input),
input_length_(input_length),
current_(kEndMarker),
- top_level_flags_(flags),
+ flags_(flags),
next_pos_(0),
captures_started_(0),
capture_count_(0),
+ lookaround_count_(0),
has_more_(true),
simple_(false),
contains_anchor_(false),
@@ -909,21 +941,21 @@ RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
// Build result of subexpression.
if (group_type == CAPTURE) {
if (state->IsNamedCapture()) {
- CreateNamedCaptureAtIndex(state->capture_name(),
- capture_index CHECK_FAILED);
+ CreateNamedCaptureAtIndex(state, capture_index CHECK_FAILED);
}
RegExpCapture* capture = GetCapture(capture_index);
capture->set_body(body);
body = capture;
} else if (group_type == GROUPING) {
- body = zone()->template New<RegExpGroup>(body);
+ body = zone()->template New<RegExpGroup>(body, builder->flags());
} else {
DCHECK(group_type == POSITIVE_LOOKAROUND ||
group_type == NEGATIVE_LOOKAROUND);
bool is_positive = (group_type == POSITIVE_LOOKAROUND);
body = zone()->template New<RegExpLookaround>(
body, is_positive, end_capture_index - capture_index,
- capture_index, state->lookaround_type());
+ capture_index, state->lookaround_type(), lookaround_count_);
+ lookaround_count_++;
}
// Restore previous state.
@@ -937,6 +969,7 @@ RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
}
case '|': {
Advance();
+ state->NewAlternative(captures_started());
builder->NewAlternative();
continue;
}
@@ -984,6 +1017,7 @@ RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
case '(': {
state = ParseOpenParenthesis(state CHECK_FAILED);
builder = state->builder();
+ flags_ = builder->flags();
continue;
}
case '[': {
@@ -1037,8 +1071,8 @@ RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
builder->AddEmpty();
} else {
RegExpCapture* capture = GetCapture(index);
- RegExpTree* atom = zone()->template New<RegExpBackReference>(
- capture, builder->flags());
+ RegExpTree* atom =
+ zone()->template New<RegExpBackReference>(capture, zone());
builder->AddAtom(atom);
}
break;
@@ -1246,43 +1280,91 @@ RegExpParserState* RegExpParserImpl<CharT>::ParseOpenParenthesis(
bool is_named_capture = false;
const ZoneVector<base::uc16>* capture_name = nullptr;
SubexpressionType subexpr_type = CAPTURE;
+ RegExpFlags flags = state->builder()->flags();
+ bool parsing_modifiers = false;
+ bool modifiers_polarity = true;
+ RegExpFlags modifiers;
Advance();
if (current() == '?') {
- switch (Next()) {
- case ':':
- Advance(2);
- subexpr_type = GROUPING;
- break;
- case '=':
- Advance(2);
- lookaround_type = RegExpLookaround::LOOKAHEAD;
- subexpr_type = POSITIVE_LOOKAROUND;
- break;
- case '!':
- Advance(2);
- lookaround_type = RegExpLookaround::LOOKAHEAD;
- subexpr_type = NEGATIVE_LOOKAROUND;
- break;
- case '<':
- Advance();
- if (Next() == '=') {
+ do {
+ switch (Next()) {
+ case '-':
+ if (!v8_flags.js_regexp_modifiers) {
+ ReportError(RegExpError::kInvalidGroup);
+ return nullptr;
+ }
+ Advance();
+ parsing_modifiers = true;
+ if (modifiers_polarity == false) {
+ ReportError(RegExpError::kMultipleFlagDashes);
+ return nullptr;
+ }
+ modifiers_polarity = false;
+ break;
+ case 'm':
+ case 'i':
+ case 's': {
+ if (!v8_flags.js_regexp_modifiers) {
+ ReportError(RegExpError::kInvalidGroup);
+ return nullptr;
+ }
+ Advance();
+ parsing_modifiers = true;
+ RegExpFlag flag = TryRegExpFlagFromChar(current()).value();
+ if ((modifiers & flag) != 0) {
+ ReportError(RegExpError::kRepeatedFlag);
+ return nullptr;
+ }
+ modifiers |= flag;
+ flags.set(flag, modifiers_polarity);
+ break;
+ }
+ case ':':
+ Advance(2);
+ parsing_modifiers = false;
+ subexpr_type = GROUPING;
+ break;
+ case '=':
Advance(2);
- lookaround_type = RegExpLookaround::LOOKBEHIND;
+ parsing_modifiers = false;
+ lookaround_type = RegExpLookaround::LOOKAHEAD;
subexpr_type = POSITIVE_LOOKAROUND;
break;
- } else if (Next() == '!') {
+ case '!':
Advance(2);
- lookaround_type = RegExpLookaround::LOOKBEHIND;
+ parsing_modifiers = false;
+ lookaround_type = RegExpLookaround::LOOKAHEAD;
subexpr_type = NEGATIVE_LOOKAROUND;
break;
- }
- is_named_capture = true;
- has_named_captures_ = true;
- Advance();
- break;
- default:
- ReportError(RegExpError::kInvalidGroup);
- return nullptr;
+ case '<':
+ Advance();
+ parsing_modifiers = false;
+ if (Next() == '=') {
+ Advance(2);
+ lookaround_type = RegExpLookaround::LOOKBEHIND;
+ subexpr_type = POSITIVE_LOOKAROUND;
+ break;
+ } else if (Next() == '!') {
+ Advance(2);
+ lookaround_type = RegExpLookaround::LOOKBEHIND;
+ subexpr_type = NEGATIVE_LOOKAROUND;
+ break;
+ }
+ is_named_capture = true;
+ has_named_captures_ = true;
+ Advance();
+ break;
+ default:
+ ReportError(RegExpError::kInvalidGroup);
+ return nullptr;
+ }
+ } while (parsing_modifiers);
+ }
+ if (modifiers_polarity == false) {
+ // We encountered a dash.
+ if (modifiers == 0) {
+ ReportError(RegExpError::kInvalidFlagGroup);
+ return nullptr;
}
}
if (subexpr_type == CAPTURE) {
@@ -1299,7 +1381,7 @@ RegExpParserState* RegExpParserImpl<CharT>::ParseOpenParenthesis(
// Store current state and begin new disjunction parsing.
return zone()->template New<RegExpParserState>(
state, subexpr_type, lookaround_type, captures_started_, capture_name,
- state->builder()->flags(), zone());
+ flags, zone());
}
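The modifier-group parsing above accepts the inline syntax (?flags:...) and (?flags-flags:...) for the m, i and s flags, rejecting a repeated flag, a second dash, and a dash with no flags at all. A minimal sketch of the resulting behaviour, assuming the feature is enabled (it is gated on v8_flags.js_regexp_modifiers in this imported code; SpiderMonkey presumably gates it behind its own pref):

// Case-insensitivity applies only inside the modified group.
/(?i:a)b/.test("Ab");    // true  - 'a' matches ignoring case
/(?i:a)b/.test("aB");    // false - 'b' outside the group stays case-sensitive

// A '-' turns flags off for the group, overriding outer flags.
/(?-i:a)b/i.test("aB");  // true
/(?-i:a)b/i.test("Ab");  // false

// Invalid modifier groups map onto the errors reported above.
// new RegExp("(?i-i:a)");  // kRepeatedFlag
// new RegExp("(?-:a)");    // kInvalidFlagGroup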
// In order to know whether an escape is a backreference or not we have to scan
@@ -1511,7 +1593,10 @@ const ZoneVector<base::uc16>* RegExpParserImpl<CharT>::ParseCaptureGroupName() {
template <class CharT>
bool RegExpParserImpl<CharT>::CreateNamedCaptureAtIndex(
- const ZoneVector<base::uc16>* name, int index) {
+ const RegExpParserState* state, int index) {
+ const ZoneVector<base::uc16>* name = state->capture_name();
+ const std::pair<int, int> non_participating_capture_group_interval =
+ state->non_participating_capture_group_interval();
DCHECK(0 < index && index <= captures_started_);
DCHECK_NOT_NULL(name);
@@ -1521,21 +1606,33 @@ bool RegExpParserImpl<CharT>::CreateNamedCaptureAtIndex(
capture->set_name(name);
if (named_captures_ == nullptr) {
- named_captures_ =
- zone_->template New<ZoneSet<RegExpCapture*, RegExpCaptureNameLess>>(
- zone());
+ named_captures_ = zone_->template New<
+ ZoneMap<RegExpCapture*, ZoneList<int>*, RegExpCaptureNameLess>>(zone());
} else {
// Check for duplicates and bail if we find any.
-
const auto& named_capture_it = named_captures_->find(capture);
if (named_capture_it != named_captures_->end()) {
- ReportError(RegExpError::kDuplicateCaptureGroupName);
- return false;
+ if (v8_flags.js_regexp_duplicate_named_groups) {
+ ZoneList<int>* named_capture_indices = named_capture_it->second;
+ DCHECK_NOT_NULL(named_capture_indices);
+ DCHECK(!named_capture_indices->is_empty());
+ for (int named_index : *named_capture_indices) {
+ if (named_index < non_participating_capture_group_interval.first ||
+ named_index > non_participating_capture_group_interval.second) {
+ ReportError(RegExpError::kDuplicateCaptureGroupName);
+ return false;
+ }
+ }
+ } else {
+ ReportError(RegExpError::kDuplicateCaptureGroupName);
+ return false;
+ }
}
}
- named_captures_->emplace(capture);
-
+ auto entry = named_captures_->try_emplace(
+ capture, zone()->template New<ZoneList<int>>(1, zone()));
+ entry.first->second->Add(index, zone());
return true;
}
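named_captures_ is now a map from a representative capture to the list of capture indices using that name, so a duplicate name is rejected only when an existing capture with that name can still participate; indices inside the non-participating interval (captures created in other alternatives) may share a name. A small sketch of the observable behaviour, assuming the duplicate-named-groups feature is enabled (v8_flags.js_regexp_duplicate_named_groups here; SpiderMonkey presumably uses its own pref):

// The same name in different alternatives is accepted; .groups reflects
// whichever alternative actually matched.
const re = /(?<year>\d{4})-\d{2}|\d{2}-(?<year>\d{4})/;
"2024-05".match(re).groups.year;   // "2024"
"05-2024".match(re).groups.year;   // "2024"

// The same name twice in one alternative is still a SyntaxError.
// new RegExp("(?<x>a)(?<x>b)");   // kDuplicateCaptureGroupName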
@@ -1558,7 +1655,7 @@ bool RegExpParserImpl<CharT>::ParseNamedBackReference(
builder->AddEmpty();
} else {
RegExpBackReference* atom =
- zone()->template New<RegExpBackReference>(builder->flags());
+ zone()->template New<RegExpBackReference>(zone());
atom->set_name(name);
builder->AddAtom(atom);
@@ -1595,16 +1692,17 @@ void RegExpParserImpl<CharT>::PatchNamedBackReferences() {
DCHECK_NULL(search_capture->name());
search_capture->set_name(ref->name());
- int index = -1;
const auto& capture_it = named_captures_->find(search_capture);
- if (capture_it != named_captures_->end()) {
- index = (*capture_it)->index();
- } else {
+ if (capture_it == named_captures_->end()) {
ReportError(RegExpError::kInvalidNamedCaptureReference);
return;
}
- ref->set_capture(GetCapture(index));
+ DCHECK_IMPLIES(!v8_flags.js_regexp_duplicate_named_groups,
+ capture_it->second->length() == 1);
+ for (int index : *capture_it->second) {
+ ref->add_capture(GetCapture(index), zone());
+ }
}
}
@@ -1627,13 +1725,22 @@ RegExpCapture* RegExpParserImpl<CharT>::GetCapture(int index) {
}
template <class CharT>
-ZoneVector<RegExpCapture*>* RegExpParserImpl<CharT>::GetNamedCaptures() const {
- if (named_captures_ == nullptr || named_captures_->empty()) {
+ZoneVector<RegExpCapture*>* RegExpParserImpl<CharT>::GetNamedCaptures() {
+ if (named_captures_ == nullptr) {
return nullptr;
}
+ DCHECK(!named_captures_->empty());
- return zone()->template New<ZoneVector<RegExpCapture*>>(
- named_captures_->begin(), named_captures_->end(), zone());
+ ZoneVector<RegExpCapture*>* flattened_named_captures =
+ zone()->template New<ZoneVector<RegExpCapture*>>(zone());
+ for (auto capture : *named_captures_) {
+ DCHECK_IMPLIES(!v8_flags.js_regexp_duplicate_named_groups,
+ capture.second->length() == 1);
+ for (int index : *capture.second) {
+ flattened_named_captures->push_back(GetCapture(index));
+ }
+ }
+ return flattened_named_captures;
}
template <class CharT>
@@ -1890,7 +1997,7 @@ bool LookupPropertyValueName(UProperty property,
ExtractStringsFromUnicodeSet(set, result_strings, flags, zone);
}
const bool needs_case_folding = IsUnicodeSets(flags) && IsIgnoreCase(flags);
- if (needs_case_folding) CharacterRange::UnicodeSimpleCloseOver(set);
+ if (needs_case_folding) set.closeOver(USET_SIMPLE_CASE_INSENSITIVE);
set.removeAllStrings();
if (negate) set.complement();
for (int i = 0; i < set.getRangeCount(); i++) {
@@ -2096,13 +2203,22 @@ bool RegExpParserImpl<CharT>::AddPropertyClassRange(
if (!IsSupportedBinaryProperty(property, unicode_sets())) return false;
if (!IsExactPropertyAlias(name, property)) return false;
// Negation of properties with strings is not allowed.
- // TODO(v8:11935): Change permalink once proposal is in stage 4.
// See
- // https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#sec-static-semantics-maycontainstrings
+ // https://tc39.es/ecma262/#sec-static-semantics-maycontainstrings
if (negate && IsBinaryPropertyOfStrings(property)) return false;
- return LookupPropertyValueName(property, negate ? "N" : "Y", false,
- add_to_ranges, add_to_strings, flags(),
- zone());
+ if (unicode_sets()) {
+ // In /v mode we can't simply look up the "false" binary property values,
+ // as the spec requires us to perform case folding before calculating the
+ // complement.
+ // See https://tc39.es/ecma262/#sec-compiletocharset
+ // UnicodePropertyValueExpression :: LoneUnicodePropertyNameOrValue
+ return LookupPropertyValueName(property, "Y", negate, add_to_ranges,
+ add_to_strings, flags(), zone());
+ } else {
+ return LookupPropertyValueName(property, negate ? "N" : "Y", false,
+ add_to_ranges, add_to_strings, flags(),
+ zone());
+ }
} else {
// Both property name and value name are specified. Attempt to interpret
// the property name as enumerated property.
@@ -2325,8 +2441,7 @@ base::uc32 RegExpParserImpl<CharT>::ParseCharacterEscape(
return c;
}
-// TODO(v8:11935): Change permalink once proposal is in stage 4.
-// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassRanges
+// https://tc39.es/ecma262/#prod-ClassRanges
template <class CharT>
RegExpTree* RegExpParserImpl<CharT>::ParseClassRanges(
ZoneList<CharacterRange>* ranges, bool add_unicode_case_equivalents) {
@@ -2475,8 +2590,7 @@ void AddClassString(ZoneList<base::uc32>* normalized_string,
} // namespace
-// TODO(v8:11935): Change permalink once proposal is in stage 4.
-// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassStringDisjunction
+// https://tc39.es/ecma262/#prod-ClassStringDisjunction
template <class CharT>
RegExpTree* RegExpParserImpl<CharT>::ParseClassStringDisjunction(
ZoneList<CharacterRange>* ranges, CharacterClassStrings* strings) {
@@ -2526,8 +2640,7 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassStringDisjunction(
return nullptr;
}
-// TODO(v8:11935): Change permalink once proposal is in stage 4.
-// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassSetOperand
+// https://tc39.es/ecma262/#prod-ClassSetOperand
// Tree returned based on type_out:
// * kNestedClass: RegExpClassSetExpression
// * For all other types: RegExpClassSetOperand
@@ -2538,12 +2651,13 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassSetOperand(
zone()->template New<ZoneList<CharacterRange>>(1, zone());
CharacterClassStrings* strings =
zone()->template New<CharacterClassStrings>(zone());
- RegExpTree* tree =
- ParseClassSetOperand(builder, type_out, ranges, strings CHECK_FAILED);
+ base::uc32 character;
+ RegExpTree* tree = ParseClassSetOperand(builder, type_out, ranges, strings,
+ &character CHECK_FAILED);
DCHECK_IMPLIES(*type_out != ClassSetOperandType::kNestedClass,
tree == nullptr);
DCHECK_IMPLIES(*type_out == ClassSetOperandType::kClassSetCharacter,
- ranges->length() == 1);
+ ranges->is_empty());
DCHECK_IMPLIES(*type_out == ClassSetOperandType::kClassSetCharacter,
strings->empty());
DCHECK_IMPLIES(*type_out == ClassSetOperandType::kNestedClass,
@@ -2558,21 +2672,27 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassSetOperand(
// CharacterClassEscape includes \p{}, which can contain ranges, strings or
// both and \P{}, which could contain nothing (i.e. \P{Any}).
if (tree == nullptr) {
+ if (*type_out == ClassSetOperandType::kClassSetCharacter) {
+ AddMaybeSimpleCaseFoldedRange(ranges,
+ CharacterRange::Singleton(character));
+ }
tree = zone()->template New<RegExpClassSetOperand>(ranges, strings);
}
return tree;
}
-// TODO(v8:11935): Change permalink once proposal is in stage 4.
-// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassSetOperand
-// Based on |type_out| either a tree is returned or ranges/strings modified.
-// If a tree is returned, ranges/strings are not modified.
-// If |type_out| is kNestedClass, a tree of type RegExpClassSetExpression is
-// returned. For all other types, ranges is modified and nullptr is returned.
+// https://tc39.es/ecma262/#prod-ClassSetOperand
+// Based on |type_out| either a tree is returned or
+// |ranges|/|strings|/|character| modified. If a tree is returned,
+// ranges/strings are not modified. If |type_out| is kNestedClass, a tree of
+// type RegExpClassSetExpression is returned. If |type_out| is
+// kClassSetCharacter, |character| is set and nullptr returned. For all other
+// types, |ranges|/|strings|/|character| is modified and nullptr is returned.
template <class CharT>
RegExpTree* RegExpParserImpl<CharT>::ParseClassSetOperand(
const RegExpBuilder* builder, ClassSetOperandType* type_out,
- ZoneList<CharacterRange>* ranges, CharacterClassStrings* strings) {
+ ZoneList<CharacterRange>* ranges, CharacterClassStrings* strings,
+ base::uc32* character) {
DCHECK(unicode_sets());
base::uc32 c = current();
if (c == '\\') {
@@ -2599,7 +2719,7 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassSetOperand(
*type_out = ClassSetOperandType::kClassSetCharacter;
c = ParseClassSetCharacter(CHECK_FAILED);
- ranges->Add(CharacterRange::Singleton(c), zone());
+ *character = c;
return nullptr;
}
@@ -2653,13 +2773,28 @@ bool MayContainStrings(ClassSetOperandType type, RegExpTree* operand) {
} // namespace
-// TODO(v8:11935): Change permalink once proposal is in stage 4.
-// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassUnion
+template <class CharT>
+void RegExpParserImpl<CharT>::AddMaybeSimpleCaseFoldedRange(
+ ZoneList<CharacterRange>* ranges, CharacterRange new_range) {
+ DCHECK(unicode_sets());
+ if (ignore_case()) {
+ ZoneList<CharacterRange>* new_ranges =
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
+ new_ranges->Add(new_range, zone());
+ CharacterRange::AddUnicodeCaseEquivalents(new_ranges, zone());
+ ranges->AddAll(*new_ranges, zone());
+ } else {
+ ranges->Add(new_range, zone());
+ }
+ CharacterRange::Canonicalize(ranges);
+}
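When ignoreCase is set in /v mode, the helper above folds each new singleton or range into its simple case equivalents as it is added, instead of case-closing whole operand lists afterwards. A quick sketch of what that folding means for class ranges (assuming an engine that ships the /v flag):

// Under /vi, class ranges are closed over simple case equivalents,
// so an uppercase input still falls inside a lowercase range.
/[a-k]/vi.test("K");   // true
/[a-k]/v.test("K");    // false

// Out-of-order ranges are rejected before any folding happens.
// new RegExp("[k-a]", "v");   // kOutOfOrderCharacterClass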
+
+// https://tc39.es/ecma262/#prod-ClassUnion
template <class CharT>
RegExpTree* RegExpParserImpl<CharT>::ParseClassUnion(
const RegExpBuilder* builder, bool is_negated, RegExpTree* first_operand,
ClassSetOperandType first_operand_type, ZoneList<CharacterRange>* ranges,
- CharacterClassStrings* strings) {
+ CharacterClassStrings* strings, base::uc32 character) {
DCHECK(unicode_sets());
ZoneList<RegExpTree*>* operands =
zone()->template New<ZoneList<RegExpTree*>>(2, zone());
@@ -2673,7 +2808,6 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassUnion(
operands->Add(first_operand, zone());
}
ClassSetOperandType last_type = first_operand_type;
- const bool needs_case_folding = ignore_case();
while (has_more() && current() != ']') {
if (current() == '-') {
// Mix of ClassSetRange and ClassSubtraction is not allowed.
@@ -2690,42 +2824,36 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassUnion(
// represent a character range.
// In case one of them is not a ClassSetCharacter, it is a syntax error,
// as '-' can not be used unescaped within a class with /v.
- // TODO(v8:11935): Change permalink once proposal is in stage 4.
// See
- // https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassSetRange
+ // https://tc39.es/ecma262/#prod-ClassSetRange
if (last_type != ClassSetOperandType::kClassSetCharacter) {
return ReportError(RegExpError::kInvalidCharacterClass);
}
- ParseClassSetOperand(builder, &last_type, ranges, strings CHECK_FAILED);
+ base::uc32 from = character;
+ ParseClassSetOperand(builder, &last_type, ranges, strings,
+ &character CHECK_FAILED);
if (last_type != ClassSetOperandType::kClassSetCharacter) {
return ReportError(RegExpError::kInvalidCharacterClass);
}
- // Remove the last two singleton characters added to ranges, and combine
- // them into a range.
- auto rhs_ranges = ranges->RemoveLast();
- auto lhs_ranges = ranges->RemoveLast();
- DCHECK(lhs_ranges.IsSingleton());
- DCHECK(rhs_ranges.IsSingleton());
- base::uc32 from = lhs_ranges.from();
- base::uc32 to = rhs_ranges.from();
- if (from > to) {
+ if (from > character) {
return ReportError(RegExpError::kOutOfOrderCharacterClass);
}
- ranges->Add(CharacterRange::Range(from, to), zone());
+ AddMaybeSimpleCaseFoldedRange(ranges,
+ CharacterRange::Range(from, character));
last_type = ClassSetOperandType::kClassSetRange;
} else {
DCHECK_NE(current(), '-');
- RegExpTree* operand = ParseClassSetOperand(builder, &last_type, ranges,
- strings CHECK_FAILED);
+ if (last_type == ClassSetOperandType::kClassSetCharacter) {
+ AddMaybeSimpleCaseFoldedRange(ranges,
+ CharacterRange::Singleton(character));
+ }
+ RegExpTree* operand = ParseClassSetOperand(
+ builder, &last_type, ranges, strings, &character CHECK_FAILED);
if (operand != nullptr) {
may_contain_strings |= MayContainStrings(last_type, operand);
// Add the range we started building as operand and reset the current
// range.
if (!ranges->is_empty() || !strings->empty()) {
- if (needs_case_folding) {
- CharacterRange::Canonicalize(ranges);
- CharacterRange::AddUnicodeCaseEquivalents(ranges, zone());
- }
may_contain_strings |= !strings->empty();
operands->Add(
zone()->template New<RegExpClassSetOperand>(ranges, strings),
@@ -2742,12 +2870,12 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassUnion(
return ReportError(RegExpError::kUnterminatedCharacterClass);
}
+ if (last_type == ClassSetOperandType::kClassSetCharacter) {
+ AddMaybeSimpleCaseFoldedRange(ranges, CharacterRange::Singleton(character));
+ }
+
// Add the range we started building as operand.
if (!ranges->is_empty() || !strings->empty()) {
- if (needs_case_folding) {
- CharacterRange::Canonicalize(ranges);
- CharacterRange::AddUnicodeCaseEquivalents(ranges, zone());
- }
may_contain_strings |= !strings->empty();
operands->Add(zone()->template New<RegExpClassSetOperand>(ranges, strings),
zone());
@@ -2773,8 +2901,7 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassUnion(
may_contain_strings, operands);
}
-// TODO(v8:11935): Change permalink once proposal is in stage 4.
-// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassIntersection
+// https://tc39.es/ecma262/#prod-ClassIntersection
template <class CharT>
RegExpTree* RegExpParserImpl<CharT>::ParseClassIntersection(
const RegExpBuilder* builder, bool is_negated, RegExpTree* first_operand,
@@ -2815,8 +2942,7 @@ RegExpTree* RegExpParserImpl<CharT>::ParseClassIntersection(
may_contain_strings, operands);
}
-// TODO(v8:11935): Change permalink once proposal is in stage 4.
-// https://arai-a.github.io/ecma262-compare/snapshot.html?pr=2418#prod-ClassSubtraction
+// https://tc39.es/ecma262/#prod-ClassSubtraction
template <class CharT>
RegExpTree* RegExpParserImpl<CharT>::ParseClassSubtraction(
const RegExpBuilder* builder, bool is_negated, RegExpTree* first_operand,
@@ -2891,12 +3017,16 @@ RegExpTree* RegExpParserImpl<CharT>::ParseCharacterClass(
ClassSetOperandType operand_type;
CharacterClassStrings* strings =
zone()->template New<CharacterClassStrings>(zone());
- RegExpTree* operand = ParseClassSetOperand(builder, &operand_type, ranges,
- strings CHECK_FAILED);
+ base::uc32 character;
+ RegExpTree* operand = ParseClassSetOperand(
+ builder, &operand_type, ranges, strings, &character CHECK_FAILED);
switch (current()) {
case '-':
if (Next() == '-') {
if (operand == nullptr) {
+ if (operand_type == ClassSetOperandType::kClassSetCharacter) {
+ ranges->Add(CharacterRange::Singleton(character), zone());
+ }
operand =
zone()->template New<RegExpClassSetOperand>(ranges, strings);
}
@@ -2908,6 +3038,9 @@ RegExpTree* RegExpParserImpl<CharT>::ParseCharacterClass(
case '&':
if (Next() == '&') {
if (operand == nullptr) {
+ if (operand_type == ClassSetOperandType::kClassSetCharacter) {
+ ranges->Add(CharacterRange::Singleton(character), zone());
+ }
operand =
zone()->template New<RegExpClassSetOperand>(ranges, strings);
}
@@ -2916,7 +3049,7 @@ RegExpTree* RegExpParserImpl<CharT>::ParseCharacterClass(
}
}
return ParseClassUnion(builder, is_negated, operand, operand_type, ranges,
- strings);
+ strings, character);
}
}
@@ -3047,7 +3180,7 @@ bool RegExpBuilder::AddQuantifierToAtom(
RegExpTree* atom = text_builder().PopLastAtom();
if (atom != nullptr) {
FlushText();
- } else if (terms_.size() > 0) {
+ } else if (!terms_.empty()) {
atom = terms_.back();
terms_.pop_back();
if (atom->IsLookaround()) {
diff --git a/js/src/irregexp/imported/regexp.h b/js/src/irregexp/imported/regexp.h
index 50269a4b71..5dc9070ed9 100644
--- a/js/src/irregexp/imported/regexp.h
+++ b/js/src/irregexp/imported/regexp.h
@@ -87,8 +87,8 @@ class RegExp final : public AllStatic {
RegExpFlags flags, uint32_t backtrack_limit);
// Ensures that a regexp is fully compiled and ready to be executed on a
- // subject string. Returns true on success. Return false on failure, and
- // then an exception will be pending.
+ // subject string. Returns true on success. Throws and returns false on
+ // failure.
V8_WARN_UNUSED_RESULT static bool EnsureFullyCompiled(Isolate* isolate,
Handle<JSRegExp> re,
Handle<String> subject);
@@ -211,14 +211,16 @@ class RegExpResultsCache final : public AllStatic {
// Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
// On success, the returned result is guaranteed to be a COW-array.
- static Object Lookup(Heap* heap, String key_string, Object key_pattern,
- FixedArray* last_match_out, ResultsCacheType type);
+ static Tagged<Object> Lookup(Heap* heap, Tagged<String> key_string,
+ Tagged<Object> key_pattern,
+ Tagged<FixedArray>* last_match_out,
+ ResultsCacheType type);
// Attempt to add value_array to the cache specified by type. On success,
// value_array is turned into a COW-array.
static void Enter(Isolate* isolate, Handle<String> key_string,
Handle<Object> key_pattern, Handle<FixedArray> value_array,
Handle<FixedArray> last_match_cache, ResultsCacheType type);
- static void Clear(FixedArray cache);
+ static void Clear(Tagged<FixedArray> cache);
static constexpr int kRegExpResultsCacheSize = 0x100;
diff --git a/js/src/irregexp/imported/special-case.cc b/js/src/irregexp/imported/special-case.cc
index f5a9928b3a..d40ada6bb9 100644
--- a/js/src/irregexp/imported/special-case.cc
+++ b/js/src/irregexp/imported/special-case.cc
@@ -82,29 +82,6 @@ const icu::UnicodeSet& RegExpCaseFolding::SpecialAddSet() {
return set.Pointer()->set;
}
-icu::UnicodeSet BuildUnicodeNonSimpleCloseOverSet() {
- icu::UnicodeSet set;
- set.add(0x390);
- set.add(0x3b0);
- set.add(0x1fd3);
- set.add(0x1fe3);
- set.add(0xfb05, 0xfb06);
- set.freeze();
- return set;
-}
-
-struct UnicodeNonSimpleCloseOverSetData {
- UnicodeNonSimpleCloseOverSetData() : set(BuildUnicodeNonSimpleCloseOverSet()) {}
- const icu::UnicodeSet set;
-};
-
-//static
-const icu::UnicodeSet& RegExpCaseFolding::UnicodeNonSimpleCloseOverSet() {
- static base::LazyInstance<UnicodeNonSimpleCloseOverSetData>::type set =
- LAZY_INSTANCE_INITIALIZER;
- return set.Pointer()->set;
-}
-
} // namespace internal
} // namespace v8
diff --git a/js/src/irregexp/imported/special-case.h b/js/src/irregexp/imported/special-case.h
index ea511af5a4..050d72a064 100644
--- a/js/src/irregexp/imported/special-case.h
+++ b/js/src/irregexp/imported/special-case.h
@@ -70,21 +70,11 @@ namespace internal {
// another character. Characters that match no other characters in
// their equivalence class are added to IgnoreSet. Characters that
// match at least one other character are added to SpecialAddSet.
-//
-// For unicode ignoreCase ("iu" and "iv"),
-// UnicodeSet::closeOver(USET_CASE_INSENSITIVE) adds all characters that are in
-// the same equivalence class. This includes characaters that are in the same
-// equivalence class using full case folding. According to the spec, only
-// simple case folding shall be considered. We therefore create
-// UnicodeNonSimpleCloseOverSet containing all characters for which
-// UnicodeSet::closeOver adds characters that are not simple case folds. This
-// set should be used similar to IgnoreSet described above.
class RegExpCaseFolding final : public AllStatic {
public:
static const icu::UnicodeSet& IgnoreSet();
static const icu::UnicodeSet& SpecialAddSet();
- static const icu::UnicodeSet& UnicodeNonSimpleCloseOverSet();
// This implements ECMAScript 2020 21.2.2.8.2 (Runtime Semantics:
// Canonicalize) step 3, which is used to determine whether
diff --git a/js/src/irregexp/moz.build b/js/src/irregexp/moz.build
index ff030ad4bd..2c363ad349 100644
--- a/js/src/irregexp/moz.build
+++ b/js/src/irregexp/moz.build
@@ -14,9 +14,13 @@ include("../js-cxxflags.mozbuild")
CXXFLAGS += ["-Wno-error=type-limits", "-Wno-error=return-type"]
-# Suppress spurious warnings in third-party code. See bug 1810584.
+# Suppress spurious warnings in third-party code.
+# See bug 1810584 and bug 1879225.
if CONFIG["CC_TYPE"] == "gcc":
- CXXFLAGS += ["-Wno-error=nonnull"]
+ CXXFLAGS += ["-Wno-error=nonnull", "-Wno-narrowing"]
+if CONFIG["CC_TYPE"] in ("clang", "clang-cl"):
+ CXXFLAGS += ["-Wno-c++11-narrowing"]
+
UNIFIED_SOURCES += [
"imported/regexp-bytecode-generator.cc",
diff --git a/js/src/irregexp/moz.yaml b/js/src/irregexp/moz.yaml
index e230a89cfd..ca44833c24 100644
--- a/js/src/irregexp/moz.yaml
+++ b/js/src/irregexp/moz.yaml
@@ -9,8 +9,8 @@ origin:
description: A fast regular expression engine from V8
url: https://v8.dev
- release: 30a887aeb92153885619d8bb9fa57cda7adf9276 (Thu Jul 06 11:42:30 2023).
- revision: 30a887aeb92153885619d8bb9fa57cda7adf9276
+ release: e50ab13bbfaaf72717fd73d9a01434e4c3c1a0a8 (Thu Feb 29 03:38:59 2024).
+ revision: e50ab13bbfaaf72717fd73d9a01434e4c3c1a0a8
license: BSD-3-Clause
license-file: LICENSE.v8
diff --git a/js/src/jit-test/etc/wasm/generate-spectests/Cargo.lock b/js/src/jit-test/etc/wasm/generate-spectests/Cargo.lock
index 129b13b7f6..756fc8e6ac 100644
--- a/js/src/jit-test/etc/wasm/generate-spectests/Cargo.lock
+++ b/js/src/jit-test/etc/wasm/generate-spectests/Cargo.lock
@@ -29,6 +29,12 @@ dependencies = [
]
[[package]]
+name = "bumpalo"
+version = "3.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec"
+
+[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -193,7 +199,9 @@ checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
[[package]]
name = "wasm-encoder"
-version = "0.38.1"
+version = "0.41.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "972f97a5d8318f908dded23594188a90bcd09365986b1163e66d70170e5287ae"
dependencies = [
"leb128",
]
@@ -214,8 +222,11 @@ dependencies = [
[[package]]
name = "wast"
-version = "69.0.1"
+version = "71.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "647c3ac4354da32688537e8fc4d2fe6c578df51896298cb64727d98088a1fd26"
dependencies = [
+ "bumpalo",
"leb128",
"memchr",
"unicode-width",
diff --git a/js/src/jit-test/etc/wasm/generate-spectests/config.toml b/js/src/jit-test/etc/wasm/generate-spectests/config.toml
index 48e5be6e0d..d058a79986 100644
--- a/js/src/jit-test/etc/wasm/generate-spectests/config.toml
+++ b/js/src/jit-test/etc/wasm/generate-spectests/config.toml
@@ -1,6 +1,6 @@
# Standard 'directives.txt' prologues for jit-tests
harness_directive = "|jit-test| skip-if: true"
-directive = "|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js"
+directive = "|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js"
# Failing tests across all testsuites
excluded_tests = [
@@ -43,7 +43,7 @@ url = "https://github.com/WebAssembly/exception-handling"
branch = "main"
parent = "spec"
# Skip in jit-test when it's not enabled
-directive = "; --wasm-exceptions; --wasm-exnref; skip-if: !wasmExceptionsEnabled()"
+directive = "; --setpref=wasm_exnref=true; skip-if: !wasmExnRefEnabled()"
excluded_tests = [
# harness doesn't support exnref, because JS-API globals can't use it
"ref_null.wast.js"
@@ -53,7 +53,7 @@ excluded_tests = [
name = "memory64"
url = "https://github.com/mozilla-spidermonkey/memory64"
branch = "test-cases"
-directive = "; skip-if: !wasmMemory64Enabled()"
+directive = "; --setpref=wasm_memory64=true; skip-if: !wasmMemory64Enabled()"
excluded_tests = []
[[repos]]
@@ -61,7 +61,7 @@ name = "function-references"
url = "https://github.com/WebAssembly/function-references"
branch = "main"
parent = "spec"
-directive = "; --wasm-function-references; skip-if: !wasmFunctionReferencesEnabled()"
+directive = "; --setpref=wasm_gc=true; skip-if: !wasmGcEnabled()"
excluded_tests = [
# duplicate tail calls tests
"return_call.wast",
@@ -87,7 +87,7 @@ name = "relaxed-simd"
url = "https://github.com/WebAssembly/relaxed-simd"
branch = "main"
parent = "spec"
-directive = "; --wasm-relaxed-simd; skip-if: !wasmRelaxedSimdEnabled()"
+directive = "; --setpref=wasm_relaxed_simd=true; skip-if: !wasmRelaxedSimdEnabled()"
excluded_tests = []
[[repos]]
@@ -95,7 +95,7 @@ name = "extended-const"
url = "https://github.com/WebAssembly/extended-const"
branch = "main"
parent = "spec"
-directive = "; --wasm-extended-const; --no-wasm-gc; skip-if: !wasmExtendedConstEnabled()"
+directive = "; --setpref=wasm_gc=false"
excluded_tests = []
[[repos]]
@@ -103,7 +103,7 @@ name = "tail-call"
url = "https://github.com/WebAssembly/tail-call"
branch = "main"
parent = "spec"
-directive = "; --wasm-tail-calls; skip-if: !wasmTailCallsEnabled()"
+directive = "; --setpref=wasm_tail_calls=true; skip-if: !wasmTailCallsEnabled()"
excluded_tests = []
[[repos]]
@@ -111,7 +111,7 @@ name = "multi-memory"
url = "https://github.com/WebAssembly/multi-memory"
branch = "main"
parent = "spec"
-directive = "; --wasm-multi-memory; skip-if: !wasmMultiMemoryEnabled()"
+directive = "; --setpref=wasm_multi_memory=true; skip-if: !wasmMultiMemoryEnabled()"
excluded_tests = [
# Empty test fails parsing
"memory_copy1.wast",
@@ -122,7 +122,7 @@ name = "gc"
url = "https://github.com/WebAssembly/gc"
branch = "main"
parent = "function-references"
-directive = "; --wasm-gc; skip-if: !wasmGcEnabled()"
+directive = "; --setpref=wasm_gc=true; skip-if: !wasmGcEnabled()"
excluded_tests = [
# tail call tests that snuck in
"return_call.wast",
diff --git a/js/src/jit-test/etc/wasm/generate-spectests/wast2js/Cargo.toml b/js/src/jit-test/etc/wasm/generate-spectests/wast2js/Cargo.toml
index b55ae458df..1c7c3174e9 100644
--- a/js/src/jit-test/etc/wasm/generate-spectests/wast2js/Cargo.toml
+++ b/js/src/jit-test/etc/wasm/generate-spectests/wast2js/Cargo.toml
@@ -8,4 +8,4 @@ edition = "2018"
[dependencies]
anyhow = "1.0.19"
-wast = { path = "../../../../../../../../wasm-tools/crates/wast" }
+wast = "71.0.1"
diff --git a/js/src/jit-test/etc/wasm/spec-tests.patch b/js/src/jit-test/etc/wasm/spec-tests.patch
index a3c0e15676..cdd0d1aebc 100644
--- a/js/src/jit-test/etc/wasm/spec-tests.patch
+++ b/js/src/jit-test/etc/wasm/spec-tests.patch
@@ -78,6 +78,40 @@ diff --git a/js/src/jit-test/tests/wasm/spec/memory64/memory_trap64.wast.js b/js
+ value("i64", 7523094288207667809n),
+ ]);
+}
+diff --git a/js/src/jit-test/tests/wasm/spec/memory64/memory64.wast.js b/js/src/jit-test/tests/wasm/spec/memory64/memory64.wast.js
+--- a/js/src/jit-test/tests/wasm/spec/memory64/memory64.wast.js
++++ b/js/src/jit-test/tests/wasm/spec/memory64/memory64.wast.js
+@@ -27,17 +27,19 @@ let $2 = instantiate(`(module (memory i64 1 256))`);
+ // ./test/core/memory64.wast:6
+ let $3 = instantiate(`(module (memory i64 0 65536))`);
+
+-// ./test/core/memory64.wast:8
+-assert_invalid(
+- () => instantiate(`(module (memory i64 0) (memory i64 0))`),
+- `multiple memories`,
+-);
+-
+-// ./test/core/memory64.wast:9
+-assert_invalid(
+- () => instantiate(`(module (memory (import "spectest" "memory") i64 0) (memory i64 0))`),
+- `multiple memories`,
+-);
++if (!wasmMultiMemoryEnabled()) {
++ // ./test/core/memory64.wast:8
++ assert_invalid(
++ () => instantiate(`(module (memory i64 0) (memory i64 0))`),
++ `multiple memories`,
++ );
++
++ // ./test/core/memory64.wast:9
++ assert_invalid(
++ () => instantiate(`(module (memory (import "spectest" "memory") i64 0) (memory i64 0))`),
++ `multiple memories`,
++ );
++}
+
+ // ./test/core/memory64.wast:11
+ let $4 = instantiate(`(module (memory i64 (data)) (func (export "memsize") (result i64) (memory.size)))`);
diff --git a/js/src/jit-test/tests/wasm/spec/multi-memory/simd_load.wast.js b/js/src/jit-test/tests/wasm/spec/multi-memory/simd_load.wast.js
--- a/js/src/jit-test/tests/wasm/spec/multi-memory/simd_load.wast.js
+++ b/js/src/jit-test/tests/wasm/spec/multi-memory/simd_load.wast.js
@@ -105,6 +139,94 @@ diff --git a/js/src/jit-test/tests/wasm/spec/multi-memory/simd_store.wast.js b/j
/* Copyright 2021 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
+diff --git a/js/src/jit-test/tests/wasm/spec/multi-memory/harness/harness.js b/js/src/jit-test/tests/wasm/spec/multi-memory/harness/harness.js
+--- a/js/src/jit-test/tests/wasm/spec/multi-memory/harness/harness.js
++++ b/js/src/jit-test/tests/wasm/spec/multi-memory/harness/harness.js
+@@ -19,6 +19,15 @@ if (!wasmIsSupported()) {
+ quit();
+ }
+
++function partialOobWriteMayWritePartialData() {
++ let arm_native = getBuildConfiguration("arm") && !getBuildConfiguration("arm-simulator");
++ let arm64_native = getBuildConfiguration("arm64") && !getBuildConfiguration("arm64-simulator");
++ return arm_native || arm64_native;
++}
++
++let native_arm = getBuildConfiguration("arm") && !getBuildConfiguration("arm-simulator");
++let native_arm64 = getBuildConfiguration("arm64") && !getBuildConfiguration("arm64-simulator");
++
+ function bytes(type, bytes) {
+ var typedBuffer = new Uint8Array(bytes);
+ return wasmGlobalFromArrayBuffer(type, typedBuffer.buffer);
+diff --git a/js/src/jit-test/tests/wasm/spec/multi-memory/memory_trap1.wast.js b/js/src/jit-test/tests/wasm/spec/multi-memory/memory_trap1.wast.js
+--- a/js/src/jit-test/tests/wasm/spec/multi-memory/memory_trap1.wast.js
++++ b/js/src/jit-test/tests/wasm/spec/multi-memory/memory_trap1.wast.js
+@@ -562,11 +562,15 @@ assert_trap(() => invoke($0, `i64.load32_u`, [-3]), `out of bounds memory access
+ // ./test/core/multi-memory/memory_trap1.wast:234
+ assert_trap(() => invoke($0, `i64.load32_u`, [-4]), `out of bounds memory access`);
+
+-// ./test/core/multi-memory/memory_trap1.wast:237
+-assert_return(() => invoke($0, `i64.load`, [65528]), [value("i64", 7523094288207667809n)]);
++// Bug 1842293 - do not observe the partial store caused by bug 1666747 on
++// some native platforms.
++if (!partialOobWriteMayWritePartialData()) {
++ // ./test/core/multi-memory/memory_trap1.wast:237
++ assert_return(() => invoke($0, `i64.load`, [65528]), [value("i64", 7523094288207667809n)]);
+
+-// ./test/core/multi-memory/memory_trap1.wast:238
+-assert_return(() => invoke($0, `i64.load`, [0]), [value("i64", 7523094288207667809n)]);
++ // ./test/core/multi-memory/memory_trap1.wast:238
++ assert_return(() => invoke($0, `i64.load`, [0]), [value("i64", 7523094288207667809n)]);
++}
+
+ // ./test/core/multi-memory/memory_trap1.wast:242
+ assert_return(() => invoke($0, `i64.store`, [65528, 0n]), []);
+@@ -574,14 +578,18 @@ assert_return(() => invoke($0, `i64.store`, [65528, 0n]), []);
+ // ./test/core/multi-memory/memory_trap1.wast:243
+ assert_trap(() => invoke($0, `i32.store`, [65533, 305419896]), `out of bounds memory access`);
+
+-// ./test/core/multi-memory/memory_trap1.wast:244
+-assert_return(() => invoke($0, `i32.load`, [65532]), [value("i32", 0)]);
++if (!partialOobWriteMayWritePartialData()) {
++ // ./test/core/multi-memory/memory_trap1.wast:244
++ assert_return(() => invoke($0, `i32.load`, [65532]), [value("i32", 0)]);
++}
+
+ // ./test/core/multi-memory/memory_trap1.wast:245
+ assert_trap(() => invoke($0, `i64.store`, [65529, 1311768467294899695n]), `out of bounds memory access`);
+
+-// ./test/core/multi-memory/memory_trap1.wast:246
+-assert_return(() => invoke($0, `i64.load`, [65528]), [value("i64", 0n)]);
++if (!partialOobWriteMayWritePartialData()) {
++ // ./test/core/multi-memory/memory_trap1.wast:246
++ assert_return(() => invoke($0, `i64.load`, [65528]), [value("i64", 0n)]);
++}
+
+ // ./test/core/multi-memory/memory_trap1.wast:247
+ assert_trap(
+@@ -589,8 +597,10 @@ assert_trap(
+ `out of bounds memory access`,
+ );
+
+-// ./test/core/multi-memory/memory_trap1.wast:248
+-assert_return(() => invoke($0, `f32.load`, [65532]), [value("f32", 0)]);
++if (!partialOobWriteMayWritePartialData()) {
++ // ./test/core/multi-memory/memory_trap1.wast:248
++ assert_return(() => invoke($0, `f32.load`, [65532]), [value("f32", 0)]);
++}
+
+ // ./test/core/multi-memory/memory_trap1.wast:249
+ assert_trap(
+@@ -598,5 +608,7 @@ assert_trap(
+ `out of bounds memory access`,
+ );
+
+-// ./test/core/multi-memory/memory_trap1.wast:250
+-assert_return(() => invoke($0, `f64.load`, [65528]), [value("f64", 0)]);
++if (!partialOobWriteMayWritePartialData()) {
++ // ./test/core/multi-memory/memory_trap1.wast:250
++ assert_return(() => invoke($0, `f64.load`, [65528]), [value("f64", 0)]);
++}
diff --git a/js/src/jit-test/tests/wasm/spec/relaxed-simd/i32x4_relaxed_trunc.wast.js b/js/src/jit-test/tests/wasm/spec/relaxed-simd/i32x4_relaxed_trunc.wast.js
--- a/js/src/jit-test/tests/wasm/spec/relaxed-simd/i32x4_relaxed_trunc.wast.js
+++ b/js/src/jit-test/tests/wasm/spec/relaxed-simd/i32x4_relaxed_trunc.wast.js
@@ -132,6 +254,34 @@ diff --git a/js/src/jit-test/tests/wasm/spec/relaxed-simd/i32x4_relaxed_trunc.wa
),
],
);
+diff --git a/js/src/jit-test/tests/wasm/spec/spec/memory.wast.js b/js/src/jit-test/tests/wasm/spec/spec/memory.wast.js
+--- a/js/src/jit-test/tests/wasm/spec/spec/memory.wast.js
++++ b/js/src/jit-test/tests/wasm/spec/spec/memory.wast.js
+@@ -33,14 +33,16 @@ let $4 = instantiate(`(module (memory 1 256))`);
+ // ./test/core/memory.wast:8
+ let $5 = instantiate(`(module (memory 0 65536))`);
+
+-// ./test/core/memory.wast:10
+-assert_invalid(() => instantiate(`(module (memory 0) (memory 0))`), `multiple memories`);
+-
+-// ./test/core/memory.wast:11
+-assert_invalid(
+- () => instantiate(`(module (memory (import "spectest" "memory") 0) (memory 0))`),
+- `multiple memories`,
+-);
++if (!wasmMultiMemoryEnabled()) {
++ // ./test/core/memory.wast:10
++ assert_invalid(() => instantiate(`(module (memory 0) (memory 0))`), `multiple memories`);
++
++ // ./test/core/memory.wast:11
++ assert_invalid(
++ () => instantiate(`(module (memory (import "spectest" "memory") 0) (memory 0))`),
++ `multiple memories`,
++ );
++}
+
+ // ./test/core/memory.wast:13
+ let $6 = instantiate(`(module (memory (data)) (func (export "memsize") (result i32) (memory.size)))`);
diff --git a/js/src/jit-test/tests/wasm/spec/spec/simd_address.wast.js b/js/src/jit-test/tests/wasm/spec/spec/simd_address.wast.js
--- a/js/src/jit-test/tests/wasm/spec/spec/simd_address.wast.js
+++ b/js/src/jit-test/tests/wasm/spec/spec/simd_address.wast.js
@@ -632,7 +782,7 @@ index 3ea51a8cb0ff3..71739f4a1c8e4 100644
--- a/js/src/jit-test/tests/wasm/spec/function-references/return_call_ref.wast.js
+++ b/js/src/jit-test/tests/wasm/spec/function-references/return_call_ref.wast.js
@@ -1,3 +1,4 @@
-+// |jit-test| --wasm-tail-calls; skip-if: !wasmTailCallsEnabled()
++// |jit-test| --setpref=wasm_tail_calls=true; skip-if: !wasmTailCallsEnabled()
/* Copyright 2021 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -640,7 +790,7 @@ diff --git a/js/src/jit-test/tests/wasm/spec/spec/global.wast.js b/js/src/jit-te
--- a/js/src/jit-test/tests/wasm/spec/spec/global.wast.js
+++ b/js/src/jit-test/tests/wasm/spec/spec/global.wast.js
@@ -1,3 +1,4 @@
-+// |jit-test| --no-wasm-gc
++// |jit-test| --setpref=wasm_gc=false
/* Copyright 2021 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/js/src/jit-test/lib/gen/wasm-gc-limits-gen.js b/js/src/jit-test/lib/gen/wasm-gc-limits-gen.js
new file mode 100644
index 0000000000..01fd527cc3
--- /dev/null
+++ b/js/src/jit-test/lib/gen/wasm-gc-limits-gen.js
@@ -0,0 +1,71 @@
+// Generates large .wasm files for use in ../limits.js.
+// Make sure you are running this script from a release build or you will be sad.
+
+loadRelativeToScript("../wasm-binary.js");
+
+function moduleNRecGroupNTypes(numRecs, numTypes) {
+ let types = [];
+ for (let i = 0; i < numTypes; i++) {
+ types.push({ kind: FuncCode, args: [], ret: [] });
+ }
+ let recs = [];
+ for (let i = 0; i < numRecs; i++) {
+ recs.push(recGroup(types));
+ }
+ return new Uint8Array(compressLZ4(new Uint8Array(moduleWithSections([typeSection(recs)])).buffer));
+}
+
+os.file.writeTypedArrayToFile("wasm-gc-limits-r1M-t1.wasm", moduleNRecGroupNTypes(1_000_000, 1));
+os.file.writeTypedArrayToFile("wasm-gc-limits-r1M1-t1.wasm", moduleNRecGroupNTypes(1_000_001, 1));
+os.file.writeTypedArrayToFile("wasm-gc-limits-r1-t1M.wasm", moduleNRecGroupNTypes(1, 1_000_000));
+os.file.writeTypedArrayToFile("wasm-gc-limits-r1-t1M1.wasm", moduleNRecGroupNTypes(1, 1_000_001));
+os.file.writeTypedArrayToFile("wasm-gc-limits-r2-t500K.wasm", moduleNRecGroupNTypes(2, 500_000));
+os.file.writeTypedArrayToFile("wasm-gc-limits-r2-t500K1.wasm", moduleNRecGroupNTypes(2, 500_001));
+
+function moduleLargeStruct(size) {
+ let structInitializer = [];
+ for (let i = 0; i < size; i++) {
+ structInitializer.push(I64ConstCode);
+ structInitializer.push(...varU32(0));
+ }
+ return new Uint8Array(compressLZ4(new Uint8Array(moduleWithSections([
+ typeSection([
+ {
+ kind: StructCode,
+ fields: Array(size).fill(I64Code)
+ },
+ {
+ kind: FuncCode,
+ args: [],
+ ret: [AnyRefCode]
+ }
+ ]),
+ declSection([1, 1]),
+ exportSection([
+ {name: "makeLargeStructDefault", funcIndex: 0},
+ {name: "makeLargeStruct", funcIndex: 1}
+ ]),
+ bodySection([
+ funcBody({
+ locals: [],
+ body: [
+ GcPrefix,
+ StructNewDefault,
+ ...varU32(0)
+ ],
+ }),
+ funcBody({
+ locals: [],
+ body: [
+ ...structInitializer,
+ GcPrefix,
+ StructNew,
+ ...varU32(0)
+ ],
+ }),
+ ]),
+ ])).buffer));
+}
+
+os.file.writeTypedArrayToFile("wasm-gc-limits-s10K.wasm", moduleLargeStruct(10_000));
+os.file.writeTypedArrayToFile("wasm-gc-limits-s10K1.wasm", moduleLargeStruct(10_001));
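+
+// The generated modules are LZ4-compressed so the checked-in binaries stay
+// small; limits.js is expected to decompress them and check that the module
+// just under a limit validates while the one just over it does not. The
+// sketch below is a hypothetical consumer: decompressLZ4 and
+// os.file.readFile(path, "binary") are assumptions about the shell
+// environment, not code taken from limits.js.
+//
+// function loadCompressedModule(path) {
+//   const compressed = os.file.readFile(path, "binary");      // assumed to return a Uint8Array
+//   return new Uint8Array(decompressLZ4(compressed.buffer));  // assumed counterpart of compressLZ4
+// }
+// assertEq(WebAssembly.validate(loadCompressedModule("wasm-gc-limits-r1M-t1.wasm")), true);
+// assertEq(WebAssembly.validate(loadCompressedModule("wasm-gc-limits-r1M1-t1.wasm")), false);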
diff --git a/js/src/jit-test/lib/gen/wasm-gc-limits-r1-t1M.wasm b/js/src/jit-test/lib/gen/wasm-gc-limits-r1-t1M.wasm
new file mode 100644
index 0000000000..f92d36f8e3
--- /dev/null
+++ b/js/src/jit-test/lib/gen/wasm-gc-limits-r1-t1M.wasm
Binary files differ
diff --git a/js/src/jit-test/lib/gen/wasm-gc-limits-r1-t1M1.wasm b/js/src/jit-test/lib/gen/wasm-gc-limits-r1-t1M1.wasm
new file mode 100644
index 0000000000..190b522472
--- /dev/null
+++ b/js/src/jit-test/lib/gen/wasm-gc-limits-r1-t1M1.wasm
Binary files differ
diff --git a/js/src/jit-test/lib/gen/wasm-gc-limits-r1M-t1.wasm b/js/src/jit-test/lib/gen/wasm-gc-limits-r1M-t1.wasm
new file mode 100644
index 0000000000..5424e907ff
--- /dev/null
+++ b/js/src/jit-test/lib/gen/wasm-gc-limits-r1M-t1.wasm
Binary files differ
diff --git a/js/src/jit-test/lib/gen/wasm-gc-limits-r1M1-t1.wasm b/js/src/jit-test/lib/gen/wasm-gc-limits-r1M1-t1.wasm
new file mode 100644
index 0000000000..a96be86ef3
--- /dev/null
+++ b/js/src/jit-test/lib/gen/wasm-gc-limits-r1M1-t1.wasm
Binary files differ
diff --git a/js/src/jit-test/lib/gen/wasm-gc-limits-r2-t500K.wasm b/js/src/jit-test/lib/gen/wasm-gc-limits-r2-t500K.wasm
new file mode 100644
index 0000000000..b9eb6ea929
--- /dev/null
+++ b/js/src/jit-test/lib/gen/wasm-gc-limits-r2-t500K.wasm
Binary files differ
diff --git a/js/src/jit-test/lib/gen/wasm-gc-limits-r2-t500K1.wasm b/js/src/jit-test/lib/gen/wasm-gc-limits-r2-t500K1.wasm
new file mode 100644
index 0000000000..e44b6461fe
--- /dev/null
+++ b/js/src/jit-test/lib/gen/wasm-gc-limits-r2-t500K1.wasm
Binary files differ
diff --git a/js/src/jit-test/lib/gen/wasm-gc-limits-s10K.wasm b/js/src/jit-test/lib/gen/wasm-gc-limits-s10K.wasm
new file mode 100644
index 0000000000..5b209ab5e4
--- /dev/null
+++ b/js/src/jit-test/lib/gen/wasm-gc-limits-s10K.wasm
Binary files differ
diff --git a/js/src/jit-test/lib/gen/wasm-gc-limits-s10K1.wasm b/js/src/jit-test/lib/gen/wasm-gc-limits-s10K1.wasm
new file mode 100644
index 0000000000..a2062abc52
--- /dev/null
+++ b/js/src/jit-test/lib/gen/wasm-gc-limits-s10K1.wasm
Binary files differ
diff --git a/js/src/jit-test/lib/prologue.js b/js/src/jit-test/lib/prologue.js
index 4e24b19836..a5c21a5a08 100644
--- a/js/src/jit-test/lib/prologue.js
+++ b/js/src/jit-test/lib/prologue.js
@@ -7,17 +7,32 @@ var appendToActual = function(s) {
actual += s + ',';
}
-// Add dummy versions of missing functions and record whether they
-// were originally present.
+// Add dummy versions of missing functions and record whether they were
+// originally present.
+//
+// This only affects the main global. Any globals created by the test will
+// lack the function.
let hasFunction = {};
-for (const name of ["gczeal",
- "schedulegc",
- "gcslice",
- "selectforgc",
- "verifyprebarriers",
- "verifypostbarriers",
- "gcPreserveCode",
- "setMarkStackLimit"]) {
+for (const name of [
+ // Functions present if JS_GC_ZEAL defined:
+ "gczeal",
+ "unsetgczeal",
+ "schedulegc",
+ "selectforgc",
+ "verifyprebarriers",
+ "verifypostbarriers",
+ "currentgc",
+ "deterministicgc",
+ "dumpGCArenaInfo",
+ "setMarkStackLimit",
+ // Functions present if DEBUG or JS_OOM_BREAKPOINT defined:
+ "oomThreadTypes",
+ "oomAfterAllocations",
+ "oomAtAllocation",
+ "resetOOMFailure",
+ "oomTest",
+ "stackTest",
+ "interruptTest"]) {
const present = name in this;
if (!present) {
this[name] = function() {};
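
The prologue hunk above is the mechanism behind the many `skip-if` rewrites later in this diff. As a minimal sketch (assuming the prologue has run and populated `hasFunction`; the test body itself is hypothetical), a guarded test after the conversion looks like:

// |jit-test| skip-if: !hasFunction.oomTest
// Relies on prologue.js: hasFunction.oomTest records whether the real
// oomTest shell builtin exists; if it does not, a dummy no-op was installed,
// so the call below is safe either way.
oomTest(function () {
  JSON.parse("[1, 2, 3]");
});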
diff --git a/js/src/jit-test/lib/wasm-binary.js b/js/src/jit-test/lib/wasm-binary.js
index c55c8185f5..fdfd2d5732 100644
--- a/js/src/jit-test/lib/wasm-binary.js
+++ b/js/src/jit-test/lib/wasm-binary.js
@@ -44,6 +44,7 @@ const F64Code = 0x7c;
const V128Code = 0x7b;
const AnyFuncCode = 0x70;
const ExternRefCode = 0x6f;
+const AnyRefCode = 0x6e;
const EqRefCode = 0x6d;
const OptRefCode = 0x63; // (ref null $t), needs heap type immediate
const RefCode = 0x64; // (ref $t), needs heap type immediate
@@ -52,6 +53,9 @@ const StructCode = 0x5f;
const ArrayCode = 0x5e;
const VoidCode = 0x40;
const BadType = 0x79; // reserved for testing
+const RecGroupCode = 0x4e;
+const SubFinalTypeCode = 0x4f;
+const SubNoFinalTypeCode = 0x50;
// Opcodes
const UnreachableCode = 0x00
@@ -159,15 +163,17 @@ const MozPrefix = 0xff;
const definedOpcodes =
[0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
- ...(wasmExceptionsEnabled() ? [0x06, 0x07, 0x08, 0x09] : []),
+ 0x06, 0x07, 0x08, 0x09,
+ ...(wasmExnRefEnabled() ? [0x0a] : []),
0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11,
...(wasmTailCallsEnabled() ? [0x12, 0x13] : []),
- ...(wasmFunctionReferencesEnabled() ? [0x14] : []),
+ ...(wasmGcEnabled() ? [0x14] : []),
...(wasmTailCallsEnabled() &&
- wasmFunctionReferencesEnabled() ? [0x15] : []),
- ...(wasmExceptionsEnabled() ? [0x18, 0x19] : []),
+ wasmGcEnabled() ? [0x15] : []),
+ 0x18, 0x19,
0x1a, 0x1b, 0x1c,
+ ...(wasmExnRefEnabled() ? [0x1f] : []),
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
@@ -218,6 +224,7 @@ const ElemDropCode = 0x0d; // Pending
const TableCopyCode = 0x0e; // Pending
const StructNew = 0x00; // UNOFFICIAL
+const StructNewDefault = 0x01; // UNOFFICIAL
const StructGet = 0x03; // UNOFFICIAL
const StructSet = 0x06; // UNOFFICIAL
@@ -232,8 +239,9 @@ const TagCode = 0x04;
const HasMaximumFlag = 0x1;
function toU8(array) {
- for (let b of array)
- assertEq(b < 256, true);
+ for (const [i, b] of array.entries()) {
+ assertEq(b < 256, true, `expected byte at index ${i} but got ${b}`);
+ }
return Uint8Array.from(array);
}
@@ -284,12 +292,14 @@ function encodedString(name, len) {
return varU32(len === undefined ? nameBytes.length : len).concat(nameBytes);
}
-function moduleWithSections(sectionArray) {
- var bytes = moduleHeaderThen();
- for (let section of sectionArray) {
+function moduleWithSections(sections) {
+ const bytes = moduleHeaderThen();
+ for (const section of sections) {
bytes.push(section.name);
bytes.push(...varU32(section.body.length));
- bytes.push(...section.body);
+ for (let byte of section.body) {
+ bytes.push(byte);
+ }
}
return toU8(bytes);
}
@@ -385,13 +395,17 @@ function typeSection(types) {
body.push(...varU32(types.length)); // technically a count of recursion groups
for (const type of types) {
if (type.isRecursionGroup) {
- body.push(0x4f);
+ body.push(RecGroupCode);
body.push(...varU32(type.types.length));
for (const t of type.types) {
- body.push(..._encodeType(t));
+ for (const byte of _encodeType(t)) {
+ body.push(byte);
+ }
}
} else {
- body.push(..._encodeType(type));
+ for (const byte of _encodeType(type)) {
+ body.push(byte);
+ }
}
}
return { name: typeId, body };
@@ -439,12 +453,12 @@ function _encodeType(typeObj) {
// Types are now final by default.
const final = typeObj.final ?? true;
if (typeObj.sub !== undefined) {
- typeBytes.push(final ? 0x4e : 0x50);
+ typeBytes.push(final ? SubFinalTypeCode : SubNoFinalTypeCode);
typeBytes.push(...varU32(1), ...varU32(typeObj.sub));
}
else if (final == false) {
// This type is extensible even if no supertype is defined.
- typeBytes.push(0x50);
+ typeBytes.push(SubNoFinalTypeCode);
typeBytes.push(0x00);
}
typeBytes.push(typeObj.kind);
@@ -514,7 +528,9 @@ function funcBody(func, withEndCode=true) {
var body = varU32(func.locals.length);
for (let local of func.locals)
body.push(...varU32(local));
- body = body.concat(...func.body);
+ for (let byte of func.body) {
+ body.push(byte);
+ }
if (withEndCode)
body.push(EndCode);
body.splice(0, 0, ...varU32(body.length));
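
As a quick orientation for the wasm-binary.js helpers touched above, a minimal usage sketch (assumes the library has been loaded via load(libdir + "wasm-binary.js"); the module contents are illustrative only):

// Build a module with a single empty function type. moduleWithSections
// prepends the wasm header and each section's id and payload length.
const bytes = moduleWithSections([
  typeSection([
    { kind: FuncCode, args: [], ret: [] },  // type 0: () -> ()
  ]),
]);
// A module consisting of only a type section should validate.
assertEq(WebAssembly.validate(bytes), true);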
diff --git a/js/src/jit-test/lib/wasm.js b/js/src/jit-test/lib/wasm.js
index 2b3374ebbe..a5721913e9 100644
--- a/js/src/jit-test/lib/wasm.js
+++ b/js/src/jit-test/lib/wasm.js
@@ -42,13 +42,12 @@ if (largeArrayBufferSupported()) {
}
var MaxPagesIn32BitMemory = Math.floor(MaxBytesIn32BitMemory / PageSizeInBytes);
-function wasmEvalText(str, imports) {
- let binary = wasmTextToBinary(str);
- let valid = WebAssembly.validate(binary);
+function wasmEvalBinary(binary, imports, compileOptions) {
+ let valid = WebAssembly.validate(binary, compileOptions);
let m;
try {
- m = new WebAssembly.Module(binary);
+ m = new WebAssembly.Module(binary, compileOptions);
assertEq(valid, true, "failed WebAssembly.validate but still compiled successfully");
} catch(e) {
if (!e.toString().match(/out of memory/)) {
@@ -60,8 +59,11 @@ function wasmEvalText(str, imports) {
return new WebAssembly.Instance(m, imports);
}
-function wasmValidateText(str) {
- let binary = wasmTextToBinary(str);
+function wasmEvalText(str, imports, compileOptions) {
+ return wasmEvalBinary(wasmTextToBinary(str), imports, compileOptions);
+}
+
+function wasmValidateBinary(binary) {
let valid = WebAssembly.validate(binary);
if (!valid) {
new WebAssembly.Module(binary);
@@ -70,12 +72,19 @@ function wasmValidateText(str) {
assertEq(valid, true, "wasm module was invalid");
}
-function wasmFailValidateText(str, pattern) {
- let binary = wasmTextToBinary(str);
+function wasmFailValidateBinary(binary, pattern) {
assertEq(WebAssembly.validate(binary), false, "module passed WebAssembly.validate when it should not have");
assertErrorMessage(() => new WebAssembly.Module(binary), WebAssembly.CompileError, pattern, "module failed WebAssembly.validate but did not fail to compile as expected");
}
+function wasmValidateText(str) {
+ return wasmValidateBinary(wasmTextToBinary(str));
+}
+
+function wasmFailValidateText(str, pattern) {
+ return wasmFailValidateBinary(wasmTextToBinary(str), pattern);
+}
+
// Expected compilation failure can happen in a couple of ways:
//
// - The compiler can be available but not capable of recognizing some opcodes:
diff --git a/js/src/jit-test/tests/Set/bug1729269.js b/js/src/jit-test/tests/Set/bug1729269.js
index e97f6d2a12..61a7c0e535 100644
--- a/js/src/jit-test/tests/Set/bug1729269.js
+++ b/js/src/jit-test/tests/Set/bug1729269.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var patchSet = new Set();
function checkSet(str) {
diff --git a/js/src/jit-test/tests/arguments/1883837.js b/js/src/jit-test/tests/arguments/1883837.js
new file mode 100644
index 0000000000..3fb7ac7944
--- /dev/null
+++ b/js/src/jit-test/tests/arguments/1883837.js
@@ -0,0 +1,10 @@
+let threw = false;
+try {
+ ({
+ a: arguments.length
+ } = 0);
+} catch (error) {
+ assertEq(error instanceof ReferenceError, true);
+ threw = true;
+}
+assertEq(threw, true);
diff --git a/js/src/jit-test/tests/arguments/argumentsNaming.js b/js/src/jit-test/tests/arguments/argumentsNaming.js
new file mode 100644
index 0000000000..7532172873
--- /dev/null
+++ b/js/src/jit-test/tests/arguments/argumentsNaming.js
@@ -0,0 +1,3 @@
+let arguments = {}
+arguments.length = () => { };
+assertEq(arguments.length.name, "");
diff --git a/js/src/jit-test/tests/arrays/from-async-oom.js b/js/src/jit-test/tests/arrays/from-async-oom.js
index a68fd33299..5d7ada400f 100644
--- a/js/src/jit-test/tests/arrays/from-async-oom.js
+++ b/js/src/jit-test/tests/arrays/from-async-oom.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
// Basic Smoke Test
async function* asyncGen(n) {
for (let i = 0; i < n; i++) {
diff --git a/js/src/jit-test/tests/asm.js/bug1219954.js b/js/src/jit-test/tests/asm.js/bug1219954.js
index 305369a936..c8553dde74 100644
--- a/js/src/jit-test/tests/asm.js/bug1219954.js
+++ b/js/src/jit-test/tests/asm.js/bug1219954.js
@@ -1,4 +1,4 @@
-// |jit-test| slow; skip-if: !('oomTest' in this)
+// |jit-test| slow
"use strict";
let g = (function() {
diff --git a/js/src/jit-test/tests/asm.js/bug1385428.js b/js/src/jit-test/tests/asm.js/bug1385428.js
index c586ec8f3e..2044b52b96 100644
--- a/js/src/jit-test/tests/asm.js/bug1385428.js
+++ b/js/src/jit-test/tests/asm.js/bug1385428.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
loadFile(`
try {
Array.prototype.splice.call({ get length() {
diff --git a/js/src/jit-test/tests/asm.js/bug1421565.js b/js/src/jit-test/tests/asm.js/bug1421565.js
index 04ab718305..54719e87f7 100644
--- a/js/src/jit-test/tests/asm.js/bug1421565.js
+++ b/js/src/jit-test/tests/asm.js/bug1421565.js
@@ -1,4 +1,4 @@
-// |jit-test| --ion-offthread-compile=off; skip-if: !isAsmJSCompilationAvailable() || !('oomTest' in this)
+// |jit-test| --ion-offthread-compile=off; skip-if: !isAsmJSCompilationAvailable()
load(libdir + "asm.js");
diff --git a/js/src/jit-test/tests/asm.js/oom-helper-thread-plus-validation-error.js b/js/src/jit-test/tests/asm.js/oom-helper-thread-plus-validation-error.js
index 2d80e98d3c..71744c40be 100644
--- a/js/src/jit-test/tests/asm.js/oom-helper-thread-plus-validation-error.js
+++ b/js/src/jit-test/tests/asm.js/oom-helper-thread-plus-validation-error.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomAfterAllocations' in this)
-
oomAfterAllocations(10, 2);
evaluate(`function mod(stdlib, ffi, heap) {
"use asm";
diff --git a/js/src/jit-test/tests/asm.js/oom-helper-thread.js b/js/src/jit-test/tests/asm.js/oom-helper-thread.js
index 18165c01b3..6e7bfc7fd5 100644
--- a/js/src/jit-test/tests/asm.js/oom-helper-thread.js
+++ b/js/src/jit-test/tests/asm.js/oom-helper-thread.js
@@ -1,4 +1,4 @@
-// |jit-test| exitstatus: 3; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| exitstatus: 3
oomAfterAllocations(50, 2);
eval("(function() {'use asm'; function f() { return +pow(.0, .0) })")
diff --git a/js/src/jit-test/tests/asm.js/testBug1255954.js b/js/src/jit-test/tests/asm.js/testBug1255954.js
index 004136bc18..2e1d38de12 100644
--- a/js/src/jit-test/tests/asm.js/testBug1255954.js
+++ b/js/src/jit-test/tests/asm.js/testBug1255954.js
@@ -1,4 +1,4 @@
-// |jit-test| slow; skip-if: !('oomTest' in this)
+// |jit-test| slow
const USE_ASM = '"use asm";';
function asmCompile() {
diff --git a/js/src/jit-test/tests/atomics/basic-tests.js b/js/src/jit-test/tests/atomics/basic-tests.js
index 3712dd1b6f..97dc2a37af 100644
--- a/js/src/jit-test/tests/atomics/basic-tests.js
+++ b/js/src/jit-test/tests/atomics/basic-tests.js
@@ -1,3 +1,5 @@
+// |jit-test| --enable-arraybuffer-resizable
+
// Basic functional tests for the Atomics primitives.
//
// These do not test atomicity, just that calling and coercions and
@@ -562,3 +564,21 @@ function runTests(SharedOrUnsharedArrayBuffer) {
runTests(SharedArrayBuffer);
runTests(ArrayBuffer);
+
+if (ArrayBuffer.prototype.resize) {
+ class ResizableArrayBuffer {
+ constructor(byteLength = 0) {
+ return new ArrayBuffer(byteLength, {maxByteLength: byteLength});
+ }
+ }
+ runTests(ResizableArrayBuffer);
+}
+
+if (SharedArrayBuffer.prototype.grow) {
+ class GrowableSharedArrayBuffer {
+ constructor(byteLength = 0) {
+ return new SharedArrayBuffer(byteLength, {maxByteLength: byteLength});
+ }
+ }
+ runTests(GrowableSharedArrayBuffer);
+}
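
For readers unfamiliar with the flag added at the top of this test, a small sketch of the buffer behaviour the new runTests calls depend on (requires --enable-arraybuffer-resizable in the shell):

// Resizable ArrayBuffers take a maxByteLength option and may be resized in
// either direction up to that limit.
let ab = new ArrayBuffer(4, { maxByteLength: 8 });
ab.resize(8);
ab.resize(2);

// Growable SharedArrayBuffers use grow() and can only get larger.
let sab = new SharedArrayBuffer(4, { maxByteLength: 8 });
sab.grow(8);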
diff --git a/js/src/jit-test/tests/auto-regress/bug1263558.js b/js/src/jit-test/tests/auto-regress/bug1263558.js
index 41705977ac..5a4d2df2dd 100644
--- a/js/src/jit-test/tests/auto-regress/bug1263558.js
+++ b/js/src/jit-test/tests/auto-regress/bug1263558.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this)
+// |jit-test| skip-if: !hasFunction.oomTest
evalcx(`
eval('\
diff --git a/js/src/jit-test/tests/auto-regress/bug1263865.js b/js/src/jit-test/tests/auto-regress/bug1263865.js
index e5b11769d9..4ce38d0dd9 100644
--- a/js/src/jit-test/tests/auto-regress/bug1263865.js
+++ b/js/src/jit-test/tests/auto-regress/bug1263865.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
loadFile("");
loadFile("");
loadFile("Array.prototype.splice.call(1)");
diff --git a/js/src/jit-test/tests/auto-regress/bug1263879.js b/js/src/jit-test/tests/auto-regress/bug1263879.js
index 1baba2dd3e..c2553b3124 100644
--- a/js/src/jit-test/tests/auto-regress/bug1263879.js
+++ b/js/src/jit-test/tests/auto-regress/bug1263879.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var lines = `
diff --git a/js/src/jit-test/tests/auto-regress/bug1264823.js b/js/src/jit-test/tests/auto-regress/bug1264823.js
index afac37c3c6..5005296dc7 100644
--- a/js/src/jit-test/tests/auto-regress/bug1264823.js
+++ b/js/src/jit-test/tests/auto-regress/bug1264823.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
loadFile("");
loadFile("");
loadFile(` function lalala() {}
diff --git a/js/src/jit-test/tests/auto-regress/bug1268034.js b/js/src/jit-test/tests/auto-regress/bug1268034.js
index 43f2a661fd..f48cbd0278 100644
--- a/js/src/jit-test/tests/auto-regress/bug1268034.js
+++ b/js/src/jit-test/tests/auto-regress/bug1268034.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(function() {
offThreadCompileToStencil("");
});
diff --git a/js/src/jit-test/tests/auto-regress/bug1269074.js b/js/src/jit-test/tests/auto-regress/bug1269074.js
index 6e2cc33035..74d5fde822 100644
--- a/js/src/jit-test/tests/auto-regress/bug1269074.js
+++ b/js/src/jit-test/tests/auto-regress/bug1269074.js
@@ -1,3 +1,3 @@
-// |jit-test| allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom; skip-if: !hasFunction.oomTest
evalcx('oomTest(function() { Array(...""); })', newGlobal());
diff --git a/js/src/jit-test/tests/auto-regress/bug1375446.js b/js/src/jit-test/tests/auto-regress/bug1375446.js
index ef8ae4c640..b48475a96b 100644
--- a/js/src/jit-test/tests/auto-regress/bug1375446.js
+++ b/js/src/jit-test/tests/auto-regress/bug1375446.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom
loadFile(`
disassemble(function() {
diff --git a/js/src/jit-test/tests/auto-regress/bug1462341.js b/js/src/jit-test/tests/auto-regress/bug1462341.js
index 37c4f2129c..3f77b43ba0 100644
--- a/js/src/jit-test/tests/auto-regress/bug1462341.js
+++ b/js/src/jit-test/tests/auto-regress/bug1462341.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom
loadFile(`
switch (0) {
diff --git a/js/src/jit-test/tests/auto-regress/bug1466626-1.js b/js/src/jit-test/tests/auto-regress/bug1466626-1.js
index f82c4de48e..8815fafc66 100644
--- a/js/src/jit-test/tests/auto-regress/bug1466626-1.js
+++ b/js/src/jit-test/tests/auto-regress/bug1466626-1.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(function() {
for (var i = 0; i < 10; ++i) {
Promise.resolve().then();
diff --git a/js/src/jit-test/tests/auto-regress/bug1466626-2.js b/js/src/jit-test/tests/auto-regress/bug1466626-2.js
index 056ea075e2..9c1ce593a8 100644
--- a/js/src/jit-test/tests/auto-regress/bug1466626-2.js
+++ b/js/src/jit-test/tests/auto-regress/bug1466626-2.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var globals = [];
for (var i = 0; i < 24; ++i) {
var g = newGlobal();
diff --git a/js/src/jit-test/tests/auto-regress/bug1466626-3.js b/js/src/jit-test/tests/auto-regress/bug1466626-3.js
index bffecfce73..2907e8ce2f 100644
--- a/js/src/jit-test/tests/auto-regress/bug1466626-3.js
+++ b/js/src/jit-test/tests/auto-regress/bug1466626-3.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var g = newGlobal();
var i = 0;
diff --git a/js/src/jit-test/tests/auto-regress/bug1466626-4.js b/js/src/jit-test/tests/auto-regress/bug1466626-4.js
index aa02a3ba08..8efc52727c 100644
--- a/js/src/jit-test/tests/auto-regress/bug1466626-4.js
+++ b/js/src/jit-test/tests/auto-regress/bug1466626-4.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var source = "{";
for (var i = 0; i < 120; ++i)
source += `function f${i}(){}`
diff --git a/js/src/jit-test/tests/auto-regress/bug1562102.js b/js/src/jit-test/tests/auto-regress/bug1562102.js
index 78f5ef9010..5efb6fce13 100644
--- a/js/src/jit-test/tests/auto-regress/bug1562102.js
+++ b/js/src/jit-test/tests/auto-regress/bug1562102.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; allow-unhandlable-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom; allow-unhandlable-oom
oomTest(
function() {
evaluate(`
diff --git a/js/src/jit-test/tests/auto-regress/bug1652148.js b/js/src/jit-test/tests/auto-regress/bug1652148.js
index 232957edb6..51a026b92f 100644
--- a/js/src/jit-test/tests/auto-regress/bug1652148.js
+++ b/js/src/jit-test/tests/auto-regress/bug1652148.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => {
new AggregateError([]);
});
diff --git a/js/src/jit-test/tests/auto-regress/bug1652153.js b/js/src/jit-test/tests/auto-regress/bug1652153.js
index 875949c7ea..0eb4c52f3e 100644
--- a/js/src/jit-test/tests/auto-regress/bug1652153.js
+++ b/js/src/jit-test/tests/auto-regress/bug1652153.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
x = "x";
lFile(x);
diff --git a/js/src/jit-test/tests/auto-regress/bug1670378.js b/js/src/jit-test/tests/auto-regress/bug1670378.js
index da3c1e93dd..ddda1f9710 100644
--- a/js/src/jit-test/tests/auto-regress/bug1670378.js
+++ b/js/src/jit-test/tests/auto-regress/bug1670378.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-unhandlable-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-unhandlable-oom
const otherGlobalNewCompartment = newGlobal({newCompartment: true});
diff --git a/js/src/jit-test/tests/auto-regress/bug1791401.js b/js/src/jit-test/tests/auto-regress/bug1791401.js
index 6b3a7a5dbd..27cf95b0c0 100644
--- a/js/src/jit-test/tests/auto-regress/bug1791401.js
+++ b/js/src/jit-test/tests/auto-regress/bug1791401.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(function() {
var f = Function(`
// Don't actually enter the loop. This still causes the original bug and
diff --git a/js/src/jit-test/tests/auto-regress/bug1798883.js b/js/src/jit-test/tests/auto-regress/bug1798883.js
index 9ebe5daa2e..e0c8b52dd7 100644
--- a/js/src/jit-test/tests/auto-regress/bug1798883.js
+++ b/js/src/jit-test/tests/auto-regress/bug1798883.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
// String with an initial part which doesn't need to be normalised and a tail
// which gets normalised to "\u05E9\u05BC\u05C1".
var s = "a".repeat(32) + String.fromCharCode(0xFB2C);
@@ -8,4 +6,4 @@ oomTest(function() {
// |normalize()| needs to be called at least twice to trigger the bug.
s.normalize();
s.normalize();
-}); \ No newline at end of file
+});
diff --git a/js/src/jit-test/tests/auto-regress/bug1879688.js b/js/src/jit-test/tests/auto-regress/bug1879688.js
index a05ae548b5..fecd582eff 100644
--- a/js/src/jit-test/tests/auto-regress/bug1879688.js
+++ b/js/src/jit-test/tests/auto-regress/bug1879688.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
let x = 0;
oomTest(function () {
let y = x++;
diff --git a/js/src/jit-test/tests/baseline/bug1209585.js b/js/src/jit-test/tests/baseline/bug1209585.js
index 6873fe37d5..39f6316c26 100644
--- a/js/src/jit-test/tests/baseline/bug1209585.js
+++ b/js/src/jit-test/tests/baseline/bug1209585.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: helperThreadCount() === 0 || !('oomAtAllocation' in this)
+// |jit-test| skip-if: helperThreadCount() === 0
if ("gczeal" in this)
gczeal(0);
diff --git a/js/src/jit-test/tests/baseline/bug1344334.js b/js/src/jit-test/tests/baseline/bug1344334.js
index 8245148833..b771da02fc 100644
--- a/js/src/jit-test/tests/baseline/bug1344334.js
+++ b/js/src/jit-test/tests/baseline/bug1344334.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
function f(s) {
s + "x";
s.indexOf("y") === 0;
diff --git a/js/src/jit-test/tests/baseline/bug1491337.js b/js/src/jit-test/tests/baseline/bug1491337.js
index 107507155f..c297d6c9f3 100644
--- a/js/src/jit-test/tests/baseline/bug1491337.js
+++ b/js/src/jit-test/tests/baseline/bug1491337.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(new Function(`
let kJSEmbeddingMaxTypes = 1000000;
let kJSEmbeddingMaxFunctions = 1000000;
diff --git a/js/src/jit-test/tests/baseline/bug1491350.js b/js/src/jit-test/tests/baseline/bug1491350.js
index 697a39c50c..404ed8bb1e 100644
--- a/js/src/jit-test/tests/baseline/bug1491350.js
+++ b/js/src/jit-test/tests/baseline/bug1491350.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(new Function(`
var a = ['p', 'q', 'r', 's', 't'];
var o = {p:1, q:2, r:3, s:4, t:5};
diff --git a/js/src/jit-test/tests/basic/bug-1198090.js b/js/src/jit-test/tests/basic/bug-1198090.js
index 2ee6b5a2ed..d6445639cd 100644
--- a/js/src/jit-test/tests/basic/bug-1198090.js
+++ b/js/src/jit-test/tests/basic/bug-1198090.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomAtAllocation' in this)
+// |jit-test| allow-oom
for (let a of [
null, function() {}, function() {}, null, function() {}, function() {},
diff --git a/js/src/jit-test/tests/basic/bug-1271507.js b/js/src/jit-test/tests/basic/bug-1271507.js
index 88097aca8d..e053da62e9 100644
--- a/js/src/jit-test/tests/basic/bug-1271507.js
+++ b/js/src/jit-test/tests/basic/bug-1271507.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: typeof oomAfterAllocations !== 'function'
+// |jit-test| allow-oom; skip-if: !hasFunction.oomAfterAllocations
lfcode = new Array();
oomAfterAllocations(100);
loadFile(file);
diff --git a/js/src/jit-test/tests/basic/bug-1665583.js b/js/src/jit-test/tests/basic/bug-1665583.js
index 012dc1d043..d7350053ef 100644
--- a/js/src/jit-test/tests/basic/bug-1665583.js
+++ b/js/src/jit-test/tests/basic/bug-1665583.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
function parseModule(source) {
offThreadCompileModuleToStencil(source);
var stencil = finishOffThreadStencil();
diff --git a/js/src/jit-test/tests/basic/bug1207863.js b/js/src/jit-test/tests/basic/bug1207863.js
index ef079a3c29..e452694e99 100644
--- a/js/src/jit-test/tests/basic/bug1207863.js
+++ b/js/src/jit-test/tests/basic/bug1207863.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; allow-unhandlable-oom; skip-if: !("oomAtAllocation" in this && "resetOOMFailure" in this)
+// |jit-test| allow-oom; allow-unhandlable-oom
function oomTest(f) {
var i = 1;
diff --git a/js/src/jit-test/tests/basic/bug1219128-1.js b/js/src/jit-test/tests/basic/bug1219128-1.js
index 7a81d73f0a..4724059bca 100644
--- a/js/src/jit-test/tests/basic/bug1219128-1.js
+++ b/js/src/jit-test/tests/basic/bug1219128-1.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
evaluate(`
x = evalcx("lazy");
oomTest(function () {
diff --git a/js/src/jit-test/tests/basic/bug1219128-2.js b/js/src/jit-test/tests/basic/bug1219128-2.js
index 7208fc3260..1a17f5abeb 100644
--- a/js/src/jit-test/tests/basic/bug1219128-2.js
+++ b/js/src/jit-test/tests/basic/bug1219128-2.js
@@ -1,4 +1,2 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
a = evalcx("lazy")
oomTest(() => a.toString)
diff --git a/js/src/jit-test/tests/basic/bug1219128-3.js b/js/src/jit-test/tests/basic/bug1219128-3.js
index feca3eb55c..854be23213 100644
--- a/js/src/jit-test/tests/basic/bug1219128-3.js
+++ b/js/src/jit-test/tests/basic/bug1219128-3.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
x = evalcx('lazy');
oomTest(function() {
x.eval
diff --git a/js/src/jit-test/tests/basic/bug1219128-4.js b/js/src/jit-test/tests/basic/bug1219128-4.js
index 41f8b9757a..6329b3f198 100644
--- a/js/src/jit-test/tests/basic/bug1219128-4.js
+++ b/js/src/jit-test/tests/basic/bug1219128-4.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
x = evalcx("lazy");
oomTest((function() {
evalcx("({", x);
diff --git a/js/src/jit-test/tests/basic/bug1219128-5.js b/js/src/jit-test/tests/basic/bug1219128-5.js
index 30dc56e5ce..9ce2c7ab5c 100644
--- a/js/src/jit-test/tests/basic/bug1219128-5.js
+++ b/js/src/jit-test/tests/basic/bug1219128-5.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
x = evalcx("lazy");
oomTest(function() {
x.of(new(delete y));
diff --git a/js/src/jit-test/tests/basic/bug1219128-6.js b/js/src/jit-test/tests/basic/bug1219128-6.js
index cb9f4c7170..bd02653202 100644
--- a/js/src/jit-test/tests/basic/bug1219128-6.js
+++ b/js/src/jit-test/tests/basic/bug1219128-6.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
a = evalcx("lazy")
oomTest(function() {
a.b
diff --git a/js/src/jit-test/tests/basic/bug1219128-7.js b/js/src/jit-test/tests/basic/bug1219128-7.js
index 87ec092f87..e5f660cc77 100644
--- a/js/src/jit-test/tests/basic/bug1219128-7.js
+++ b/js/src/jit-test/tests/basic/bug1219128-7.js
@@ -1,4 +1,4 @@
-// |jit-test| slow; skip-if: !('oomTest' in this)
+// |jit-test| slow
function main() {
const v1 = this.newGlobal();
diff --git a/js/src/jit-test/tests/basic/bug1219128-8.js b/js/src/jit-test/tests/basic/bug1219128-8.js
index a957b879dc..d8852b423f 100644
--- a/js/src/jit-test/tests/basic/bug1219128-8.js
+++ b/js/src/jit-test/tests/basic/bug1219128-8.js
@@ -1,4 +1,2 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
a = evalcx('lazy')
oomTest(() => a < 0)
diff --git a/js/src/jit-test/tests/basic/bug1234414.js b/js/src/jit-test/tests/basic/bug1234414.js
index 2aeda2c897..5f8af8e0b6 100644
--- a/js/src/jit-test/tests/basic/bug1234414.js
+++ b/js/src/jit-test/tests/basic/bug1234414.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => {
var max = 400;
function f(b) {
diff --git a/js/src/jit-test/tests/basic/bug1240502.js b/js/src/jit-test/tests/basic/bug1240502.js
index 08d6619460..5baed067dc 100644
--- a/js/src/jit-test/tests/basic/bug1240502.js
+++ b/js/src/jit-test/tests/basic/bug1240502.js
@@ -1,2 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
oomTest(() => eval(`Array(..."ABC")`));
diff --git a/js/src/jit-test/tests/basic/bug1263868.js b/js/src/jit-test/tests/basic/bug1263868.js
index ba678d71d5..6375cfdd65 100644
--- a/js/src/jit-test/tests/basic/bug1263868.js
+++ b/js/src/jit-test/tests/basic/bug1263868.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
function g(f, params) {
entryPoints(params);
}
diff --git a/js/src/jit-test/tests/basic/bug1264954.js b/js/src/jit-test/tests/basic/bug1264954.js
index e9d6422798..ca1a8fb179 100644
--- a/js/src/jit-test/tests/basic/bug1264954.js
+++ b/js/src/jit-test/tests/basic/bug1264954.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
function f(x) {
oomTest(() => eval(x));
}
diff --git a/js/src/jit-test/tests/basic/bug1265693.js b/js/src/jit-test/tests/basic/bug1265693.js
index 9922999d1b..d25713cf97 100644
--- a/js/src/jit-test/tests/basic/bug1265693.js
+++ b/js/src/jit-test/tests/basic/bug1265693.js
@@ -1,2 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
oomTest(Function("Function.hasOwnProperty(1.1)"));
diff --git a/js/src/jit-test/tests/basic/bug1278839.js b/js/src/jit-test/tests/basic/bug1278839.js
index 3756e95d68..cff252cc7a 100644
--- a/js/src/jit-test/tests/basic/bug1278839.js
+++ b/js/src/jit-test/tests/basic/bug1278839.js
@@ -1,3 +1,2 @@
-// |jit-test| skip-if: !('oomTest' in this)
for (var i=0; i<2; i++)
oomTest(() => eval("setJitCompilerOption(eval + Function, 0);"));
diff --git a/js/src/jit-test/tests/basic/bug1296249.js b/js/src/jit-test/tests/basic/bug1296249.js
index 7c8b7c5df4..c5fb49bdbe 100644
--- a/js/src/jit-test/tests/basic/bug1296249.js
+++ b/js/src/jit-test/tests/basic/bug1296249.js
@@ -1,4 +1,4 @@
-// |jit-test| slow; skip-if: !('oomTest' in this)
+// |jit-test| slow
function f(x) {
new Int32Array(x);
}
diff --git a/js/src/jit-test/tests/basic/bug1300904.js b/js/src/jit-test/tests/basic/bug1300904.js
index 2274129366..15e62ad818 100644
--- a/js/src/jit-test/tests/basic/bug1300904.js
+++ b/js/src/jit-test/tests/basic/bug1300904.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
Object.getOwnPropertyNames(this);
oomTest(function() {
this[0] = null;
diff --git a/js/src/jit-test/tests/basic/bug1316557.js b/js/src/jit-test/tests/basic/bug1316557.js
new file mode 100644
index 0000000000..73edb23893
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug1316557.js
@@ -0,0 +1 @@
+assertEq(Math.pow(-999, -999), -0);
diff --git a/js/src/jit-test/tests/basic/bug1344265.js b/js/src/jit-test/tests/basic/bug1344265.js
index 2ffc2f7e5c..da5a882e14 100644
--- a/js/src/jit-test/tests/basic/bug1344265.js
+++ b/js/src/jit-test/tests/basic/bug1344265.js
@@ -1,3 +1,3 @@
-// |jit-test| allow-unhandlable-oom; allow-oom; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| allow-unhandlable-oom; allow-oom
oomAfterAllocations(1);
newString("a", {external: true});
diff --git a/js/src/jit-test/tests/basic/bug1348407.js b/js/src/jit-test/tests/basic/bug1348407.js
index 133a49018c..51371fefcd 100644
--- a/js/src/jit-test/tests/basic/bug1348407.js
+++ b/js/src/jit-test/tests/basic/bug1348407.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
x = evalcx("lazy");
oomTest(function () {
x.eval("1");
diff --git a/js/src/jit-test/tests/basic/bug1411294.js b/js/src/jit-test/tests/basic/bug1411294.js
index 327c808bcf..2c748768bc 100644
--- a/js/src/jit-test/tests/basic/bug1411294.js
+++ b/js/src/jit-test/tests/basic/bug1411294.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
oomTest(function() {
eval(`var clonebuffer = serialize("abc");
clonebuffer.clonebuffer = "\
diff --git a/js/src/jit-test/tests/basic/bug1447996.js b/js/src/jit-test/tests/basic/bug1447996.js
index ec7cc5a25a..82c152ac15 100644
--- a/js/src/jit-test/tests/basic/bug1447996.js
+++ b/js/src/jit-test/tests/basic/bug1447996.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('stackTest' in this)
-
var x = 0;
function f() {
var s = "abcdef(((((((a|b)a|b)a|b)a|b)a|b)a|b)a|b)" + x;
diff --git a/js/src/jit-test/tests/basic/bug1459258.js b/js/src/jit-test/tests/basic/bug1459258.js
index d29231a34a..b68ad115a0 100644
--- a/js/src/jit-test/tests/basic/bug1459258.js
+++ b/js/src/jit-test/tests/basic/bug1459258.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
oomTest(function() {
return [0, Math.PI, NaN, Infinity, true, false, Symbol(), Math.tan,
Reflect, Proxy, print, assertEq, Array, String, Boolean, Number, parseInt,
diff --git a/js/src/jit-test/tests/basic/bug1493627.js b/js/src/jit-test/tests/basic/bug1493627.js
index ee0525128c..b8952313ff 100644
--- a/js/src/jit-test/tests/basic/bug1493627.js
+++ b/js/src/jit-test/tests/basic/bug1493627.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('stackTest' in this)
stackTest(function() {
eval(`var g = newGlobal(); recomputeWrappers(this, g);`);
});
diff --git a/js/src/jit-test/tests/basic/bug1516406.js b/js/src/jit-test/tests/basic/bug1516406.js
index 9a513fc1a2..dbee2feacb 100644
--- a/js/src/jit-test/tests/basic/bug1516406.js
+++ b/js/src/jit-test/tests/basic/bug1516406.js
@@ -1,2 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
oomTest(() => dumpScopeChain(eval(`b => 1`)));
diff --git a/js/src/jit-test/tests/basic/bug1532265.js b/js/src/jit-test/tests/basic/bug1532265.js
index 500b6e4a64..bf8416b78d 100644
--- a/js/src/jit-test/tests/basic/bug1532265.js
+++ b/js/src/jit-test/tests/basic/bug1532265.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom
ignoreUnhandledRejections();
diff --git a/js/src/jit-test/tests/basic/bug1548759-1.js b/js/src/jit-test/tests/basic/bug1548759-1.js
index 6e6f795cad..cac4953f06 100644
--- a/js/src/jit-test/tests/basic/bug1548759-1.js
+++ b/js/src/jit-test/tests/basic/bug1548759-1.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
(function() {
oomTest(async function() {
x;
diff --git a/js/src/jit-test/tests/basic/bug1548759-2.js b/js/src/jit-test/tests/basic/bug1548759-2.js
index 5e0eef54cc..6fad23f92b 100644
--- a/js/src/jit-test/tests/basic/bug1548759-2.js
+++ b/js/src/jit-test/tests/basic/bug1548759-2.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
oomTest(function() {
return {
x: async function() {
diff --git a/js/src/jit-test/tests/basic/bug1574725.js b/js/src/jit-test/tests/basic/bug1574725.js
index f7ccb27d92..03cdf897aa 100644
--- a/js/src/jit-test/tests/basic/bug1574725.js
+++ b/js/src/jit-test/tests/basic/bug1574725.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this) || helperThreadCount() === 0
+// |jit-test| skip-if: helperThreadCount() === 0
for (let i = 0; i < 15; ++i) {
evalInWorker("for (var i = 0; i < 100; i++) {}");
}
diff --git a/js/src/jit-test/tests/basic/bug1644839-2.js b/js/src/jit-test/tests/basic/bug1644839-2.js
index cf0f7d8981..5c2e06258d 100644
--- a/js/src/jit-test/tests/basic/bug1644839-2.js
+++ b/js/src/jit-test/tests/basic/bug1644839-2.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
var code = `
(\`\${key}: \${(args[1]?.toString)?.()}\`)
`;
diff --git a/js/src/jit-test/tests/basic/bug1644839.js b/js/src/jit-test/tests/basic/bug1644839.js
index b83b662358..44fdee1ddf 100644
--- a/js/src/jit-test/tests/basic/bug1644839.js
+++ b/js/src/jit-test/tests/basic/bug1644839.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
var code = `
(\`\${key}: \${args[1]?.toString()}\`)
`;
diff --git a/js/src/jit-test/tests/basic/bug1666856.js b/js/src/jit-test/tests/basic/bug1666856.js
index ea6e6942d2..50d8505eb6 100644
--- a/js/src/jit-test/tests/basic/bug1666856.js
+++ b/js/src/jit-test/tests/basic/bug1666856.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !this.oomTest
-
let i = 10000;
oomTest(() => {
let arr = [];
diff --git a/js/src/jit-test/tests/basic/bug1877586.js b/js/src/jit-test/tests/basic/bug1877586.js
index d6ff5b1ae3..9d64f110af 100644
--- a/js/src/jit-test/tests/basic/bug1877586.js
+++ b/js/src/jit-test/tests/basic/bug1877586.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomAtAllocation' in this); allow-oom
+// |jit-test| allow-oom
try {
for (let i = 0; i < 5; i++) {
WebAssembly.instantiateStreaming(
diff --git a/js/src/jit-test/tests/basic/bug1883828.js b/js/src/jit-test/tests/basic/bug1883828.js
new file mode 100644
index 0000000000..3c63a00d2b
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug1883828.js
@@ -0,0 +1,5 @@
+const arr = [];
+arr[Symbol.toPrimitive] = quit;
+const stack = {stack: saveStack(), cause: arr};
+const bound = bindToAsyncStack(function() {}, stack);
+bound();
diff --git a/js/src/jit-test/tests/basic/bug1884706.js b/js/src/jit-test/tests/basic/bug1884706.js
new file mode 100644
index 0000000000..9bf7c1b52d
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug1884706.js
@@ -0,0 +1,5 @@
+const arr = new Int32Array(1 << 26);
+try {
+ for (const key in arr) {
+ }
+} catch {}
diff --git a/js/src/jit-test/tests/basic/date-getLocale-oom.js b/js/src/jit-test/tests/basic/date-getLocale-oom.js
index 7c0b1a7190..dc6371a35b 100644
--- a/js/src/jit-test/tests/basic/date-getLocale-oom.js
+++ b/js/src/jit-test/tests/basic/date-getLocale-oom.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(function () {
new Date(NaN).toString();
}, {keepFailing: true});
diff --git a/js/src/jit-test/tests/basic/date-late-weekday-warning.js b/js/src/jit-test/tests/basic/date-late-weekday-warning.js
deleted file mode 100644
index ca6730e56c..0000000000
--- a/js/src/jit-test/tests/basic/date-late-weekday-warning.js
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Test deprecation warning for late weekday in Date.parse
- */
-
-function testWarn(date) {
- const g = newGlobal();
- g.eval(`Date.parse("${date}")`);
- const warning = getLastWarning();
- assertEq(warning !== null, true, `warning should be caught for ${date}`);
- assertEq(warning.name, "Warning", warning.name);
-
- clearLastWarning();
-
- g.eval(`Date.parse("${date}")`);
- assertEq(getLastWarning(), null, "warning should not be caught for 2nd ocurrence");
-}
-
-function testNoWarn(date) {
- Date.parse(date);
- assertEq(getLastWarning(), null, `warning should not be caught for ${date}`);
-}
-
-enableLastWarning();
-
-testWarn("Sep 26 1995 Tues");
-testWarn("Sep 26 Tues 1995");
-testWarn("Sep 26 Tues 1995 Tues");
-testWarn("Sep 26 1995 10:Tues:00");
-
-testNoWarn("Sep 26 1995");
-testNoWarn("Tues Sep 26 1995");
-testNoWarn("Sep Tues 26 1995");
-
-disableLastWarning();
diff --git a/js/src/jit-test/tests/basic/dictionary-add-prop-oom.js b/js/src/jit-test/tests/basic/dictionary-add-prop-oom.js
index e393cfca6b..10e09ed06f 100644
--- a/js/src/jit-test/tests/basic/dictionary-add-prop-oom.js
+++ b/js/src/jit-test/tests/basic/dictionary-add-prop-oom.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
enableShapeConsistencyChecks();
oomTest(() => {
var obj = {a: 1, b: 2, c: 3};
diff --git a/js/src/jit-test/tests/basic/dumpValue.js b/js/src/jit-test/tests/basic/dumpValue.js
index 6bf4ffcb8d..b97c9e89d8 100644
--- a/js/src/jit-test/tests/basic/dumpValue.js
+++ b/js/src/jit-test/tests/basic/dumpValue.js
@@ -1,6 +1,4 @@
-// |jit-test| skip-if: typeof dumpValue !== 'function' || getBuildConfiguration("windows")
-
-// FIXME: Fix backslash handling on windows (bug 1880003).
+// |jit-test| skip-if: typeof dumpValue !== 'function'
// Try the dumpValue and dumpValueToString shell functions on various types of
// values, and make sure they don't crash, and the result is valid JSON.
diff --git a/js/src/jit-test/tests/basic/inflate-oom.js b/js/src/jit-test/tests/basic/inflate-oom.js
index 645980789f..52d73d66cb 100644
--- a/js/src/jit-test/tests/basic/inflate-oom.js
+++ b/js/src/jit-test/tests/basic/inflate-oom.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
function test() {
function foo() {
return 1;
diff --git a/js/src/jit-test/tests/basic/property-error-message-fix-disabled.js b/js/src/jit-test/tests/basic/property-error-message-fix-disabled.js
index 542bada65f..a011605ed9 100644
--- a/js/src/jit-test/tests/basic/property-error-message-fix-disabled.js
+++ b/js/src/jit-test/tests/basic/property-error-message-fix-disabled.js
@@ -1,4 +1,4 @@
-// |jit-test| --disable-property-error-message-fix; skip-if: getBuildConfiguration('pbl')
+// |jit-test| --setpref=property_error_message_fix=false; skip-if: getBuildConfiguration('pbl')
function check(f, message) {
let caught = false;
diff --git a/js/src/jit-test/tests/basic/property-error-message-fix.js b/js/src/jit-test/tests/basic/property-error-message-fix.js
index 6f27416496..32fe8a7408 100644
--- a/js/src/jit-test/tests/basic/property-error-message-fix.js
+++ b/js/src/jit-test/tests/basic/property-error-message-fix.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: getBuildConfiguration('pbl')
+// |jit-test| --setpref=property_error_message_fix=true; skip-if: getBuildConfiguration('pbl')
function check(f, message) {
let caught = false;
diff --git a/js/src/jit-test/tests/basic/string-substring-latin1rope-with-twobyte-children.js b/js/src/jit-test/tests/basic/string-substring-latin1rope-with-twobyte-children.js
new file mode 100644
index 0000000000..3a1d889073
--- /dev/null
+++ b/js/src/jit-test/tests/basic/string-substring-latin1rope-with-twobyte-children.js
@@ -0,0 +1,12 @@
+let right = newRope("b", "012345678901234567890123456789");
+let latin1Rope = newRope("a", right);
+let twoByteRope = newRope("\u221e", right);
+
+// Flattening |twoByteRope| changes |right| from a Latin-1 rope into a two-byte
+// dependent string. At this point, |latin1Rope| has the Latin-1 flag set, but
+// also has a two-byte rope child.
+ensureLinearString(twoByteRope);
+
+let result = latin1Rope.substring(0, 3);
+
+assertEq(result, "ab0");
diff --git a/js/src/jit-test/tests/basic/testBug756919.js b/js/src/jit-test/tests/basic/testBug756919.js
index a72f46bece..9bb0d9a2cc 100644
--- a/js/src/jit-test/tests/basic/testBug756919.js
+++ b/js/src/jit-test/tests/basic/testBug756919.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
function test(x) {
var upvar = "";
function f() { upvar += ""; }
diff --git a/js/src/jit-test/tests/basic/testNeutering.js b/js/src/jit-test/tests/basic/testDetach.js
index fc49e3f99e..fc49e3f99e 100644
--- a/js/src/jit-test/tests/basic/testNeutering.js
+++ b/js/src/jit-test/tests/basic/testDetach.js
diff --git a/js/src/jit-test/tests/basic/testNativeArgsRooting.js b/js/src/jit-test/tests/basic/testNativeArgsRooting.js
index 1ce8259f2d..5e3e8f5f12 100644
--- a/js/src/jit-test/tests/basic/testNativeArgsRooting.js
+++ b/js/src/jit-test/tests/basic/testNativeArgsRooting.js
@@ -1,4 +1,3 @@
-if ('gczeal' in this)
(function () {
(eval("\
(function () {\
diff --git a/js/src/jit-test/tests/bug1636306.js b/js/src/jit-test/tests/bug1636306.js
index e39dc84f80..ab538831fe 100644
--- a/js/src/jit-test/tests/bug1636306.js
+++ b/js/src/jit-test/tests/bug1636306.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-ion; skip-if: !('oomTest' in this)
+// |jit-test| --no-ion
oomTest(() => { eval(`
function getCallee() { return getCallee.caller; }
diff --git a/js/src/jit-test/tests/bug1681258.js b/js/src/jit-test/tests/bug1681258.js
index 4f15a8283e..d0af22c322 100644
--- a/js/src/jit-test/tests/bug1681258.js
+++ b/js/src/jit-test/tests/bug1681258.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this);--fast-warmup;--blinterp-warmup-threshold=10
+// |jit-test| --fast-warmup;--blinterp-warmup-threshold=10
ignoreUnhandledRejections();
oomTest(async function() {
diff --git a/js/src/jit-test/tests/bug1787730.js b/js/src/jit-test/tests/bug1787730.js
index a828527583..09a44496d8 100644
--- a/js/src/jit-test/tests/bug1787730.js
+++ b/js/src/jit-test/tests/bug1787730.js
@@ -1,3 +1,3 @@
-// |jit-test| --delazification-mode=concurrent-df+on-demand; skip-if: !('oomTest' in this) || isLcovEnabled()
+// |jit-test| --delazification-mode=concurrent-df+on-demand; skip-if: isLcovEnabled()
oomTest(() => evalcx(0));
diff --git a/js/src/jit-test/tests/bug1878098-serialization-log-oom.js b/js/src/jit-test/tests/bug1878098-serialization-log-oom.js
new file mode 100644
index 0000000000..d752337cfa
--- /dev/null
+++ b/js/src/jit-test/tests/bug1878098-serialization-log-oom.js
@@ -0,0 +1,8 @@
+// |jit-test| skip-if: !('oomTest' in this)
+
+x = [];
+x.keepFailing = [];
+oomTest(function () {
+ y = { z: [] };
+ makeSerializable().log;
+}, x);
diff --git a/js/src/jit-test/tests/dataview/resizable-dataview-bytelength-with-sab.js b/js/src/jit-test/tests/dataview/resizable-dataview-bytelength-with-sab.js
new file mode 100644
index 0000000000..008657d6e3
--- /dev/null
+++ b/js/src/jit-test/tests/dataview/resizable-dataview-bytelength-with-sab.js
@@ -0,0 +1,29 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer
+
+function testResizableArrayBuffer() {
+ for (let i = 0; i < 4; ++i) {
+ let sab = new SharedArrayBuffer(i, {maxByteLength: i + 100});
+ let ta = new DataView(sab, 0, i);
+ for (let j = 0; j < 100; ++j) {
+ assertEq(ta.byteLength, i);
+
+ sab.grow(i + j + 1);
+ assertEq(ta.byteLength, i);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBuffer();
+
+function testResizableArrayBufferAutoLength() {
+ for (let i = 0; i < 4; ++i) {
+ let sab = new SharedArrayBuffer(i, {maxByteLength: i + 100});
+ let ta = new DataView(sab);
+ for (let j = 0; j < 100; ++j) {
+ assertEq(ta.byteLength, i + j);
+
+ sab.grow(i + j + 1);
+ assertEq(ta.byteLength, i + j + 1);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLength();
diff --git a/js/src/jit-test/tests/dataview/resizable-dataview-bytelength.js b/js/src/jit-test/tests/dataview/resizable-dataview-bytelength.js
new file mode 100644
index 0000000000..851011032f
--- /dev/null
+++ b/js/src/jit-test/tests/dataview/resizable-dataview-bytelength.js
@@ -0,0 +1,43 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+load(libdir + "asserts.js");
+
+function testResizableArrayBuffer() {
+ for (let i = 0; i < 4; ++i) {
+ let ab = new ArrayBuffer(i, {maxByteLength: i + 1});
+ let ta = new DataView(ab, 0, i);
+ for (let j = 0; j < 100; ++j) {
+ ab.resize(i);
+ assertEq(ta.byteLength, i);
+
+ ab.resize(i + 1);
+ assertEq(ta.byteLength, i);
+
+ if (i > 0) {
+ ab.resize(i - 1);
+ assertThrowsInstanceOf(() => ta.byteLength, TypeError);
+ }
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBuffer();
+
+function testResizableArrayBufferAutoLength() {
+ for (let i = 0; i < 4; ++i) {
+ let ab = new ArrayBuffer(i, {maxByteLength: i + 1});
+ let ta = new DataView(ab);
+ for (let j = 0; j < 100; ++j) {
+ ab.resize(i);
+ assertEq(ta.byteLength, i);
+
+ ab.resize(i + 1);
+ assertEq(ta.byteLength, i + 1);
+
+ if (i > 0) {
+ ab.resize(i - 1);
+ assertEq(ta.byteLength, i - 1);
+ }
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLength();
diff --git a/js/src/jit-test/tests/dataview/resizable-dataview-byteoffset-sab.js b/js/src/jit-test/tests/dataview/resizable-dataview-byteoffset-sab.js
new file mode 100644
index 0000000000..e671700586
--- /dev/null
+++ b/js/src/jit-test/tests/dataview/resizable-dataview-byteoffset-sab.js
@@ -0,0 +1,43 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer
+
+function testResizableArrayBufferAutoLength() {
+ for (let i = 0; i < 4; ++i) {
+ let sab = new SharedArrayBuffer(i, {maxByteLength: i + 100});
+ let ta = new DataView(sab);
+ for (let j = 0; j < 100; ++j) {
+ assertEq(ta.byteOffset, 0);
+
+ sab.grow(i + j + 1);
+ assertEq(ta.byteOffset, 0);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLength();
+
+function testResizableArrayBufferAutoLengthNonZeroOffset() {
+ for (let i = 1; i < 4 + 1; ++i) {
+ let sab = new SharedArrayBuffer(i + 1, {maxByteLength: i + 100 + 1});
+ let ta = new DataView(sab, 1);
+ for (let j = 0; j < 100; ++j) {
+ assertEq(ta.byteOffset, 1);
+
+ sab.grow(i + j + 2);
+ assertEq(ta.byteOffset, 1);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLengthNonZeroOffset();
+
+function testResizableArrayBufferNonZeroOffset() {
+ for (let i = 2; i < 4 + 2; ++i) {
+ let sab = new SharedArrayBuffer(i + 2, {maxByteLength: i + 100 + 2});
+ let ta = new DataView(sab, 1, 1);
+ for (let j = 0; j < 100; ++j) {
+ assertEq(ta.byteOffset, 1);
+
+ sab.grow(i + j + 3);
+ assertEq(ta.byteOffset, 1);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferNonZeroOffset();
diff --git a/js/src/jit-test/tests/dataview/resizable-dataview-byteoffset.js b/js/src/jit-test/tests/dataview/resizable-dataview-byteoffset.js
new file mode 100644
index 0000000000..3f0afa1169
--- /dev/null
+++ b/js/src/jit-test/tests/dataview/resizable-dataview-byteoffset.js
@@ -0,0 +1,67 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+load(libdir + "asserts.js");
+
+function testResizableArrayBufferAutoLength() {
+ for (let i = 0; i < 4; ++i) {
+ let ab = new ArrayBuffer(i, {maxByteLength: i + 1});
+ let ta = new DataView(ab);
+ for (let j = 0; j < 100; ++j) {
+ ab.resize(i);
+ assertEq(ta.byteOffset, 0);
+
+ ab.resize(i + 1);
+ assertEq(ta.byteOffset, 0);
+
+ if (i > 0) {
+ ab.resize(i - 1);
+ assertEq(ta.byteOffset, 0);
+ }
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLength();
+
+function testResizableArrayBufferAutoLengthNonZeroOffset() {
+ for (let i = 1; i < 4 + 1; ++i) {
+ let ab = new ArrayBuffer(i, {maxByteLength: i + 1});
+ let ta = new DataView(ab, 1);
+ for (let j = 0; j < 100; ++j) {
+ ab.resize(i);
+ assertEq(ta.byteOffset, 1);
+
+ ab.resize(i + 1);
+ assertEq(ta.byteOffset, 1);
+
+ ab.resize(i - 1);
+ if (i > 1) {
+ assertEq(ta.byteOffset, 1);
+ } else {
+ assertThrowsInstanceOf(() => ta.byteOffset, TypeError);
+ }
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLengthNonZeroOffset();
+
+function testResizableArrayBufferNonZeroOffset() {
+ for (let i = 2; i < 4 + 2; ++i) {
+ let ab = new ArrayBuffer(i, {maxByteLength: i + 1});
+ let ta = new DataView(ab, 1, 1);
+ for (let j = 0; j < 100; ++j) {
+ ab.resize(i);
+ assertEq(ta.byteOffset, 1);
+
+ ab.resize(i + 1);
+ assertEq(ta.byteOffset, 1);
+
+ ab.resize(i - 1);
+ if (i > 2) {
+ assertEq(ta.byteOffset, 1);
+ } else {
+ assertThrowsInstanceOf(() => ta.byteOffset, TypeError);
+ }
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferNonZeroOffset();
diff --git a/js/src/jit-test/tests/dataview/resizable-dataview-get-elem-with-sab.js b/js/src/jit-test/tests/dataview/resizable-dataview-get-elem-with-sab.js
new file mode 100644
index 0000000000..ea8ca1666c
--- /dev/null
+++ b/js/src/jit-test/tests/dataview/resizable-dataview-get-elem-with-sab.js
@@ -0,0 +1,48 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer
+
+load(libdir + "dataview.js");
+
+const TypedArrays = [
+ Int8Array,
+ Uint8Array,
+ Int16Array,
+ Uint16Array,
+ Int32Array,
+ Uint32Array,
+ Float32Array,
+ Float64Array,
+ BigInt64Array,
+ BigUint64Array,
+];
+
+function test(TA) {
+ const length = 4;
+ const byteLength = length * TA.BYTES_PER_ELEMENT;
+
+ let rab = new SharedArrayBuffer(byteLength, {maxByteLength: byteLength});
+ let actual = new TA(rab);
+ let expected = new TA(length);
+ let type = expected[0].constructor;
+
+ for (let i = 0; i < length; ++i) {
+ actual[i] = type(i * i);
+ expected[i] = type(i * i);
+ }
+
+ let dv = new DataView(rab);
+ for (let i = 0; i < 200; ++i) {
+ let index = i % length;
+ let byteIndex = index * TA.BYTES_PER_ELEMENT;
+
+ assertEq(dv.getElem(byteIndex, nativeIsLittleEndian), expected[index]);
+ }
+}
+
+for (let TA of TypedArrays) {
+ let getter = "get" + typeName(TA);
+
+ // Copy test function to ensure monomorphic ICs.
+ let copy = Function(`return ${test}`.replaceAll("getElem", getter))();
+
+ copy(TA);
+}
diff --git a/js/src/jit-test/tests/dataview/resizable-dataview-get-elem.js b/js/src/jit-test/tests/dataview/resizable-dataview-get-elem.js
new file mode 100644
index 0000000000..1762f9f5ee
--- /dev/null
+++ b/js/src/jit-test/tests/dataview/resizable-dataview-get-elem.js
@@ -0,0 +1,48 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+load(libdir + "dataview.js");
+
+const TypedArrays = [
+ Int8Array,
+ Uint8Array,
+ Int16Array,
+ Uint16Array,
+ Int32Array,
+ Uint32Array,
+ Float32Array,
+ Float64Array,
+ BigInt64Array,
+ BigUint64Array,
+];
+
+function test(TA) {
+ const length = 4;
+ const byteLength = length * TA.BYTES_PER_ELEMENT;
+
+ let rab = new ArrayBuffer(byteLength, {maxByteLength: byteLength});
+ let actual = new TA(rab);
+ let expected = new TA(length);
+ let type = expected[0].constructor;
+
+ for (let i = 0; i < length; ++i) {
+ actual[i] = type(i * i);
+ expected[i] = type(i * i);
+ }
+
+ let dv = new DataView(rab);
+ for (let i = 0; i < 200; ++i) {
+ let index = i % length;
+ let byteIndex = index * TA.BYTES_PER_ELEMENT;
+
+ assertEq(dv.getElem(byteIndex, nativeIsLittleEndian), expected[index]);
+ }
+}
+
+for (let TA of TypedArrays) {
+ let getter = "get" + typeName(TA);
+
+ // Copy test function to ensure monomorphic ICs.
+ let copy = Function(`return ${test}`.replaceAll("getElem", getter))();
+
+ copy(TA);
+}
diff --git a/js/src/jit-test/tests/dataview/resizable-dataview-set-elem-with-sab.js b/js/src/jit-test/tests/dataview/resizable-dataview-set-elem-with-sab.js
new file mode 100644
index 0000000000..52d9ddaf4c
--- /dev/null
+++ b/js/src/jit-test/tests/dataview/resizable-dataview-set-elem-with-sab.js
@@ -0,0 +1,47 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer
+
+load(libdir + "dataview.js");
+
+const TypedArrays = [
+ Int8Array,
+ Uint8Array,
+ Int16Array,
+ Uint16Array,
+ Int32Array,
+ Uint32Array,
+ Float32Array,
+ Float64Array,
+ BigInt64Array,
+ BigUint64Array,
+];
+
+function test(TA) {
+ const length = 4;
+ const byteLength = length * TA.BYTES_PER_ELEMENT;
+
+ let rab = new SharedArrayBuffer(byteLength, {maxByteLength: byteLength});
+ let actual = new TA(rab);
+ let expected = new TA(length);
+ let type = expected[0].constructor;
+
+ let dv = new DataView(rab);
+ for (let i = 0; i < 200; ++i) {
+ let index = i % length;
+ let byteIndex = index * TA.BYTES_PER_ELEMENT;
+
+ let v = type(i);
+ dv.setElem(byteIndex, v, nativeIsLittleEndian);
+ expected[index] = v;
+
+ assertEq(actual[index], expected[index]);
+ }
+}
+
+for (let TA of TypedArrays) {
+ let setter = "set" + typeName(TA);
+
+ // Copy test function to ensure monomorphic ICs.
+ let copy = Function(`return ${test}`.replaceAll("setElem", setter))();
+
+ copy(TA);
+}
diff --git a/js/src/jit-test/tests/dataview/resizable-dataview-set-elem.js b/js/src/jit-test/tests/dataview/resizable-dataview-set-elem.js
new file mode 100644
index 0000000000..0359d1a8b3
--- /dev/null
+++ b/js/src/jit-test/tests/dataview/resizable-dataview-set-elem.js
@@ -0,0 +1,47 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+load(libdir + "dataview.js");
+
+const TypedArrays = [
+ Int8Array,
+ Uint8Array,
+ Int16Array,
+ Uint16Array,
+ Int32Array,
+ Uint32Array,
+ Float32Array,
+ Float64Array,
+ BigInt64Array,
+ BigUint64Array,
+];
+
+function test(TA) {
+ const length = 4;
+ const byteLength = length * TA.BYTES_PER_ELEMENT;
+
+ let rab = new ArrayBuffer(byteLength, {maxByteLength: byteLength});
+ let actual = new TA(rab);
+ let expected = new TA(length);
+ let type = expected[0].constructor;
+
+ let dv = new DataView(rab);
+ for (let i = 0; i < 200; ++i) {
+ let index = i % length;
+ let byteIndex = index * TA.BYTES_PER_ELEMENT;
+
+ let v = type(i);
+ dv.setElem(byteIndex, v, nativeIsLittleEndian);
+ expected[index] = v;
+
+ assertEq(actual[index], expected[index]);
+ }
+}
+
+for (let TA of TypedArrays) {
+ let setter = "set" + typeName(TA);
+
+ // Copy test function to ensure monomorphic ICs.
+ let copy = Function(`return ${test}`.replaceAll("setElem", setter))();
+
+ copy(TA);
+}
diff --git a/js/src/jit-test/tests/debug/Debugger-findScripts-26.js b/js/src/jit-test/tests/debug/Debugger-findScripts-26.js
index 1c1510aa4f..57709782ab 100644
--- a/js/src/jit-test/tests/debug/Debugger-findScripts-26.js
+++ b/js/src/jit-test/tests/debug/Debugger-findScripts-26.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
var g = newGlobal({newCompartment: true});
var dbg = new Debugger();
var gw = dbg.addDebuggee(g);
diff --git a/js/src/jit-test/tests/debug/Memory-drainAllocationsLog-18.js b/js/src/jit-test/tests/debug/Memory-drainAllocationsLog-18.js
index 6ab17714e6..781e300f93 100644
--- a/js/src/jit-test/tests/debug/Memory-drainAllocationsLog-18.js
+++ b/js/src/jit-test/tests/debug/Memory-drainAllocationsLog-18.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('gczeal' in this)
-
// Test drainAllocationsLog() entries' inNursery flag.
gczeal(0);
diff --git a/js/src/jit-test/tests/debug/Memory-takeCensus-06.js b/js/src/jit-test/tests/debug/Memory-takeCensus-06.js
index 02f8de30be..9a49140638 100644
--- a/js/src/jit-test/tests/debug/Memory-takeCensus-06.js
+++ b/js/src/jit-test/tests/debug/Memory-takeCensus-06.js
@@ -106,3 +106,29 @@ Pattern({
other: { by: 'count', label: 'other' }
}
}));
+
+try {
+ const breakdown = { by: "objectClass" };
+ breakdown.then = breakdown;
+ dbg.memory.takeCensus({ breakdown });
+ assertEq(true, false, "should not reach here");
+} catch (e) {
+ assertEq(e.message, "takeCensus breakdown 'by' value nested within itself: \"objectClass\"");
+}
+
+try {
+ const breakdown = { by: "objectClass", then: { by: "objectClass" } };
+ dbg.memory.takeCensus({ breakdown });
+ assertEq(true, false, "should not reach here");
+} catch (e) {
+ assertEq(e.message, "takeCensus breakdown 'by' value nested within itself: \"objectClass\"");
+}
+
+try {
+ const breakdown = { by: "coarseType", scripts: { by: "filename" } };
+ breakdown.scripts.noFilename = breakdown;
+ dbg.memory.takeCensus({ breakdown });
+ assertEq(true, false, "should not reach here");
+} catch (e) {
+ assertEq(e.message, "takeCensus breakdown 'by' value nested within itself: \"coarseType\"");
+}
diff --git a/js/src/jit-test/tests/debug/Object-getPromiseReactions-07.js b/js/src/jit-test/tests/debug/Object-getPromiseReactions-07.js
new file mode 100644
index 0000000000..a79b2dc2ef
--- /dev/null
+++ b/js/src/jit-test/tests/debug/Object-getPromiseReactions-07.js
@@ -0,0 +1,14 @@
+async function f(arg) {
+ await arg;
+
+ const g = newGlobal({ sameZoneAs: {} });
+ const dbg = g.Debugger({});
+ const promise = dbg.getNewestFrame().asyncPromise;
+ dbg.removeAllDebuggees();
+
+ // getPromiseReactions should return an empty array after removing debuggee.
+ assertEq(promise.getPromiseReactions().length, 0);
+}
+
+const p = f();
+f(p);
diff --git a/js/src/jit-test/tests/debug/Object-isSameNativeWithJitInfo.js b/js/src/jit-test/tests/debug/Object-isSameNativeWithJitInfo.js
new file mode 100644
index 0000000000..648847ae49
--- /dev/null
+++ b/js/src/jit-test/tests/debug/Object-isSameNativeWithJitInfo.js
@@ -0,0 +1,32 @@
+var g = newGlobal({newCompartment: true});
+var dbg = Debugger(g);
+var gdbg = dbg.addDebuggee(g);
+
+assertEq(gdbg.getProperty("print").return.isSameNativeWithJitInfo(print), true);
+assertEq(gdbg.getProperty("print").return.isSameNativeWithJitInfo(newGlobal), false);
+
+// FakeDOMObject's accessor shares the single native functions, with
+// different JSJitInfo for each.
+
+gdbg.executeInGlobal(`
+var fun1 = Object.getOwnPropertyDescriptor(FakeDOMObject.prototype, "x").get;
+var fun2 = Object.getOwnPropertyDescriptor(FakeDOMObject.prototype, "slot").get;
+`);
+
+var g_fun1 = gdbg.executeInGlobal("fun1").return;
+var g_fun2 = gdbg.executeInGlobal("fun2").return;
+
+var fun1 = Object.getOwnPropertyDescriptor(FakeDOMObject.prototype, "x").get;
+var fun2 = Object.getOwnPropertyDescriptor(FakeDOMObject.prototype, "slot").get;
+
+// isSameNative doesn't distinguish between fun1 and fun2.
+assertEq(g_fun1.isSameNative(fun1), true);
+assertEq(g_fun1.isSameNative(fun2), true);
+assertEq(g_fun2.isSameNative(fun1), true);
+assertEq(g_fun2.isSameNative(fun2), true);
+
+// isSameNativeWithJitInfo can distinguish between fun1 and fun2.
+assertEq(g_fun1.isSameNativeWithJitInfo(fun1), true);
+assertEq(g_fun1.isSameNativeWithJitInfo(fun2), false);
+assertEq(g_fun2.isSameNativeWithJitInfo(fun1), false);
+assertEq(g_fun2.isSameNativeWithJitInfo(fun2), true);
diff --git a/js/src/jit-test/tests/debug/breakpoint-oom-01.js b/js/src/jit-test/tests/debug/breakpoint-oom-01.js
index 4fd709ccd0..cbbbd7ffda 100644
--- a/js/src/jit-test/tests/debug/breakpoint-oom-01.js
+++ b/js/src/jit-test/tests/debug/breakpoint-oom-01.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this)
+// |jit-test| skip-if: !hasFunction.oomTest
// Test for OOM hitting a breakpoint in a generator.
//
diff --git a/js/src/jit-test/tests/debug/bug-1238610.js b/js/src/jit-test/tests/debug/bug-1238610.js
index 91562443bd..79fe5b3c2c 100644
--- a/js/src/jit-test/tests/debug/bug-1238610.js
+++ b/js/src/jit-test/tests/debug/bug-1238610.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomAfterAllocations' in this) || helperThreadCount() === 0
+// |jit-test| allow-oom; skip-if: !hasFunction.oomAfterAllocations || helperThreadCount() === 0
lfcode = new Array();
dbg = Debugger();
diff --git a/js/src/jit-test/tests/debug/bug-1248162.js b/js/src/jit-test/tests/debug/bug-1248162.js
index 825b3376e4..6deadfdc07 100644
--- a/js/src/jit-test/tests/debug/bug-1248162.js
+++ b/js/src/jit-test/tests/debug/bug-1248162.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom
// Adapted from randomly chosen test: js/src/jit-test/tests/debug/Debugger-onNewGlobalObject-01.js
for (var i = 0; i < 9; ++i) {
diff --git a/js/src/jit-test/tests/debug/bug-1260725.js b/js/src/jit-test/tests/debug/bug-1260725.js
index ce1c263f6a..7f8895e589 100644
--- a/js/src/jit-test/tests/debug/bug-1260725.js
+++ b/js/src/jit-test/tests/debug/bug-1260725.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var dbg = new Debugger;
dbg.onNewGlobalObject = function(global) {
dbg.memory.takeCensus({});
diff --git a/js/src/jit-test/tests/debug/bug-1565275.js b/js/src/jit-test/tests/debug/bug-1565275.js
index 242bce3a85..9e600162a4 100644
--- a/js/src/jit-test/tests/debug/bug-1565275.js
+++ b/js/src/jit-test/tests/debug/bug-1565275.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
Object.defineProperty(this, "fuzzutils", {
value: {
evaluate: function() {},
diff --git a/js/src/jit-test/tests/debug/bug-1576862-2.js b/js/src/jit-test/tests/debug/bug-1576862-2.js
index 6f7c31e98a..e1922f4a19 100644
--- a/js/src/jit-test/tests/debug/bug-1576862-2.js
+++ b/js/src/jit-test/tests/debug/bug-1576862-2.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('stackTest' in this)
// Failure to rewrap an exception in Completion::fromJSResult should be propagated.
var dbgGlobal = newGlobal({ newCompartment: true });
diff --git a/js/src/jit-test/tests/debug/bug-1584195.js b/js/src/jit-test/tests/debug/bug-1584195.js
index 93aca7291d..dd74597b1b 100644
--- a/js/src/jit-test/tests/debug/bug-1584195.js
+++ b/js/src/jit-test/tests/debug/bug-1584195.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('gczeal' in this)
// Bug 1584195: Debugger.Frame finalizer should't try to apply
// IsAboutToBeFinalized to cells of other alloc kinds, whose arenas may have
// been turned over to fresh allocations.
diff --git a/js/src/jit-test/tests/debug/bug1216261.js b/js/src/jit-test/tests/debug/bug1216261.js
index 0d98327256..6051d69c95 100644
--- a/js/src/jit-test/tests/debug/bug1216261.js
+++ b/js/src/jit-test/tests/debug/bug1216261.js
@@ -1,4 +1,4 @@
-// |jit-test| exitstatus: 3; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| exitstatus: 3
var g = newGlobal();
var dbg = new Debugger(g);
diff --git a/js/src/jit-test/tests/debug/bug1219905.js b/js/src/jit-test/tests/debug/bug1219905.js
index 5ed51a2423..5e5595a4e7 100644
--- a/js/src/jit-test/tests/debug/bug1219905.js
+++ b/js/src/jit-test/tests/debug/bug1219905.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom
// We need allow-oom here because the debugger reports an uncaught exception if
// it hits OOM calling the exception unwind hook. This causes the shell to exit
diff --git a/js/src/jit-test/tests/debug/bug1240546.js b/js/src/jit-test/tests/debug/bug1240546.js
index 6d548d8b92..797af37988 100644
--- a/js/src/jit-test/tests/debug/bug1240546.js
+++ b/js/src/jit-test/tests/debug/bug1240546.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| allow-oom; skip-if: !hasFunction.oomAfterAllocations
var g = newGlobal();
g.debuggeeGlobal = this;
diff --git a/js/src/jit-test/tests/debug/bug1240803.js b/js/src/jit-test/tests/debug/bug1240803.js
index ab1e0fb641..74eb34ed61 100644
--- a/js/src/jit-test/tests/debug/bug1240803.js
+++ b/js/src/jit-test/tests/debug/bug1240803.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| allow-oom; skip-if: !hasFunction.oomAfterAllocations
(function() {
g = newGlobal({newCompartment: true})
diff --git a/js/src/jit-test/tests/debug/bug1242111.js b/js/src/jit-test/tests/debug/bug1242111.js
index dae0efcdab..ebfc2eec70 100644
--- a/js/src/jit-test/tests/debug/bug1242111.js
+++ b/js/src/jit-test/tests/debug/bug1242111.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| allow-oom; skip-if: !hasFunction.oomAfterAllocations
var g = newGlobal();
g.debuggeeGlobal = [];
diff --git a/js/src/jit-test/tests/debug/bug1245862.js b/js/src/jit-test/tests/debug/bug1245862.js
index 274903bc40..83793a7306 100644
--- a/js/src/jit-test/tests/debug/bug1245862.js
+++ b/js/src/jit-test/tests/debug/bug1245862.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| allow-oom; skip-if: !hasFunction.oomAfterAllocations
var g = newGlobal({newCompartment: true});
var dbg = new Debugger;
diff --git a/js/src/jit-test/tests/debug/bug1251919.js b/js/src/jit-test/tests/debug/bug1251919.js
index 79fd05f890..9e5ccac82f 100644
--- a/js/src/jit-test/tests/debug/bug1251919.js
+++ b/js/src/jit-test/tests/debug/bug1251919.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
// jsfunfuzz-generated
fullcompartmentchecks(true);
// Adapted from randomly chosen test: js/src/jit-test/tests/debug/bug-1248162.js
diff --git a/js/src/jit-test/tests/debug/bug1254123.js b/js/src/jit-test/tests/debug/bug1254123.js
index 72f36ef3ec..c96753b305 100644
--- a/js/src/jit-test/tests/debug/bug1254123.js
+++ b/js/src/jit-test/tests/debug/bug1254123.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
evaluate(`
function ERROR(msg) {
throw new Error("boom");
diff --git a/js/src/jit-test/tests/debug/bug1254190.js b/js/src/jit-test/tests/debug/bug1254190.js
index 3d3572b469..74a4799c39 100644
--- a/js/src/jit-test/tests/debug/bug1254190.js
+++ b/js/src/jit-test/tests/debug/bug1254190.js
@@ -1,4 +1,4 @@
-// |jit-test| slow; skip-if: !('oomTest' in this); allow-oom
+// |jit-test| slow; allow-oom
var g = newGlobal({newCompartment: true});
var dbg = new Debugger(g);
diff --git a/js/src/jit-test/tests/debug/bug1254578.js b/js/src/jit-test/tests/debug/bug1254578.js
index f36bcef601..9592adb67a 100644
--- a/js/src/jit-test/tests/debug/bug1254578.js
+++ b/js/src/jit-test/tests/debug/bug1254578.js
@@ -1,5 +1,3 @@
-// |jit-test| slow; skip-if: !('oomTest' in this)
-
var g = newGlobal({newCompartment: true});
g.debuggeeGlobal = this;
g.eval("(" + function() {
diff --git a/js/src/jit-test/tests/debug/bug1264961.js b/js/src/jit-test/tests/debug/bug1264961.js
index c43a29504d..2047768aeb 100644
--- a/js/src/jit-test/tests/debug/bug1264961.js
+++ b/js/src/jit-test/tests/debug/bug1264961.js
@@ -1,4 +1,4 @@
-// |jit-test| slow; skip-if: !('oomTest' in this)
+// |jit-test| slow
loadFile(`
var o = {}
diff --git a/js/src/jit-test/tests/debug/bug1272908.js b/js/src/jit-test/tests/debug/bug1272908.js
index b1e1c5aeaf..397de9a8eb 100644
--- a/js/src/jit-test/tests/debug/bug1272908.js
+++ b/js/src/jit-test/tests/debug/bug1272908.js
@@ -1,4 +1,4 @@
-// |jit-test| slow; skip-if: !('oomTest' in this)
+// |jit-test| slow
// Adapted from randomly chosen test: js/src/jit-test/tests/modules/bug-1233915.js
g = newGlobal({newCompartment: true});
diff --git a/js/src/jit-test/tests/debug/bug1370905.js b/js/src/jit-test/tests/debug/bug1370905.js
index 8f8143132e..7237e55420 100644
--- a/js/src/jit-test/tests/debug/bug1370905.js
+++ b/js/src/jit-test/tests/debug/bug1370905.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom
function x() {
var global = newGlobal({sameZoneAs: this});
diff --git a/js/src/jit-test/tests/debug/bug1404710.js b/js/src/jit-test/tests/debug/bug1404710.js
index 78a8bbb5b8..69aa078333 100644
--- a/js/src/jit-test/tests/debug/bug1404710.js
+++ b/js/src/jit-test/tests/debug/bug1404710.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('stackTest' in this)
stackTest(new Function(`
var g = newGlobal();
var dbg = new Debugger(g);
diff --git a/js/src/jit-test/tests/debug/bug1434391.js b/js/src/jit-test/tests/debug/bug1434391.js
index 86efe7970b..d3231b6e15 100644
--- a/js/src/jit-test/tests/debug/bug1434391.js
+++ b/js/src/jit-test/tests/debug/bug1434391.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var g = newGlobal({newCompartment: true});
var dbg = new Debugger();
var gw = dbg.addDebuggee(g);
diff --git a/js/src/jit-test/tests/debug/bug1647309.js b/js/src/jit-test/tests/debug/bug1647309.js
index e4ffcfc349..520e505b5c 100644
--- a/js/src/jit-test/tests/debug/bug1647309.js
+++ b/js/src/jit-test/tests/debug/bug1647309.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
const g = newGlobal({ newCompartment: true });
const dbg = new Debugger(g);
diff --git a/js/src/jit-test/tests/debug/bug1878511.js b/js/src/jit-test/tests/debug/bug1878511.js
index f9143b9961..a6a550c4b7 100644
--- a/js/src/jit-test/tests/debug/bug1878511.js
+++ b/js/src/jit-test/tests/debug/bug1878511.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
var c = 0;
var dbg = new Debugger();
oomTest(function () {
diff --git a/js/src/jit-test/tests/debug/job-queue-04.js b/js/src/jit-test/tests/debug/job-queue-04.js
index 76cf241e8d..040f5874c0 100644
--- a/js/src/jit-test/tests/debug/job-queue-04.js
+++ b/js/src/jit-test/tests/debug/job-queue-04.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
// Bug 1527862: Don't assert that the Debugger drained its job queue unless we
// actually saved the debuggee's queue.
diff --git a/js/src/jit-test/tests/debug/wasm-14.js b/js/src/jit-test/tests/debug/wasm-14.js
index 023c16dca4..b47b540d72 100644
--- a/js/src/jit-test/tests/debug/wasm-14.js
+++ b/js/src/jit-test/tests/debug/wasm-14.js
@@ -1,4 +1,4 @@
-// |jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-function-references --wasm-gc; skip-if: !wasmDebuggingEnabled() || !wasmGcEnabled(); skip-if: true
+// |jit-test| test-also=--wasm-compiler=optimizing; test-also=--setpref=wasm_gc=true; skip-if: !wasmDebuggingEnabled() || !wasmGcEnabled(); skip-if: true
// An extension of wasm-10.js, testing that wasm GC objects are inspectable in locals.
// As of bug 1825098, this test is disabled. (skip-if: true)
diff --git a/js/src/jit-test/tests/debug/wasm-15.js b/js/src/jit-test/tests/debug/wasm-15.js
index 90b1fbea3b..c2af3bdc17 100644
--- a/js/src/jit-test/tests/debug/wasm-15.js
+++ b/js/src/jit-test/tests/debug/wasm-15.js
@@ -63,7 +63,7 @@ wasmRunWithDebugger(
);
// Checking if enter/leave frame at return_call_ref.
-wasmFunctionReferencesEnabled() && wasmRunWithDebugger(
+wasmGcEnabled() && wasmRunWithDebugger(
'(module (type $t (func)) (elem declare func 0) (func) (func (return_call_ref $t (ref.func 0))) (func (call 1)) (export "test" (func 2)))',
undefined,
function ({dbg}) {
diff --git a/js/src/jit-test/tests/fields/private-proxy-oom.js b/js/src/jit-test/tests/fields/private-proxy-oom.js
index dd5200d9a6..ff6bf4632b 100644
--- a/js/src/jit-test/tests/fields/private-proxy-oom.js
+++ b/js/src/jit-test/tests/fields/private-proxy-oom.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this);
// Check for proxy expando OOM issues.
function assertThrowsTypeError(f) {
@@ -45,4 +44,4 @@ function testing() {
assertThrowsTypeError(() => A.gf(target));
}
-oomTest(testing);
\ No newline at end of file
+oomTest(testing);
diff --git a/js/src/jit-test/tests/fuses/with.js b/js/src/jit-test/tests/fuses/with.js
new file mode 100644
index 0000000000..8a76e5c09b
--- /dev/null
+++ b/js/src/jit-test/tests/fuses/with.js
@@ -0,0 +1,9 @@
+let iterProto = [].values().__proto__
+with (newGlobal()) {
+ const v3 = [].values();
+ Object.defineProperty(v3.__proto__, "return", {});
+ const v18 = [];
+ for (let i = 0; i < 500; i++) {
+ [] = v18;
+ }
+}
diff --git a/js/src/jit-test/tests/gc/bug-1108007.js b/js/src/jit-test/tests/gc/bug-1108007.js
index 35daaefc91..487b765c87 100644
--- a/js/src/jit-test/tests/gc/bug-1108007.js
+++ b/js/src/jit-test/tests/gc/bug-1108007.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-threads; --no-ion; --no-baseline; skip-if: !('gczeal' in this)
+// |jit-test| --no-threads; --no-ion; --no-baseline
gczeal(2);
(function() {
diff --git a/js/src/jit-test/tests/gc/bug-1155455.js b/js/src/jit-test/tests/gc/bug-1155455.js
index c9b8040884..47b2912804 100644
--- a/js/src/jit-test/tests/gc/bug-1155455.js
+++ b/js/src/jit-test/tests/gc/bug-1155455.js
@@ -1,4 +1,4 @@
-// |jit-test| error: TypeError; skip-if: !('gczeal' in this)
+// |jit-test| error: TypeError
var g = newGlobal();
gczeal(10, 2)
var dbg = Debugger(g);
diff --git a/js/src/jit-test/tests/gc/bug-1161968.js b/js/src/jit-test/tests/gc/bug-1161968.js
index 5f83aa510c..dc78749cd5 100644
--- a/js/src/jit-test/tests/gc/bug-1161968.js
+++ b/js/src/jit-test/tests/gc/bug-1161968.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('gczeal' in this)
-
// This test case is a simplified version of debug/Source-invisible.js.
gczeal(2,21);
diff --git a/js/src/jit-test/tests/gc/bug-1165966.js b/js/src/jit-test/tests/gc/bug-1165966.js
index 79c58274cd..8b0bca873c 100644
--- a/js/src/jit-test/tests/gc/bug-1165966.js
+++ b/js/src/jit-test/tests/gc/bug-1165966.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-ion; skip-if: !('oomTest' in this)
+// |jit-test| --no-ion
var g = newGlobal();
oomTest(function() {
diff --git a/js/src/jit-test/tests/gc/bug-1171909.js b/js/src/jit-test/tests/gc/bug-1171909.js
index 755c6ff89d..9ad1767016 100644
--- a/js/src/jit-test/tests/gc/bug-1171909.js
+++ b/js/src/jit-test/tests/gc/bug-1171909.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest((function(x) { assertEq(x + y + ex, 25); }));
diff --git a/js/src/jit-test/tests/gc/bug-1175755.js b/js/src/jit-test/tests/gc/bug-1175755.js
index 1b6960fb8c..3c07184fe8 100644
--- a/js/src/jit-test/tests/gc/bug-1175755.js
+++ b/js/src/jit-test/tests/gc/bug-1175755.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; allow-unhandlable-oom; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| allow-oom; allow-unhandlable-oom
setGCCallback({
action: "majorGC",
diff --git a/js/src/jit-test/tests/gc/bug-1191576.js b/js/src/jit-test/tests/gc/bug-1191576.js
index 6346905256..ba74bfe142 100644
--- a/js/src/jit-test/tests/gc/bug-1191576.js
+++ b/js/src/jit-test/tests/gc/bug-1191576.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('gczeal' in this && 'oomAfterAllocations' in this)
+// |jit-test| allow-oom
var lfcode = new Array();
gczeal(14);
diff --git a/js/src/jit-test/tests/gc/bug-1206677.js b/js/src/jit-test/tests/gc/bug-1206677.js
index a0d2ff3a1f..0052ef0ad1 100644
--- a/js/src/jit-test/tests/gc/bug-1206677.js
+++ b/js/src/jit-test/tests/gc/bug-1206677.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this) || helperThreadCount() === 0
+// |jit-test| skip-if: helperThreadCount() === 0
var lfGlobal = newGlobal();
for (lfLocal in this) {
diff --git a/js/src/jit-test/tests/gc/bug-1208994.js b/js/src/jit-test/tests/gc/bug-1208994.js
index 12c24f62af..4f28b0c0eb 100644
--- a/js/src/jit-test/tests/gc/bug-1208994.js
+++ b/js/src/jit-test/tests/gc/bug-1208994.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => getBacktrace({args: oomTest[load+1], locals: true, thisprops: true}));
diff --git a/js/src/jit-test/tests/gc/bug-1209001.js b/js/src/jit-test/tests/gc/bug-1209001.js
index a737224d0d..3dfb087156 100644
--- a/js/src/jit-test/tests/gc/bug-1209001.js
+++ b/js/src/jit-test/tests/gc/bug-1209001.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => parseModule('import v from "mod";'));
diff --git a/js/src/jit-test/tests/gc/bug-1210607.js b/js/src/jit-test/tests/gc/bug-1210607.js
index 15312c810a..d304f24d7d 100644
--- a/js/src/jit-test/tests/gc/bug-1210607.js
+++ b/js/src/jit-test/tests/gc/bug-1210607.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| allow-oom
var g = newGlobal({newCompartment: true});
x = Debugger(g);
diff --git a/js/src/jit-test/tests/gc/bug-1214006.js b/js/src/jit-test/tests/gc/bug-1214006.js
index ed2c6468dc..3b150fbaed 100644
--- a/js/src/jit-test/tests/gc/bug-1214006.js
+++ b/js/src/jit-test/tests/gc/bug-1214006.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom
function f() {
eval("(function() y)()");
diff --git a/js/src/jit-test/tests/gc/bug-1214781.js b/js/src/jit-test/tests/gc/bug-1214781.js
index d18845812c..b8a5bb87bb 100644
--- a/js/src/jit-test/tests/gc/bug-1214781.js
+++ b/js/src/jit-test/tests/gc/bug-1214781.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom
try {
gcparam("maxBytes", gcparam("gcBytes"));
diff --git a/js/src/jit-test/tests/gc/bug-1214846.js b/js/src/jit-test/tests/gc/bug-1214846.js
index 23b5b9fe94..4ba8b3a78f 100644
--- a/js/src/jit-test/tests/gc/bug-1214846.js
+++ b/js/src/jit-test/tests/gc/bug-1214846.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this) || helperThreadCount() === 0
+// |jit-test| skip-if: !hasFunction.oomTest || helperThreadCount() === 0
enableGeckoProfiling();
var s = newGlobal();
diff --git a/js/src/jit-test/tests/gc/bug-1215363-1.js b/js/src/jit-test/tests/gc/bug-1215363-1.js
index 3ed21e1f9a..fe9eb8c9a2 100644
--- a/js/src/jit-test/tests/gc/bug-1215363-1.js
+++ b/js/src/jit-test/tests/gc/bug-1215363-1.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => parseModule(10));
diff --git a/js/src/jit-test/tests/gc/bug-1215363-2.js b/js/src/jit-test/tests/gc/bug-1215363-2.js
index 4b51a5a96d..afe0af7ac1 100644
--- a/js/src/jit-test/tests/gc/bug-1215363-2.js
+++ b/js/src/jit-test/tests/gc/bug-1215363-2.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var lfcode = new Array();
oomTest((function(x) {
assertEq(...Object);
diff --git a/js/src/jit-test/tests/gc/bug-1215363-3.js b/js/src/jit-test/tests/gc/bug-1215363-3.js
index 33495af2e1..0022507763 100644
--- a/js/src/jit-test/tests/gc/bug-1215363-3.js
+++ b/js/src/jit-test/tests/gc/bug-1215363-3.js
@@ -1,4 +1,2 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var lfcode = new Array();
oomTest(() => getBacktrace({}));
diff --git a/js/src/jit-test/tests/gc/bug-1216607.js b/js/src/jit-test/tests/gc/bug-1216607.js
index 1afac7faab..46530c113c 100644
--- a/js/src/jit-test/tests/gc/bug-1216607.js
+++ b/js/src/jit-test/tests/gc/bug-1216607.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
enableGeckoProfilingWithSlowAssertions();
try {
(function() {
diff --git a/js/src/jit-test/tests/gc/bug-1221359.js b/js/src/jit-test/tests/gc/bug-1221359.js
index dcbafeb446..96b323edd0 100644
--- a/js/src/jit-test/tests/gc/bug-1221359.js
+++ b/js/src/jit-test/tests/gc/bug-1221359.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => getBacktrace({
locals: true,
thisprops: true
diff --git a/js/src/jit-test/tests/gc/bug-1221747.js b/js/src/jit-test/tests/gc/bug-1221747.js
index 5ff33dd64e..12d31df2fb 100644
--- a/js/src/jit-test/tests/gc/bug-1221747.js
+++ b/js/src/jit-test/tests/gc/bug-1221747.js
@@ -1,4 +1,4 @@
-// |jit-test| --dump-bytecode; skip-if: !('oomTest' in this)
+// |jit-test| --dump-bytecode
function f() {
eval("(function() {})()");
diff --git a/js/src/jit-test/tests/gc/bug-1223021.js b/js/src/jit-test/tests/gc/bug-1223021.js
index bbc40aa1fb..a5a85f3104 100644
--- a/js/src/jit-test/tests/gc/bug-1223021.js
+++ b/js/src/jit-test/tests/gc/bug-1223021.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
function f() {
return this === null;
};
diff --git a/js/src/jit-test/tests/gc/bug-1224710.js b/js/src/jit-test/tests/gc/bug-1224710.js
index 9cb9aa7cd3..68809d7f36 100644
--- a/js/src/jit-test/tests/gc/bug-1224710.js
+++ b/js/src/jit-test/tests/gc/bug-1224710.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(function() {
eval("\
function g() {\
diff --git a/js/src/jit-test/tests/gc/bug-1226896.js b/js/src/jit-test/tests/gc/bug-1226896.js
index 0b8c513cc8..8ecf66fd59 100644
--- a/js/src/jit-test/tests/gc/bug-1226896.js
+++ b/js/src/jit-test/tests/gc/bug-1226896.js
@@ -1,4 +1,4 @@
-// |jit-test| --ion-pruning=on; skip-if: !('oomTest' in this)
+// |jit-test| --ion-pruning=on
oomTest(() => {
var g = newGlobal({sameZoneAs: this});
diff --git a/js/src/jit-test/tests/gc/bug-1231386.js b/js/src/jit-test/tests/gc/bug-1231386.js
index c2dc55b734..01bbee81cf 100644
--- a/js/src/jit-test/tests/gc/bug-1231386.js
+++ b/js/src/jit-test/tests/gc/bug-1231386.js
@@ -1,4 +1,4 @@
-// |jit-test| slow; skip-if: !('oomTest' in this)
+// |jit-test| slow
function f1() {}
function f2() {}
diff --git a/js/src/jit-test/tests/gc/bug-1232386.js b/js/src/jit-test/tests/gc/bug-1232386.js
index 82a33a7ec4..b768176cf0 100644
--- a/js/src/jit-test/tests/gc/bug-1232386.js
+++ b/js/src/jit-test/tests/gc/bug-1232386.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom
var dbg = new Debugger;
dbg.onNewGlobalObject = function(global) {
diff --git a/js/src/jit-test/tests/gc/bug-1234410.js b/js/src/jit-test/tests/gc/bug-1234410.js
index fe400f8013..79623a0a43 100644
--- a/js/src/jit-test/tests/gc/bug-1234410.js
+++ b/js/src/jit-test/tests/gc/bug-1234410.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
enableGeckoProfiling();
oomTest(() => {
try {
diff --git a/js/src/jit-test/tests/gc/bug-1236473.js b/js/src/jit-test/tests/gc/bug-1236473.js
index 0051e789a6..cef0bd1d25 100644
--- a/js/src/jit-test/tests/gc/bug-1236473.js
+++ b/js/src/jit-test/tests/gc/bug-1236473.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => {
offThreadCompileToStencil(`try {} catch (NaN) {}`);
var stencil = finishOffThreadStencil();
diff --git a/js/src/jit-test/tests/gc/bug-1238555.js b/js/src/jit-test/tests/gc/bug-1238555.js
index 4b9963292e..f9e139a895 100644
--- a/js/src/jit-test/tests/gc/bug-1238555.js
+++ b/js/src/jit-test/tests/gc/bug-1238555.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(
function x() {
try {
diff --git a/js/src/jit-test/tests/gc/bug-1238575-2.js b/js/src/jit-test/tests/gc/bug-1238575-2.js
index 9fe011efa1..0c715d3998 100644
--- a/js/src/jit-test/tests/gc/bug-1238575-2.js
+++ b/js/src/jit-test/tests/gc/bug-1238575-2.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => evalInWorker("1"));
diff --git a/js/src/jit-test/tests/gc/bug-1238575.js b/js/src/jit-test/tests/gc/bug-1238575.js
index 8e6a629d9f..fbce26ff52 100644
--- a/js/src/jit-test/tests/gc/bug-1238575.js
+++ b/js/src/jit-test/tests/gc/bug-1238575.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; allow-unhandlable-oom; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| allow-oom; allow-unhandlable-oom; skip-if: helperThreadCount() === 0
oomAfterAllocations(5)
gcslice(11);
diff --git a/js/src/jit-test/tests/gc/bug-1238582.js b/js/src/jit-test/tests/gc/bug-1238582.js
index b5dad7a64d..9b1a267627 100644
--- a/js/src/jit-test/tests/gc/bug-1238582.js
+++ b/js/src/jit-test/tests/gc/bug-1238582.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => { let a = [2147483651]; [a[0], a[undefined]].sort(); });
diff --git a/js/src/jit-test/tests/gc/bug-1240503.js b/js/src/jit-test/tests/gc/bug-1240503.js
index 167752962b..cdb93a0929 100644
--- a/js/src/jit-test/tests/gc/bug-1240503.js
+++ b/js/src/jit-test/tests/gc/bug-1240503.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
function arrayProtoOutOfRange() {
for (let [] = () => r, get;;)
var r = f(i % 2 ? a : b);
diff --git a/js/src/jit-test/tests/gc/bug-1240527.js b/js/src/jit-test/tests/gc/bug-1240527.js
index ca4e0e3eb6..1621fa5764 100644
--- a/js/src/jit-test/tests/gc/bug-1240527.js
+++ b/js/src/jit-test/tests/gc/bug-1240527.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: helperThreadCount() === 0 || !('oomTest' in this)
+// |jit-test| skip-if: helperThreadCount() === 0
offThreadCompileToStencil(`
oomTest(() => "".search(/d/));
diff --git a/js/src/jit-test/tests/gc/bug-1241731.js b/js/src/jit-test/tests/gc/bug-1241731.js
index 015c7f3e67..320dece892 100644
--- a/js/src/jit-test/tests/gc/bug-1241731.js
+++ b/js/src/jit-test/tests/gc/bug-1241731.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => serialize(0, [{}]));
diff --git a/js/src/jit-test/tests/gc/bug-1242812.js b/js/src/jit-test/tests/gc/bug-1242812.js
index df4ae09998..b685d17928 100644
--- a/js/src/jit-test/tests/gc/bug-1242812.js
+++ b/js/src/jit-test/tests/gc/bug-1242812.js
@@ -1,4 +1,2 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var lfcode = new Array();
oomTest(() => { let a = [2147483651]; [-1, 0, 1, 31, 32].sort(); });
diff --git a/js/src/jit-test/tests/gc/bug-1245520.js b/js/src/jit-test/tests/gc/bug-1245520.js
index 1f59c3dbab..d951a071ac 100644
--- a/js/src/jit-test/tests/gc/bug-1245520.js
+++ b/js/src/jit-test/tests/gc/bug-1245520.js
@@ -1,4 +1,2 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var t = {};
oomTest(() => serialize(t));
diff --git a/js/src/jit-test/tests/gc/bug-1252329.js b/js/src/jit-test/tests/gc/bug-1252329.js
index 8f5b1ce282..100a1817cf 100644
--- a/js/src/jit-test/tests/gc/bug-1252329.js
+++ b/js/src/jit-test/tests/gc/bug-1252329.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: helperThreadCount() == 0 || !('oomAfterAllocations' in this)
+// |jit-test| allow-oom; skip-if: helperThreadCount() == 0 || !hasFunction.oomAfterAllocations
var lfcode = new Array();
lfcode.push("5");
diff --git a/js/src/jit-test/tests/gc/bug-1253124.js b/js/src/jit-test/tests/gc/bug-1253124.js
index 6949605b00..3302e120fb 100644
--- a/js/src/jit-test/tests/gc/bug-1253124.js
+++ b/js/src/jit-test/tests/gc/bug-1253124.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
for (let i = 0; i < 10; i++)
toPrimitive = Date.prototype[Symbol.toPrimitive];
assertThrowsInstanceOf(() => 0);
diff --git a/js/src/jit-test/tests/gc/bug-1259306.js b/js/src/jit-test/tests/gc/bug-1259306.js
index fba5f71b6a..bb92bb43f5 100644
--- a/js/src/jit-test/tests/gc/bug-1259306.js
+++ b/js/src/jit-test/tests/gc/bug-1259306.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
let runCount = 0;
oomTest(() => {
if (runCount < 5) {
diff --git a/js/src/jit-test/tests/gc/bug-1261329.js b/js/src/jit-test/tests/gc/bug-1261329.js
index afa1db2c3d..35015917e4 100644
--- a/js/src/jit-test/tests/gc/bug-1261329.js
+++ b/js/src/jit-test/tests/gc/bug-1261329.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this)
+// |jit-test| skip-if: !hasFunction.oomTest
print = function() {}
function k() { return dissrc(print); }
diff --git a/js/src/jit-test/tests/gc/bug-1263862.js b/js/src/jit-test/tests/gc/bug-1263862.js
index 955805047a..1b904d34c7 100644
--- a/js/src/jit-test/tests/gc/bug-1263862.js
+++ b/js/src/jit-test/tests/gc/bug-1263862.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
function loadFile(lfVarx) {
oomTest(() => eval(lfVarx));
}
diff --git a/js/src/jit-test/tests/gc/bug-1263871.js b/js/src/jit-test/tests/gc/bug-1263871.js
index 6680affedf..92e6d3645b 100644
--- a/js/src/jit-test/tests/gc/bug-1263871.js
+++ b/js/src/jit-test/tests/gc/bug-1263871.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
lfLogBuffer = `this[''] = function() {}`;
loadFile(lfLogBuffer);
loadFile(lfLogBuffer);
diff --git a/js/src/jit-test/tests/gc/bug-1263884.js b/js/src/jit-test/tests/gc/bug-1263884.js
index 949945e0a4..d203c06354 100644
--- a/js/src/jit-test/tests/gc/bug-1263884.js
+++ b/js/src/jit-test/tests/gc/bug-1263884.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(function() {
eval(`
var argObj = function () { return arguments }()
diff --git a/js/src/jit-test/tests/gc/bug-1271110.js b/js/src/jit-test/tests/gc/bug-1271110.js
index 12d1617c57..49a849045d 100644
--- a/js/src/jit-test/tests/gc/bug-1271110.js
+++ b/js/src/jit-test/tests/gc/bug-1271110.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
gczeal(0);
var x1 = [];
diff --git a/js/src/jit-test/tests/gc/bug-1280588.js b/js/src/jit-test/tests/gc/bug-1280588.js
index a6b2c4f075..b3391e6eef 100644
--- a/js/src/jit-test/tests/gc/bug-1280588.js
+++ b/js/src/jit-test/tests/gc/bug-1280588.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var x = [];
oomTest(() => setGCCallback({ action: "minorGC" }));
oomTest(() => setGCCallback({ action: "majorGC" }));
diff --git a/js/src/jit-test/tests/gc/bug-1282986.js b/js/src/jit-test/tests/gc/bug-1282986.js
index 934cea5b61..215b2190af 100644
--- a/js/src/jit-test/tests/gc/bug-1282986.js
+++ b/js/src/jit-test/tests/gc/bug-1282986.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var lfLogBuffer = `
evalInWorker(\`
try { oomAfterAllocations(2); } catch(e) {}
diff --git a/js/src/jit-test/tests/gc/bug-1287399.js b/js/src/jit-test/tests/gc/bug-1287399.js
index c7e6b8f44d..c7345954f6 100644
--- a/js/src/jit-test/tests/gc/bug-1287399.js
+++ b/js/src/jit-test/tests/gc/bug-1287399.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: typeof gczeal !== 'function' || helperThreadCount() === 0
+// |jit-test| skip-if: helperThreadCount() === 0
var lfGlobal = newGlobal();
gczeal(4);
diff --git a/js/src/jit-test/tests/gc/bug-1287869.js b/js/src/jit-test/tests/gc/bug-1287869.js
index dc04345ccf..7175342655 100644
--- a/js/src/jit-test/tests/gc/bug-1287869.js
+++ b/js/src/jit-test/tests/gc/bug-1287869.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('gczeal' in this)
-
gczeal(16);
let a = [];
for (let i = 0; i < 1000; i++)
diff --git a/js/src/jit-test/tests/gc/bug-1292564.js b/js/src/jit-test/tests/gc/bug-1292564.js
index f292e1682c..56918093f8 100644
--- a/js/src/jit-test/tests/gc/bug-1292564.js
+++ b/js/src/jit-test/tests/gc/bug-1292564.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom
oomTest(() => {
let global = newGlobal({sameZoneAs: this});
diff --git a/js/src/jit-test/tests/gc/bug-1298356.js b/js/src/jit-test/tests/gc/bug-1298356.js
index 4c8a213125..78a492649d 100644
--- a/js/src/jit-test/tests/gc/bug-1298356.js
+++ b/js/src/jit-test/tests/gc/bug-1298356.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
/x/;
oomTest(function(){
offThreadCompileToStencil('');
diff --git a/js/src/jit-test/tests/gc/bug-1303015.js b/js/src/jit-test/tests/gc/bug-1303015.js
index d5b5e3a5bd..021db0568d 100644
--- a/js/src/jit-test/tests/gc/bug-1303015.js
+++ b/js/src/jit-test/tests/gc/bug-1303015.js
@@ -1,4 +1,4 @@
-// |jit-test| slow; skip-if: !('oomTest' in this)
+// |jit-test| slow
var x = ``.split();
oomTest(function() {
diff --git a/js/src/jit-test/tests/gc/bug-1305220.js b/js/src/jit-test/tests/gc/bug-1305220.js
index b6dad199a9..0666530bb9 100644
--- a/js/src/jit-test/tests/gc/bug-1305220.js
+++ b/js/src/jit-test/tests/gc/bug-1305220.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| allow-oom; skip-if: !hasFunction.oomAfterAllocations
s = newGlobal();
evalcx("\
diff --git a/js/src/jit-test/tests/gc/bug-1310589.js b/js/src/jit-test/tests/gc/bug-1310589.js
index 2907c3c440..98b045050c 100644
--- a/js/src/jit-test/tests/gc/bug-1310589.js
+++ b/js/src/jit-test/tests/gc/bug-1310589.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
a = o = s = r = []
o2 = s2 = r2 = g2 = f2 = m2 = Map
e2 = Set
diff --git a/js/src/jit-test/tests/gc/bug-1315946.js b/js/src/jit-test/tests/gc/bug-1315946.js
index a0668d00b5..8e95a97d20 100644
--- a/js/src/jit-test/tests/gc/bug-1315946.js
+++ b/js/src/jit-test/tests/gc/bug-1315946.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
// Don't run a full oomTest because it takes ages - a few iterations are
// sufficient to trigger the bug.
let i = 0;
diff --git a/js/src/jit-test/tests/gc/bug-1325551.js b/js/src/jit-test/tests/gc/bug-1325551.js
index 700f61daf8..8c4499a01e 100644
--- a/js/src/jit-test/tests/gc/bug-1325551.js
+++ b/js/src/jit-test/tests/gc/bug-1325551.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
let g = newGlobal({newCompartment: true});
let dbg = new Debugger;
let gw = dbg.addDebuggee(g);
diff --git a/js/src/jit-test/tests/gc/bug-1340010.js b/js/src/jit-test/tests/gc/bug-1340010.js
index 30c3cb08b9..22ceafce6f 100644
--- a/js/src/jit-test/tests/gc/bug-1340010.js
+++ b/js/src/jit-test/tests/gc/bug-1340010.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: helperThreadCount() === 0 || !('deterministicgc' in this)
+// |jit-test| skip-if: helperThreadCount() === 0
gczeal(0);
gc();
diff --git a/js/src/jit-test/tests/gc/bug-1384047.js b/js/src/jit-test/tests/gc/bug-1384047.js
index 4ec6a5272d..162af6af42 100644
--- a/js/src/jit-test/tests/gc/bug-1384047.js
+++ b/js/src/jit-test/tests/gc/bug-1384047.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
+// |jit-test| skip-if: !hasFunction.oomTest
newGlobal();
evalcx("oomTest(newGlobal);", newGlobal());
diff --git a/js/src/jit-test/tests/gc/bug-1401141.js b/js/src/jit-test/tests/gc/bug-1401141.js
index 6ed1bea611..2674f77c37 100644
--- a/js/src/jit-test/tests/gc/bug-1401141.js
+++ b/js/src/jit-test/tests/gc/bug-1401141.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('gczeal' in this) || helperThreadCount() === 0
+// |jit-test| skip-if: helperThreadCount() === 0
gczeal(15,1);
setGCCallback({
diff --git a/js/src/jit-test/tests/gc/bug-1411302.js b/js/src/jit-test/tests/gc/bug-1411302.js
index 20c051edd9..510648b742 100644
--- a/js/src/jit-test/tests/gc/bug-1411302.js
+++ b/js/src/jit-test/tests/gc/bug-1411302.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
let lfPreamble = `
value:{
`;
diff --git a/js/src/jit-test/tests/gc/bug-1435295.js b/js/src/jit-test/tests/gc/bug-1435295.js
index 01214a6214..0f091b7d49 100644
--- a/js/src/jit-test/tests/gc/bug-1435295.js
+++ b/js/src/jit-test/tests/gc/bug-1435295.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: helperThreadCount() === 0 || !('oomTest' in this)
+// |jit-test| skip-if: helperThreadCount() === 0
oomTest(new Function(`function execOffThread(source) {
offThreadCompileModuleToStencil(source);
diff --git a/js/src/jit-test/tests/gc/bug-1449887.js b/js/src/jit-test/tests/gc/bug-1449887.js
index ef7fa45c7f..c5732b0723 100644
--- a/js/src/jit-test/tests/gc/bug-1449887.js
+++ b/js/src/jit-test/tests/gc/bug-1449887.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(function() { x, 0, { z: function() {} } });
diff --git a/js/src/jit-test/tests/gc/bug-1456536.js b/js/src/jit-test/tests/gc/bug-1456536.js
index adfe3f0c9b..b61f5ca09c 100644
--- a/js/src/jit-test/tests/gc/bug-1456536.js
+++ b/js/src/jit-test/tests/gc/bug-1456536.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(new Function(`let a = grayRoot();`));
diff --git a/js/src/jit-test/tests/gc/bug-1462337.js b/js/src/jit-test/tests/gc/bug-1462337.js
index 84a5392a1f..1eaafc4997 100644
--- a/js/src/jit-test/tests/gc/bug-1462337.js
+++ b/js/src/jit-test/tests/gc/bug-1462337.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(function() {
grayRoot().x = Object.create((obj[name]++));
});
diff --git a/js/src/jit-test/tests/gc/bug-1472734.js b/js/src/jit-test/tests/gc/bug-1472734.js
index f88f3af1c6..2d4432743d 100644
--- a/js/src/jit-test/tests/gc/bug-1472734.js
+++ b/js/src/jit-test/tests/gc/bug-1472734.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this) || helperThreadCount() === 0
+// |jit-test| skip-if: helperThreadCount() === 0
try {
oomTest(function() {
diff --git a/js/src/jit-test/tests/gc/bug-1490042.js b/js/src/jit-test/tests/gc/bug-1490042.js
index b043f25486..937ae41ef3 100644
--- a/js/src/jit-test/tests/gc/bug-1490042.js
+++ b/js/src/jit-test/tests/gc/bug-1490042.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-ion; --no-baseline; --no-blinterp; skip-if: !('gcstate' in this && 'oomAfterAllocations' in this)
+// |jit-test| --no-ion; --no-baseline; --no-blinterp; skip-if: !('gcstate' in this && hasFunction.oomAfterAllocations)
gczeal(0);
diff --git a/js/src/jit-test/tests/gc/bug-1530643.js b/js/src/jit-test/tests/gc/bug-1530643.js
index c85e6267c6..fa61063ea9 100644
--- a/js/src/jit-test/tests/gc/bug-1530643.js
+++ b/js/src/jit-test/tests/gc/bug-1530643.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomAtAllocation' in this); error: Error
+// |jit-test| skip-if: !hasFunction.oomAtAllocation; error: Error
const THREAD_TYPE_WORKER = 10;
diff --git a/js/src/jit-test/tests/gc/bug-1556155.js b/js/src/jit-test/tests/gc/bug-1556155.js
index 3c0dd11251..9bf2648131 100644
--- a/js/src/jit-test/tests/gc/bug-1556155.js
+++ b/js/src/jit-test/tests/gc/bug-1556155.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
a = [];
minorgc();
Object.defineProperty(a, 12, {}).push(1);
diff --git a/js/src/jit-test/tests/gc/bug-1568119.js b/js/src/jit-test/tests/gc/bug-1568119.js
index 1aed85c325..ccba9e1a72 100644
--- a/js/src/jit-test/tests/gc/bug-1568119.js
+++ b/js/src/jit-test/tests/gc/bug-1568119.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
function allocateSomeStuff() {
return {a: "a fish", b: [1, 2, 3]};
}
diff --git a/js/src/jit-test/tests/gc/bug-1574877.js b/js/src/jit-test/tests/gc/bug-1574877.js
index e53746bcb0..09b5b9e6b0 100644
--- a/js/src/jit-test/tests/gc/bug-1574877.js
+++ b/js/src/jit-test/tests/gc/bug-1574877.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
function parseModule(source) {
offThreadCompileModuleToStencil(source);
var stencil = finishOffThreadStencil();
diff --git a/js/src/jit-test/tests/gc/bug-1648901.js b/js/src/jit-test/tests/gc/bug-1648901.js
index 13e4895068..2eddbd9a3c 100644
--- a/js/src/jit-test/tests/gc/bug-1648901.js
+++ b/js/src/jit-test/tests/gc/bug-1648901.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('gczeal' in this)
-
gczeal(15);
enableShellAllocationMetadataBuilder();
var registry = new FinalizationRegistry(x => 0);
diff --git a/js/src/jit-test/tests/gc/bug-1654186.js b/js/src/jit-test/tests/gc/bug-1654186.js
index 562938d8f8..e6db718bb2 100644
--- a/js/src/jit-test/tests/gc/bug-1654186.js
+++ b/js/src/jit-test/tests/gc/bug-1654186.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| allow-oom; skip-if: !hasFunction.oomAfterAllocations
gczeal(14, 5);
var g = newGlobal();
diff --git a/js/src/jit-test/tests/gc/bug-1657554.js b/js/src/jit-test/tests/gc/bug-1657554.js
index f751d442b9..f8dd0282b6 100644
--- a/js/src/jit-test/tests/gc/bug-1657554.js
+++ b/js/src/jit-test/tests/gc/bug-1657554.js
@@ -1,2 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
oomTest(() => eval("new WeakRef({});"));
diff --git a/js/src/jit-test/tests/gc/bug-1660293.js b/js/src/jit-test/tests/gc/bug-1660293.js
index a2c953c11f..c7e3f46c9a 100644
--- a/js/src/jit-test/tests/gc/bug-1660293.js
+++ b/js/src/jit-test/tests/gc/bug-1660293.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomAfterAllocations' in this)
-
try {
function varying(mapColor, keyColor) {
enqueueMark(`set-color-${keyColor}`);
diff --git a/js/src/jit-test/tests/gc/bug-1689039.js b/js/src/jit-test/tests/gc/bug-1689039.js
index 4bfe9bd140..a5d3c71106 100644
--- a/js/src/jit-test/tests/gc/bug-1689039.js
+++ b/js/src/jit-test/tests/gc/bug-1689039.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomAfterAllocations' in this)
-
gczeal(7);
for (let i = 0; i < 9999; ++i) {
undefined + "y";
diff --git a/js/src/jit-test/tests/gc/bug-1692221.js b/js/src/jit-test/tests/gc/bug-1692221.js
index 6300788ad9..11bd4a3aea 100644
--- a/js/src/jit-test/tests/gc/bug-1692221.js
+++ b/js/src/jit-test/tests/gc/bug-1692221.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomAtAllocation' in this)
+// |jit-test| allow-oom
// Test TenuredChunk::decommitFreeArenasWithoutUnlocking updates chunk
// metadata correctly. The data is checked by assertions so this test is about
diff --git a/js/src/jit-test/tests/gc/bug-1791975.js b/js/src/jit-test/tests/gc/bug-1791975.js
index a194a92dd0..f56f723db7 100644
--- a/js/src/jit-test/tests/gc/bug-1791975.js
+++ b/js/src/jit-test/tests/gc/bug-1791975.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomAtAllocation' in this)
-
gczeal(10, 10);
try {
throw 0;
diff --git a/js/src/jit-test/tests/gc/bug-1802478.js b/js/src/jit-test/tests/gc/bug-1802478.js
index 05559c3f6f..d37188d576 100644
--- a/js/src/jit-test/tests/gc/bug-1802478.js
+++ b/js/src/jit-test/tests/gc/bug-1802478.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomAfterAllocations' in this)
-
enableTrackAllocations();
for (a of "x") {
gczeal(2, 1);
diff --git a/js/src/jit-test/tests/gc/bug-1804629.js b/js/src/jit-test/tests/gc/bug-1804629.js
index 6ed62eec8c..84c473c347 100644
--- a/js/src/jit-test/tests/gc/bug-1804629.js
+++ b/js/src/jit-test/tests/gc/bug-1804629.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('gczeal' in this); error: ReferenceError
+// |jit-test| error: ReferenceError
gczeal(0);
setMarkStackLimit(1);
diff --git a/js/src/jit-test/tests/gc/bug-1865597.js b/js/src/jit-test/tests/gc/bug-1865597.js
index 4bc7ff3a4d..451b806294 100644
--- a/js/src/jit-test/tests/gc/bug-1865597.js
+++ b/js/src/jit-test/tests/gc/bug-1865597.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => {
gcparam('parallelMarkingEnabled', false);
assertEq(gcparam('parallelMarkingEnabled'), 0);
diff --git a/js/src/jit-test/tests/gc/bug-1870925.js b/js/src/jit-test/tests/gc/bug-1870925.js
new file mode 100644
index 0000000000..48182a04e4
--- /dev/null
+++ b/js/src/jit-test/tests/gc/bug-1870925.js
@@ -0,0 +1,14 @@
+// |jit-test| --no-ggc
+
+let x = [[]];
+for (let i = 0; i < 25; i++) {
+ for (let j = 0; j < 25; j++) {
+ (function () {
+ x[i] | 0;
+ })();
+ }
+}
+
+verifyprebarriers();
+bailAfter(1);
+verifyprebarriers();
diff --git a/js/src/jit-test/tests/gc/bug-1871186.js b/js/src/jit-test/tests/gc/bug-1871186.js
index fc4620fa65..260356ab8b 100644
--- a/js/src/jit-test/tests/gc/bug-1871186.js
+++ b/js/src/jit-test/tests/gc/bug-1871186.js
@@ -1,4 +1,4 @@
-// |jit-test| --blinterp-eager; skip-if: !('oomTest' in this)
+// |jit-test| --blinterp-eager
gc();
function f(x) {
diff --git a/js/src/jit-test/tests/gc/bug-1877406.js b/js/src/jit-test/tests/gc/bug-1877406.js
index bcb26ed062..d412abe82c 100644
--- a/js/src/jit-test/tests/gc/bug-1877406.js
+++ b/js/src/jit-test/tests/gc/bug-1877406.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this); --fuzzing-safe
+// |jit-test| --fuzzing-safe
oomTest(Debugger);
oomTest(Debugger);
diff --git a/js/src/jit-test/tests/gc/bug-1880870.js b/js/src/jit-test/tests/gc/bug-1880870.js
new file mode 100644
index 0000000000..e9bffc9295
--- /dev/null
+++ b/js/src/jit-test/tests/gc/bug-1880870.js
@@ -0,0 +1,6 @@
+var x = [];
+function f() {
+ Object.entries(x);
+ Object.defineProperty(x, "", { enumerable: true, get: f });
+}
+oomTest(f);
diff --git a/js/src/jit-test/tests/gc/bug-1881417.js b/js/src/jit-test/tests/gc/bug-1881417.js
new file mode 100644
index 0000000000..f79d7d5c33
--- /dev/null
+++ b/js/src/jit-test/tests/gc/bug-1881417.js
@@ -0,0 +1,12 @@
+for (let x = 0; x < 2; (function() { x++; })()) {};
+function f() {
+ var y = new (function () {})();
+ (function () {
+ Reflect.apply(y.toString, [], [0]);
+ })();
+}
+f();
+var z = [];
+z.keepFailing = [];
+oomTest(f, z);
+dumpHeap();
diff --git a/js/src/jit-test/tests/gc/bug-1884746.js b/js/src/jit-test/tests/gc/bug-1884746.js
new file mode 100644
index 0000000000..5dab465dac
--- /dev/null
+++ b/js/src/jit-test/tests/gc/bug-1884746.js
@@ -0,0 +1,7 @@
+var x = newGlobal().Int8Array;
+for (let i = 0; i < 2; i++) {
+ function f() {}
+ oomTest(function() {
+ new x().__proto__ = f;
+ });
+}
diff --git a/js/src/jit-test/tests/gc/bug-978802.js b/js/src/jit-test/tests/gc/bug-978802.js
index 1e13b76e0e..42b3553488 100644
--- a/js/src/jit-test/tests/gc/bug-978802.js
+++ b/js/src/jit-test/tests/gc/bug-978802.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => {
try {
var max = 400;
diff --git a/js/src/jit-test/tests/gc/bug1246607.js b/js/src/jit-test/tests/gc/bug1246607.js
index 1fbe9e5208..222fd20ed3 100644
--- a/js/src/jit-test/tests/gc/bug1246607.js
+++ b/js/src/jit-test/tests/gc/bug1246607.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: typeof oomTest !== 'function' || typeof Intl !== 'object'
+// |jit-test| skip-if: typeof Intl !== 'object'
oomTest(() => {
try {
diff --git a/js/src/jit-test/tests/gc/bug1326343-gcstats.js b/js/src/jit-test/tests/gc/bug1326343-gcstats.js
index f29306af4c..fa0190284d 100644
--- a/js/src/jit-test/tests/gc/bug1326343-gcstats.js
+++ b/js/src/jit-test/tests/gc/bug1326343-gcstats.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
setJitCompilerOption('baseline.warmup.trigger', 4);
oomTest((function () {
gcslice(0);
diff --git a/js/src/jit-test/tests/gc/bug1337324.js b/js/src/jit-test/tests/gc/bug1337324.js
index eaf4c080f0..5ca628fc1d 100644
--- a/js/src/jit-test/tests/gc/bug1337324.js
+++ b/js/src/jit-test/tests/gc/bug1337324.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
oomTest(function () {
offThreadCompileModuleToStencil('');
var stencil = finishOffThreadStencil();
diff --git a/js/src/jit-test/tests/gc/bug1471949.js b/js/src/jit-test/tests/gc/bug1471949.js
index 5f0f10f4df..30853bf0ff 100644
--- a/js/src/jit-test/tests/gc/bug1471949.js
+++ b/js/src/jit-test/tests/gc/bug1471949.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| allow-oom
gczeal(15);
oomAfterAllocations(5);
diff --git a/js/src/jit-test/tests/gc/bug1704451.js b/js/src/jit-test/tests/gc/bug1704451.js
index d4b4d14995..426506e9fc 100644
--- a/js/src/jit-test/tests/gc/bug1704451.js
+++ b/js/src/jit-test/tests/gc/bug1704451.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('gczeal' in this)
-
enableShellAllocationMetadataBuilder();
gczeal(9,1);
var o86 = {x76: 1, y86: 2};
diff --git a/js/src/jit-test/tests/gc/finalizationRegistry-oom1.js b/js/src/jit-test/tests/gc/finalizationRegistry-oom1.js
index 753448a650..83295bb702 100644
--- a/js/src/jit-test/tests/gc/finalizationRegistry-oom1.js
+++ b/js/src/jit-test/tests/gc/finalizationRegistry-oom1.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
// Don't test prototype initialization etc.
new FinalizationRegistry(x => 0);
diff --git a/js/src/jit-test/tests/gc/finalizationRegistry-oom2.js b/js/src/jit-test/tests/gc/finalizationRegistry-oom2.js
index 9d9b2a7db8..c63307dfb0 100644
--- a/js/src/jit-test/tests/gc/finalizationRegistry-oom2.js
+++ b/js/src/jit-test/tests/gc/finalizationRegistry-oom2.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
let registry = new FinalizationRegistry(x => 0);
let token = {};
oomTest(() => registry.register({}, 1, token));
diff --git a/js/src/jit-test/tests/gc/finalizationRegistry-oom3.js b/js/src/jit-test/tests/gc/finalizationRegistry-oom3.js
index d606ad8ba8..bcde1cc6c6 100644
--- a/js/src/jit-test/tests/gc/finalizationRegistry-oom3.js
+++ b/js/src/jit-test/tests/gc/finalizationRegistry-oom3.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
let registry = new FinalizationRegistry(x => 0);
registry.register({}, 1, {});
let token = {};
diff --git a/js/src/jit-test/tests/gc/finalizationRegistry-oom4.js b/js/src/jit-test/tests/gc/finalizationRegistry-oom4.js
index 4b7ef66ba0..03defb8558 100644
--- a/js/src/jit-test/tests/gc/finalizationRegistry-oom4.js
+++ b/js/src/jit-test/tests/gc/finalizationRegistry-oom4.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
let registry = new FinalizationRegistry(x => 0);
let target = {};
let token = {};
diff --git a/js/src/jit-test/tests/gc/gcparam.js b/js/src/jit-test/tests/gc/gcparam.js
index f1077e335f..05e0359088 100644
--- a/js/src/jit-test/tests/gc/gcparam.js
+++ b/js/src/jit-test/tests/gc/gcparam.js
@@ -52,12 +52,11 @@ testChangeParam("compactingEnabled");
testChangeParam("parallelMarkingEnabled");
testChangeParam("parallelMarkingThresholdMB");
testChangeParam("minLastDitchGCPeriod");
-testChangeParam("nurseryFreeThresholdForIdleCollection");
-testChangeParam("nurseryFreeThresholdForIdleCollectionPercent");
-testChangeParam("nurseryTimeoutForIdleCollectionMS");
+testChangeParam("nurseryEagerCollectionThresholdKB");
+testChangeParam("nurseryEagerCollectionThresholdPercent");
+testChangeParam("nurseryEagerCollectionTimeoutMS");
testChangeParam("zoneAllocDelayKB");
testChangeParam("mallocThresholdBase");
testChangeParam("urgentThreshold");
-testChangeParam("nurseryTimeoutForIdleCollectionMS");
testChangeParam("helperThreadRatio");
testChangeParam("maxHelperThreads");
diff --git a/js/src/jit-test/tests/gc/incremental-compacting.js b/js/src/jit-test/tests/gc/incremental-compacting.js
index f051cf60ea..c60ec28722 100644
--- a/js/src/jit-test/tests/gc/incremental-compacting.js
+++ b/js/src/jit-test/tests/gc/incremental-compacting.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !("gcstate" in this && "gczeal" in this)
-
// Exercise incremental compacting GC
// Run with MOZ_GCTIMER to see the timings
diff --git a/js/src/jit-test/tests/gc/oomInArrayProtoTest.js b/js/src/jit-test/tests/gc/oomInArrayProtoTest.js
index b99669d92d..3e3365cde5 100644
--- a/js/src/jit-test/tests/gc/oomInArrayProtoTest.js
+++ b/js/src/jit-test/tests/gc/oomInArrayProtoTest.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
function arrayProtoOutOfRange() {
function f(obj) {
return typeof obj[15];
diff --git a/js/src/jit-test/tests/gc/oomInByteSize.js b/js/src/jit-test/tests/gc/oomInByteSize.js
index 9566b9cb49..5f9aa0059a 100644
--- a/js/src/jit-test/tests/gc/oomInByteSize.js
+++ b/js/src/jit-test/tests/gc/oomInByteSize.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => byteSize({}));
oomTest(() => byteSize({ w: 1, x: 2, y: 3 }));
oomTest(() => byteSize({ w:1, x:2, y:3, z:4, a:6, 0:0, 1:1, 2:2 }));
diff --git a/js/src/jit-test/tests/gc/oomInDebugger.js b/js/src/jit-test/tests/gc/oomInDebugger.js
index c1904f573a..959f2f7f4f 100644
--- a/js/src/jit-test/tests/gc/oomInDebugger.js
+++ b/js/src/jit-test/tests/gc/oomInDebugger.js
@@ -1,4 +1,2 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var g = newGlobal();
oomTest(() => Debugger(g));
diff --git a/js/src/jit-test/tests/gc/oomInDtoa.js b/js/src/jit-test/tests/gc/oomInDtoa.js
index 83ded51cbb..6f37137e51 100644
--- a/js/src/jit-test/tests/gc/oomInDtoa.js
+++ b/js/src/jit-test/tests/gc/oomInDtoa.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(function() { return 1e300; })
diff --git a/js/src/jit-test/tests/gc/oomInExceptionHandlerBailout.js b/js/src/jit-test/tests/gc/oomInExceptionHandlerBailout.js
index d5c8f29b27..cdd20cd9b7 100644
--- a/js/src/jit-test/tests/gc/oomInExceptionHandlerBailout.js
+++ b/js/src/jit-test/tests/gc/oomInExceptionHandlerBailout.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
oomTest(() => {
let x = 0;
try {
diff --git a/js/src/jit-test/tests/gc/oomInFindPath.js b/js/src/jit-test/tests/gc/oomInFindPath.js
index 4b3d95688c..cacf587699 100644
--- a/js/src/jit-test/tests/gc/oomInFindPath.js
+++ b/js/src/jit-test/tests/gc/oomInFindPath.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var o = { w: { x: { y: { z: {} } } } };
oomTest(() => findPath(o, o.w.x.y.z));
diff --git a/js/src/jit-test/tests/gc/oomInFormatStackDump.js b/js/src/jit-test/tests/gc/oomInFormatStackDump.js
index ce68e47f9e..59e4921f93 100644
--- a/js/src/jit-test/tests/gc/oomInFormatStackDump.js
+++ b/js/src/jit-test/tests/gc/oomInFormatStackDump.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => getBacktrace({args: true, locals: true, thisprops: true}));
diff --git a/js/src/jit-test/tests/gc/oomInGetJumpLabelForBranch.js b/js/src/jit-test/tests/gc/oomInGetJumpLabelForBranch.js
index a568fc592f..8415c9ac6c 100644
--- a/js/src/jit-test/tests/gc/oomInGetJumpLabelForBranch.js
+++ b/js/src/jit-test/tests/gc/oomInGetJumpLabelForBranch.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => getBacktrace({thisprops: gc() && delete addDebuggee.enabled}));
diff --git a/js/src/jit-test/tests/gc/oomInNewGlobal.js b/js/src/jit-test/tests/gc/oomInNewGlobal.js
index c45737e143..92e8385063 100644
--- a/js/src/jit-test/tests/gc/oomInNewGlobal.js
+++ b/js/src/jit-test/tests/gc/oomInNewGlobal.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(newGlobal);
diff --git a/js/src/jit-test/tests/gc/oomInOffTheadCompile.js b/js/src/jit-test/tests/gc/oomInOffTheadCompile.js
index d4e0d4135e..65a2fe443c 100644
--- a/js/src/jit-test/tests/gc/oomInOffTheadCompile.js
+++ b/js/src/jit-test/tests/gc/oomInOffTheadCompile.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this) || helperThreadCount() === 0
+// |jit-test| skip-if: helperThreadCount() === 0
oomTest(() => {
offThreadCompileToStencil(
diff --git a/js/src/jit-test/tests/gc/oomInOffTheadCompile2.js b/js/src/jit-test/tests/gc/oomInOffTheadCompile2.js
index 1cac5ee859..de4ad4106d 100644
--- a/js/src/jit-test/tests/gc/oomInOffTheadCompile2.js
+++ b/js/src/jit-test/tests/gc/oomInOffTheadCompile2.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this) || helperThreadCount() === 0
+// |jit-test| skip-if: helperThreadCount() === 0
oomTest(() => {
offThreadCompileToStencil("function a(x) {");
diff --git a/js/src/jit-test/tests/gc/oomInOffTheadCompile3.js b/js/src/jit-test/tests/gc/oomInOffTheadCompile3.js
index 6535676b72..5e0816f575 100644
--- a/js/src/jit-test/tests/gc/oomInOffTheadCompile3.js
+++ b/js/src/jit-test/tests/gc/oomInOffTheadCompile3.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this) || helperThreadCount() === 0
+// |jit-test| skip-if: helperThreadCount() === 0
oomTest(() => {
offThreadCompileToStencil(`
diff --git a/js/src/jit-test/tests/gc/oomInParseAsmJS.js b/js/src/jit-test/tests/gc/oomInParseAsmJS.js
index 72216e1c2c..e2d8441897 100644
--- a/js/src/jit-test/tests/gc/oomInParseAsmJS.js
+++ b/js/src/jit-test/tests/gc/oomInParseAsmJS.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
function parseAsmJS() {
eval(`function m(stdlib)
{
diff --git a/js/src/jit-test/tests/gc/oomInParseFunction.js b/js/src/jit-test/tests/gc/oomInParseFunction.js
index b1c1bd6297..c7471a7e60 100644
--- a/js/src/jit-test/tests/gc/oomInParseFunction.js
+++ b/js/src/jit-test/tests/gc/oomInParseFunction.js
@@ -1,3 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => eval("function f() {}"));
diff --git a/js/src/jit-test/tests/gc/oomInRegExp.js b/js/src/jit-test/tests/gc/oomInRegExp.js
index b58f0ac50d..6564fda35b 100644
--- a/js/src/jit-test/tests/gc/oomInRegExp.js
+++ b/js/src/jit-test/tests/gc/oomInRegExp.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => assertEq("foobar\xff5baz\u1200".search(/bar\u0178\d/i), 3));
oomTest(() => assertEq((/(?!(?!(?!6)[\Wc]))/i).test(), false));
oomTest(() => assertEq((/bar\u0178\d/i).exec("foobar\xff5baz\u1200") != null, true));
diff --git a/js/src/jit-test/tests/gc/oomInRegExp2.js b/js/src/jit-test/tests/gc/oomInRegExp2.js
index c35075b375..8783435424 100644
--- a/js/src/jit-test/tests/gc/oomInRegExp2.js
+++ b/js/src/jit-test/tests/gc/oomInRegExp2.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => assertEq("foobar\xff5baz\u1200".search(/bar\u0178\d/i), 3), {keepFailing: true});
oomTest(() => assertEq((/(?!(?!(?!6)[\Wc]))/i).test(), false), {keepFailing: true});
oomTest(() => assertEq((/bar\u0178\d/i).exec("foobar\xff5baz\u1200") != null, true), {keepFailing: true});
diff --git a/js/src/jit-test/tests/gc/oomInWeakMap.js b/js/src/jit-test/tests/gc/oomInWeakMap.js
index 522dc24738..0992907ed7 100644
--- a/js/src/jit-test/tests/gc/oomInWeakMap.js
+++ b/js/src/jit-test/tests/gc/oomInWeakMap.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(function () {
eval(`var wm = new WeakMap();
wm.set({}, 'FOO').get(false);`);
diff --git a/js/src/jit-test/tests/generators/bug1501722.js b/js/src/jit-test/tests/generators/bug1501722.js
index 9ff2724a9f..9ff5df1792 100644
--- a/js/src/jit-test/tests/generators/bug1501722.js
+++ b/js/src/jit-test/tests/generators/bug1501722.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
ignoreUnhandledRejections();
(function () {
diff --git a/js/src/jit-test/tests/ion/array-push-multiple-frozen.js b/js/src/jit-test/tests/ion/array-push-multiple-frozen.js
index 271f6cb07a..da2c7a9262 100644
--- a/js/src/jit-test/tests/ion/array-push-multiple-frozen.js
+++ b/js/src/jit-test/tests/ion/array-push-multiple-frozen.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-threads; skip-if: !('oomAtAllocation' in this)
+// |jit-test| --no-threads
// This test case check's Ion ability to recover from an allocation failure in
// the inlining of Array.prototype.push, when given multiple arguments. Note,
diff --git a/js/src/jit-test/tests/ion/array-push-multiple-with-funapply.js b/js/src/jit-test/tests/ion/array-push-multiple-with-funapply.js
index b09fa9c440..0d3f174d9d 100644
--- a/js/src/jit-test/tests/ion/array-push-multiple-with-funapply.js
+++ b/js/src/jit-test/tests/ion/array-push-multiple-with-funapply.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-threads; skip-if: !('oomAtAllocation' in this)
+// |jit-test| --no-threads
// This test case check's Ion ability to inline Array.prototype.push, when
// fun.apply is used and inlined with the set of arguments of the current
diff --git a/js/src/jit-test/tests/ion/array-push-multiple.js b/js/src/jit-test/tests/ion/array-push-multiple.js
index 19c1a93e70..8287d07c94 100644
--- a/js/src/jit-test/tests/ion/array-push-multiple.js
+++ b/js/src/jit-test/tests/ion/array-push-multiple.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-threads; skip-if: !('oomAtAllocation' in this)
+// |jit-test| --no-threads
// This test case check's Ion ability to recover from an allocation failure in
// the inlining of Array.prototype.push, when given multiple arguments. Note,
diff --git a/js/src/jit-test/tests/ion/bailout-oom-01.js b/js/src/jit-test/tests/ion/bailout-oom-01.js
index b56b323774..0408f44e98 100644
--- a/js/src/jit-test/tests/ion/bailout-oom-01.js
+++ b/js/src/jit-test/tests/ion/bailout-oom-01.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-threads; --fast-warmup; skip-if: !('oomTest' in this)
+// |jit-test| --no-threads; --fast-warmup
setJitCompilerOption("ion.warmup.trigger", 20);
gczeal(0);
diff --git a/js/src/jit-test/tests/ion/bug1207413.js b/js/src/jit-test/tests/ion/bug1207413.js
index aedb8ece30..e2d5652b33 100644
--- a/js/src/jit-test/tests/ion/bug1207413.js
+++ b/js/src/jit-test/tests/ion/bug1207413.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomAfterAllocations' in this)
-
function first(a) {
return a[0];
}
diff --git a/js/src/jit-test/tests/ion/bug1216157.js b/js/src/jit-test/tests/ion/bug1216157.js
index 1ec9497e40..01a654502b 100644
--- a/js/src/jit-test/tests/ion/bug1216157.js
+++ b/js/src/jit-test/tests/ion/bug1216157.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomAfterAllocations' in this); allow-oom
+// |jit-test| allow-oom
gcslice(0); // Start IGC, but don't mark anything.
function f(str) {
diff --git a/js/src/jit-test/tests/ion/bug1233331.js b/js/src/jit-test/tests/ion/bug1233331.js
index 11b8faafd1..b181dfb856 100644
--- a/js/src/jit-test/tests/ion/bug1233331.js
+++ b/js/src/jit-test/tests/ion/bug1233331.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
x = 0;
try {
a;
diff --git a/js/src/jit-test/tests/ion/bug1240521.js b/js/src/jit-test/tests/ion/bug1240521.js
index 5955705b7a..3160be7a7d 100644
--- a/js/src/jit-test/tests/ion/bug1240521.js
+++ b/js/src/jit-test/tests/ion/bug1240521.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomAfterAllocations' in this)
+// |jit-test| allow-oom
var egc = 138;
function SwitchTest(value) {
diff --git a/js/src/jit-test/tests/ion/bug1269756.js b/js/src/jit-test/tests/ion/bug1269756.js
index 659805f16c..1e201f18a4 100644
--- a/js/src/jit-test/tests/ion/bug1269756.js
+++ b/js/src/jit-test/tests/ion/bug1269756.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(function() {
m = parseModule(`while (x && NaN) prototype; let x`);
moduleLink(m);
diff --git a/js/src/jit-test/tests/ion/bug1284491.js b/js/src/jit-test/tests/ion/bug1284491.js
index eb8f15619b..f061536bb1 100644
--- a/js/src/jit-test/tests/ion/bug1284491.js
+++ b/js/src/jit-test/tests/ion/bug1284491.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
loadFile(`
function SwitchTest(){
switch(value) {
diff --git a/js/src/jit-test/tests/ion/bug1394505.js b/js/src/jit-test/tests/ion/bug1394505.js
index 84979c046c..49c24d7ec6 100644
--- a/js/src/jit-test/tests/ion/bug1394505.js
+++ b/js/src/jit-test/tests/ion/bug1394505.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: helperThreadCount() === 0 || !('oomTest' in this)
+// |jit-test| skip-if: helperThreadCount() === 0
for (let j = 0; j < 50; j++) {
if (j === 1)
diff --git a/js/src/jit-test/tests/ion/bug1479394.js b/js/src/jit-test/tests/ion/bug1479394.js
index ef33e35b20..7123a280d9 100644
--- a/js/src/jit-test/tests/ion/bug1479394.js
+++ b/js/src/jit-test/tests/ion/bug1479394.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('stackTest' in this)
var dbgGlobal = newGlobal({newCompartment: true});
var dbg = new dbgGlobal.Debugger(this);
function f1() {
diff --git a/js/src/jit-test/tests/ion/bug1492574.js b/js/src/jit-test/tests/ion/bug1492574.js
index c1ce0c000a..070d69eaf7 100644
--- a/js/src/jit-test/tests/ion/bug1492574.js
+++ b/js/src/jit-test/tests/ion/bug1492574.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
function foo() {}
function foooooooooooooooooooooooooooooooo() {}
function fn(s) {
diff --git a/js/src/jit-test/tests/ion/bug1568397.js b/js/src/jit-test/tests/ion/bug1568397.js
index c03bb0283d..68fd83a226 100644
--- a/js/src/jit-test/tests/ion/bug1568397.js
+++ b/js/src/jit-test/tests/ion/bug1568397.js
@@ -1,4 +1,4 @@
-// |jit-test| error:TypeError: can't access property
+// |jit-test| --setpref=property_error_message_fix=true; error:TypeError: can't access property "x"
let obj = {x: 1};
obj.x = 1.1;
diff --git a/js/src/jit-test/tests/ion/bug1877357.js b/js/src/jit-test/tests/ion/bug1877357.js
new file mode 100644
index 0000000000..b1e0d9c686
--- /dev/null
+++ b/js/src/jit-test/tests/ion/bug1877357.js
@@ -0,0 +1,17 @@
+function f(x) {
+ let y = x | 0;
+ z = 1;
+ Function((x | 0 ? 1 : 9999999999) ? (z ? y : 9999999999) : 1);
+}
+f(1);
+f(1);
+f(1);
+f(1);
+f(1);
+f(1);
+f(1);
+f(1);
+f(9999999999);
+f(1);
+f();
+f(9007199254740993);
diff --git a/js/src/jit-test/tests/ion/bug1877709.js b/js/src/jit-test/tests/ion/bug1877709.js
new file mode 100644
index 0000000000..1dac277a90
--- /dev/null
+++ b/js/src/jit-test/tests/ion/bug1877709.js
@@ -0,0 +1,22 @@
+function testMathyFunction (f, inputs) {
+ var results = [];
+ for (var j = 0; j < inputs.length; ++j)
+ for (var k = 0; k < inputs.length; ++k)
+ results.push(f(inputs[j], inputs[k]));
+}
+mathy1=(function(stdlib,foreign,heap){
+ ff = foreign.ff;
+ Float32ArrayView = new stdlib.Float32Array(heap);
+ Uint32ArrayView = new stdlib.Uint32Array(heap);
+ function f(d0) {
+ var i2=0;
+ var i4;
+ i2=Float32ArrayView[2];
+ i4=i2;
+ ff(2,0) ? f : 6;
+ Uint32ArrayView[!!d0] + [...[eval]]
+ return i4 ? 1 : 0;
+ }
+return f
+})(this,{ ff:(Function('')) },new SharedArrayBuffer(40));
+testMathyFunction(mathy1,[Math.N,Number.M,(2),Number.M])
diff --git a/js/src/jit-test/tests/ion/dce-with-rinstructions.js b/js/src/jit-test/tests/ion/dce-with-rinstructions.js
index 481a1279ec..5fd530fde4 100644
--- a/js/src/jit-test/tests/ion/dce-with-rinstructions.js
+++ b/js/src/jit-test/tests/ion/dce-with-rinstructions.js
@@ -1983,6 +1983,26 @@ function rnantozero_negzero(i) {
return i;
}
+let uceFault_ratomicsislockfree_true = eval(`(${uceFault})`.replace('uceFault', 'uceFault_ratomicsislockfree_true'));
+function ratomicsislockfree_true(i) {
+ var x = [1, 2, 4, 8][i & 3];
+ var y = Atomics.isLockFree(x);
+ if (uceFault_ratomicsislockfree_true(i) || uceFault_ratomicsislockfree_true(i))
+ assertEq(y, true);
+ assertRecoveredOnBailout(y, true);
+ return i;
+}
+
+let uceFault_ratomicsislockfree_false = eval(`(${uceFault})`.replace('uceFault', 'uceFault_ratomicsislockfree_false'));
+function ratomicsislockfree_false(i) {
+ var x = [-1, 0, 3, 1000][i & 3];
+ var y = Atomics.isLockFree(x);
+ if (uceFault_ratomicsislockfree_false(i) || uceFault_ratomicsislockfree_false(i))
+ assertEq(y, false);
+ assertRecoveredOnBailout(y, true);
+ return i;
+}
+
for (j = 100 - max; j < 100; j++) {
with({}){} // Do not Ion-compile this loop.
let i = j < 2 ? (Math.abs(j) % 50) + 2 : j;
@@ -2184,6 +2204,8 @@ for (j = 100 - max; j < 100; j++) {
rnantozero_nan(i);
rnantozero_poszero(i);
rnantozero_negzero(i);
+ ratomicsislockfree_true(i);
+ ratomicsislockfree_false(i);
}
// Test that we can refer multiple time to the same recover instruction, as well
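The two recover-instruction cases added above rely on Atomics.isLockFree. As a brief reference sketch of the behaviour the test asserts (this engine treats 1, 2, 4 and 8 byte accesses as lock-free; the language spec itself only guarantees the 4-byte case):

// Sizes the new test cases expect to be lock-free:
[1, 2, 4, 8].every(n => Atomics.isLockFree(n));    // true
// Any other size (negative, zero, odd, oversized) is not:
[-1, 0, 3, 1000].some(n => Atomics.isLockFree(n)); // false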
diff --git a/js/src/jit-test/tests/ion/scalar-replacement-oom.js b/js/src/jit-test/tests/ion/scalar-replacement-oom.js
index 32a3a2e5d5..19d0b026e7 100644
--- a/js/src/jit-test/tests/ion/scalar-replacement-oom.js
+++ b/js/src/jit-test/tests/ion/scalar-replacement-oom.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomAtAllocation' in this)
-
var lfcode = new Array();
function k(a, f_arg, b, c) {
for (var i = 0; i < 5; ++i) {
diff --git a/js/src/jit-test/tests/modules/bug-1219044.js b/js/src/jit-test/tests/modules/bug-1219044.js
index 3917d7ca9c..bcddf8fb95 100644
--- a/js/src/jit-test/tests/modules/bug-1219044.js
+++ b/js/src/jit-test/tests/modules/bug-1219044.js
@@ -1,4 +1,2 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => parseModule('import v from "mod";'));
fullcompartmentchecks(true);
diff --git a/js/src/jit-test/tests/modules/bug-1402535.js b/js/src/jit-test/tests/modules/bug-1402535.js
index f8cf878260..2ff8e3716b 100644
--- a/js/src/jit-test/tests/modules/bug-1402535.js
+++ b/js/src/jit-test/tests/modules/bug-1402535.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('stackTest' in this)
-
stackTest(function() {
let m = parseModule(``);
moduleLink(m);
diff --git a/js/src/jit-test/tests/modules/bug-1402649.js b/js/src/jit-test/tests/modules/bug-1402649.js
index 559a91a98f..8199cbeb31 100644
--- a/js/src/jit-test/tests/modules/bug-1402649.js
+++ b/js/src/jit-test/tests/modules/bug-1402649.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
loadFile(`
function parseAndEvaluate(source) {
let m = parseModule(source);
diff --git a/js/src/jit-test/tests/modules/bug-1420420-3.js b/js/src/jit-test/tests/modules/bug-1420420-3.js
index 508afa41b1..5ccbf536ca 100644
--- a/js/src/jit-test/tests/modules/bug-1420420-3.js
+++ b/js/src/jit-test/tests/modules/bug-1420420-3.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('stackTest' in this)
-
let a = parseModule(`throw new Error`);
moduleLink(a);
stackTest(function() {
diff --git a/js/src/jit-test/tests/modules/bug-1435327.js b/js/src/jit-test/tests/modules/bug-1435327.js
index bb9f18a220..5edb47bedd 100644
--- a/js/src/jit-test/tests/modules/bug-1435327.js
+++ b/js/src/jit-test/tests/modules/bug-1435327.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
lfLogBuffer = `
let c = registerModule('c', parseModule(""));
let d = registerModule('d', parseModule("import { a } from 'c'; a;"));
diff --git a/js/src/jit-test/tests/modules/bug-1771090.js b/js/src/jit-test/tests/modules/bug-1771090.js
index cb5a5a2bc6..ffc73f93f2 100644
--- a/js/src/jit-test/tests/modules/bug-1771090.js
+++ b/js/src/jit-test/tests/modules/bug-1771090.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomAfterAllocations' in this)
+// |jit-test| skip-if: !hasFunction.oomAfterAllocations
asyncFunc1("geval0\n await ''")
async function asyncFunc1(lfVarx) {
diff --git a/js/src/jit-test/tests/modules/bug-1802479.js b/js/src/jit-test/tests/modules/bug-1802479.js
index c00d0e0da6..fd6ca259da 100644
--- a/js/src/jit-test/tests/modules/bug-1802479.js
+++ b/js/src/jit-test/tests/modules/bug-1802479.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this); slow
-
function test(lfVarx) {
try {
oomTest(function() {
diff --git a/js/src/jit-test/tests/modules/bug1670236.js b/js/src/jit-test/tests/modules/bug1670236.js
index 35192c2b58..9237b4c1b2 100644
--- a/js/src/jit-test/tests/modules/bug1670236.js
+++ b/js/src/jit-test/tests/modules/bug1670236.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
o0=r=/x/;
this.toString=(function() {
evaluate("",({ element:o0 }));
diff --git a/js/src/jit-test/tests/modules/bug1685992.js b/js/src/jit-test/tests/modules/bug1685992.js
index 39fd4ce7fd..d1672e18f9 100644
--- a/js/src/jit-test/tests/modules/bug1685992.js
+++ b/js/src/jit-test/tests/modules/bug1685992.js
@@ -1,4 +1,4 @@
-// |jit-test| --ion-offthread-compile=off; skip-if: !('oomTest' in this)
+// |jit-test| --ion-offthread-compile=off
function oomModule(lfMod) {
oomTest(function () {
@@ -9,4 +9,4 @@ oomModule(`
class B50 {
#priv() {}
}
-`) \ No newline at end of file
+`)
diff --git a/js/src/jit-test/tests/modules/bug1846247.js b/js/src/jit-test/tests/modules/bug1846247.js
index 4d63f0cfb3..d7dcd61132 100644
--- a/js/src/jit-test/tests/modules/bug1846247.js
+++ b/js/src/jit-test/tests/modules/bug1846247.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this); allow-unhandlable-oom
+// |jit-test| allow-unhandlable-oom
ignoreUnhandledRejections();
oomTest(() => {
gc();
diff --git a/js/src/jit-test/tests/modules/dynamic-import-oom.js b/js/src/jit-test/tests/modules/dynamic-import-oom.js
index 9682c7560a..561ccf761e 100644
--- a/js/src/jit-test/tests/modules/dynamic-import-oom.js
+++ b/js/src/jit-test/tests/modules/dynamic-import-oom.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this); --ion-offthread-compile=off
+// |jit-test| --ion-offthread-compile=off
//
// Note: without --ion-offthread-compile=off this test takes a long time and
// may timeout on some platforms. See bug 1507721.
diff --git a/js/src/jit-test/tests/modules/eval-module-oom.js b/js/src/jit-test/tests/modules/eval-module-oom.js
index 5587670735..ad5564e5c8 100644
--- a/js/src/jit-test/tests/modules/eval-module-oom.js
+++ b/js/src/jit-test/tests/modules/eval-module-oom.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
// OOM tests for module parsing.
const sa =
diff --git a/js/src/jit-test/tests/modules/import-meta-oom.js b/js/src/jit-test/tests/modules/import-meta-oom.js
index 168f1102a1..a3856e90eb 100644
--- a/js/src/jit-test/tests/modules/import-meta-oom.js
+++ b/js/src/jit-test/tests/modules/import-meta-oom.js
@@ -1,3 +1,3 @@
-// |jit-test| module; skip-if: !('oomTest' in this)
+// |jit-test| module
oomTest(() => import.meta);
diff --git a/js/src/jit-test/tests/modules/offthread-oom.js b/js/src/jit-test/tests/modules/offthread-oom.js
index 97b783c1ba..0af7d745c3 100644
--- a/js/src/jit-test/tests/modules/offthread-oom.js
+++ b/js/src/jit-test/tests/modules/offthread-oom.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this) || helperThreadCount() === 0
+// |jit-test| skip-if: helperThreadCount() === 0
// Test Out-of-Memory handling when parsing modules off-thread
diff --git a/js/src/jit-test/tests/parser/bug-1263355-44.js b/js/src/jit-test/tests/parser/bug-1263355-44.js
index c2de49fd49..a10366ea49 100644
--- a/js/src/jit-test/tests/parser/bug-1263355-44.js
+++ b/js/src/jit-test/tests/parser/bug-1263355-44.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
// Adapted from randomly chosen test: js/src/jit-test/tests/profiler/bug1231925.js
"use strict";
enableGeckoProfiling();
diff --git a/js/src/jit-test/tests/parser/bug-1324773-2.js b/js/src/jit-test/tests/parser/bug-1324773-2.js
index bf485ee602..a95012b72a 100644
--- a/js/src/jit-test/tests/parser/bug-1324773-2.js
+++ b/js/src/jit-test/tests/parser/bug-1324773-2.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('gczeal' in this)
-
var lfGlobal = newGlobal();
lfGlobal.evaluate(`
for (var i = 0; i < 600; i++)
diff --git a/js/src/jit-test/tests/parser/bug-1324773.js b/js/src/jit-test/tests/parser/bug-1324773.js
index 1ab1a3fb9a..3cb86fe944 100644
--- a/js/src/jit-test/tests/parser/bug-1324773.js
+++ b/js/src/jit-test/tests/parser/bug-1324773.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('gczeal' in this)
-
var lfGlobal = newGlobal();
lfGlobal.evaluate(`
for (var i = 0; i < 600; i++)
diff --git a/js/src/jit-test/tests/parser/bug-1433014.js b/js/src/jit-test/tests/parser/bug-1433014.js
index efb8ab98d3..7787862f6c 100644
--- a/js/src/jit-test/tests/parser/bug-1433014.js
+++ b/js/src/jit-test/tests/parser/bug-1433014.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: helperThreadCount() === 0 || !('oomTest' in this)
+// |jit-test| skip-if: helperThreadCount() === 0
evaluate(`
oomTest(() => {
offThreadCompileToStencil("");
diff --git a/js/src/jit-test/tests/parser/bug-1576865-1.js b/js/src/jit-test/tests/parser/bug-1576865-1.js
index f31539b8ea..6becf49905 100644
--- a/js/src/jit-test/tests/parser/bug-1576865-1.js
+++ b/js/src/jit-test/tests/parser/bug-1576865-1.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
var sourceText = `
function Outer() {
var X00, X01, X02, X03, X04, X05, X06, X07;
diff --git a/js/src/jit-test/tests/parser/bug-1576865-2.js b/js/src/jit-test/tests/parser/bug-1576865-2.js
index d053c24728..4e2b02bfe9 100644
--- a/js/src/jit-test/tests/parser/bug-1576865-2.js
+++ b/js/src/jit-test/tests/parser/bug-1576865-2.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
var sourceText = `
function Outer() {
function LazyFunction() {
diff --git a/js/src/jit-test/tests/parser/bug-1662260.js b/js/src/jit-test/tests/parser/bug-1662260.js
index 235737657e..82fc7056a0 100644
--- a/js/src/jit-test/tests/parser/bug-1662260.js
+++ b/js/src/jit-test/tests/parser/bug-1662260.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
function loadX(lfVarx) {
oomTest(function() {
let m55 = parseModule(lfVarx);
diff --git a/js/src/jit-test/tests/parser/bug-1764737.js b/js/src/jit-test/tests/parser/bug-1764737.js
index 0fcc39e276..66d57b0dcc 100644
--- a/js/src/jit-test/tests/parser/bug-1764737.js
+++ b/js/src/jit-test/tests/parser/bug-1764737.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this); --fuzzing-safe; --ion-offthread-compile=off
+// |jit-test| --fuzzing-safe; --ion-offthread-compile=off
function r(src) {
oomTest(function() {
diff --git a/js/src/jit-test/tests/parser/bug1461034.js b/js/src/jit-test/tests/parser/bug1461034.js
index 84d6ae88ca..1b7798a703 100644
--- a/js/src/jit-test/tests/parser/bug1461034.js
+++ b/js/src/jit-test/tests/parser/bug1461034.js
@@ -1,2 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
oomTest(function(){s[-1]});
diff --git a/js/src/jit-test/tests/parser/bug1547655.js b/js/src/jit-test/tests/parser/bug1547655.js
index 540e011d9a..7a84d55b89 100644
--- a/js/src/jit-test/tests/parser/bug1547655.js
+++ b/js/src/jit-test/tests/parser/bug1547655.js
@@ -1,2 +1,2 @@
-// |jit-test| allow-unhandlable-oom; allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-unhandlable-oom; allow-oom
oomTest(() => evaluate(`meta: { with({}) {} }`));
diff --git a/js/src/jit-test/tests/parser/bug1661454.js b/js/src/jit-test/tests/parser/bug1661454.js
index ca7792f4bc..7692431039 100644
--- a/js/src/jit-test/tests/parser/bug1661454.js
+++ b/js/src/jit-test/tests/parser/bug1661454.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
function oomTestEval(lfVarx) {
oomTest(() => eval(lfVarx));
}
diff --git a/js/src/jit-test/tests/parser/bug1764715.js b/js/src/jit-test/tests/parser/bug1764715.js
index b203925aa8..6fe1a19451 100644
--- a/js/src/jit-test/tests/parser/bug1764715.js
+++ b/js/src/jit-test/tests/parser/bug1764715.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
oomTest(function() {
let m = parseModule(`x = a?.b; x = a?.b; x = a?.b;`);
});
diff --git a/js/src/jit-test/tests/parser/bug1835785.js b/js/src/jit-test/tests/parser/bug1835785.js
index 25317757ec..ea5b642f20 100644
--- a/js/src/jit-test/tests/parser/bug1835785.js
+++ b/js/src/jit-test/tests/parser/bug1835785.js
@@ -1,4 +1,4 @@
-// |jit-test| allow-unhandlable-oom; allow-oom; skip-if: !('oomAtAllocation' in this)
+// |jit-test| allow-unhandlable-oom; allow-oom; skip-if: !hasFunction.oomAtAllocation
function main() {
this
oomAtAllocation(7);
diff --git a/js/src/jit-test/tests/parser/compile-script.js b/js/src/jit-test/tests/parser/compile-script.js
index 293d25632e..c752f0b518 100644
--- a/js/src/jit-test/tests/parser/compile-script.js
+++ b/js/src/jit-test/tests/parser/compile-script.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
load(libdir + "asserts.js");
let stencil = compileToStencil('314;');
diff --git a/js/src/jit-test/tests/parser/off_thread_compile_oom.js b/js/src/jit-test/tests/parser/off_thread_compile_oom.js
index 5a8e32eb37..e9fb4c677d 100644
--- a/js/src/jit-test/tests/parser/off_thread_compile_oom.js
+++ b/js/src/jit-test/tests/parser/off_thread_compile_oom.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this) || helperThreadCount() === 0
+// |jit-test| skip-if: helperThreadCount() === 0
// OOM during off-thread initialization shouldn't leak memory.
eval('oomTest(function(){offThreadCompileToStencil("")})');
diff --git a/js/src/jit-test/tests/parser/warning-oom.js b/js/src/jit-test/tests/parser/warning-oom.js
index baf91cb9c9..d784c642d4 100644
--- a/js/src/jit-test/tests/parser/warning-oom.js
+++ b/js/src/jit-test/tests/parser/warning-oom.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
// OOM during reporting warning should be handled.
oomTest(function(){
diff --git a/js/src/jit-test/tests/profiler/bug1211962.js b/js/src/jit-test/tests/profiler/bug1211962.js
index d47d823ff2..6803888e36 100644
--- a/js/src/jit-test/tests/profiler/bug1211962.js
+++ b/js/src/jit-test/tests/profiler/bug1211962.js
@@ -1,4 +1,4 @@
-// |jit-test| slow; skip-if: !('oomTest' in this) || helperThreadCount() === 0
+// |jit-test| slow; skip-if: helperThreadCount() === 0
enableGeckoProfiling();
var lfGlobal = newGlobal();
diff --git a/js/src/jit-test/tests/profiler/bug1231925.js b/js/src/jit-test/tests/profiler/bug1231925.js
index 87325b6763..0e0011feb6 100644
--- a/js/src/jit-test/tests/profiler/bug1231925.js
+++ b/js/src/jit-test/tests/profiler/bug1231925.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
enableGeckoProfiling();
oomTest(function() {
eval("(function() {})()")
diff --git a/js/src/jit-test/tests/profiler/bug1242840.js b/js/src/jit-test/tests/profiler/bug1242840.js
index 8770403409..4c2f72024f 100644
--- a/js/src/jit-test/tests/profiler/bug1242840.js
+++ b/js/src/jit-test/tests/profiler/bug1242840.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
enableGeckoProfiling();
oomTest(() => {
try {
diff --git a/js/src/jit-test/tests/profiler/bug1563889.js b/js/src/jit-test/tests/profiler/bug1563889.js
index c8f9776ada..64a4c56423 100644
--- a/js/src/jit-test/tests/profiler/bug1563889.js
+++ b/js/src/jit-test/tests/profiler/bug1563889.js
@@ -1,3 +1,2 @@
-// |jit-test| skip-if: !('oomTest' in this)
for (var i = 0; i < 20; i++) {}
oomTest(enableGeckoProfiling);
diff --git a/js/src/jit-test/tests/promise/unhandled-rejections-oom.js b/js/src/jit-test/tests/promise/unhandled-rejections-oom.js
index 706eba0032..922161f5ed 100644
--- a/js/src/jit-test/tests/promise/unhandled-rejections-oom.js
+++ b/js/src/jit-test/tests/promise/unhandled-rejections-oom.js
@@ -1,3 +1,3 @@
-// |jit-test| allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom
oomTest(async function() {}, { keepFailing: true });
diff --git a/js/src/jit-test/tests/regexp/CheckRegExpSyntax.js b/js/src/jit-test/tests/regexp/CheckRegExpSyntax.js
index 19471fdf50..d261e6ae07 100644
--- a/js/src/jit-test/tests/regexp/CheckRegExpSyntax.js
+++ b/js/src/jit-test/tests/regexp/CheckRegExpSyntax.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
load(libdir + "asserts.js");
assertEq(checkRegExpSyntax("correct[reg]exp"), undefined);
diff --git a/js/src/jit-test/tests/regexp/bug-1845715.js b/js/src/jit-test/tests/regexp/bug-1845715.js
index 992a5a8d8a..d6ca6776a3 100644
--- a/js/src/jit-test/tests/regexp/bug-1845715.js
+++ b/js/src/jit-test/tests/regexp/bug-1845715.js
@@ -1,2 +1 @@
-// |jit-test| skip-if: !('oomTest' in this)
oomTest(() => { gc(); /./d.exec(); });
diff --git a/js/src/jit-test/tests/regexp/bug1640475.js b/js/src/jit-test/tests/regexp/bug1640475.js
index 58c092ec1d..07c04827af 100644
--- a/js/src/jit-test/tests/regexp/bug1640475.js
+++ b/js/src/jit-test/tests/regexp/bug1640475.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var i = 0;
oomTest(function() {
for (var j = 0; j < 10; ++j) {
diff --git a/js/src/jit-test/tests/regexp/bug1640479.js b/js/src/jit-test/tests/regexp/bug1640479.js
index ff166d6451..a04b44a2a3 100644
--- a/js/src/jit-test/tests/regexp/bug1640479.js
+++ b/js/src/jit-test/tests/regexp/bug1640479.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var failures = 0;
var i = 0;
diff --git a/js/src/jit-test/tests/regexp/bug1794317.js b/js/src/jit-test/tests/regexp/bug1794317.js
index 1ecb21eb64..ef50fd0c97 100644
--- a/js/src/jit-test/tests/regexp/bug1794317.js
+++ b/js/src/jit-test/tests/regexp/bug1794317.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
for (let i = 0; i < 2; i++) {
oomTest(function () {
RegExp("(?<name" + i + ">)").exec();
diff --git a/js/src/jit-test/tests/saved-stacks/bug-1445973-quick.js b/js/src/jit-test/tests/saved-stacks/bug-1445973-quick.js
index e95315f71f..2e8a5cef34 100644
--- a/js/src/jit-test/tests/saved-stacks/bug-1445973-quick.js
+++ b/js/src/jit-test/tests/saved-stacks/bug-1445973-quick.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-baseline; skip-if: !('oomTest' in this)
+// |jit-test| --no-baseline
//
// For background, see the comments for LiveSavedFrameCache in js/src/vm/Stack.h.
//
diff --git a/js/src/jit-test/tests/saved-stacks/oom-in-save-stack-02.js b/js/src/jit-test/tests/saved-stacks/oom-in-save-stack-02.js
index ed24db8f0f..e62a3e953e 100644
--- a/js/src/jit-test/tests/saved-stacks/oom-in-save-stack-02.js
+++ b/js/src/jit-test/tests/saved-stacks/oom-in-save-stack-02.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-ion; --no-baseline; --no-blinterp; skip-if: !('oomAtAllocation' in this)
+// |jit-test| --no-ion; --no-baseline; --no-blinterp
// This shouldn't assert (bug 1516514).
//
// Disabled for ion and baseline because those introduce OOMs at some point that
diff --git a/js/src/jit-test/tests/saved-stacks/oom-in-save-stack.js b/js/src/jit-test/tests/saved-stacks/oom-in-save-stack.js
index c96bbc5c6b..3163646626 100644
--- a/js/src/jit-test/tests/saved-stacks/oom-in-save-stack.js
+++ b/js/src/jit-test/tests/saved-stacks/oom-in-save-stack.js
@@ -1,4 +1,2 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
let s = saveStack();
oomTest(() => { saveStack(); });
diff --git a/js/src/jit-test/tests/self-hosting/oom-delazify.js b/js/src/jit-test/tests/self-hosting/oom-delazify.js
index 2c9bfc71e9..eb8c64fc0f 100644
--- a/js/src/jit-test/tests/self-hosting/oom-delazify.js
+++ b/js/src/jit-test/tests/self-hosting/oom-delazify.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-blinterp; skip-if: !('oomTest' in this)
+// |jit-test| --no-blinterp
// Disable the JITs to make oomTest more reliable
diff --git a/js/src/jit-test/tests/self-hosting/oom-toplevel.js b/js/src/jit-test/tests/self-hosting/oom-toplevel.js
index 60f2be4e1e..9562d87049 100644
--- a/js/src/jit-test/tests/self-hosting/oom-toplevel.js
+++ b/js/src/jit-test/tests/self-hosting/oom-toplevel.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomAtAllocation' in this)
+// |jit-test| skip-if: !hasFunction.oomAtAllocation
function code(n) {
return `
diff --git a/js/src/jit-test/tests/self-test/oom-test-bug1497906.js b/js/src/jit-test/tests/self-test/oom-test-bug1497906.js
index da6a0a959d..2acca432d0 100644
--- a/js/src/jit-test/tests/self-test/oom-test-bug1497906.js
+++ b/js/src/jit-test/tests/self-test/oom-test-bug1497906.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this && 'stackTest' in this) || helperThreadCount() === 0
+// |jit-test| skip-if: !(hasFunction.oomTest && hasFunction.stackTest) || helperThreadCount() === 0
// Check that oomTest throws an exception on worker threads.
diff --git a/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-bytelength-with-non-growable-write.js b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-bytelength-with-non-growable-write.js
new file mode 100644
index 0000000000..32005f4725
--- /dev/null
+++ b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-bytelength-with-non-growable-write.js
@@ -0,0 +1,102 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer||helperThreadCount()===0
+
+function setup() {
+ // Shared memory locations:
+ //
+ // 0: Lock
+ // 1: Sleep
+ // 2: Data
+ // 3: Unused
+
+ function worker(gsab, sab) {
+ var ta = new Int32Array(gsab);
+ var ta2 = new Int32Array(sab);
+
+ // Notify the main thread that the worker is ready.
+ Atomics.store(ta, 0, 1);
+
+ // Sleep to give the main thread time to execute and tier-up the loop.
+ Atomics.wait(ta, 1, 0, 500);
+
+ // Modify the memory read in the loop.
+ Atomics.store(ta2, 2, 1);
+
+ // Sleep again to give the main thread time to execute the loop.
+ Atomics.wait(ta, 1, 0, 100);
+
+ // Grow the buffer. This modifies the loop condition.
+ gsab.grow(16);
+ }
+
+ var gsab = new SharedArrayBuffer(12, {maxByteLength: 16});
+ var sab = new SharedArrayBuffer(12);
+
+ // Start the worker.
+ {
+ let buffers = [gsab, sab];
+
+ // Shared memory locations:
+ //
+ // 0: Number of buffers
+ // 1: Ready-Flag Worker
+ // 2: Ready-Flag Main
+ let sync = new Int32Array(new SharedArrayBuffer(3 * Int32Array.BYTES_PER_ELEMENT));
+ sync[0] = buffers.length;
+
+ setSharedObject(sync.buffer);
+
+ evalInWorker(`
+ let buffers = [];
+ let sync = new Int32Array(getSharedObject());
+ let n = sync[0];
+ for (let i = 0; i < n; ++i) {
+ // Notify we're ready to receive.
+ Atomics.store(sync, 1, 1);
+
+ // Wait until buffer is in mailbox.
+ while (Atomics.compareExchange(sync, 2, 1, 0) !== 1);
+
+ buffers.push(getSharedObject());
+ }
+ (${worker})(...buffers);
+ `);
+
+ for (let buffer of buffers) {
+ // Wait until worker is ready.
+ while (Atomics.compareExchange(sync, 1, 1, 0) !== 1);
+
+ setSharedObject(buffer);
+
+ // Notify buffer is in mailbox.
+ Atomics.store(sync, 2, 1);
+ }
+ }
+
+ // Wait until worker is ready.
+ var ta = new Int32Array(gsab);
+ while (Atomics.load(ta, 0) === 0);
+
+ return {gsab, sab};
+}
+
+function testGrowableSharedArrayBufferByteLength() {
+ var {gsab, sab} = setup();
+ var ta2 = new Int32Array(sab);
+ var r = 0;
+
+ // |gsab.byteLength| is a seq-cst load, so it must prevent reordering any
+ // other loads, including unordered loads like |ta2[2]|.
+ while (gsab.byteLength <= 12) {
+ // |ta2[2]| is an unordered load, so it's hoistable by default.
+ r += ta2[2];
+ }
+
+ // The memory location is first modified and then the buffer is grown, so we
+ // must observe reads of the modified memory location before exiting the loop.
+ assertEq(
+ r > 0,
+ true,
+ "gsab.byteLength acts as a memory barrier, so ta2[2] can't be hoisted"
+ );
+}
+testGrowableSharedArrayBufferByteLength();
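The new sharedbuf tests in this patch all share the shape above: a worker writes the data element and then grows the buffer, while the main thread spins on a length read. As a rough illustration of the reordering they guard against (a hypothetical JIT transform, not code from this patch):

// The loop under test:
//   while (gsab.byteLength <= 12) { r += ta2[2]; }
// If the unordered element read could be hoisted past the length check,
//   var v = ta2[2];
//   while (gsab.byteLength <= 12) { r += v; }
// then r would stay 0 and the final assertEq(r > 0, true) would fail
// (and hoisting the length read itself would make the loop never exit).
// Treating byteLength/length on a growable buffer as a seq-cst load forbids this.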
diff --git a/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-bytelength.js b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-bytelength.js
new file mode 100644
index 0000000000..c2f27d967c
--- /dev/null
+++ b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-bytelength.js
@@ -0,0 +1,67 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer||helperThreadCount()===0
+
+function setup() {
+ // Shared memory locations:
+ //
+ // 0: Lock
+ // 1: Sleep
+ // 2: Data
+ // 3: Unused
+
+ function worker(gsab) {
+ var ta = new Int32Array(gsab);
+
+ // Notify the main thread that the worker is ready.
+ Atomics.store(ta, 0, 1);
+
+ // Sleep to give the main thread time to execute and tier-up the loop.
+ Atomics.wait(ta, 1, 0, 500);
+
+ // Modify the memory read in the loop.
+ Atomics.store(ta, 2, 1);
+
+ // Sleep again to give the main thread time to execute the loop.
+ Atomics.wait(ta, 1, 0, 100);
+
+ // Grow the buffer. This modifies the loop condition.
+ gsab.grow(16);
+ }
+
+ var gsab = new SharedArrayBuffer(12, {maxByteLength: 16});
+
+ // Pass |gsab| to the mailbox.
+ setSharedObject(gsab);
+
+ // Start the worker.
+ evalInWorker(`
+ (${worker})(getSharedObject());
+ `);
+
+ // Wait until worker is ready.
+ var ta = new Int32Array(gsab);
+ while (Atomics.load(ta, 0) === 0);
+
+ return gsab;
+}
+
+function testGrowableSharedArrayBufferByteLength() {
+ var gsab = setup();
+ var ta = new Int32Array(gsab);
+ var r = 0;
+
+ // |gsab.byteLength| is a seq-cst load, so it must prevent reordering any
+ // other loads, including unordered loads like |ta[2]|.
+ while (gsab.byteLength <= 12) {
+ // |ta[2]| is an unordered load, so it's hoistable by default.
+ r += ta[2];
+ }
+
+ // The memory location is first modified and then the buffer is grown, so we
+ // must observe reads of the modified memory location before exiting the loop.
+ assertEq(
+ r > 0,
+ true,
+ "gsab.byteLength acts as a memory barrier, so ta[2] can't be hoisted"
+ );
+}
+testGrowableSharedArrayBufferByteLength();
diff --git a/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-dataview-bytelength-with-non-growable-write.js b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-dataview-bytelength-with-non-growable-write.js
new file mode 100644
index 0000000000..fc05df3ab1
--- /dev/null
+++ b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-dataview-bytelength-with-non-growable-write.js
@@ -0,0 +1,103 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer||helperThreadCount()===0
+
+function setup() {
+ // Shared memory locations:
+ //
+ // 0: Lock
+ // 1: Sleep
+ // 2: Data
+ // 3: Unused
+
+ function worker(gsab, sab) {
+ var ta = new Int32Array(gsab);
+ var ta2 = new Int32Array(sab);
+
+ // Notify the main thread that the worker is ready.
+ Atomics.store(ta, 0, 1);
+
+ // Sleep to give the main thread time to execute and tier-up the loop.
+ Atomics.wait(ta, 1, 0, 500);
+
+ // Modify the memory read in the loop.
+ Atomics.store(ta2, 2, 1);
+
+ // Sleep again to give the main thread time to execute the loop.
+ Atomics.wait(ta, 1, 0, 100);
+
+ // Grow the buffer. This modifies the loop condition.
+ gsab.grow(16);
+ }
+
+ var gsab = new SharedArrayBuffer(12, {maxByteLength: 16});
+ var sab = new SharedArrayBuffer(12);
+
+ // Start the worker.
+ {
+ let buffers = [gsab, sab];
+
+ // Shared memory locations:
+ //
+ // 0: Number of buffers
+ // 1: Ready-Flag Worker
+ // 2: Ready-Flag Main
+ let sync = new Int32Array(new SharedArrayBuffer(3 * Int32Array.BYTES_PER_ELEMENT));
+ sync[0] = buffers.length;
+
+ setSharedObject(sync.buffer);
+
+ evalInWorker(`
+ let buffers = [];
+ let sync = new Int32Array(getSharedObject());
+ let n = sync[0];
+ for (let i = 0; i < n; ++i) {
+ // Notify we're ready to receive.
+ Atomics.store(sync, 1, 1);
+
+ // Wait until buffer is in mailbox.
+ while (Atomics.compareExchange(sync, 2, 1, 0) !== 1);
+
+ buffers.push(getSharedObject());
+ }
+ (${worker})(...buffers);
+ `);
+
+ for (let buffer of buffers) {
+ // Wait until worker is ready.
+ while (Atomics.compareExchange(sync, 1, 1, 0) !== 1);
+
+ setSharedObject(buffer);
+
+ // Notify buffer is in mailbox.
+ Atomics.store(sync, 2, 1);
+ }
+ }
+
+ // Wait until worker is ready.
+ var ta = new Int32Array(gsab);
+ while (Atomics.load(ta, 0) === 0);
+
+ return {gsab, sab};
+}
+
+function testDataViewByteLength() {
+ var {gsab, sab} = setup();
+ var dv = new DataView(gsab);
+ var ta2 = new Int32Array(sab);
+ var r = 0;
+
+ // |dv.byteLength| is a seq-cst load, so it must prevent reordering any other
+ // loads, including unordered loads like |ta2[2]|.
+ while (dv.byteLength <= 12) {
+ // |ta2[2]| is an unordered load, so it's hoistable by default.
+ r += ta2[2];
+ }
+
+ // The memory location is first modified and then the buffer is grown, so we
+ // must observe reads of the modified memory location before exiting the loop.
+ assertEq(
+ r > 0,
+ true,
+ "dv.byteLength acts as a memory barrier, so ta2[2] can't be hoisted"
+ );
+}
+testDataViewByteLength();
diff --git a/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-dataview-bytelength.js b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-dataview-bytelength.js
new file mode 100644
index 0000000000..51ed63f254
--- /dev/null
+++ b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-dataview-bytelength.js
@@ -0,0 +1,68 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer||helperThreadCount()===0
+
+function setup() {
+ // Shared memory locations:
+ //
+ // 0: Lock
+ // 1: Sleep
+ // 2: Data
+ // 3: Unused
+
+ function worker(gsab) {
+ var ta = new Int32Array(gsab);
+
+ // Notify the main thread that the worker is ready.
+ Atomics.store(ta, 0, 1);
+
+ // Sleep to give the main thread time to execute and tier-up the loop.
+ Atomics.wait(ta, 1, 0, 500);
+
+ // Modify the memory read in the loop.
+ Atomics.store(ta, 2, 1);
+
+ // Sleep again to give the main thread time to execute the loop.
+ Atomics.wait(ta, 1, 0, 100);
+
+ // Grow the buffer. This modifies the loop condition.
+ gsab.grow(16);
+ }
+
+ var gsab = new SharedArrayBuffer(12, {maxByteLength: 16});
+
+ // Pass |gsab| to the mailbox.
+ setSharedObject(gsab);
+
+ // Start the worker.
+ evalInWorker(`
+ (${worker})(getSharedObject());
+ `);
+
+ // Wait until worker is ready.
+ var ta = new Int32Array(gsab);
+ while (Atomics.load(ta, 0) === 0);
+
+ return gsab;
+}
+
+function testDataViewByteLength() {
+ var gsab = setup();
+ var ta = new Int32Array(gsab);
+ var dv = new DataView(gsab);
+ var r = 0;
+
+ // |dv.byteLength| is a seq-cst load, so it must prevent reordering any other
+ // loads, including unordered loads like |ta[2]|.
+ while (dv.byteLength <= 12) {
+ // |ta[2]| is an unordered load, so it's hoistable by default.
+ r += ta[2];
+ }
+
+ // The memory location is first modified and then the buffer is grown, so we
+ // must observe reads of the modified memory location before exiting the loop.
+ assertEq(
+ r > 0,
+ true,
+ "dv.byteLength acts as a memory barrier, so ta[2] can't be hoisted"
+ );
+}
+testDataViewByteLength();
diff --git a/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-bytelength-with-non-growable-write.js b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-bytelength-with-non-growable-write.js
new file mode 100644
index 0000000000..fd92ff615a
--- /dev/null
+++ b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-bytelength-with-non-growable-write.js
@@ -0,0 +1,103 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer||helperThreadCount()===0
+
+function setup() {
+ // Shared memory locations:
+ //
+ // 0: Lock
+ // 1: Sleep
+ // 2: Data
+ // 3: Unused
+
+ function worker(gsab, sab) {
+ var ta = new Int32Array(gsab);
+ var ta2 = new Int32Array(sab);
+
+ // Notify the main thread that the worker is ready.
+ Atomics.store(ta, 0, 1);
+
+ // Sleep to give the main thread time to execute and tier-up the loop.
+ Atomics.wait(ta, 1, 0, 500);
+
+ // Modify the memory read in the loop.
+ Atomics.store(ta2, 2, 1);
+
+ // Sleep again to give the main thread time to execute the loop.
+ Atomics.wait(ta, 1, 0, 100);
+
+ // Grow the buffer. This modifies the loop condition.
+ gsab.grow(16);
+ }
+
+ var gsab = new SharedArrayBuffer(12, {maxByteLength: 16});
+ var sab = new SharedArrayBuffer(12);
+
+ // Start the worker.
+ {
+ let buffers = [gsab, sab];
+
+ // Shared memory locations:
+ //
+ // 0: Number of buffers
+ // 1: Ready-Flag Worker
+ // 2: Ready-Flag Main
+ let sync = new Int32Array(new SharedArrayBuffer(3 * Int32Array.BYTES_PER_ELEMENT));
+ sync[0] = buffers.length;
+
+ setSharedObject(sync.buffer);
+
+ evalInWorker(`
+ let buffers = [];
+ let sync = new Int32Array(getSharedObject());
+ let n = sync[0];
+ for (let i = 0; i < n; ++i) {
+ // Notify we're ready to receive.
+ Atomics.store(sync, 1, 1);
+
+ // Wait until buffer is in mailbox.
+ while (Atomics.compareExchange(sync, 2, 1, 0) !== 1);
+
+ buffers.push(getSharedObject());
+ }
+ (${worker})(...buffers);
+ `);
+
+ for (let buffer of buffers) {
+ // Wait until worker is ready.
+ while (Atomics.compareExchange(sync, 1, 1, 0) !== 1);
+
+ setSharedObject(buffer);
+
+ // Notify buffer is in mailbox.
+ Atomics.store(sync, 2, 1);
+ }
+ }
+
+ // Wait until worker is ready.
+ var ta = new Int32Array(gsab);
+ while (Atomics.load(ta, 0) === 0);
+
+ return {gsab, sab};
+}
+
+function testTypedArrayByteLength() {
+ var {gsab, sab} = setup();
+ var ta = new Int32Array(gsab);
+ var ta2 = new Int32Array(sab);
+ var r = 0;
+
+ // |ta.byteLength| is a seq-cst load, so it must prevent reordering any other
+ // loads, including unordered loads like |ta2[2]|.
+ while (ta.byteLength <= 12) {
+ // |ta2[2]| is an unordered load, so it's hoistable by default.
+ r += ta2[2];
+ }
+
+ // The memory location is first modified and then the buffer is grown, so we
+ // must observe reads of the modified memory location before exiting the loop.
+ assertEq(
+ r > 0,
+ true,
+ "ta.byteLength acts as a memory barrier, so ta2[2] can't be hoisted"
+ );
+}
+testTypedArrayByteLength();
diff --git a/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-bytelength.js b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-bytelength.js
new file mode 100644
index 0000000000..843f7dce15
--- /dev/null
+++ b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-bytelength.js
@@ -0,0 +1,67 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer||helperThreadCount()===0
+
+function setup() {
+ // Shared memory locations:
+ //
+ // 0: Lock
+ // 1: Sleep
+ // 2: Data
+ // 3: Unused
+
+ function worker(gsab) {
+ var ta = new Int32Array(gsab);
+
+ // Notify the main thread that the worker is ready.
+ Atomics.store(ta, 0, 1);
+
+ // Sleep to give the main thread time to execute and tier-up the loop.
+ Atomics.wait(ta, 1, 0, 500);
+
+ // Modify the memory read in the loop.
+ Atomics.store(ta, 2, 1);
+
+ // Sleep again to give the main thread time to execute the loop.
+ Atomics.wait(ta, 1, 0, 100);
+
+ // Grow the buffer. This modifies the loop condition.
+ gsab.grow(16);
+ }
+
+ var gsab = new SharedArrayBuffer(12, {maxByteLength: 16});
+
+ // Pass |gsab| to the mailbox.
+ setSharedObject(gsab);
+
+ // Start the worker.
+ evalInWorker(`
+ (${worker})(getSharedObject());
+ `);
+
+ // Wait until worker is ready.
+ var ta = new Int32Array(gsab);
+ while (Atomics.load(ta, 0) === 0);
+
+ return gsab;
+}
+
+function testTypedArrayByteLength() {
+ var gsab = setup();
+ var ta = new Int32Array(gsab);
+ var r = 0;
+
+ // |ta.byteLength| is a seq-cst load, so it must prevent reordering any other
+ // loads, including unordered loads like |ta[2]|.
+ while (ta.byteLength <= 12) {
+ // |ta[2]| is an unordered load, so it's hoistable by default.
+ r += ta[2];
+ }
+
+ // The memory location is first modified and then the buffer is grown, so we
+ // must observe reads of the modified memory location before exiting the loop.
+ assertEq(
+ r > 0,
+ true,
+ "ta.byteLength acts as a memory barrier, so ta[2] can't be hoisted"
+ );
+}
+testTypedArrayByteLength();
diff --git a/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-length-with-non-growable-write.js b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-length-with-non-growable-write.js
new file mode 100644
index 0000000000..4371dd2b00
--- /dev/null
+++ b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-length-with-non-growable-write.js
@@ -0,0 +1,103 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer||helperThreadCount()===0
+
+function setup() {
+ // Shared memory locations:
+ //
+ // 0: Lock
+ // 1: Sleep
+ // 2: Data
+ // 3: Unused
+
+ function worker(gsab, sab) {
+ var ta = new Int32Array(gsab);
+ var ta2 = new Int32Array(sab);
+
+ // Notify the main thread that the worker is ready.
+ Atomics.store(ta, 0, 1);
+
+ // Sleep to give the main thread time to execute and tier-up the loop.
+ Atomics.wait(ta, 1, 0, 500);
+
+ // Modify the memory read in the loop.
+ Atomics.store(ta2, 2, 1);
+
+ // Sleep again to give the main thread time to execute the loop.
+ Atomics.wait(ta, 1, 0, 100);
+
+ // Grow the buffer. This modifies the loop condition.
+ gsab.grow(16);
+ }
+
+ var gsab = new SharedArrayBuffer(12, {maxByteLength: 16});
+ var sab = new SharedArrayBuffer(12);
+
+ // Start the worker.
+ {
+ let buffers = [gsab, sab];
+
+ // Shared memory locations:
+ //
+ // 0: Number of buffers
+ // 1: Ready-Flag Worker
+ // 2: Ready-Flag Main
+ let sync = new Int32Array(new SharedArrayBuffer(3 * Int32Array.BYTES_PER_ELEMENT));
+ sync[0] = buffers.length;
+
+ setSharedObject(sync.buffer);
+
+ evalInWorker(`
+ let buffers = [];
+ let sync = new Int32Array(getSharedObject());
+ let n = sync[0];
+ for (let i = 0; i < n; ++i) {
+ // Notify we're ready to receive.
+ Atomics.store(sync, 1, 1);
+
+ // Wait until buffer is in mailbox.
+ while (Atomics.compareExchange(sync, 2, 1, 0) !== 1);
+
+ buffers.push(getSharedObject());
+ }
+ (${worker})(...buffers);
+ `);
+
+ for (let buffer of buffers) {
+ // Wait until worker is ready.
+ while (Atomics.compareExchange(sync, 1, 1, 0) !== 1);
+
+ setSharedObject(buffer);
+
+ // Notify buffer is in mailbox.
+ Atomics.store(sync, 2, 1);
+ }
+ }
+
+ // Wait until worker is ready.
+ var ta = new Int32Array(gsab);
+ while (Atomics.load(ta, 0) === 0);
+
+ return {gsab, sab};
+}
+
+function testTypedArrayLength() {
+ var {gsab, sab} = setup();
+ var ta = new Int32Array(gsab);
+ var ta2 = new Int32Array(sab);
+ var r = 0;
+
+ // |ta.length| is a seq-cst load, so it must prevent reordering any other
+ // loads, including unordered loads like |ta2[2]|.
+ while (ta.length <= 3) {
+ // |ta2[2]| is an unordered load, so it's hoistable by default.
+ r += ta2[2];
+ }
+
+ // The memory location is first modified and then the buffer is grown, so we
+ // must observe reads of the modified memory location before exiting the loop.
+ assertEq(
+ r > 0,
+ true,
+ "ta.length acts as a memory barrier, so ta2[2] can't be hoisted"
+ );
+}
+testTypedArrayLength();
diff --git a/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-length.js b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-length.js
new file mode 100644
index 0000000000..b32af6ae78
--- /dev/null
+++ b/js/src/jit-test/tests/sharedbuf/growable-sab-memory-barrier-typedarray-length.js
@@ -0,0 +1,67 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer||helperThreadCount()===0
+
+function setup() {
+ // Shared memory locations:
+ //
+ // 0: Lock
+ // 1: Sleep
+ // 2: Data
+ // 3: Unused
+
+ function worker(gsab) {
+ var ta = new Int32Array(gsab);
+
+ // Notify the main thread that the worker is ready.
+ Atomics.store(ta, 0, 1);
+
+ // Sleep to give the main thread time to execute and tier-up the loop.
+ Atomics.wait(ta, 1, 0, 500);
+
+ // Modify the memory read in the loop.
+ Atomics.store(ta, 2, 1);
+
+ // Sleep again to give the main thread time to execute the loop.
+ Atomics.wait(ta, 1, 0, 100);
+
+ // Grow the buffer. This modifies the loop condition.
+ gsab.grow(16);
+ }
+
+ var gsab = new SharedArrayBuffer(12, {maxByteLength: 16});
+
+ // Pass |gsab| to the mailbox.
+ setSharedObject(gsab);
+
+ // Start the worker.
+ evalInWorker(`
+ (${worker})(getSharedObject());
+ `);
+
+ // Wait until worker is ready.
+ var ta = new Int32Array(gsab);
+ while (Atomics.load(ta, 0) === 0);
+
+ return gsab;
+}
+
+function testTypedArrayLength() {
+ var gsab = setup();
+ var ta = new Int32Array(gsab);
+ var r = 0;
+
+ // |ta.length| is a seq-cst load, so it must prevent reordering any other
+ // loads, including unordered loads like |ta[2]|.
+ while (ta.length <= 3) {
+ // |ta[2]| is an unordered load, so it's hoistable by default.
+ r += ta[2];
+ }
+
+ // The memory location is first modified and then the buffer is grown, so we
+ // must observe reads of the modified memory location before exiting the loop.
+ assertEq(
+ r > 0,
+ true,
+ "ta.length acts as a memory barrier, so ta[2] can't be hoisted"
+ );
+}
+testTypedArrayLength();
diff --git a/js/src/jit-test/tests/stream/bug-1513266.js b/js/src/jit-test/tests/stream/bug-1513266.js
index 3db1e2d941..5511ce8000 100644
--- a/js/src/jit-test/tests/stream/bug-1513266.js
+++ b/js/src/jit-test/tests/stream/bug-1513266.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-ion; --no-baseline; skip-if: !('oomTest' in this && this.hasOwnProperty("ReadableStream"))
+// |jit-test| --no-ion; --no-baseline; skip-if: !this.hasOwnProperty("ReadableStream")
ignoreUnhandledRejections();
diff --git a/js/src/jit-test/tests/stream/bug-1515816.js b/js/src/jit-test/tests/stream/bug-1515816.js
index 44329b056d..6199eb9a31 100644
--- a/js/src/jit-test/tests/stream/bug-1515816.js
+++ b/js/src/jit-test/tests/stream/bug-1515816.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-ion; --no-baseline; --no-blinterp; skip-if: !('oomAfterAllocations' in this && this.hasOwnProperty("ReadableStream"))
+// |jit-test| --no-ion; --no-baseline; --no-blinterp; skip-if: !this.hasOwnProperty("ReadableStream")
// Don't crash on OOM in ReadableStreamDefaultReader.prototype.read().
for (let n = 1; n < 1000; n++) {
diff --git a/js/src/jit-test/tests/typedarray/construct-with-growable-sharedarraybuffer.js b/js/src/jit-test/tests/typedarray/construct-with-growable-sharedarraybuffer.js
new file mode 100644
index 0000000000..29696f0860
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/construct-with-growable-sharedarraybuffer.js
@@ -0,0 +1,81 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer
+
+// Test TypedArray constructor when called with growable SharedArrayBuffers.
+
+function testSharedArrayBuffer() {
+ function test() {
+ var N = 200;
+ var sab = new SharedArrayBuffer(4 * Int32Array.BYTES_PER_ELEMENT, {maxByteLength: (5 + N) * Int32Array.BYTES_PER_ELEMENT});
+ for (var i = 0; i < N; ++i) {
+ var ta = new Int32Array(sab);
+ assertEq(ta.length, 4 + i);
+
+ // Ensure auto-length tracking works correctly.
+ sab.grow((5 + i) * Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 5 + i);
+ }
+ }
+ for (var i = 0; i < 2; ++i) {
+ test();
+ }
+}
+testSharedArrayBuffer();
+
+function testSharedArrayBufferAndByteOffset() {
+ function test() {
+ var N = 200;
+ var sab = new SharedArrayBuffer(4 * Int32Array.BYTES_PER_ELEMENT, {maxByteLength: (5 + N) * Int32Array.BYTES_PER_ELEMENT});
+ for (var i = 0; i < N; ++i) {
+ var ta = new Int32Array(sab, Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 3 + i);
+
+ // Ensure auto-length tracking works correctly.
+ sab.grow((5 + i) * Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 4 + i);
+ }
+ }
+ for (var i = 0; i < 2; ++i) {
+ test();
+ }
+}
+testSharedArrayBufferAndByteOffset();
+
+function testSharedArrayBufferAndByteOffsetAndLength() {
+ function test() {
+ var N = 200;
+ var sab = new SharedArrayBuffer(4 * Int32Array.BYTES_PER_ELEMENT, {maxByteLength: (5 + N) * Int32Array.BYTES_PER_ELEMENT});
+ for (var i = 0; i < N; ++i) {
+ var ta = new Int32Array(sab, Int32Array.BYTES_PER_ELEMENT, 2);
+ assertEq(ta.length, 2);
+
+ // Ensure length doesn't change when resizing the buffer.
+ sab.grow((5 + i) * Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 2);
+ }
+ }
+ for (var i = 0; i < 2; ++i) {
+ test();
+ }
+}
+testSharedArrayBufferAndByteOffsetAndLength();
+
+function testWrappedSharedArrayBuffer() {
+ var g = newGlobal();
+
+ function test() {
+ var N = 200;
+ var sab = new g.SharedArrayBuffer(4 * Int32Array.BYTES_PER_ELEMENT, {maxByteLength: (5 + N) * Int32Array.BYTES_PER_ELEMENT});
+ for (var i = 0; i < N; ++i) {
+ var ta = new Int32Array(sab);
+ assertEq(ta.length, 4 + i);
+
+ // Ensure auto-length tracking works correctly.
+ sab.grow((5 + i) * Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 5 + i);
+ }
+ }
+ for (var i = 0; i < 2; ++i) {
+ test();
+ }
+}
+testWrappedSharedArrayBuffer();
diff --git a/js/src/jit-test/tests/typedarray/construct-with-resizable-arraybuffer.js b/js/src/jit-test/tests/typedarray/construct-with-resizable-arraybuffer.js
new file mode 100644
index 0000000000..33f7a4f6c6
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/construct-with-resizable-arraybuffer.js
@@ -0,0 +1,102 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+// Test TypedArray constructor when called with resizable ArrayBuffers.
+
+function testArrayBuffer() {
+ function test() {
+ var ab = new ArrayBuffer(4 * Int32Array.BYTES_PER_ELEMENT, {maxByteLength: 5 * Int32Array.BYTES_PER_ELEMENT});
+ for (var i = 0; i < 200; ++i) {
+ var ta = new Int32Array(ab);
+ assertEq(ta.length, 4);
+
+ // Ensure auto-length tracking works correctly.
+ ab.resize(5 * Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 5);
+
+ ab.resize(2 * Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 2);
+
+ // Reset to original length.
+ ab.resize(4 * Int32Array.BYTES_PER_ELEMENT);
+ }
+ }
+ for (var i = 0; i < 2; ++i) {
+ test();
+ }
+}
+testArrayBuffer();
+
+function testArrayBufferAndByteOffset() {
+ function test() {
+ var ab = new ArrayBuffer(4 * Int32Array.BYTES_PER_ELEMENT, {maxByteLength: 5 * Int32Array.BYTES_PER_ELEMENT});
+ for (var i = 0; i < 200; ++i) {
+ var ta = new Int32Array(ab, Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 3);
+
+ // Ensure auto-length tracking works correctly.
+ ab.resize(5 * Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 4);
+
+ ab.resize(2 * Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 1);
+
+ // Reset to original length.
+ ab.resize(4 * Int32Array.BYTES_PER_ELEMENT);
+ }
+ }
+ for (var i = 0; i < 2; ++i) {
+ test();
+ }
+}
+testArrayBufferAndByteOffset();
+
+function testArrayBufferAndByteOffsetAndLength() {
+ function test() {
+ var ab = new ArrayBuffer(4 * Int32Array.BYTES_PER_ELEMENT, {maxByteLength: 5 * Int32Array.BYTES_PER_ELEMENT});
+ for (var i = 0; i < 200; ++i) {
+ var ta = new Int32Array(ab, Int32Array.BYTES_PER_ELEMENT, 2);
+ assertEq(ta.length, 2);
+
+ // Ensure length doesn't change when resizing the buffer.
+ ab.resize(5 * Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 2);
+
+ // Returns zero when the TypedArray get out-of-bounds.
+ ab.resize(2 * Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 0);
+
+ // Reset to original length.
+ ab.resize(4 * Int32Array.BYTES_PER_ELEMENT);
+ }
+ }
+ for (var i = 0; i < 2; ++i) {
+ test();
+ }
+}
+testArrayBufferAndByteOffsetAndLength();
+
+function testWrappedArrayBuffer() {
+ var g = newGlobal();
+
+ function test() {
+ var ab = new g.ArrayBuffer(4 * Int32Array.BYTES_PER_ELEMENT, {maxByteLength: 5 * Int32Array.BYTES_PER_ELEMENT});
+ for (var i = 0; i < 200; ++i) {
+ var ta = new Int32Array(ab);
+ assertEq(ta.length, 4);
+
+ // Ensure auto-length tracking works correctly.
+ ab.resize(5 * Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 5);
+
+ ab.resize(2 * Int32Array.BYTES_PER_ELEMENT);
+ assertEq(ta.length, 2);
+
+ // Reset to original length.
+ ab.resize(4 * Int32Array.BYTES_PER_ELEMENT);
+ }
+ }
+ for (var i = 0; i < 2; ++i) {
+ test();
+ }
+}
+testWrappedArrayBuffer();
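
Editor's note: for reference, a minimal sketch (not part of the patch) of the auto-length tracking the new test exercises, assuming ArrayBuffer.prototype.resize is available and using the shell's assertEq helper.

var ab = new ArrayBuffer(16, {maxByteLength: 32});
var ta = new Int32Array(ab);   // length-tracking view
assertEq(ta.length, 4);

ab.resize(32);
assertEq(ta.length, 8);        // grows with the buffer

ab.resize(8);
assertEq(ta.length, 2);        // shrinks with the buffer
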
diff --git a/js/src/jit-test/tests/typedarray/ensure-non-inline.js b/js/src/jit-test/tests/typedarray/ensure-non-inline.js
index d8859fa627..a1321be88a 100644
--- a/js/src/jit-test/tests/typedarray/ensure-non-inline.js
+++ b/js/src/jit-test/tests/typedarray/ensure-non-inline.js
@@ -1,3 +1,5 @@
+// |jit-test| --enable-arraybuffer-resizable
+
const constructors = [
Int8Array,
Uint8Array,
@@ -73,6 +75,16 @@ function test() {
const big = new ctor(ab);
messWith(big, ab);
}
+
+ // With resizable buffer.
+ for (const ctor of constructors) {
+ let ab = new ArrayBuffer(32, {maxByteLength: 64});
+ const small = new ctor(ab);
+ messWith(small, small);
+ ab = new ArrayBuffer(4000, {maxByteLength: 4096});
+ const big = new ctor(ab);
+ messWith(big, big);
+ }
}
try {
diff --git a/js/src/jit-test/tests/typedarray/growable-sharedarraybuffer-bytelength.js b/js/src/jit-test/tests/typedarray/growable-sharedarraybuffer-bytelength.js
new file mode 100644
index 0000000000..421bf03475
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/growable-sharedarraybuffer-bytelength.js
@@ -0,0 +1,14 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer
+
+function testGrowableSharedArrayBuffer() {
+ for (let i = 0; i < 4; ++i) {
+ let sab = new SharedArrayBuffer(i, {maxByteLength: i + 100});
+ for (let j = 0; j < 100; ++j) {
+ assertEq(sab.byteLength, i + j);
+
+ sab.grow(i + j + 1);
+ assertEq(sab.byteLength, i + j + 1);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testGrowableSharedArrayBuffer();
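
Editor's note: a growable SharedArrayBuffer can only grow, never shrink. A hedged sketch of the byteLength behavior the test checks, assuming the feature is enabled and using the jit-test helpers (assertThrowsInstanceOf comes from libdir asserts.js).

load(libdir + "asserts.js");

var sab = new SharedArrayBuffer(8, {maxByteLength: 16});
assertEq(sab.growable, true);
assertEq(sab.byteLength, 8);

sab.grow(12);
assertEq(sab.byteLength, 12);

// grow() is monotonic: shrinking or exceeding maxByteLength throws.
assertThrowsInstanceOf(() => sab.grow(4), RangeError);
assertThrowsInstanceOf(() => sab.grow(32), RangeError);
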
diff --git a/js/src/jit-test/tests/typedarray/indexed-integer-exotics.js b/js/src/jit-test/tests/typedarray/indexed-integer-exotics.js
index 6331efd7c3..510e57c0f0 100644
--- a/js/src/jit-test/tests/typedarray/indexed-integer-exotics.js
+++ b/js/src/jit-test/tests/typedarray/indexed-integer-exotics.js
@@ -88,10 +88,10 @@ assertEq(undefined, f());
// These checks currently fail due to bug 1129202 not being implemented yet.
// We should uncomment them once that bug has landed.
-//assertThrows('Object.defineProperty(new Int32Array(100), -1, {value: 1})');
+assertThrows('Object.defineProperty(new Int32Array(100), -1, {value: 1})');
// -0 gets converted to the string "0", so use "-0" instead.
-//assertThrows('Object.defineProperty(new Int32Array(100), "-0", {value: 1})');
-//assertThrows('Object.defineProperty(new Int32Array(100), -10, {value: 1})');
-//assertThrows('Object.defineProperty(new Int32Array(), 4294967295, {value: 1})');
+assertThrows('Object.defineProperty(new Int32Array(100), "-0", {value: 1})');
+assertThrows('Object.defineProperty(new Int32Array(100), -10, {value: 1})');
+assertThrows('Object.defineProperty(new Int32Array(), 4294967295, {value: 1})');
check();
diff --git a/js/src/jit-test/tests/typedarray/oom-allocating-arraybuffer-contents.js b/js/src/jit-test/tests/typedarray/oom-allocating-arraybuffer-contents.js
index 16d1bf976f..1362c8d14c 100644
--- a/js/src/jit-test/tests/typedarray/oom-allocating-arraybuffer-contents.js
+++ b/js/src/jit-test/tests/typedarray/oom-allocating-arraybuffer-contents.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
// Resolve ArrayBuffer before OOM-testing, so OOM-testing runs less code and is
// less fragile.
var AB = ArrayBuffer;
diff --git a/js/src/jit-test/tests/typedarray/oom-allocating-copying-same-buffer-contents.js b/js/src/jit-test/tests/typedarray/oom-allocating-copying-same-buffer-contents.js
index ec33e4ef8e..cdbc3d7215 100644
--- a/js/src/jit-test/tests/typedarray/oom-allocating-copying-same-buffer-contents.js
+++ b/js/src/jit-test/tests/typedarray/oom-allocating-copying-same-buffer-contents.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var buffer = new ArrayBuffer(16);
var i8 = new Int8Array(buffer);
var i16 = new Int16Array(buffer);
diff --git a/js/src/jit-test/tests/typedarray/resizable-arraybuffer-bytelength.js b/js/src/jit-test/tests/typedarray/resizable-arraybuffer-bytelength.js
new file mode 100644
index 0000000000..7b50331a15
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-arraybuffer-bytelength.js
@@ -0,0 +1,20 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+function testResizableArrayBuffer() {
+ for (let i = 0; i < 4; ++i) {
+ let ab = new ArrayBuffer(i, {maxByteLength: i + 1});
+ for (let j = 0; j < 100; ++j) {
+ ab.resize(i);
+ assertEq(ab.byteLength, i);
+
+ ab.resize(i + 1);
+ assertEq(ab.byteLength, i + 1);
+
+ if (i > 0) {
+ ab.resize(i - 1);
+ assertEq(ab.byteLength, i - 1);
+ }
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBuffer();
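
Editor's note: a short sketch (not part of the patch) of the resize bounds the test relies on, assuming ArrayBuffer.prototype.resize is available.

var fixedAb = new ArrayBuffer(4);
assertEq(fixedAb.resizable, false);

var ab = new ArrayBuffer(4, {maxByteLength: 8});
assertEq(ab.resizable, true);
assertEq(ab.maxByteLength, 8);

ab.resize(0);                // shrinking, even to zero, is allowed
assertEq(ab.byteLength, 0);
ab.resize(8);                // growing up to maxByteLength is allowed
assertEq(ab.byteLength, 8);
// ab.resize(9) would throw a RangeError because it exceeds maxByteLength.
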
diff --git a/js/src/jit-test/tests/typedarray/resizable-buffer-inlined-data-moved.js b/js/src/jit-test/tests/typedarray/resizable-buffer-inlined-data-moved.js
new file mode 100644
index 0000000000..1206edd5e5
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-buffer-inlined-data-moved.js
@@ -0,0 +1,53 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+function fillArrayBuffer(rab) {
+ let fill = new Int8Array(rab);
+ for (let i = 0; i < fill.length; ++i) fill[i] = i + 1;
+}
+
+function test() {
+ let rab = new ArrayBuffer(4, {maxByteLength: 4});
+ let ta = new Int8Array(rab, 2, 2);
+
+ fillArrayBuffer(rab);
+
+ assertEq(ta[0], 3);
+ assertEq(ta[1], 4);
+
+ // Shrink to make |ta| out-of-bounds.
+ rab.resize(3);
+
+ // Request GC to move inline data.
+ gc();
+
+ // Grow to make |ta| in-bounds again.
+ rab.resize(4);
+
+ assertEq(ta[0], 3);
+ assertEq(ta[1], 0);
+}
+test();
+
+
+function testAutoLength() {
+ let rab = new ArrayBuffer(4, {maxByteLength: 4});
+ let ta = new Int8Array(rab, 2);
+
+ fillArrayBuffer(rab);
+
+ assertEq(ta[0], 3);
+ assertEq(ta[1], 4);
+
+ // Shrink to make |ta| out-of-bounds.
+ rab.resize(1);
+
+ // Request GC to move inline data.
+ gc();
+
+ // Grow to make |ta| in-bounds again.
+ rab.resize(4);
+
+ assertEq(ta[0], 0);
+ assertEq(ta[1], 0);
+}
+testAutoLength();
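
Editor's note: to make the expectations above easier to follow: shrinking a resizable buffer past a view's window takes that view out-of-bounds (element reads yield undefined), and growing the buffer again zero-fills the re-added bytes, which is why |ta[1]| reads 0 after the round trip. A minimal sketch, assuming the feature is enabled.

var rab = new ArrayBuffer(4, {maxByteLength: 4});
var ta = new Int8Array(rab, 2, 2);
new Int8Array(rab).set([1, 2, 3, 4]);

rab.resize(3);     // the view needs bytes [2, 4), so it is now out-of-bounds
assertEq(ta.length, 0);
assertEq(ta[0], undefined);

rab.resize(4);     // back in-bounds; the dropped byte comes back zero-filled
assertEq(ta[0], 3);
assertEq(ta[1], 0);
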
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-bytelength-with-sab.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-bytelength-with-sab.js
new file mode 100644
index 0000000000..828e989b92
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-bytelength-with-sab.js
@@ -0,0 +1,29 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer
+
+function testResizableArrayBuffer() {
+ for (let i = 0; i < 4; ++i) {
+ let sab = new SharedArrayBuffer(i, {maxByteLength: i + 100});
+ let ta = new Int8Array(sab, 0, i);
+ for (let j = 0; j < 100; ++j) {
+ assertEq(ta.byteLength, i);
+
+ sab.grow(i + j + 1);
+ assertEq(ta.byteLength, i);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBuffer();
+
+function testResizableArrayBufferAutoLength() {
+ for (let i = 0; i < 4; ++i) {
+ let sab = new SharedArrayBuffer(i, {maxByteLength: i + 100});
+ let ta = new Int8Array(sab);
+ for (let j = 0; j < 100; ++j) {
+ assertEq(ta.byteLength, i + j);
+
+ sab.grow(i + j + 1);
+ assertEq(ta.byteLength, i + j + 1);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLength();
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-bytelength.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-bytelength.js
new file mode 100644
index 0000000000..925750d186
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-bytelength.js
@@ -0,0 +1,41 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+function testResizableArrayBuffer() {
+ for (let i = 0; i < 4; ++i) {
+ let ab = new ArrayBuffer(i, {maxByteLength: i + 1});
+ let ta = new Int8Array(ab, 0, i);
+ for (let j = 0; j < 100; ++j) {
+ ab.resize(i);
+ assertEq(ta.byteLength, i);
+
+ ab.resize(i + 1);
+ assertEq(ta.byteLength, i);
+
+ if (i > 0) {
+ ab.resize(i - 1);
+ assertEq(ta.byteLength, 0);
+ }
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBuffer();
+
+function testResizableArrayBufferAutoLength() {
+ for (let i = 0; i < 4; ++i) {
+ let ab = new ArrayBuffer(i, {maxByteLength: i + 1});
+ let ta = new Int8Array(ab);
+ for (let j = 0; j < 100; ++j) {
+ ab.resize(i);
+ assertEq(ta.byteLength, i);
+
+ ab.resize(i + 1);
+ assertEq(ta.byteLength, i + 1);
+
+ if (i > 0) {
+ ab.resize(i - 1);
+ assertEq(ta.byteLength, i - 1);
+ }
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLength();
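
Editor's note: a compact sketch (not part of the patch) of the byteLength rules the two tests above split between them: explicit-length views keep a fixed byteLength until they go out-of-bounds (then report 0), while auto-length views follow the buffer.

var ab = new ArrayBuffer(4, {maxByteLength: 8});
var fixed = new Int8Array(ab, 0, 4);
var tracking = new Int8Array(ab);

ab.resize(8);
assertEq(fixed.byteLength, 4);      // explicit length: byteLength stays at 4
assertEq(tracking.byteLength, 8);   // auto length: byteLength follows the buffer

ab.resize(2);
assertEq(fixed.byteLength, 0);      // out-of-bounds views report byteLength 0
assertEq(tracking.byteLength, 2);
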
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-byteoffset-sab.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-byteoffset-sab.js
new file mode 100644
index 0000000000..6142d75978
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-byteoffset-sab.js
@@ -0,0 +1,43 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer
+
+function testResizableArrayBufferAutoLength() {
+ for (let i = 0; i < 4; ++i) {
+ let sab = new SharedArrayBuffer(i, {maxByteLength: i + 100});
+ let ta = new Int8Array(sab);
+ for (let j = 0; j < 100; ++j) {
+ assertEq(ta.byteOffset, 0);
+
+ sab.grow(i + j + 1);
+ assertEq(ta.byteOffset, 0);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLength();
+
+function testResizableArrayBufferAutoLengthNonZeroOffset() {
+ for (let i = 1; i < 4 + 1; ++i) {
+ let sab = new SharedArrayBuffer(i + 1, {maxByteLength: i + 100 + 1});
+ let ta = new Int8Array(sab, 1);
+ for (let j = 0; j < 100; ++j) {
+ assertEq(ta.byteOffset, 1);
+
+ sab.grow(i + j + 2);
+ assertEq(ta.byteOffset, 1);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLengthNonZeroOffset();
+
+function testResizableArrayBufferNonZeroOffset() {
+ for (let i = 2; i < 4 + 2; ++i) {
+ let sab = new SharedArrayBuffer(i + 2, {maxByteLength: i + 100 + 2});
+ let ta = new Int8Array(sab, 1, 1);
+ for (let j = 0; j < 100; ++j) {
+ assertEq(ta.byteOffset, 1);
+
+ sab.grow(i + j + 3);
+ assertEq(ta.byteOffset, 1);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferNonZeroOffset();
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-byteoffset.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-byteoffset.js
new file mode 100644
index 0000000000..56552eeb7f
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-byteoffset.js
@@ -0,0 +1,57 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+function testResizableArrayBufferAutoLength() {
+ for (let i = 0; i < 4; ++i) {
+ let ab = new ArrayBuffer(i, {maxByteLength: i + 1});
+ let ta = new Int8Array(ab);
+ for (let j = 0; j < 100; ++j) {
+ ab.resize(i);
+ assertEq(ta.byteOffset, 0);
+
+ ab.resize(i + 1);
+ assertEq(ta.byteOffset, 0);
+
+ if (i > 0) {
+ ab.resize(i - 1);
+ assertEq(ta.byteOffset, 0);
+ }
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLength();
+
+function testResizableArrayBufferAutoLengthNonZeroOffset() {
+ for (let i = 1; i < 4 + 1; ++i) {
+ let ab = new ArrayBuffer(i, {maxByteLength: i + 1});
+ let ta = new Int8Array(ab, 1);
+ for (let j = 0; j < 100; ++j) {
+ ab.resize(i);
+ assertEq(ta.byteOffset, 1);
+
+ ab.resize(i + 1);
+ assertEq(ta.byteOffset, 1);
+
+ ab.resize(i - 1);
+ assertEq(ta.byteOffset, i > 1 ? 1 : 0);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLengthNonZeroOffset();
+
+function testResizableArrayBufferNonZeroOffset() {
+ for (let i = 2; i < 4 + 2; ++i) {
+ let ab = new ArrayBuffer(i, {maxByteLength: i + 1});
+ let ta = new Int8Array(ab, 1, 1);
+ for (let j = 0; j < 100; ++j) {
+ ab.resize(i);
+ assertEq(ta.byteOffset, 1);
+
+ ab.resize(i + 1);
+ assertEq(ta.byteOffset, 1);
+
+ ab.resize(i - 1);
+ assertEq(ta.byteOffset, i > 2 ? 1 : 0);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferNonZeroOffset();
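
Editor's note: the `i > 1 ? 1 : 0` expectations above come from byteOffset reporting 0 while a view is out-of-bounds. A minimal sketch, assuming the feature is enabled.

var ab = new ArrayBuffer(4, {maxByteLength: 4});
var ta = new Int8Array(ab, 1);  // length-tracking view at byte offset 1

assertEq(ta.byteOffset, 1);

ab.resize(0);                   // the buffer is now smaller than the offset
assertEq(ta.byteOffset, 0);     // out-of-bounds views report byteOffset 0

ab.resize(4);
assertEq(ta.byteOffset, 1);     // back in-bounds, the real offset is visible again
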
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-get-elem-with-sab.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-get-elem-with-sab.js
new file mode 100644
index 0000000000..236641d9a8
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-get-elem-with-sab.js
@@ -0,0 +1,49 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer
+
+const TypedArrays = [
+ Int8Array,
+ Uint8Array,
+ Int16Array,
+ Uint16Array,
+ Int32Array,
+ Uint32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array,
+ BigInt64Array,
+ BigUint64Array,
+];
+
+function test(TA) {
+ const length = 4;
+ const byteLength = length * TA.BYTES_PER_ELEMENT;
+
+ let rab = new SharedArrayBuffer(byteLength, {maxByteLength: byteLength});
+ let actual = new TA(rab);
+ let expected = new TA(length);
+ let type = expected[0].constructor;
+
+ for (let i = 0; i < length; ++i) {
+ actual[i] = type(i * i);
+ expected[i] = type(i * i);
+ }
+
+ // In-bounds access
+ for (let i = 0; i < 200; ++i) {
+ let index = i % length;
+ assertEq(actual[index], expected[index]);
+ }
+
+ // Out-of-bounds access
+ for (let i = 0; i < 200; ++i) {
+ let index = i % (length + 4);
+ assertEq(actual[index], expected[index]);
+ }
+}
+
+for (let TA of TypedArrays) {
+ // Copy test function to ensure monomorphic ICs.
+ let copy = Function(`return ${test}`)();
+
+ copy(TA);
+}
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-get-elem.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-get-elem.js
new file mode 100644
index 0000000000..902841e526
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-get-elem.js
@@ -0,0 +1,49 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+const TypedArrays = [
+ Int8Array,
+ Uint8Array,
+ Int16Array,
+ Uint16Array,
+ Int32Array,
+ Uint32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array,
+ BigInt64Array,
+ BigUint64Array,
+];
+
+function test(TA) {
+ const length = 4;
+ const byteLength = length * TA.BYTES_PER_ELEMENT;
+
+ let rab = new ArrayBuffer(byteLength, {maxByteLength: byteLength});
+ let actual = new TA(rab);
+ let expected = new TA(length);
+ let type = expected[0].constructor;
+
+ for (let i = 0; i < length; ++i) {
+ actual[i] = type(i * i);
+ expected[i] = type(i * i);
+ }
+
+ // In-bounds access
+ for (let i = 0; i < 200; ++i) {
+ let index = i % length;
+ assertEq(actual[index], expected[index]);
+ }
+
+ // Out-of-bounds access
+ for (let i = 0; i < 200; ++i) {
+ let index = i % (length + 4);
+ assertEq(actual[index], expected[index]);
+ }
+}
+
+for (let TA of TypedArrays) {
+ // Copy test function to ensure monomorphic ICs.
+ let copy = Function(`return ${test}`)();
+
+ copy(TA);
+}
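
Editor's note: the Function(`return ${test}`)() pattern above stamps out a fresh copy of the test function per element type so each copy gets its own inline caches and stays monomorphic. A sketch of the idea in isolation; makeFreshCopy and probe are hypothetical names used only for illustration.

function makeFreshCopy(fn) {
  // Re-evaluating the source text yields a distinct function object with its
  // own inline caches, so type feedback gathered for one element type does
  // not pollute the caches used for another.
  return Function(`return ${fn}`)();
}

function probe(ta) {
  return ta[0];
}

let copyForInt8 = makeFreshCopy(probe);
let copyForFloat64 = makeFreshCopy(probe);
assertEq(copyForInt8(new Int8Array([1])), 1);
assertEq(copyForFloat64(new Float64Array([1.5])), 1.5);
assertEq(copyForInt8 === copyForFloat64, false);
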
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-has-elem-with-sab.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-has-elem-with-sab.js
new file mode 100644
index 0000000000..40d51ebfe2
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-has-elem-with-sab.js
@@ -0,0 +1,36 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer
+
+const TypedArrays = [
+ Int8Array,
+ Uint8Array,
+ Int16Array,
+ Uint16Array,
+ Int32Array,
+ Uint32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array,
+ BigInt64Array,
+ BigUint64Array,
+];
+
+function test(TA) {
+ const length = 4;
+ const byteLength = length * TA.BYTES_PER_ELEMENT;
+
+ let rab = new SharedArrayBuffer(byteLength, {maxByteLength: byteLength});
+ let actual = new TA(rab);
+ let expected = new TA(length);
+
+ for (let i = 0; i < 200; ++i) {
+ let index = (i % (length + 4));
+ assertEq(index in actual, index in expected);
+ }
+}
+
+for (let TA of TypedArrays) {
+ // Copy test function to ensure monomorphic ICs.
+ let copy = Function(`return ${test}`)();
+
+ copy(TA);
+}
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-has-elem.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-has-elem.js
new file mode 100644
index 0000000000..07d44bf55b
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-has-elem.js
@@ -0,0 +1,36 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+const TypedArrays = [
+ Int8Array,
+ Uint8Array,
+ Int16Array,
+ Uint16Array,
+ Int32Array,
+ Uint32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array,
+ BigInt64Array,
+ BigUint64Array,
+];
+
+function test(TA) {
+ const length = 4;
+ const byteLength = length * TA.BYTES_PER_ELEMENT;
+
+ let rab = new ArrayBuffer(byteLength, {maxByteLength: byteLength});
+ let actual = new TA(rab);
+ let expected = new TA(length);
+
+ for (let i = 0; i < 200; ++i) {
+ let index = (i % (length + 4));
+ assertEq(index in actual, index in expected);
+ }
+}
+
+for (let TA of TypedArrays) {
+ // Copy test function to ensure monomorphic ICs.
+ let copy = Function(`return ${test}`)();
+
+ copy(TA);
+}
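
Editor's note: the `in` checks above lean on TypedArrays being integer-indexed exotic objects: a canonical numeric index past the current length is simply not present, with no fallback to the prototype chain, while ordinary named properties behave as usual. A tiny illustrative sketch.

var ta = new Int8Array(4);
assertEq(3 in ta, true);         // in-bounds index: present
assertEq(4 in ta, false);        // past the current length: not present
assertEq("length" in ta, true);  // ordinary (non-index) properties are unaffected
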
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-intrinsic-byteOffset.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-intrinsic-byteOffset.js
new file mode 100644
index 0000000000..1b9f9a28e7
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-intrinsic-byteOffset.js
@@ -0,0 +1,73 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+const TypedArrayByteOffset = getSelfHostedValue("TypedArrayByteOffset");
+
+function testTypedArrayByteOffset() {
+ let ab = new ArrayBuffer(100, {maxByteLength: 100});
+ let typedArrays = [
+ new Int8Array(ab),
+ new Int8Array(ab, 1),
+ new Int8Array(ab, 2),
+ new Int8Array(ab, 3),
+ ];
+
+ for (let i = 0; i < 200; ++i) {
+ let ta = typedArrays[i & 3];
+ assertEq(TypedArrayByteOffset(ta), i & 3);
+ }
+}
+testTypedArrayByteOffset();
+
+function testTypedArrayByteOffsetOutOfBounds() {
+ let ab = new ArrayBuffer(100, {maxByteLength: 100});
+ let typedArrays = [
+ new Int8Array(ab, 0, 10),
+ new Int8Array(ab, 1, 10),
+ new Int8Array(ab, 2, 10),
+ new Int8Array(ab, 3, 10),
+ ];
+
+ // Resize to zero to make all views out-of-bounds.
+ ab.resize(0);
+
+ for (let i = 0; i < 200; ++i) {
+ let ta = typedArrays[i & 3];
+ assertEq(TypedArrayByteOffset(ta), i & 3);
+ }
+}
+testTypedArrayByteOffsetOutOfBounds();
+
+function testTypedArrayByteOffsetDetached() {
+ let ab = new ArrayBuffer(100, {maxByteLength: 100});
+ let typedArrays = [
+ new Int8Array(ab, 0, 10),
+ new Int8Array(ab, 1, 10),
+ new Int8Array(ab, 2, 10),
+ new Int8Array(ab, 3, 10),
+ ];
+
+ // Detach the buffer.
+ ab.transfer();
+
+ for (let i = 0; i < 200; ++i) {
+ let ta = typedArrays[i & 3];
+ assertEq(TypedArrayByteOffset(ta), 0);
+ }
+}
+testTypedArrayByteOffsetDetached();
+
+function testTypedArrayByteOffsetWithShared() {
+ let ab = new SharedArrayBuffer(100, {maxByteLength: 100});
+ let typedArrays = [
+ new Int8Array(ab),
+ new Int8Array(ab, 1),
+ new Int8Array(ab, 2),
+ new Int8Array(ab, 3),
+ ];
+
+ for (let i = 0; i < 200; ++i) {
+ let ta = typedArrays[i & 3];
+ assertEq(TypedArrayByteOffset(ta), i & 3);
+ }
+}
+testTypedArrayByteOffsetWithShared();
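
Editor's note: the detached case above is produced with ArrayBuffer.prototype.transfer, which moves the contents into a new buffer and leaves the original detached; views on the detached buffer then report zero offsets and lengths. A minimal sketch, assuming transfer() and the detached accessor are available in this build.

var ab = new ArrayBuffer(8, {maxByteLength: 16});
var ta = new Int8Array(ab, 4);

var moved = ab.transfer();       // |ab| is detached, its contents move to |moved|
assertEq(ab.detached, true);
assertEq(ab.byteLength, 0);
assertEq(moved.byteLength, 8);

assertEq(ta.byteOffset, 0);      // views on a detached buffer report zeros
assertEq(ta.length, 0);
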
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-intrinsic-typedArrayLength.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-intrinsic-typedArrayLength.js
new file mode 100644
index 0000000000..c36b4f997f
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-intrinsic-typedArrayLength.js
@@ -0,0 +1,75 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+load(libdir + "asserts.js");
+
+const TypedArrayLength = getSelfHostedValue("TypedArrayLength");
+
+function testTypedArrayLength() {
+ let ab = new ArrayBuffer(100, {maxByteLength: 100});
+ let typedArrays = [
+ new Int8Array(ab),
+ new Int8Array(ab, 1),
+ new Int8Array(ab, 2),
+ new Int8Array(ab, 3),
+ ];
+
+ for (let i = 0; i < 200; ++i) {
+ let ta = typedArrays[i & 3];
+ assertEq(TypedArrayLength(ta), 100 - (i & 3));
+ }
+}
+testTypedArrayLength();
+
+function testTypedArrayLengthOutOfBounds() {
+ let ab = new ArrayBuffer(100, {maxByteLength: 100});
+ let typedArrays = [
+ new Int8Array(ab, 0, 10),
+ new Int8Array(ab, 1, 10),
+ new Int8Array(ab, 2, 10),
+ new Int8Array(ab, 3, 10),
+ ];
+
+ // Resize to zero to make all views out-of-bounds.
+ ab.resize(0);
+
+ for (let i = 0; i < 200; ++i) {
+ let ta = typedArrays[i & 3];
+ assertThrowsInstanceOf(() => TypedArrayLength(ta), TypeError);
+ }
+}
+testTypedArrayLengthOutOfBounds();
+
+function testTypedArrayLengthDetached() {
+ let ab = new ArrayBuffer(100, {maxByteLength: 100});
+ let typedArrays = [
+ new Int8Array(ab, 0, 10),
+ new Int8Array(ab, 1, 10),
+ new Int8Array(ab, 2, 10),
+ new Int8Array(ab, 3, 10),
+ ];
+
+ // Detach the buffer.
+ ab.transfer();
+
+ for (let i = 0; i < 200; ++i) {
+ let ta = typedArrays[i & 3];
+ assertEq(TypedArrayLength(ta), 0);
+ }
+}
+testTypedArrayLengthDetached();
+
+function testTypedArrayLengthWithShared() {
+ let ab = new SharedArrayBuffer(100, {maxByteLength: 100});
+ let typedArrays = [
+ new Int8Array(ab),
+ new Int8Array(ab, 1),
+ new Int8Array(ab, 2),
+ new Int8Array(ab, 3),
+ ];
+
+ for (let i = 0; i < 200; ++i) {
+ let ta = typedArrays[i & 3];
+ assertEq(TypedArrayLength(ta), 100 - (i & 3));
+ }
+}
+testTypedArrayLengthWithShared();
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-intrinsic-typedArrayLengthZeroOnOutOfBounds.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-intrinsic-typedArrayLengthZeroOnOutOfBounds.js
new file mode 100644
index 0000000000..a272d7d8c2
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-intrinsic-typedArrayLengthZeroOnOutOfBounds.js
@@ -0,0 +1,75 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+load(libdir + "asserts.js");
+
+const TypedArrayLengthZeroOnOutOfBounds = getSelfHostedValue("TypedArrayLengthZeroOnOutOfBounds");
+
+function testTypedArrayLength() {
+ let ab = new ArrayBuffer(100, {maxByteLength: 100});
+ let typedArrays = [
+ new Int8Array(ab),
+ new Int8Array(ab, 1),
+ new Int8Array(ab, 2),
+ new Int8Array(ab, 3),
+ ];
+
+ for (let i = 0; i < 200; ++i) {
+ let ta = typedArrays[i & 3];
+ assertEq(TypedArrayLengthZeroOnOutOfBounds(ta), 100 - (i & 3));
+ }
+}
+testTypedArrayLength();
+
+function testTypedArrayLengthOutOfBounds() {
+ let ab = new ArrayBuffer(100, {maxByteLength: 100});
+ let typedArrays = [
+ new Int8Array(ab, 0, 10),
+ new Int8Array(ab, 1, 10),
+ new Int8Array(ab, 2, 10),
+ new Int8Array(ab, 3, 10),
+ ];
+
+ // Resize to zero to make all views out-of-bounds.
+ ab.resize(0);
+
+ for (let i = 0; i < 200; ++i) {
+ let ta = typedArrays[i & 3];
+ assertEq(TypedArrayLengthZeroOnOutOfBounds(ta), 0);
+ }
+}
+testTypedArrayLengthOutOfBounds();
+
+function testTypedArrayLengthDetached() {
+ let ab = new ArrayBuffer(100, {maxByteLength: 100});
+ let typedArrays = [
+ new Int8Array(ab, 0, 10),
+ new Int8Array(ab, 1, 10),
+ new Int8Array(ab, 2, 10),
+ new Int8Array(ab, 3, 10),
+ ];
+
+ // Detach the buffer.
+ ab.transfer();
+
+ for (let i = 0; i < 200; ++i) {
+ let ta = typedArrays[i & 3];
+ assertEq(TypedArrayLengthZeroOnOutOfBounds(ta), 0);
+ }
+}
+testTypedArrayLengthDetached();
+
+function testTypedArrayLengthWithShared() {
+ let ab = new SharedArrayBuffer(100, {maxByteLength: 100});
+ let typedArrays = [
+ new Int8Array(ab),
+ new Int8Array(ab, 1),
+ new Int8Array(ab, 2),
+ new Int8Array(ab, 3),
+ ];
+
+ for (let i = 0; i < 200; ++i) {
+ let ta = typedArrays[i & 3];
+ assertEq(TypedArrayLengthZeroOnOutOfBounds(ta), 100 - (i & 3));
+ }
+}
+testTypedArrayLengthWithShared();
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-length-with-sab.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-length-with-sab.js
new file mode 100644
index 0000000000..9b1a9d41e8
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-length-with-sab.js
@@ -0,0 +1,29 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer
+
+function testResizableArrayBuffer() {
+ for (let i = 0; i < 4; ++i) {
+ let sab = new SharedArrayBuffer(i, {maxByteLength: i + 100});
+ let ta = new Int8Array(sab, 0, i);
+ for (let j = 0; j < 100; ++j) {
+ assertEq(ta.length, i);
+
+ sab.grow(i + j + 1);
+ assertEq(ta.length, i);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBuffer();
+
+function testResizableArrayBufferAutoLength() {
+ for (let i = 0; i < 4; ++i) {
+ let sab = new SharedArrayBuffer(i, {maxByteLength: i + 100});
+ let ta = new Int8Array(sab);
+ for (let j = 0; j < 100; ++j) {
+ assertEq(ta.length, i + j);
+
+ sab.grow(i + j + 1);
+ assertEq(ta.length, i + j + 1);
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLength();
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-length.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-length.js
new file mode 100644
index 0000000000..96bbc9752e
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-length.js
@@ -0,0 +1,41 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+function testResizableArrayBuffer() {
+ for (let i = 0; i < 4; ++i) {
+ let ab = new ArrayBuffer(i, {maxByteLength: i + 1});
+ let ta = new Int8Array(ab, 0, i);
+ for (let j = 0; j < 100; ++j) {
+ ab.resize(i);
+ assertEq(ta.length, i);
+
+ ab.resize(i + 1);
+ assertEq(ta.length, i);
+
+ if (i > 0) {
+ ab.resize(i - 1);
+ assertEq(ta.length, 0);
+ }
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBuffer();
+
+function testResizableArrayBufferAutoLength() {
+ for (let i = 0; i < 4; ++i) {
+ let ab = new ArrayBuffer(i, {maxByteLength: i + 1});
+ let ta = new Int8Array(ab);
+ for (let j = 0; j < 100; ++j) {
+ ab.resize(i);
+ assertEq(ta.length, i);
+
+ ab.resize(i + 1);
+ assertEq(ta.length, i + 1);
+
+ if (i > 0) {
+ ab.resize(i - 1);
+ assertEq(ta.length, i - 1);
+ }
+ }
+ }
+}
+for (let i = 0; i < 2; ++i) testResizableArrayBufferAutoLength();
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-set-elem-with-sab.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-set-elem-with-sab.js
new file mode 100644
index 0000000000..67a15e7714
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-set-elem-with-sab.js
@@ -0,0 +1,54 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize||!this.SharedArrayBuffer
+
+const TypedArrays = [
+ Int8Array,
+ Uint8Array,
+ Int16Array,
+ Uint16Array,
+ Int32Array,
+ Uint32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array,
+ BigInt64Array,
+ BigUint64Array,
+];
+
+function test(TA) {
+ const length = 4;
+ const byteLength = length * TA.BYTES_PER_ELEMENT;
+
+ let rab = new SharedArrayBuffer(byteLength, {maxByteLength: byteLength});
+ let actual = new TA(rab);
+ let expected = new TA(length);
+ let type = expected[0].constructor;
+
+ // In-bounds access
+ for (let i = 0; i < 200; ++i) {
+ let index = i % length;
+
+ let v = type(i);
+ actual[index] = v;
+ expected[index] = v;
+
+ assertEq(actual[index], expected[index]);
+ }
+
+ // Out-of-bounds access
+ for (let i = 0; i < 200; ++i) {
+ let index = i % (length + 4);
+
+ let v = type(i);
+ actual[index] = v;
+ expected[index] = v;
+
+ assertEq(actual[index], expected[index]);
+ }
+}
+
+for (let TA of TypedArrays) {
+ // Copy test function to ensure monomorphic ICs.
+ let copy = Function(`return ${test}`)();
+
+ copy(TA);
+}
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-set-elem.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-set-elem.js
new file mode 100644
index 0000000000..91f5c7b048
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-set-elem.js
@@ -0,0 +1,54 @@
+// |jit-test| --enable-arraybuffer-resizable; skip-if: !ArrayBuffer.prototype.resize
+
+const TypedArrays = [
+ Int8Array,
+ Uint8Array,
+ Int16Array,
+ Uint16Array,
+ Int32Array,
+ Uint32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array,
+ BigInt64Array,
+ BigUint64Array,
+];
+
+function test(TA) {
+ const length = 4;
+ const byteLength = length * TA.BYTES_PER_ELEMENT;
+
+ let rab = new ArrayBuffer(byteLength, {maxByteLength: byteLength});
+ let actual = new TA(rab);
+ let expected = new TA(length);
+ let type = expected[0].constructor;
+
+ // In-bounds access
+ for (let i = 0; i < 200; ++i) {
+ let index = i % length;
+
+ let v = type(i);
+ actual[index] = v;
+ expected[index] = v;
+
+ assertEq(actual[index], expected[index]);
+ }
+
+ // Out-of-bounds access
+ for (let i = 0; i < 200; ++i) {
+ let index = i % (length + 4);
+
+ let v = type(i);
+ actual[index] = v;
+ expected[index] = v;
+
+ assertEq(actual[index], expected[index]);
+ }
+}
+
+for (let TA of TypedArrays) {
+ // Copy test function to ensure monomorphic ICs.
+ let copy = Function(`return ${test}`)();
+
+ copy(TA);
+}
diff --git a/js/src/jit-test/tests/warp/bug1665303.js b/js/src/jit-test/tests/warp/bug1665303.js
index b2c26e01cf..3b0bc75079 100644
--- a/js/src/jit-test/tests/warp/bug1665303.js
+++ b/js/src/jit-test/tests/warp/bug1665303.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this); --fast-warmup
+// |jit-test| --fast-warmup
// Prevent slowness with --ion-eager.
setJitCompilerOption("ion.warmup.trigger", 100);
diff --git a/js/src/jit-test/tests/warp/bug1667685.js b/js/src/jit-test/tests/warp/bug1667685.js
index 2b9e392d24..99699b88d5 100644
--- a/js/src/jit-test/tests/warp/bug1667685.js
+++ b/js/src/jit-test/tests/warp/bug1667685.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this); --fast-warmup
+// |jit-test| --fast-warmup
// Prevent slowness with --ion-eager.
setJitCompilerOption("ion.warmup.trigger", 100);
diff --git a/js/src/jit-test/tests/warp/bug1668197.js b/js/src/jit-test/tests/warp/bug1668197.js
index 2dcd6cb376..24e1becec0 100644
--- a/js/src/jit-test/tests/warp/bug1668197.js
+++ b/js/src/jit-test/tests/warp/bug1668197.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
function f(x, y) {
return ~Math.hypot(x >>> 0, 2 - x >>> 0);
}
diff --git a/js/src/jit-test/tests/warp/bug1871089.js b/js/src/jit-test/tests/warp/bug1871089.js
new file mode 100644
index 0000000000..8abddb6574
--- /dev/null
+++ b/js/src/jit-test/tests/warp/bug1871089.js
@@ -0,0 +1,13 @@
+// |jit-test| --fast-warmup
+var i = 0;
+function a() {
+ if (i++ > 50) {
+ return;
+ }
+ function b() {
+ new a("abcdefghijklm");
+ }
+ [new b];
+}
+gczeal(4);
+a();
diff --git a/js/src/jit-test/tests/warp/trial-inline-gc-4.js b/js/src/jit-test/tests/warp/trial-inline-gc-4.js
new file mode 100644
index 0000000000..2aecd4d371
--- /dev/null
+++ b/js/src/jit-test/tests/warp/trial-inline-gc-4.js
@@ -0,0 +1,42 @@
+// 1) Trial inline f => g1 => h.
+// 2) Make f => g1 call site polymorphic by calling f => g2.
+// This gets rid of the ICScript::inlinedChildren_ edge.
+// 3) Restore f => g1.
+// 4) Trigger a shrinking GC from f => g1 => h (h not trial-inlined; h preserves Baseline code)
+// This purges h's inlined ICScript.
+// 5) Call f => g1 => h (trial inlined). This must not use the discarded ICScript.
+function h(i, x) {
+ if (i === 900) {
+ // Step 4.
+ gc(this, "shrinking");
+ }
+ return x + 1;
+}
+function g2(i, x) {
+ if (i === 820) {
+ // Step 3.
+ callee = g1;
+ }
+ return h(i, x) + x;
+}
+function g1(i, x) {
+ if (i === 800) {
+ // Step 2.
+ callee = g2;
+ }
+ if (i === 900) {
+ // Step 4.
+ h(i, x);
+ }
+ return h(i, x) + x;
+}
+
+var callee = g1;
+
+function f() {
+ for (var i = 0; i < 1000; i++) {
+ callee(i, i);
+ callee(i, "foo");
+ }
+}
+f();
diff --git a/js/src/jit-test/tests/wasm/binary.js b/js/src/jit-test/tests/wasm/binary.js
index 5a59330768..f7d60a56ce 100644
--- a/js/src/jit-test/tests/wasm/binary.js
+++ b/js/src/jit-test/tests/wasm/binary.js
@@ -119,7 +119,7 @@ wasmEval(moduleWithSections([tableSection0()]));
wasmEval(moduleWithSections([memorySection(0)]));
-function invalidMemorySection2() {
+function memorySection2() {
var body = [];
body.push(...varU32(2)); // number of memories
body.push(...varU32(0x0));
@@ -130,7 +130,11 @@ function invalidMemorySection2() {
}
wasmEval(moduleWithSections([memorySection0()]));
-assertErrorMessage(() => wasmEval(moduleWithSections([invalidMemorySection2()])), CompileError, /number of memories must be at most one/);
+if (wasmMultiMemoryEnabled()) {
+ wasmEval(moduleWithSections([memorySection2()]));
+} else {
+ assertErrorMessage(() => wasmEval(moduleWithSections([memorySection2()])), CompileError, /number of memories must be at most one/);
+}
// Test early 'end'
const bodyMismatch = /(function body length mismatch)|(operators remaining after end of function)/;
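
Editor's note: the change above only relaxes the memory-count check when multi-memory is enabled; the features.js update later in this patch validates the same shape of module. A hedged sketch of exercising it from JS, guarded on the feature and using the shell's wasmEvalText and wasmMultiMemoryEnabled helpers.

if (wasmMultiMemoryEnabled()) {
  // Two memories in one module now validate instead of failing with
  // "number of memories must be at most one".
  let {mem0, mem1} = wasmEvalText(`(module
    (memory (export "mem0") 1)
    (memory (export "mem1") 2)
  )`).exports;
  assertEq(mem0.buffer.byteLength, 65536);
  assertEq(mem1.buffer.byteLength, 2 * 65536);
}
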
diff --git a/js/src/jit-test/tests/wasm/bug1858423.js b/js/src/jit-test/tests/wasm/bug1858423.js
index a90d308906..f6296c03d9 100644
--- a/js/src/jit-test/tests/wasm/bug1858423.js
+++ b/js/src/jit-test/tests/wasm/bug1858423.js
@@ -1,4 +1,4 @@
-// |jit-test| --wasm-gc; skip-if: !wasmCachingEnabled() || !wasmGcEnabled()
+// |jit-test| --setpref=wasm_gc=true; skip-if: !wasmCachingEnabled() || !wasmGcEnabled()
const code = wasmTextToBinary(`(module
(type $t (struct (field i32) (field anyref)))
diff --git a/js/src/jit-test/tests/wasm/builtin-modules/integer-gemm/directives.txt b/js/src/jit-test/tests/wasm/builtin-modules/integer-gemm/directives.txt
index 46c9c504f4..7c458ede07 100644
--- a/js/src/jit-test/tests/wasm/builtin-modules/integer-gemm/directives.txt
+++ b/js/src/jit-test/tests/wasm/builtin-modules/integer-gemm/directives.txt
@@ -1 +1 @@
-|jit-test| --wasm-moz-intgemm; skip-if: (!getBuildConfiguration("x64") && !getBuildConfiguration("x86") && !getBuildConfiguration("arm64")) || getBuildConfiguration("simulator") || !wasmMozIntGemmEnabled()
+|jit-test| --setpref=wasm_moz_intgemm=true; skip-if: (!getBuildConfiguration("x64") && !getBuildConfiguration("x86") && !getBuildConfiguration("arm64")) || getBuildConfiguration("simulator") || !wasmMozIntGemmEnabled()
diff --git a/js/src/jit-test/tests/wasm/builtin-modules/js-string/basic.js b/js/src/jit-test/tests/wasm/builtin-modules/js-string/basic.js
index dce6204fee..2c0ecb89c5 100644
--- a/js/src/jit-test/tests/wasm/builtin-modules/js-string/basic.js
+++ b/js/src/jit-test/tests/wasm/builtin-modules/js-string/basic.js
@@ -1,110 +1,126 @@
// |jit-test| skip-if: !wasmJSStringBuiltinsEnabled();
-let testModule = wasmTextToBinary(`(module
+let testModule = `(module
+ (type $arrayMutI16 (array (mut i16)))
+
(func
- (import "wasm:js-string" "fromWTF16Array")
- (param anyref i32 i32)
- (result externref)
+ (import "wasm:js-string" "test")
+ (param externref)
+ (result i32)
+ )
+ (export "test" (func 0))
+
+ (func
+ (import "wasm:js-string" "cast")
+ (param externref)
+ (result (ref extern))
+ )
+ (export "cast" (func 1))
+
+ (func
+ (import "wasm:js-string" "fromCharCodeArray")
+ (param (ref null $arrayMutI16) i32 i32)
+ (result (ref extern))
)
- (export "fromWTF16Array" (func 0))
+ (export "fromCharCodeArray" (func 2))
(func
- (import "wasm:js-string" "toWTF16Array")
- (param externref anyref i32)
+ (import "wasm:js-string" "intoCharCodeArray")
+ (param externref (ref null $arrayMutI16) i32)
(result i32)
)
- (export "toWTF16Array" (func 1))
+ (export "intoCharCodeArray" (func 3))
(func
(import "wasm:js-string" "fromCharCode")
(param i32)
(result externref)
)
- (export "fromCharCode" (func 2))
+ (export "fromCharCode" (func 4))
(func
(import "wasm:js-string" "fromCodePoint")
(param i32)
(result externref)
)
- (export "fromCodePoint" (func 3))
+ (export "fromCodePoint" (func 5))
(func
(import "wasm:js-string" "charCodeAt")
(param externref i32)
(result i32)
)
- (export "charCodeAt" (func 4))
+ (export "charCodeAt" (func 6))
(func
(import "wasm:js-string" "codePointAt")
(param externref i32)
(result i32)
)
- (export "codePointAt" (func 5))
+ (export "codePointAt" (func 7))
(func
(import "wasm:js-string" "length")
(param externref)
(result i32)
)
- (export "length" (func 6))
+ (export "length" (func 8))
(func
- (import "wasm:js-string" "concatenate")
+ (import "wasm:js-string" "concat")
(param externref externref)
(result externref)
)
- (export "concatenate" (func 7))
+ (export "concat" (func 9))
(func
(import "wasm:js-string" "substring")
(param externref i32 i32)
(result externref)
)
- (export "substring" (func 8))
+ (export "substring" (func 10))
(func
(import "wasm:js-string" "equals")
(param externref externref)
(result i32)
)
- (export "equals" (func 9))
+ (export "equals" (func 11))
(func
(import "wasm:js-string" "compare")
(param externref externref)
(result i32)
)
- (export "compare" (func 10))
-)`);
+ (export "compare" (func 12))
+)`;
let {
- createArray,
+ createArrayMutI16,
arrayLength,
arraySet,
arrayGet
} = wasmEvalText(`(module
- (type $i16Array (array (mut i16)))
- (func (export "createArray") (param i32) (result anyref)
+ (type $arrayMutI16 (array (mut i16)))
+ (func (export "createArrayMutI16") (param i32) (result anyref)
i32.const 0
local.get 0
- array.new $i16Array
+ array.new $arrayMutI16
)
(func (export "arrayLength") (param arrayref) (result i32)
local.get 0
array.len
)
- (func (export "arraySet") (param (ref $i16Array) i32 i32)
+ (func (export "arraySet") (param (ref $arrayMutI16) i32 i32)
local.get 0
local.get 1
local.get 2
- array.set $i16Array
+ array.set $arrayMutI16
)
- (func (export "arrayGet") (param (ref $i16Array) i32) (result i32)
+ (func (export "arrayGet") (param (ref $arrayMutI16) i32) (result i32)
local.get 0
local.get 1
- array.get_u $i16Array
+ array.get_u $arrayMutI16
)
)`).exports;
@@ -114,9 +130,23 @@ function throwIfNotString(a) {
}
}
let polyFillImports = {
- fromWTF16Array: (array, arrayStart, arrayCount) => {
- arrayStart |= 0;
- arrayCount |= 0;
+ test: (string) => {
+ if (string === null ||
+ typeof string !== "string") {
+ return 0;
+ }
+ return 1;
+ },
+ cast: (string) => {
+ if (string === null ||
+ typeof string !== "string") {
+ throw new WebAssembly.RuntimeError();
+ }
+ return string;
+ },
+ fromCharCodeArray: (array, arrayStart, arrayCount) => {
+ arrayStart >>>= 0;
+ arrayCount >>>= 0;
let length = arrayLength(array);
if (BigInt(arrayStart) + BigInt(arrayCount) > BigInt(length)) {
throw new WebAssembly.RuntimeError();
@@ -127,8 +157,8 @@ let polyFillImports = {
}
return result;
},
- toWTF16Array: (string, arr, arrayStart) => {
- arrayStart |= 0;
+ intoCharCodeArray: (string, arr, arrayStart) => {
+ arrayStart >>>= 0;
throwIfNotString(string);
let arrLength = arrayLength(arr);
let stringLength = string.length;
@@ -141,22 +171,22 @@ let polyFillImports = {
return stringLength;
},
fromCharCode: (charCode) => {
- charCode |= 0;
+ charCode >>>= 0;
return String.fromCharCode(charCode);
},
fromCodePoint: (codePoint) => {
- codePoint |= 0;
+ codePoint >>>= 0;
return String.fromCodePoint(codePoint);
},
charCodeAt: (string, stringIndex) => {
- stringIndex |= 0;
+ stringIndex >>>= 0;
throwIfNotString(string);
if (stringIndex >= string.length)
throw new WebAssembly.RuntimeError();
return string.charCodeAt(stringIndex);
},
codePointAt: (string, stringIndex) => {
- stringIndex |= 0;
+ stringIndex >>>= 0;
throwIfNotString(string);
if (stringIndex >= string.length)
throw new WebAssembly.RuntimeError();
@@ -166,14 +196,14 @@ let polyFillImports = {
throwIfNotString(string);
return string.length;
},
- concatenate: (stringA, stringB) => {
+ concat: (stringA, stringB) => {
throwIfNotString(stringA);
throwIfNotString(stringB);
return stringA + stringB;
},
substring: (string, startIndex, endIndex) => {
- startIndex |= 0;
- endIndex |= 0;
+ startIndex >>>= 0;
+ endIndex >>>= 0;
throwIfNotString(string);
 if (startIndex > string.length ||
     endIndex > string.length ||
@@ -217,7 +247,6 @@ function assertSameBehavior(funcA, funcB, ...params) {
if (errA || errB) {
assertEq(errA === null, errB === null, errA ? errA.message : errB.message);
assertEq(Object.getPrototypeOf(errA), Object.getPrototypeOf(errB));
- assertEq(errA.message, errB.message);
}
assertEq(resultA, resultB);
@@ -227,13 +256,30 @@ function assertSameBehavior(funcA, funcB, ...params) {
return resultA;
}
-let builtinExports = new WebAssembly.Instance(new WebAssembly.Module(testModule, {builtins: ["js-string"]}), {}).exports;
-let polyfillExports = new WebAssembly.Instance(new WebAssembly.Module(testModule), { 'wasm:js-string': polyFillImports }).exports;
+let builtinExports = wasmEvalText(testModule, {}, {builtins: ["js-string"]}).exports;
+let polyfillExports = wasmEvalText(testModule, { 'wasm:js-string': polyFillImports }).exports;
let testStrings = ["", "a", "1", "ab", "hello, world", "\n", "☺", "☺smiley", String.fromCodePoint(0x10000, 0x10001)];
let testCharCodes = [1, 2, 3, 10, 0x7f, 0xff, 0xfffe, 0xffff];
let testCodePoints = [1, 2, 3, 10, 0x7f, 0xff, 0xfffe, 0xffff, 0x10000, 0x10001];
+for (let a of WasmExternrefValues) {
+ assertSameBehavior(
+ builtinExports['test'],
+ polyfillExports['test'],
+ a
+ );
+ try {
+ assertSameBehavior(
+ builtinExports['cast'],
+ polyfillExports['cast'],
+ a
+ );
+ } catch (err) {
+ assertEq(err instanceof WebAssembly.RuntimeError, true);
+ }
+}
+
for (let a of testCharCodes) {
assertSameBehavior(
builtinExports['fromCharCode'],
@@ -272,16 +318,16 @@ for (let a of testStrings) {
);
}
- let array = createArray(length);
+ let arrayMutI16 = createArrayMutI16(length);
assertSameBehavior(
- builtinExports['toWTF16Array'],
- polyfillExports['toWTF16Array'],
- a, array, 0
+ builtinExports['intoCharCodeArray'],
+ polyfillExports['intoCharCodeArray'],
+ a, arrayMutI16, 0
);
assertSameBehavior(
- builtinExports['fromWTF16Array'],
- polyfillExports['fromWTF16Array'],
- array, 0, length
+ builtinExports['fromCharCodeArray'],
+ polyfillExports['fromCharCodeArray'],
+ arrayMutI16, 0, length
);
for (let i = 0; i < length; i++) {
@@ -298,8 +344,8 @@ for (let a of testStrings) {
for (let a of testStrings) {
for (let b of testStrings) {
assertSameBehavior(
- builtinExports['concatenate'],
- polyfillExports['concatenate'],
+ builtinExports['concat'],
+ polyfillExports['concat'],
a, b
);
assertSameBehavior(
@@ -314,3 +360,13 @@ for (let a of testStrings) {
);
}
}
+
+// fromCharCodeArray length is an unsigned integer
+{
+ let arrayMutI16 = createArrayMutI16(1);
+ assertErrorMessage(() => assertSameBehavior(
+ builtinExports['fromCharCodeArray'],
+ polyfillExports['fromCharCodeArray'],
+ arrayMutI16, 1, -1
+ ), WebAssembly.RuntimeError, /./);
+}
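
Editor's note: the polyfill changes above swap `|= 0` (ToInt32) for `>>>= 0` (ToUint32) so that negative inputs become large unsigned values and fail the bounds checks the same way the builtins do, which is what the new fromCharCodeArray length test relies on. A quick illustration of the difference.

// ToInt32 keeps -1 negative; ToUint32 wraps it to a huge index.
assertEq(-1 | 0, -1);
assertEq(-1 >>> 0, 4294967295);

// With the unsigned interpretation, a count of -1 can never satisfy
// start + count <= array length, so the polyfill throws like the builtin.
assertEq(BigInt(1) + BigInt(-1 >>> 0) > BigInt(1), true);
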
diff --git a/js/src/jit-test/tests/wasm/builtin-modules/js-string/directives.txt b/js/src/jit-test/tests/wasm/builtin-modules/js-string/directives.txt
index 8c5e6882eb..408d4dd01a 100644
--- a/js/src/jit-test/tests/wasm/builtin-modules/js-string/directives.txt
+++ b/js/src/jit-test/tests/wasm/builtin-modules/js-string/directives.txt
@@ -1 +1 @@
-|jit-test| --wasm-gc; --wasm-js-string-builtins; test-also=--wasm-compiler=optimizing; include:wasm.js
+|jit-test| --setpref=wasm_js_string_builtins=true; test-also=--wasm-compiler=optimizing; include:wasm.js
diff --git a/js/src/jit-test/tests/wasm/builtin-modules/oom-test.js b/js/src/jit-test/tests/wasm/builtin-modules/oom-test.js
index 32aa1b2515..aaf8d1e5c3 100644
--- a/js/src/jit-test/tests/wasm/builtin-modules/oom-test.js
+++ b/js/src/jit-test/tests/wasm/builtin-modules/oom-test.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => {
const module = wasmBuiltinI8VecMul();
WebAssembly.Module.imports(module);
diff --git a/js/src/jit-test/tests/wasm/directiveless/bug1877358.js b/js/src/jit-test/tests/wasm/directiveless/bug1877358.js
index 1f8fad0e43..10cb54398a 100644
--- a/js/src/jit-test/tests/wasm/directiveless/bug1877358.js
+++ b/js/src/jit-test/tests/wasm/directiveless/bug1877358.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-wasm-exceptions; include:wasm.js
+// |jit-test| -P wasm_exceptions=false; include:wasm.js
let {test} = wasmEvalText(`(module
(func $m (import "" "m"))
diff --git a/js/src/jit-test/tests/wasm/directives.txt b/js/src/jit-test/tests/wasm/directives.txt
index 15c7511171..4a2413125a 100644
--- a/js/src/jit-test/tests/wasm/directives.txt
+++ b/js/src/jit-test/tests/wasm/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); test-also=--wasm-test-serialization; test-also=--wasm-compiler=optimizing --no-avx; skip-variant-if: --wasm-compiler=optimizing --no-avx, !getBuildConfiguration("x86") && !getBuildConfiguration("x64") || getBuildConfiguration("simulator"); include:wasm.js
+|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); test-also=--setpref=wasm_test_serialization=true; test-also=--wasm-compiler=optimizing --no-avx; skip-variant-if: --wasm-compiler=optimizing --no-avx, !getBuildConfiguration("x86") && !getBuildConfiguration("x64") || getBuildConfiguration("simulator"); include:wasm.js
diff --git a/js/src/jit-test/tests/wasm/exceptions/bug-1751699.js b/js/src/jit-test/tests/wasm/exceptions/bug-1751699.js
index 38268e7905..c60a94a8ed 100644
--- a/js/src/jit-test/tests/wasm/exceptions/bug-1751699.js
+++ b/js/src/jit-test/tests/wasm/exceptions/bug-1751699.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => {
wasmEvalText(`
(import "" "" (func $d))
diff --git a/js/src/jit-test/tests/wasm/exceptions/bug-1788213.js b/js/src/jit-test/tests/wasm/exceptions/bug-1788213.js
index 4d0b4abcdc..7c0710c117 100644
--- a/js/src/jit-test/tests/wasm/exceptions/bug-1788213.js
+++ b/js/src/jit-test/tests/wasm/exceptions/bug-1788213.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => {
wasmEvalText(`(import "" "" (tag $undef)) (func throw 0) (func (try (do)))`);
});
diff --git a/js/src/jit-test/tests/wasm/exceptions/bug-1791361.js b/js/src/jit-test/tests/wasm/exceptions/bug-1791361.js
index 9c4432de91..0e09a8a746 100644
--- a/js/src/jit-test/tests/wasm/exceptions/bug-1791361.js
+++ b/js/src/jit-test/tests/wasm/exceptions/bug-1791361.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => {
wasmEvalText(`
(tag $d)
diff --git a/js/src/jit-test/tests/wasm/exceptions/directives.txt b/js/src/jit-test/tests/wasm/exceptions/directives.txt
index 84f10ac2b3..c9c2613039 100644
--- a/js/src/jit-test/tests/wasm/exceptions/directives.txt
+++ b/js/src/jit-test/tests/wasm/exceptions/directives.txt
@@ -1 +1 @@
-|jit-test| --wasm-exceptions; test-also=--wasm-exnref; test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; include:wasm.js; skip-if: !wasmExceptionsEnabled()
+|jit-test| test-also=--setpref=wasm_exnref=true; test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; include:wasm.js
diff --git a/js/src/jit-test/tests/wasm/exceptions/oom-construct-message.js b/js/src/jit-test/tests/wasm/exceptions/oom-construct-message.js
index a7820876bc..8eb515ea9b 100644
--- a/js/src/jit-test/tests/wasm/exceptions/oom-construct-message.js
+++ b/js/src/jit-test/tests/wasm/exceptions/oom-construct-message.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
const tag = new WebAssembly.Tag({ parameters: ["i32"] });
oomTest(() => {
new WebAssembly.Exception(tag, []);
diff --git a/js/src/jit-test/tests/wasm/exceptions/oom-create-exception-data.js b/js/src/jit-test/tests/wasm/exceptions/oom-create-exception-data.js
index 0aa1e75698..51d4558da8 100644
--- a/js/src/jit-test/tests/wasm/exceptions/oom-create-exception-data.js
+++ b/js/src/jit-test/tests/wasm/exceptions/oom-create-exception-data.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
const tag = new WebAssembly.Tag({ parameters: ["i32", "i32", "i32", "i32"] });
const params = [0, 0, 0, 0];
oomTest(() => {
diff --git a/js/src/jit-test/tests/wasm/exceptions/unreachable.js b/js/src/jit-test/tests/wasm/exceptions/unreachable.js
index f0091bc617..2dc2c8b3b1 100644
--- a/js/src/jit-test/tests/wasm/exceptions/unreachable.js
+++ b/js/src/jit-test/tests/wasm/exceptions/unreachable.js
@@ -1,4 +1,4 @@
-// |jit-test| test-also=--wasm-function-references --wasm-gc --wasm-compiler=optimizing; test-also=--wasm-function-references --wasm-gc --wasm-compiler=baseline;
+// |jit-test| test-also=--setpref=wasm_gc=true --wasm-compiler=optimizing; test-also=--setpref=wasm_gc=true --wasm-compiler=baseline;
wasmFailValidateText(`(module
(tag)
diff --git a/js/src/jit-test/tests/wasm/exnref/bug1883865.js b/js/src/jit-test/tests/wasm/exnref/bug1883865.js
new file mode 100644
index 0000000000..d6418d1285
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/exnref/bug1883865.js
@@ -0,0 +1,25 @@
+// Checks proper padding for nested tryNotes.
+
+new WebAssembly.Module(wasmTextToBinary(`(module
+ (func
+ try_table $l3
+ try_table $l4
+ try_table $l5
+ end
+ end
+ end
+ )
+)`));
+
+new WebAssembly.Module(wasmTextToBinary(`(module
+ (func
+ try_table $l3
+ try_table $l4
+ try_table $l5
+ end
+ try_table $l5a
+ end
+ end
+ end
+ )
+)`));
diff --git a/js/src/jit-test/tests/wasm/exnref/directives.txt b/js/src/jit-test/tests/wasm/exnref/directives.txt
index bc17009ea8..b993dbd1d5 100644
--- a/js/src/jit-test/tests/wasm/exnref/directives.txt
+++ b/js/src/jit-test/tests/wasm/exnref/directives.txt
@@ -1 +1 @@
-|jit-test| --wasm-exnref; test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; include:wasm.js; skip-if: !wasmExnRefEnabled()
+|jit-test| --setpref=wasm_exnref=true; test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; include:wasm.js; skip-if: !wasmExnRefEnabled()
diff --git a/js/src/jit-test/tests/wasm/extended-const/basic.js b/js/src/jit-test/tests/wasm/extended-const/basic.js
index bf0ce460d5..6b71385cb6 100644
--- a/js/src/jit-test/tests/wasm/extended-const/basic.js
+++ b/js/src/jit-test/tests/wasm/extended-const/basic.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !wasmExtendedConstEnabled()
-
function testPrivateGlobal(valtype, expr, result) {
// Immutable private globals have a single cell for wasm.
let { get } = wasmEvalText(`(module
diff --git a/js/src/jit-test/tests/wasm/extended-const/directives.txt b/js/src/jit-test/tests/wasm/extended-const/directives.txt
index 0d16de6524..c4b5e420f1 100644
--- a/js/src/jit-test/tests/wasm/extended-const/directives.txt
+++ b/js/src/jit-test/tests/wasm/extended-const/directives.txt
@@ -1 +1 @@
-|jit-test| --wasm-extended-const; test-also=--wasm-compiler=optimizing; test-also=--wasm-test-serialization; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; include:wasm.js
+|jit-test| test-also=--wasm-compiler=optimizing; test-also=--setpref=wasm_test_serialization=true; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; include:wasm.js
diff --git a/js/src/jit-test/tests/wasm/extended-const/disabled.js b/js/src/jit-test/tests/wasm/extended-const/disabled.js
deleted file mode 100644
index 01e64f6c44..0000000000
--- a/js/src/jit-test/tests/wasm/extended-const/disabled.js
+++ /dev/null
@@ -1,22 +0,0 @@
-// |jit-test| skip-if: wasmExtendedConstEnabled()
-
-const { CompileError, validate } = WebAssembly;
-
-const DISABLED = /extended constant expressions not enabled|unrecognized opcode/;
-
-let tests = [
- "(module (global i32 i32.const 0 i32.const 0 i32.add))",
- "(module (global i32 i32.const 0 i32.const 0 i32.sub))",
- "(module (global i32 i32.const 0 i32.const 0 i32.mul))",
- "(module (global i64 i64.const 0 i64.const 0 i64.add))",
- "(module (global i64 i64.const 0 i64.const 0 i64.sub))",
- "(module (global i64 i64.const 0 i64.const 0 i64.mul))",
-];
-
-// Test that use of extended constants fails when disabled.
-
-for (let src of tests) {
- let bin = wasmTextToBinary(src);
- assertEq(validate(bin), false);
- wasmCompilationShouldFail(bin, DISABLED);
-}
diff --git a/js/src/jit-test/tests/wasm/extended-const/pathological.js b/js/src/jit-test/tests/wasm/extended-const/pathological.js
index e3695f3625..8a87b25be4 100644
--- a/js/src/jit-test/tests/wasm/extended-const/pathological.js
+++ b/js/src/jit-test/tests/wasm/extended-const/pathological.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !wasmExtendedConstEnabled()
-
// Let's calculate zero in some elaborate ways.
function testFancyZeroOffset(fancyZero, memType = 'i32') {
try {
diff --git a/js/src/jit-test/tests/wasm/features.js b/js/src/jit-test/tests/wasm/features.js
index 904dd03e76..3292334ee6 100644
--- a/js/src/jit-test/tests/wasm/features.js
+++ b/js/src/jit-test/tests/wasm/features.js
@@ -1,5 +1,3 @@
-// |jit-test| test-also=--wasm-extended-const; test-also=--wasm-exceptions;
-
// Test that if a feature is 'experimental' then we must be in a nightly build,
// and if a feature is 'released' then it must be enabled on release and beta.
//
@@ -68,26 +66,16 @@ for (let [name, enabled, test] of releasedFeaturesMaybeDisabledAnyway) {
let releasedFeatures = [
['threads', wasmThreadsEnabled(), `(module (memory 1 1 shared))`],
[
- 'exceptions',
- wasmExceptionsEnabled(),
- `(module (type (func)) (tag (type 0)))`
- ],
- [
- 'extended-const',
- wasmExtendedConstEnabled(),
- `(module
- (global i32
- i32.const 0
- i32.const 0
- i32.add
- )
- )`
- ],
- [
'tail-calls',
wasmTailCallsEnabled(),
`(module (func) (func (return_call 0)))`
],
+ ['gc', wasmGcEnabled(), `(module (type (struct)))`],
+ [
+ 'multi-memory',
+ wasmMultiMemoryEnabled(),
+ `(module (memory 0) (memory 0))`,
+ ],
];
for (let [name, enabled, test] of releasedFeatures) {
diff --git a/js/src/jit-test/tests/wasm/function-references/as-non-null.js b/js/src/jit-test/tests/wasm/function-references/as-non-null.js
index 1280c8ee06..6fd02d61ac 100644
--- a/js/src/jit-test/tests/wasm/function-references/as-non-null.js
+++ b/js/src/jit-test/tests/wasm/function-references/as-non-null.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmFunctionReferencesEnabled()
+// |jit-test| skip-if: !wasmGcEnabled()
let {checkNonNull} = wasmEvalText(`(module
(func (export "checkNonNull") (param externref) (result (ref extern))
diff --git a/js/src/jit-test/tests/wasm/function-references/binary.js b/js/src/jit-test/tests/wasm/function-references/binary.js
index 91cf807dc4..0e64f0723e 100644
--- a/js/src/jit-test/tests/wasm/function-references/binary.js
+++ b/js/src/jit-test/tests/wasm/function-references/binary.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmFunctionReferencesEnabled()
+// |jit-test| skip-if: !wasmGcEnabled()
load(libdir + "wasm-binary.js");
diff --git a/js/src/jit-test/tests/wasm/function-references/br-non-null.js b/js/src/jit-test/tests/wasm/function-references/br-non-null.js
index 371cdaa40a..6d359105fb 100644
--- a/js/src/jit-test/tests/wasm/function-references/br-non-null.js
+++ b/js/src/jit-test/tests/wasm/function-references/br-non-null.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmFunctionReferencesEnabled()
+// |jit-test| skip-if: !wasmGcEnabled()
// br_on_non_null from constant
wasmValidateText(`(module
diff --git a/js/src/jit-test/tests/wasm/function-references/br-null.js b/js/src/jit-test/tests/wasm/function-references/br-null.js
index 26d1013de2..08728a3644 100644
--- a/js/src/jit-test/tests/wasm/function-references/br-null.js
+++ b/js/src/jit-test/tests/wasm/function-references/br-null.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmFunctionReferencesEnabled()
+// |jit-test| skip-if: !wasmGcEnabled()
// br_on_null from constant
wasmValidateText(`(module
diff --git a/js/src/jit-test/tests/wasm/function-references/call_ref.js b/js/src/jit-test/tests/wasm/function-references/call_ref.js
index 514c3145d6..50bdfb6441 100644
--- a/js/src/jit-test/tests/wasm/function-references/call_ref.js
+++ b/js/src/jit-test/tests/wasm/function-references/call_ref.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmFunctionReferencesEnabled()
+// |jit-test| skip-if: !wasmGcEnabled()
let { plusOne } = wasmEvalText(`(module
(; forward declaration so that ref.func works ;)
diff --git a/js/src/jit-test/tests/wasm/function-references/directives.txt b/js/src/jit-test/tests/wasm/function-references/directives.txt
index f7d2135421..293724e57a 100644
--- a/js/src/jit-test/tests/wasm/function-references/directives.txt
+++ b/js/src/jit-test/tests/wasm/function-references/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing --wasm-function-references; test-also=--wasm-compiler=baseline --wasm-function-references; include:wasm.js
+|jit-test| test-also=--setpref=wasm_gc=true; test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; include:wasm.js
diff --git a/js/src/jit-test/tests/wasm/function-references/disabled.js b/js/src/jit-test/tests/wasm/function-references/disabled.js
index 70a66a937d..6e67d2ce7f 100644
--- a/js/src/jit-test/tests/wasm/function-references/disabled.js
+++ b/js/src/jit-test/tests/wasm/function-references/disabled.js
@@ -1,8 +1,8 @@
-// |jit-test| skip-if: wasmFunctionReferencesEnabled()
+// |jit-test| skip-if: wasmGcEnabled()
const { CompileError, validate } = WebAssembly;
-const UNRECOGNIZED_OPCODE_OR_BAD_TYPE = /unrecognized opcode|bad type|\(ref T\) types not enabled/;
+const UNRECOGNIZED_OPCODE_OR_BAD_TYPE = /unrecognized opcode|bad type|gc not enabled/;
let simpleTests = [
`(module (func (param (ref 0)) (unreachable)))`,
diff --git a/js/src/jit-test/tests/wasm/function-references/nnl-test.js b/js/src/jit-test/tests/wasm/function-references/nnl-test.js
index 9436b970d5..df79a748a9 100644
--- a/js/src/jit-test/tests/wasm/function-references/nnl-test.js
+++ b/js/src/jit-test/tests/wasm/function-references/nnl-test.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmFunctionReferencesEnabled()
+// |jit-test| skip-if: !wasmGcEnabled()
// Generates combinations of different block types and operations for
// non-defaultable locals (local.set / .tee / .get).
diff --git a/js/src/jit-test/tests/wasm/function-references/non-nullable-table.js b/js/src/jit-test/tests/wasm/function-references/non-nullable-table.js
index 97ab04713c..4efdfcd642 100644
--- a/js/src/jit-test/tests/wasm/function-references/non-nullable-table.js
+++ b/js/src/jit-test/tests/wasm/function-references/non-nullable-table.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmFunctionReferencesEnabled()
+// |jit-test| skip-if: !wasmGcEnabled()
// non-null table initialization
var { get1, get2, get3, get4 } = wasmEvalText(`(module
@@ -40,25 +40,33 @@ for (let i of [
)`, /(type mismatch|table with non-nullable references requires initializer)/);
}
-var t1 = new WebAssembly.Table({initial: 10, element: {ref: 'func', nullable: false }}, sampleWasmFunction);
+let values = "10 funcref (ref.func $dummy)";
+let t1 = new wasmEvalText(`(module (func $dummy) (table (export "t1") ${values}))`).exports.t1;
assertEq(t1.get(2) != null, true);
-assertThrows(() => {
- new WebAssembly.Table({initial: 10, element: {ref: 'func', nullable: false }});
-});
-assertThrows(() => {
- new WebAssembly.Table({initial: 10, element: {ref: 'func', nullable: false }}, null);
-});
-var t2 = new WebAssembly.Table({initial: 6, maximum: 20, element: {ref: 'extern', nullable: false }}, {foo: "bar"});
-assertEq(t2.get(1).foo, "bar");
+wasmFailValidateText(`(module
+ (table $t 10 (ref func))
+)`, /table with non-nullable references requires initializer/);
+
+wasmFailValidateText(`
+(module
+ (func $dummy)
+ (table (export "t") 10 funcref (ref.null none))
+)`, /type mismatch/);
+
+const foo = "bar";
+const { t2, get } = wasmEvalText(`
+(module
+ (global (import "" "foo") externref)
+ (table (export "t2") 6 20 externref (global.get 0))
+)`, { "": { "foo": foo } }).exports;
+
+assertEq(t2.get(5), "bar");
assertThrows(() => { t2.get(7) });
-assertThrows(() => { t2.grow(9, null) });
+assertThrows(() => { t2.grow(30, null) });
t2.grow(8, {t: "test"});
-assertEq(t2.get(3).foo, "bar");
+assertEq(t2.get(3), "bar");
assertEq(t2.get(7).t, "test");
-assertThrows(() => {
- new WebAssembly.Table({initial: 10, element: {ref: 'extern', nullable: false }}, null);
-});
// Fail because tables come before globals in the binary format, so tables
// cannot refer to globals.
diff --git a/js/src/jit-test/tests/wasm/function-references/non-nullable.js b/js/src/jit-test/tests/wasm/function-references/non-nullable.js
index afe1d3cb43..653bbe7ab6 100644
--- a/js/src/jit-test/tests/wasm/function-references/non-nullable.js
+++ b/js/src/jit-test/tests/wasm/function-references/non-nullable.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmFunctionReferencesEnabled()
+// |jit-test| skip-if: !wasmGcEnabled()
// non-null values are subtype of null values
wasmValidateText(`(module
diff --git a/js/src/jit-test/tests/wasm/function-references/reftype-parse.js b/js/src/jit-test/tests/wasm/function-references/reftype-parse.js
index 643f753ec8..f4cca7cb74 100644
--- a/js/src/jit-test/tests/wasm/function-references/reftype-parse.js
+++ b/js/src/jit-test/tests/wasm/function-references/reftype-parse.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmFunctionReferencesEnabled()
+// |jit-test| skip-if: !wasmGcEnabled()
// RefType/ValueType as a simple string
const t01 = new WebAssembly.Table({element: 'funcref', initial: 3});
@@ -16,36 +16,6 @@ assertErrorMessage(
assertErrorMessage(
() => new WebAssembly.Table({element: true, initial: 1}),
TypeError, /bad value type/);
-
-// RefType/ValueType can be specified as an {ref: 'func', ...} object
-const t11 = new WebAssembly.Table({element: {ref: 'func', nullable: true}, initial: 3});
-const t12 = new WebAssembly.Table({element: {ref: 'extern', nullable: true}, initial: 3});
-const t13 = new WebAssembly.Table({element: {ref: 'extern', nullable: false}, initial: 3}, {});
-
-assertErrorMessage(
- () => new WebAssembly.Table({element: {ref: 'func', nullable: false}, initial: 1}, null),
- TypeError, /cannot pass null to non-nullable WebAssembly reference/);
-assertErrorMessage(
- () => new WebAssembly.Table({element: {ref: 'extern', nullable: false}, initial: 1}, null),
- TypeError, /cannot pass null to non-nullable WebAssembly reference/);
-
-assertErrorMessage(
- () => new WebAssembly.Table({element: {ref: 'bar', nullable: true}, initial: 1}),
- TypeError, /bad value type/);
-
-const g11 = new WebAssembly.Global({value: {ref: 'func', nullable: true}, mutable: true});
-const g12 = new WebAssembly.Global({value: {ref: 'extern', nullable: true}, mutable: true});
-const g13 = new WebAssembly.Global({value: {ref: 'extern', nullable: false}, mutable: true}, {});
-const g14 = new WebAssembly.Global({value: {ref: 'extern', nullable: false}, mutable: true});
-const g15 = new WebAssembly.Global({value: {ref: 'extern', nullable: false}, mutable: true}, void 0);
-
-assertErrorMessage(
- () => new WebAssembly.Global({value: {ref: 'func', nullable: false}, mutable: true}),
- TypeError, /cannot pass null to non-nullable WebAssembly reference/);
-assertErrorMessage(
- () => new WebAssembly.Global({value: {ref: 'extern', nullable: false}, mutable: true}, null),
- TypeError, /cannot pass null to non-nullable WebAssembly reference/);
-
assertErrorMessage(
() => new WebAssembly.Global({value: {ref: 'bar', nullable: true}, mutable: true}),
TypeError, /bad value type/);
diff --git a/js/src/jit-test/tests/wasm/gc/arrays.js b/js/src/jit-test/tests/wasm/gc/arrays.js
index b3f03151bb..cb61bb1b07 100644
--- a/js/src/jit-test/tests/wasm/gc/arrays.js
+++ b/js/src/jit-test/tests/wasm/gc/arrays.js
@@ -604,6 +604,51 @@ assertErrorMessage(() => wasmEvalText(`(module
},WebAssembly.RuntimeError, /index out of bounds/);
}
+// run: zero-length copies are allowed
+{
+ let { newData } = wasmEvalText(`(module
+ (type $a (array i8))
+ (data $d "1337")
+ (func (export "newData") (result eqref)
+ (; offset=0 into data ;) i32.const 0
+ (; size=0 into data ;) i32.const 0
+ array.new_data $a $d
+ )
+ )`).exports;
+ let arr = newData();
+ assertEq(wasmGcArrayLength(arr), 0);
+}
+
+// run: a zero-length copy from the end is allowed
+{
+ let { newData } = wasmEvalText(`(module
+ (type $a (array i8))
+ (data $d "1337")
+ (func (export "newData") (result eqref)
+ (; offset=4 into data ;) i32.const 4
+ (; size=0 into data ;) i32.const 0
+ array.new_data $a $d
+ )
+ )`).exports;
+ let arr = newData();
+ assertEq(wasmGcArrayLength(arr), 0);
+}
+
+// run: even empty data segments are allowed
+{
+ let { newData } = wasmEvalText(`(module
+ (type $a (array i8))
+ (data $d "")
+ (func (export "newData") (result eqref)
+ (; offset=0 into data ;) i32.const 0
+ (; size=0 into data ;) i32.const 0
+ array.new_data $a $d
+ )
+ )`).exports;
+ let arr = newData();
+ assertEq(wasmGcArrayLength(arr), 0);
+}
+
// run: resulting array is as expected
{
let { newData } = wasmEvalText(`(module
@@ -802,6 +847,59 @@ assertErrorMessage(() => wasmEvalText(`(module
},WebAssembly.RuntimeError, /index out of bounds/);
}
+// run: zero-length copies are allowed
+{
+ let { newElem, f1, f2, f3, f4 } = wasmEvalText(`(module
+ (type $a (array funcref))
+ (elem $e func $f1 $f2 $f3 $f4)
+ (func $f1 (export "f1"))
+ (func $f2 (export "f2"))
+ (func $f3 (export "f3"))
+ (func $f4 (export "f4"))
+ (func (export "newElem") (result eqref)
+ (; offset=0 into elem ;) i32.const 0
+ (; size=0 into elem ;) i32.const 0
+ array.new_elem $a $e
+ )
+ )`).exports;
+ let arr = newElem();
+ assertEq(wasmGcArrayLength(arr), 0);
+}
+
+// run: a zero-length copy from the end is allowed
+{
+ let { newElem, f1, f2, f3, f4 } = wasmEvalText(`(module
+ (type $a (array funcref))
+ (elem $e func $f1 $f2 $f3 $f4)
+ (func $f1 (export "f1"))
+ (func $f2 (export "f2"))
+ (func $f3 (export "f3"))
+ (func $f4 (export "f4"))
+ (func (export "newElem") (result eqref)
+ (; offset=4 into elem ;) i32.const 4
+ (; size=0 into elem ;) i32.const 0
+ array.new_elem $a $e
+ )
+ )`).exports;
+ let arr = newElem();
+ assertEq(wasmGcArrayLength(arr), 0);
+}
+
+// run: even empty elem segments are allowed
+{
+ let { newElem, f1, f2, f3, f4 } = wasmEvalText(`(module
+ (type $a (array funcref))
+ (elem $e func)
+ (func (export "newElem") (result eqref)
+ (; offset=0 into elem ;) i32.const 0
+ (; size=0 into elem ;) i32.const 0
+ array.new_elem $a $e
+ )
+ )`).exports;
+ let arr = newElem();
+ assertEq(wasmGcArrayLength(arr), 0);
+}
+
// run: resulting array is as expected
{
let { newElem, f1, f2, f3, f4 } = wasmEvalText(`(module
@@ -1130,6 +1228,29 @@ assertErrorMessage(() => wasmEvalText(`(module
},WebAssembly.RuntimeError, /index out of bounds/);
}
+// run: zeroes everywhere
+{
+ let { initData } = wasmEvalText(`(module
+ (type $a (array (mut i8)))
+ (data $d "")
+ (func (export "initData") (result eqref)
+ (local $arr (ref $a))
+ (local.set $arr (array.new_default $a (i32.const 0)))
+
+ (; array to init ;) local.get $arr
+ (; offset=0 into array ;) i32.const 0
+ (; offset=0 into data ;) i32.const 0
+ (; size=0 elements ;) i32.const 0
+ array.init_data $a $d
+
+ local.get $arr
+ )
+ (func data.drop 0) ;; force write of data count section, see https://github.com/bytecodealliance/wasm-tools/pull/1194
+ )`).exports;
+ let arr = initData();
+ assertEq(wasmGcArrayLength(arr), 0);
+}
+
// run: resulting array is as expected
{
let { initData } = wasmEvalText(`(module
@@ -1488,6 +1609,28 @@ assertErrorMessage(() => wasmEvalText(`(module
},WebAssembly.RuntimeError, /index out of bounds/);
}
+// run: zeroes everywhere
+{
+ let { initElem, f1, f2, f3, f4 } = wasmEvalText(`(module
+ (type $a (array (mut funcref)))
+ (elem $e func)
+ (func (export "initElem") (result eqref)
+ (local $arr (ref $a))
+ (local.set $arr (array.new_default $a (i32.const 0)))
+
+ (; array to init ;) local.get $arr
+ (; offset=0 into array ;) i32.const 0
+ (; offset=0 into elem ;) i32.const 0
+ (; size=0 into elem ;) i32.const 0
+ array.init_elem $a $e
+
+ local.get $arr
+ )
+ )`).exports;
+ let arr = initElem();
+ assertEq(wasmGcArrayLength(arr), 0);
+}
+
// run: resulting array is as expected
{
let { initElem, f1, f2, f3, f4 } = wasmEvalText(`(module
diff --git a/js/src/jit-test/tests/wasm/gc/binary.js b/js/src/jit-test/tests/wasm/gc/binary.js
index 1ef4a586a8..e16af2bfbf 100644
--- a/js/src/jit-test/tests/wasm/gc/binary.js
+++ b/js/src/jit-test/tests/wasm/gc/binary.js
@@ -2,14 +2,16 @@
load(libdir + "wasm-binary.js");
-const v2vSig = {args:[], ret:VoidCode};
-const v2vSigSection = sigSection([v2vSig]);
-
function checkInvalid(body, errorMessage) {
assertErrorMessage(() => new WebAssembly.Module(
- moduleWithSections([v2vSigSection, declSection([0]), bodySection([body])])),
- WebAssembly.CompileError,
- errorMessage);
+ moduleWithSections([
+ typeSection([
+ { kind: FuncCode, args: [], ret: [] },
+ ]),
+ declSection([0]),
+ bodySection([body]),
+ ])
+ ), WebAssembly.CompileError, errorMessage);
}
const invalidRefBlockType = funcBody({locals:[], body:[
@@ -23,7 +25,7 @@ checkInvalid(invalidRefBlockType, /heap type/);
const invalidTooBigRefType = funcBody({locals:[], body:[
BlockCode,
OptRefCode,
- varU32(1000000),
+ ...varU32(1000000),
EndCode,
]});
checkInvalid(invalidTooBigRefType, /heap type/);
diff --git a/js/src/jit-test/tests/wasm/gc/bug-1843295.js b/js/src/jit-test/tests/wasm/gc/bug-1843295.js
index 19a32263f6..765a9000a1 100644
--- a/js/src/jit-test/tests/wasm/gc/bug-1843295.js
+++ b/js/src/jit-test/tests/wasm/gc/bug-1843295.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmGcEnabled(); --wasm-test-serialization
+// |jit-test| skip-if: !wasmGcEnabled(); --setpref=wasm_test_serialization=true
wasmEvalText(`(module
(type (sub (array (mut i32))))
diff --git a/js/src/jit-test/tests/wasm/gc/bug-1845436.js b/js/src/jit-test/tests/wasm/gc/bug-1845436.js
index a79c22d9a1..6ea8070f4d 100644
--- a/js/src/jit-test/tests/wasm/gc/bug-1845436.js
+++ b/js/src/jit-test/tests/wasm/gc/bug-1845436.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmGcEnabled(); --wasm-test-serialization
+// |jit-test| skip-if: !wasmGcEnabled(); --setpref=wasm_test_serialization=true
// Test that serialization doesn't create a forward reference to the third
// struct when serializing the reference to the first struct, which is
diff --git a/js/src/jit-test/tests/wasm/gc/bug-1854007.js b/js/src/jit-test/tests/wasm/gc/bug-1854007.js
index c9d6b25369..d3e5afa9cd 100644
--- a/js/src/jit-test/tests/wasm/gc/bug-1854007.js
+++ b/js/src/jit-test/tests/wasm/gc/bug-1854007.js
@@ -1,4 +1,4 @@
-// |jit-test| test-also=--wasm-test-serialization; skip-if: !wasmGcEnabled()
+// |jit-test| test-also=--setpref=wasm_test_serialization=true; skip-if: !wasmGcEnabled()
let {run} = wasmEvalText(`(module
(rec (type $$t1 (func (result (ref null $$t1)))))
diff --git a/js/src/jit-test/tests/wasm/gc/bug-1879096.js b/js/src/jit-test/tests/wasm/gc/bug-1879096.js
new file mode 100644
index 0000000000..e71d2bac27
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/bug-1879096.js
@@ -0,0 +1,65 @@
+// |jit-test| test-also=--setpref=wasm_test_serialization; skip-if: !wasmGcEnabled()
+
+// Conditional branch instructions need to rewrite their stack types according
+// to the destination label types. This loses information but is mandated by
+// the spec.
+
+// br_if
+wasmFailValidateText(`(module
+ (func (result anyref)
+ ref.null array ;; stack: [arrayref]
+ ref.null struct ;; stack: [arrayref structref]
+ i32.const 0 ;; stack: [arrayref structref i32]
+ br_if 0 ;; stack: [arrayref anyref]
+ ref.eq ;; should fail (anyref is not eq)
+ unreachable
+ )
+)`, /type mismatch: expression has type anyref but expected eqref/);
+
+// br_on_null
+wasmFailValidateText(`(module
+ (func (param externref) (result anyref)
+ ref.null array ;; stack: [arrayref]
+ local.get 0 ;; stack: [arrayref externref]
+ br_on_null 0 ;; stack: [anyref (ref extern)]
+ drop ;; stack: [anyref]
+ array.len ;; should fail
+ unreachable
+ )
+)`, /type mismatch: expression has type anyref but expected arrayref/);
+
+// br_on_non_null
+wasmFailValidateText(`(module
+ (func (param externref) (result anyref (ref extern))
+ ref.null array ;; stack: [arrayref]
+ ref.null struct ;; stack: [arrayref structref]
+ local.get 0 ;; stack: [arrayref structref externref]
+ br_on_non_null 0 ;; stack: [arrayref anyref]
+ ref.eq ;; should fail (anyref is not eq)
+ unreachable
+ )
+)`, /type mismatch: expression has type anyref but expected eqref/);
+
+// br_on_cast
+wasmFailValidateText(`(module
+ (type $s (struct))
+ (func (result anyref (ref $s))
+ ref.null array ;; stack: [arrayref]
+ ref.null struct ;; stack: [arrayref structref]
+ br_on_cast 0 structref (ref $s) ;; stack: [anyref structref]
+ ref.eq ;; should fail (anyref is not eq)
+ unreachable
+ )
+)`, /type mismatch: expression has type anyref but expected eqref/);
+
+// br_on_cast_fail
+wasmFailValidateText(`(module
+ (type $s (struct))
+ (func (result anyref anyref)
+ ref.null array ;; stack: [arrayref]
+ ref.null struct ;; stack: [arrayref structref]
+ br_on_cast_fail 0 structref (ref $s) ;; stack: [anyref (ref $s)]
+ ref.eq ;; should fail (anyref is not eq)
+ unreachable
+ )
+)`, /type mismatch: expression has type anyref but expected eqref/);
diff --git a/js/src/jit-test/tests/wasm/gc/call-indirect-subtyping.js b/js/src/jit-test/tests/wasm/gc/call-indirect-subtyping.js
index 4301621a8c..d83f2ed624 100644
--- a/js/src/jit-test/tests/wasm/gc/call-indirect-subtyping.js
+++ b/js/src/jit-test/tests/wasm/gc/call-indirect-subtyping.js
@@ -1,4 +1,4 @@
-// |jit-test| test-also=--wasm-tail-calls; skip-if: !wasmGcEnabled()
+// |jit-test| test-also=--setpref=wasm_tail_calls=true; skip-if: !wasmGcEnabled()
// Test that call_indirect will respect subtyping by defining a bunch of types
// and checking every combination of (expected, actual) type.
diff --git a/js/src/jit-test/tests/wasm/gc/directives.txt b/js/src/jit-test/tests/wasm/gc/directives.txt
index e6d978cc44..293724e57a 100644
--- a/js/src/jit-test/tests/wasm/gc/directives.txt
+++ b/js/src/jit-test/tests/wasm/gc/directives.txt
@@ -1 +1 @@
-|jit-test| --wasm-gc; test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; include:wasm.js
+|jit-test| test-also=--setpref=wasm_gc=true; test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; include:wasm.js
diff --git a/js/src/jit-test/tests/wasm/gc/disabled.js b/js/src/jit-test/tests/wasm/gc/disabled.js
index 791c6ff25e..206d32b1c4 100644
--- a/js/src/jit-test/tests/wasm/gc/disabled.js
+++ b/js/src/jit-test/tests/wasm/gc/disabled.js
@@ -2,7 +2,7 @@
const { CompileError, validate } = WebAssembly;
-const UNRECOGNIZED_OPCODE_OR_BAD_TYPE = /unrecognized opcode|(Structure|reference|gc) types not enabled|invalid heap type|invalid inline block type|bad type|\(ref T\) types not enabled|Invalid type|invalid function type/;
+const UNRECOGNIZED_OPCODE_OR_BAD_TYPE = /unrecognized opcode|gc not enabled|invalid heap type|invalid inline block type|bad type|Invalid type|invalid function type/;
let simpleTests = [
"(module (func (drop (ref.null eq))))",
diff --git a/js/src/jit-test/tests/wasm/gc/ion-and-baseline.js b/js/src/jit-test/tests/wasm/gc/ion-and-baseline.js
index 5a4951c585..2c67bbce8b 100644
--- a/js/src/jit-test/tests/wasm/gc/ion-and-baseline.js
+++ b/js/src/jit-test/tests/wasm/gc/ion-and-baseline.js
@@ -10,7 +10,7 @@
// actually testing something here.
//
// Some logging with printf confirms that refmod is baseline-compiled and
-// nonrefmod is ion-compiled at present, with --wasm-gc enabled.
+// nonrefmod is ion-compiled at present, with --setpref=wasm_gc=true enabled.
var refmod = new WebAssembly.Module(wasmTextToBinary(
`(module
diff --git a/js/src/jit-test/tests/wasm/gc/limits.js b/js/src/jit-test/tests/wasm/gc/limits.js
deleted file mode 100644
index e6f21b5d6b..0000000000
--- a/js/src/jit-test/tests/wasm/gc/limits.js
+++ /dev/null
@@ -1,69 +0,0 @@
-// |jit-test| skip-if: !wasmGcEnabled() || getBuildConfiguration("tsan")
-
-// This test has a timeout on TSAN configurations due to the large
-// allocations.
-
-// Limit of 1 million recursion groups
-wasmValidateText(`(module
- ${`(rec (type (func)))`.repeat(1_000_000)}
- )`);
-wasmFailValidateText(`(module
- ${`(rec (type (func)))`.repeat(1_000_001)}
- )`, /too many/);
-
-// Limit of 1 million types (across all recursion groups)
-wasmValidateText(`(module
- (rec ${`(type (func))`.repeat(1_000_000)})
- )`);
-wasmValidateText(`(module
- (rec ${`(type (func))`.repeat(500_000)})
- (rec ${`(type (func))`.repeat(500_000)})
- )`);
-wasmFailValidateText(`(module
- (rec ${`(type (func))`.repeat(1_000_001)})
- )`, /too many/);
-wasmFailValidateText(`(module
- (rec ${`(type (func))`.repeat(500_000)})
- (rec ${`(type (func))`.repeat(500_001)})
- )`, /too many/);
-
-// Limit of subtyping hierarchy 63 deep
-function testSubtypingModule(depth) {
- let types = '(type (sub (func)))';
- for (let i = 1; i <= depth; i++) {
- types += `(type (sub ${i - 1} (func)))`;
- }
- return `(module
- ${types}
- )`;
-}
-wasmValidateText(testSubtypingModule(63));
-wasmFailValidateText(testSubtypingModule(64), /too deep/);
-
-// Limit of 10_000 struct fields
-wasmFailValidateText(`(module
- (type (struct ${'(field i64)'.repeat(10_001)}))
-)`, /too many/);
-
-{
- let {makeLargeStructDefault, makeLargeStruct} = wasmEvalText(`(module
- (type $s (struct ${'(field i64)'.repeat(10_000)}))
- (func (export "makeLargeStructDefault") (result anyref)
- struct.new_default $s
- )
- (func (export "makeLargeStruct") (result anyref)
- ${'i64.const 0 '.repeat(10_000)}
- struct.new $s
- )
- )`).exports;
- let largeStructDefault = makeLargeStructDefault();
- let largeStruct = makeLargeStruct();
-}
-
-// array.new_fixed has limit of 10_000 operands
-wasmFailValidateText(`(module
- (type $a (array i32))
- (func
- array.new_fixed $a 10001
- )
-)`, /too many/);
diff --git a/js/src/jit-test/tests/wasm/gc/limits/array-new-fixed.js b/js/src/jit-test/tests/wasm/gc/limits/array-new-fixed.js
new file mode 100644
index 0000000000..4b0600c724
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/limits/array-new-fixed.js
@@ -0,0 +1,9 @@
+// |jit-test| --setpref=wasm_gc; include:wasm.js;
+
+// array.new_fixed has limit of 10_000 operands
+wasmFailValidateText(`(module
+ (type $a (array i32))
+ (func
+ array.new_fixed $a 10001
+ )
+)`, /too many/);
diff --git a/js/src/jit-test/tests/wasm/gc/limits/load-mod.js b/js/src/jit-test/tests/wasm/gc/limits/load-mod.js
new file mode 100644
index 0000000000..cd972ceb65
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/limits/load-mod.js
@@ -0,0 +1,5 @@
+// Files for some of these tests are pre-generated and located in js/src/jit-test/lib/gen.
+// There you will also find the script to update these files.
+function loadMod(name) {
+ return decompressLZ4(os.file.readFile(libdir + "gen/" + name, "binary").buffer)
+}
diff --git a/js/src/jit-test/tests/wasm/gc/limits/rec-groups-1.js b/js/src/jit-test/tests/wasm/gc/limits/rec-groups-1.js
new file mode 100644
index 0000000000..489bf89cd4
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/limits/rec-groups-1.js
@@ -0,0 +1,6 @@
+// |jit-test| --setpref=wasm_gc; include:wasm.js;
+
+loadRelativeToScript("load-mod.js");
+
+// Limit of 1 million recursion groups
+wasmValidateBinary(loadMod("wasm-gc-limits-r1M-t1.wasm"));
diff --git a/js/src/jit-test/tests/wasm/gc/limits/rec-groups-2.js b/js/src/jit-test/tests/wasm/gc/limits/rec-groups-2.js
new file mode 100644
index 0000000000..40c020c4b5
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/limits/rec-groups-2.js
@@ -0,0 +1,6 @@
+// |jit-test| --setpref=wasm_gc; include:wasm.js;
+
+loadRelativeToScript("load-mod.js");
+
+// Limit of 1 million recursion groups
+wasmFailValidateBinary(loadMod("wasm-gc-limits-r1M1-t1.wasm"), /too many/);
diff --git a/js/src/jit-test/tests/wasm/gc/limits/struct-fields.js b/js/src/jit-test/tests/wasm/gc/limits/struct-fields.js
new file mode 100644
index 0000000000..ae60f38d57
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/limits/struct-fields.js
@@ -0,0 +1,11 @@
+// |jit-test| --setpref=wasm_gc; test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; include:wasm.js;
+
+loadRelativeToScript("load-mod.js");
+
+// Limit of 10_000 struct fields
+wasmFailValidateBinary(loadMod("wasm-gc-limits-s10K1.wasm"), /too many/);
+{
+ let {makeLargeStructDefault, makeLargeStruct} = wasmEvalBinary(loadMod("wasm-gc-limits-s10K.wasm")).exports;
+ let largeStructDefault = makeLargeStructDefault();
+ let largeStruct = makeLargeStruct();
+}
diff --git a/js/src/jit-test/tests/wasm/gc/limits/subtyping-depth.js b/js/src/jit-test/tests/wasm/gc/limits/subtyping-depth.js
new file mode 100644
index 0000000000..2d70215ee9
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/limits/subtyping-depth.js
@@ -0,0 +1,13 @@
+// |jit-test| --setpref=wasm_gc; include:wasm.js; include: wasm-binary.js;
+
+// Limit of subtyping hierarchy 63 deep
+function moduleSubtypingDepth(depth) {
+ let types = [];
+ types.push({final: false, kind: FuncCode, args: [], ret: []});
+ for (let i = 1; i <= depth; i++) {
+ types.push({final: false, sub: i - 1, kind: FuncCode, args: [], ret: []});
+ }
+ return moduleWithSections([typeSection(types)]);
+}
+wasmValidateBinary(moduleSubtypingDepth(63));
+wasmFailValidateBinary(moduleSubtypingDepth(64), /too deep/);
diff --git a/js/src/jit-test/tests/wasm/gc/limits/types-1.js b/js/src/jit-test/tests/wasm/gc/limits/types-1.js
new file mode 100644
index 0000000000..c097907e79
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/limits/types-1.js
@@ -0,0 +1,6 @@
+// |jit-test| --setpref=wasm_gc; include:wasm.js;
+
+loadRelativeToScript("load-mod.js");
+
+// Limit of 1 million types (across all recursion groups)
+wasmValidateBinary(loadMod("wasm-gc-limits-r1-t1M.wasm"));
diff --git a/js/src/jit-test/tests/wasm/gc/limits/types-2.js b/js/src/jit-test/tests/wasm/gc/limits/types-2.js
new file mode 100644
index 0000000000..5e81bdf6ab
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/limits/types-2.js
@@ -0,0 +1,6 @@
+// |jit-test| --setpref=wasm_gc; include:wasm.js;
+
+loadRelativeToScript("load-mod.js");
+
+// Limit of 1 million types (across all recursion groups)
+wasmValidateBinary(loadMod("wasm-gc-limits-r2-t500K.wasm"));
diff --git a/js/src/jit-test/tests/wasm/gc/limits/types-3.js b/js/src/jit-test/tests/wasm/gc/limits/types-3.js
new file mode 100644
index 0000000000..e9effa4bfa
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/limits/types-3.js
@@ -0,0 +1,6 @@
+// |jit-test| --setpref=wasm_gc; include:wasm.js;
+
+loadRelativeToScript("load-mod.js");
+
+// Limit of 1 million types (across all recursion groups)
+wasmFailValidateBinary(loadMod("wasm-gc-limits-r1-t1M1.wasm"), /too many/);
diff --git a/js/src/jit-test/tests/wasm/gc/limits/types-4.js b/js/src/jit-test/tests/wasm/gc/limits/types-4.js
new file mode 100644
index 0000000000..dd413ed4e9
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/limits/types-4.js
@@ -0,0 +1,6 @@
+// |jit-test| --setpref=wasm_gc; include:wasm.js;
+
+loadRelativeToScript("load-mod.js");
+
+// Limit of 1 million types (across all recursion groups)
+wasmFailValidateBinary(loadMod("wasm-gc-limits-r2-t500K1.wasm"), /too many/);
diff --git a/js/src/jit-test/tests/wasm/gc/ref.js b/js/src/jit-test/tests/wasm/gc/ref.js
index a55b0c8f02..2bf76daf52 100644
--- a/js/src/jit-test/tests/wasm/gc/ref.js
+++ b/js/src/jit-test/tests/wasm/gc/ref.js
@@ -173,7 +173,7 @@ assertErrorMessage(() => wasmEvalText(`
`),
WebAssembly.CompileError, /expression has type \(ref null.*\) but expected \(ref null.*\)/);
-if (!wasmFunctionReferencesEnabled()) {
+if (!wasmGcEnabled()) {
// Ref type can't reference a function type
assertErrorMessage(() => wasmEvalText(`
diff --git a/js/src/jit-test/tests/wasm/gc/regress-1754701.js b/js/src/jit-test/tests/wasm/gc/regress-1754701.js
index 656aa5d625..1a2307fa87 100644
--- a/js/src/jit-test/tests/wasm/gc/regress-1754701.js
+++ b/js/src/jit-test/tests/wasm/gc/regress-1754701.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmGcEnabled() || !('oomTest' in this)
+// |jit-test| skip-if: !wasmGcEnabled()
let { testArray, testStructInline, testStructOutline } = wasmEvalText(`
(module
diff --git a/js/src/jit-test/tests/wasm/gc/regress-1884767.js b/js/src/jit-test/tests/wasm/gc/regress-1884767.js
new file mode 100644
index 0000000000..54a168d657
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/regress-1884767.js
@@ -0,0 +1,13 @@
+// |jit-test| skip-if: !wasmGcEnabled()
+
+const { test } = wasmEvalText(`(module
+ (type $a (array i32))
+ (func (export "test") (result anyref)
+ try (result anyref)
+ (array.new_default $a (i32.const 999999999))
+ catch_all
+ unreachable
+ end
+ )
+)`).exports;
+assertErrorMessage(() => test(), WebAssembly.RuntimeError, /too many array elements/);
diff --git a/js/src/jit-test/tests/wasm/gc/structs.js b/js/src/jit-test/tests/wasm/gc/structs.js
index 0ff0cbd4b4..15dab873e9 100644
--- a/js/src/jit-test/tests/wasm/gc/structs.js
+++ b/js/src/jit-test/tests/wasm/gc/structs.js
@@ -665,46 +665,6 @@ assertErrorMessage(() => new WebAssembly.Module(bad),
let exports = wasmEvalText(txt).exports;
}
-//////////////////////////////////////////////////////////////////////////////
-//
-// Checks for requests to create structs with more than MaxStructFields, where
-// MaxStructFields == 1000.
-
-function structNewOfManyFields(numFields) {
- let defString = "(type $s (struct ";
- for (i = 0; i < numFields; i++) {
- defString += "(field i32) ";
- }
- defString += "))";
-
- let insnString = "(struct.new $s ";
- for (i = 0; i < numFields; i++) {
- insnString += "(i32.const 1337) ";
- }
- insnString += ")";
-
- return "(module " +
- defString +
- " (func (export \"create\") (result eqref) " +
- insnString +
- "))";
-}
-
-{
- // 10_000 fields is allowable
- let exports = wasmEvalText(structNewOfManyFields(10000)).exports;
- let s = exports.create();
- assertEq(s, s);
-}
-{
- // but 10_001 is not
- assertErrorMessage(() => wasmEvalText(structNewOfManyFields(10001)),
- WebAssembly.CompileError,
- /too many fields in struct/);
-}
-
-// FIXME: also check struct.new_default, once it is available in both compilers.
-
// Exercise stack maps and GC
{
// Zeal will cause us to allocate structs via instance call, requiring live registers
diff --git a/js/src/jit-test/tests/wasm/globals.js b/js/src/jit-test/tests/wasm/globals.js
index 5a6d83e348..bbba095bb9 100644
--- a/js/src/jit-test/tests/wasm/globals.js
+++ b/js/src/jit-test/tests/wasm/globals.js
@@ -1,5 +1,3 @@
-// |jit-test| test-also=--wasm-extended-const; test-also=--no-wasm-extended-const
-
const { Instance, Module, LinkError } = WebAssembly;
// Locally-defined globals
@@ -44,68 +42,65 @@ testInner('i32', 13, 37, x => x|0);
testInner('f32', 13.37, 0.1989, Math.fround);
testInner('f64', 13.37, 0.1989, x => +x);
-// Extended const stuff
-if (wasmExtendedConstEnabled()) {
- // Basic global shenanigans
- {
- const module = wasmEvalText(`(module
- ;; -2 * (5 - (-10 + 20)) = 10
- (global i32 (i32.mul (i32.const -2) (i32.sub (i32.const 5) (i32.add (i32.const -10) (i32.const 20)))))
- ;; ((1 + 2) - (3 * 4)) = -9
- (global i64 (i64.sub (i64.add (i64.const 1) (i64.const 2)) (i64.mul (i64.const 3) (i64.const 4))))
-
- (func (export "get0") (result i32) global.get 0)
- (func (export "get1") (result i64) global.get 1)
- )`).exports;
-
- assertEq(module.get0(), 10);
- assertEq(module.get1(), -9n);
- }
+// Basic global shenanigans
+{
+ const module = wasmEvalText(`(module
+ ;; -2 * (5 - (-10 + 20)) = 10
+ (global i32 (i32.mul (i32.const -2) (i32.sub (i32.const 5) (i32.add (i32.const -10) (i32.const 20)))))
+ ;; ((1 + 2) - (3 * 4)) = -9
+ (global i64 (i64.sub (i64.add (i64.const 1) (i64.const 2)) (i64.mul (i64.const 3) (i64.const 4))))
+
+ (func (export "get0") (result i32) global.get 0)
+ (func (export "get1") (result i64) global.get 1)
+ )`).exports;
- // Example use of dynamic linking
- {
- // Make a memory for two dynamically-linked modules to share. Each module gets five pages.
- const mem = new WebAssembly.Memory({ initial: 15, maximum: 15 });
-
- const mod1 = new WebAssembly.Module(wasmTextToBinary(`(module
- (memory (import "env" "memory") 15 15)
- (global $memBase (import "env" "__memory_base") i32)
- (data (offset (global.get $memBase)) "Hello from module 1.")
- (data (offset (i32.add (global.get $memBase) (i32.const 65536))) "Goodbye from module 1.")
- )`));
- const instance1 = new WebAssembly.Instance(mod1, {
- env: {
- memory: mem,
- __memory_base: 65536 * 5, // this module's memory starts at page 5
- },
- });
-
- const mod2 = new WebAssembly.Module(wasmTextToBinary(`(module
- (memory (import "env" "memory") 15 15)
- (global $memBase (import "env" "__memory_base") i32)
- (data (offset (global.get $memBase)) "Hello from module 2.")
- (data (offset (i32.add (global.get $memBase) (i32.const 65536))) "Goodbye from module 2.")
- )`));
- const instance2 = new WebAssembly.Instance(mod2, {
- env: {
- memory: mem,
- __memory_base: 65536 * 10, // this module's memory starts at page 10
- },
- });
-
- // All four strings should now be present in the memory.
-
- function assertStringInMem(mem, str, addr) {
- const bytes = new Uint8Array(mem.buffer).slice(addr, addr + str.length);
- let memStr = String.fromCharCode(...bytes);
- assertEq(memStr, str);
- }
+ assertEq(module.get0(), 10);
+ assertEq(module.get1(), -9n);
+}
- assertStringInMem(mem, "Hello from module 1.", 65536 * 5);
- assertStringInMem(mem, "Goodbye from module 1.", 65536 * 6);
- assertStringInMem(mem, "Hello from module 2.", 65536 * 10);
- assertStringInMem(mem, "Goodbye from module 2.", 65536 * 11);
+// Example use of dynamic linking
+{
+ // Make a memory for two dynamically-linked modules to share. Each module gets five pages.
+ const mem = new WebAssembly.Memory({ initial: 15, maximum: 15 });
+
+ const mod1 = new WebAssembly.Module(wasmTextToBinary(`(module
+ (memory (import "env" "memory") 15 15)
+ (global $memBase (import "env" "__memory_base") i32)
+ (data (offset (global.get $memBase)) "Hello from module 1.")
+ (data (offset (i32.add (global.get $memBase) (i32.const 65536))) "Goodbye from module 1.")
+ )`));
+ const instance1 = new WebAssembly.Instance(mod1, {
+ env: {
+ memory: mem,
+ __memory_base: 65536 * 5, // this module's memory starts at page 5
+ },
+ });
+
+ const mod2 = new WebAssembly.Module(wasmTextToBinary(`(module
+ (memory (import "env" "memory") 15 15)
+ (global $memBase (import "env" "__memory_base") i32)
+ (data (offset (global.get $memBase)) "Hello from module 2.")
+ (data (offset (i32.add (global.get $memBase) (i32.const 65536))) "Goodbye from module 2.")
+ )`));
+ const instance2 = new WebAssembly.Instance(mod2, {
+ env: {
+ memory: mem,
+ __memory_base: 65536 * 10, // this module's memory starts at page 10
+ },
+ });
+
+ // All four strings should now be present in the memory.
+
+ function assertStringInMem(mem, str, addr) {
+ const bytes = new Uint8Array(mem.buffer).slice(addr, addr + str.length);
+ let memStr = String.fromCharCode(...bytes);
+ assertEq(memStr, str);
}
+
+ assertStringInMem(mem, "Hello from module 1.", 65536 * 5);
+ assertStringInMem(mem, "Goodbye from module 1.", 65536 * 6);
+ assertStringInMem(mem, "Hello from module 2.", 65536 * 10);
+ assertStringInMem(mem, "Goodbye from module 2.", 65536 * 11);
}
// Semantic errors.
diff --git a/js/src/jit-test/tests/wasm/import-export.js b/js/src/jit-test/tests/wasm/import-export.js
index 4845269f24..ecd02ca143 100644
--- a/js/src/jit-test/tests/wasm/import-export.js
+++ b/js/src/jit-test/tests/wasm/import-export.js
@@ -401,9 +401,10 @@ wasmFailValidateText('(module (export "a" (memory 0)))', /exported memory index
wasmFailValidateText('(module (export "a" (table 0)))', /exported table index out of bounds/);
// Default memory/table rules
-
-wasmFailValidateText('(module (import "a" "b" (memory 1 1)) (memory 1 1))', /already have default memory/);
-wasmFailValidateText('(module (import "a" "b" (memory 1 1)) (import "x" "y" (memory 2 2)))', /already have default memory/);
+if (!wasmMultiMemoryEnabled()) {
+ wasmFailValidateText('(module (import "a" "b" (memory 1 1)) (memory 1 1))', /already have default memory/);
+ wasmFailValidateText('(module (import "a" "b" (memory 1 1)) (import "x" "y" (memory 2 2)))', /already have default memory/);
+}
// Data segments on imports
diff --git a/js/src/jit-test/tests/wasm/memory-control/directives.txt b/js/src/jit-test/tests/wasm/memory-control/directives.txt
index 1092e20d8a..a3daf19d97 100644
--- a/js/src/jit-test/tests/wasm/memory-control/directives.txt
+++ b/js/src/jit-test/tests/wasm/memory-control/directives.txt
@@ -1 +1 @@
-|jit-test| include:wasm.js; test-also=--wasm-compiler=optimizing --wasm-memory-control; test-also=--wasm-compiler=baseline --wasm-memory-control; test-also=--wasm-compiler=optimizing --no-wasm-memory64 --wasm-memory-control; test-also=--wasm-compiler=baseline --no-wasm-memory64 --wasm-memory-control
+|jit-test| include:wasm.js; test-also=--wasm-compiler=optimizing --setpref=wasm_memory_control=true; test-also=--wasm-compiler=baseline --setpref=wasm_memory_control=true; test-also=--wasm-compiler=optimizing --setpref=wasm_memory64=false --setpref=wasm_memory_control=true; test-also=--wasm-compiler=baseline --setpref=wasm_memory64=false --setpref=wasm_memory_control=true
diff --git a/js/src/jit-test/tests/wasm/memory-control/memory-discard.js b/js/src/jit-test/tests/wasm/memory-control/memory-discard.js
index 87f2ae625a..8d2c63ad8a 100644
--- a/js/src/jit-test/tests/wasm/memory-control/memory-discard.js
+++ b/js/src/jit-test/tests/wasm/memory-control/memory-discard.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmMemoryControlEnabled(); test-also=--wasm-memory64; test-also=--no-wasm-memory64
+// |jit-test| skip-if: !wasmMemoryControlEnabled(); test-also=--setpref=wasm_memory64=true; test-also=--setpref=wasm_memory64=false
// This tests memory.discard and WebAssembly.Memory.discard() by placing data
// (the alphabet) halfway across a page boundary, then discarding the first
diff --git a/js/src/jit-test/tests/wasm/memory64/directives.txt b/js/src/jit-test/tests/wasm/memory64/directives.txt
index 98d92a7afc..5fc5acabf0 100644
--- a/js/src/jit-test/tests/wasm/memory64/directives.txt
+++ b/js/src/jit-test/tests/wasm/memory64/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; include:wasm.js; skip-if: !wasmMemory64Enabled()
+|jit-test| test-also=--setpref=wasm_memory64=true; test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; include:wasm.js; skip-if: !wasmMemory64Enabled()
diff --git a/js/src/jit-test/tests/wasm/multi-memory/directives.txt b/js/src/jit-test/tests/wasm/multi-memory/directives.txt
index 44374e8ceb..2ed5e3dd2a 100644
--- a/js/src/jit-test/tests/wasm/multi-memory/directives.txt
+++ b/js/src/jit-test/tests/wasm/multi-memory/directives.txt
@@ -1,2 +1,2 @@
-|jit-test| --wasm-multi-memory; test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; include:wasm.js; skip-if: !wasmMultiMemoryEnabled()
+|jit-test| --setpref=wasm_multi_memory=true; test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; include:wasm.js; skip-if: !wasmMultiMemoryEnabled()
diff --git a/js/src/jit-test/tests/wasm/multi-value/directives.txt b/js/src/jit-test/tests/wasm/multi-value/directives.txt
index f636e648ec..b5a203297f 100644
--- a/js/src/jit-test/tests/wasm/multi-value/directives.txt
+++ b/js/src/jit-test/tests/wasm/multi-value/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); include:wasm.js
+|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); include:wasm.js
diff --git a/js/src/jit-test/tests/wasm/oom/breakpoints.js b/js/src/jit-test/tests/wasm/oom/breakpoints.js
index a90f97739a..eabb660e0f 100644
--- a/js/src/jit-test/tests/wasm/oom/breakpoints.js
+++ b/js/src/jit-test/tests/wasm/oom/breakpoints.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
var dbgGlobal = newGlobal({newCompartment: true});
var dbg = new dbgGlobal.Debugger();
dbg.addDebuggee(this);
diff --git a/js/src/jit-test/tests/wasm/oom/exports.js b/js/src/jit-test/tests/wasm/oom/exports.js
index 391850fda7..f75ada6199 100644
--- a/js/src/jit-test/tests/wasm/oom/exports.js
+++ b/js/src/jit-test/tests/wasm/oom/exports.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => {
let text = `(module
(type (func (param i32) (result i32)))
diff --git a/js/src/jit-test/tests/wasm/oom/jsapi-prototype.js b/js/src/jit-test/tests/wasm/oom/jsapi-prototype.js
index 4888a70db0..f5f319a8c5 100644
--- a/js/src/jit-test/tests/wasm/oom/jsapi-prototype.js
+++ b/js/src/jit-test/tests/wasm/oom/jsapi-prototype.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
oomTest(() => {
let memory = new WebAssembly.Memory({initial: 0});
assertEq(Object.getPrototypeOf(memory), WebAssembly.Memory.prototype, "prototype");
diff --git a/js/src/jit-test/tests/wasm/ref-types/directives.txt b/js/src/jit-test/tests/wasm/ref-types/directives.txt
index ed90b6bd5f..f9c573abef 100644
--- a/js/src/jit-test/tests/wasm/ref-types/directives.txt
+++ b/js/src/jit-test/tests/wasm/ref-types/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); test-also=--wasm-test-serialization; include:wasm.js
+|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); test-also=--setpref=wasm_test_serialization=true; include:wasm.js
diff --git a/js/src/jit-test/tests/wasm/regress/bug1708124.js b/js/src/jit-test/tests/wasm/regress/bug1708124.js
index dc035b02e5..3cccd59dc0 100644
--- a/js/src/jit-test/tests/wasm/regress/bug1708124.js
+++ b/js/src/jit-test/tests/wasm/regress/bug1708124.js
@@ -1,4 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
// Ensure that trap reporting mechanism doesn't crash under OOM conditions.
oomTest(
diff --git a/js/src/jit-test/tests/wasm/regress/bug1839065.js b/js/src/jit-test/tests/wasm/regress/bug1839065.js
index ecc4c1b90c..e3d84589b6 100644
--- a/js/src/jit-test/tests/wasm/regress/bug1839065.js
+++ b/js/src/jit-test/tests/wasm/regress/bug1839065.js
@@ -1,4 +1,4 @@
-// |jit-test| --wasm-gc; --wasm-function-references; skip-if: !wasmGcEnabled() || !wasmFunctionReferencesEnabled()
+// |jit-test| --setpref=wasm_gc=true; skip-if: !wasmGcEnabled()
function wasmEvalText(str, imports) {
let binary = wasmTextToBinary(str);
m = new WebAssembly.Module(binary);
diff --git a/js/src/jit-test/tests/wasm/regress/bug1839142.js b/js/src/jit-test/tests/wasm/regress/bug1839142.js
index 339c1d7cb9..49ca0117e9 100644
--- a/js/src/jit-test/tests/wasm/regress/bug1839142.js
+++ b/js/src/jit-test/tests/wasm/regress/bug1839142.js
@@ -1,4 +1,4 @@
-// |jit-test| --wasm-gc; --wasm-function-references; skip-if: !wasmSimdEnabled() || !wasmGcEnabled() || !wasmFunctionReferencesEnabled()
+// |jit-test| --setpref=wasm_gc=true; skip-if: !wasmSimdEnabled() || !wasmGcEnabled()
var wasm_code = new Uint8Array([0,97,115,109,1,0,0,0,1,152,128,128,128,0,4,80,0,95,1,126,0,80,0,94,124,1,80,0,96,3,127,127,127,1,127,96,0,0,3,130,128,128,128,0,1,2,4,133,128,128,128,0,1,112,1,1,1,5,132,128,128,128,0,1,1,16,32,13,131,128,128,128,0,1,0,3,6,204,131,128,128,0,62,100,107,0,66,197,129,131,134,140,152,176,224,64,251,0,0,11,127,0,65,196,129,131,134,124,11,100,107,0,66,192,129,131,134,204,132,137,146,36,251,0,0,11,124,1,68,0,0,0,0,0,0,0,0,11,124,1,68,0,0,0,0,0,0,0,0,11,124,1,68,0,0,0,0,0,0,0,0,11,124,1,68,0,0,0,0,0,0,0,0,11,100,107,0,66,192,129,131,134,140,216,53,251,0,0,11,100,107,1,66,210,164,201,146,165,202,148,169,210,0,66,210,164,201,146,165,202,212,156,218,0,66,192,129,131,134,140,152,176,224,64,66,192,129,131,134,140,152,176,224,64,126,125,66,192,129,131,128,130,152,176,224,64,125,66,192,129,131,190,130,152,176,224,36,125,66,164,200,0,125,125,66,0,125,66,0,125,66,0,125,251,0,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,127,0,65,0,11,7,136,128,128,128,0,1,4,109,97,105,110,0,0,9,139,128,128,128,0,1,6,0,65,0,11,112,1,210,0,11,10,141,133,128,128,0,1,138,5,0,65,238,235,177,226,126,253,15,253,83,32,0,65,235,146,246,155,122,65,244,231,246,248,124,253,15,253,164,1,65,230,152,157,154,7,253,15,253,164,1,118,65,167,184,218,133,127,253,15,253,164,1,118,118,66,149,131,127,66,164,128,218,132,206,227,209,231,254,0,65,230,133,189,200,126,65,252,208,237,164,5,254,32,0,132,245,241,222,13,27,254,71,2,211,226,246,158,7,66,243,213,226,237,209,166,141,199,0,68,76,189,205,180,194,110,195,89,36,3,131,253,18,253,127,253,127,253,127,253,127,253,164,1,65,138,173,198,47,65,138,248,237,203,120,65,205,162,146,252,5,65,190,148,192,156,5,254,53,0,200,229,139,195,9,65,167,139,216,173,5,65,215,146,221,45,254,53,0,169,255,135,252,1,254,53,0,193,209,131,217,7,40,2,134,242,184,197,3,65,228,191,145,146,6,65,142,162,226,169,4,254,53,0,168,178,151,189,15,113,109,71,109,107,254,46,0,191,232,145,230,9,67,66,84,34,11,67,88,147,220,200,91,68,233,240,20,66,52,37,190,38,182,187,182,187,182,187,182,187,182,187,57,3,168,169,148,198,10,65,226,162,208,167,7,65,221,226,226,242,120,107,65,140,215,139,233,5,65,141,151,153,19,107,107,65,188,134,175,165,5,65,183,219,200,136,121,107,65,250,197,157,214,123,65,139,168,173,167,126,107,107,107,42,1,249,156,171,169,13,187,182,187,182,187,182,187,182,187,182,65,191,253,243,170,122,253,15,65,203,195,202,169,122,253,15,65,179,204,244,234,123,253,15,253,119,65,166,184,138,186,122,253,15,65,129,140,243,163,6,253,15,253,119,65,229,139,254,233,121,253,15,65,183,191,195,183,122,253,15,253,119,253,119,65,151,211,231,151,122,253,15,253,119,253,119,253,119,65,192,156,192,215,3,65,178,193,209,198,7,107,65,240,157,246,199,6,65,221,225,148,169,1,107,65,145,183,142,141,127,65,188,218,139,244,7,107,107,65,236,243,250,169,127,65,146,241,174,181,120,107,65,139,147,232,229,124,65,255,203,253,217,3,107,107,65,250,197,224,140,2,65,
202,242,215,181,3,107,65,135,244,246,28,65,140,170,229,200,123,107,107,107,65,154,217,196,153,1,65,137,128,243,231,123,107,107,107,107,65,227,146,143,180,126,40,1,245,130,139,196,13,40,2,244,172,225,238,10,40,0,216,160,178,215,11,40,1,197,193,230,178,3,40,2,195,241,223,254,2,65,158,240,247,204,124,40,2,140,190,218,180,14,40,2,215,128,167,146,8,40,0,141,143,157,196,10,40,0,147,146,185,143,13,40,1,195,168,134,179,5,107,107,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,253,15,253,119,253,31,0,91,254,46,0,203,243,148,239,8,11]);
var wasm_module = new WebAssembly.Module(wasm_code);
var wasm_instance = new WebAssembly.Instance(wasm_module);
diff --git a/js/src/jit-test/tests/wasm/regress/bug1856733.js b/js/src/jit-test/tests/wasm/regress/bug1856733.js
index 0f3e59dcfe..57e1380780 100644
--- a/js/src/jit-test/tests/wasm/regress/bug1856733.js
+++ b/js/src/jit-test/tests/wasm/regress/bug1856733.js
@@ -1,4 +1,4 @@
-// |jit-test| --wasm-gc; skip-if: !wasmGcEnabled()
+// |jit-test| --setpref=wasm_gc=true; skip-if: !wasmGcEnabled()
// Validates if imported globals are accounted for in init expressions.
diff --git a/js/src/jit-test/tests/wasm/regress/bug1857829.js b/js/src/jit-test/tests/wasm/regress/bug1857829.js
index 037855a4b1..df0598f678 100644
--- a/js/src/jit-test/tests/wasm/regress/bug1857829.js
+++ b/js/src/jit-test/tests/wasm/regress/bug1857829.js
@@ -1,4 +1,4 @@
-// |jit-test| --wasm-gc; skip-if: !wasmGcEnabled()
+// |jit-test| --setpref=wasm_gc=true; skip-if: !wasmGcEnabled()
// Tests if i31ref global value is normalized.
var ins = wasmEvalText(`(module
diff --git a/js/src/jit-test/tests/wasm/regress/bug1858982.js b/js/src/jit-test/tests/wasm/regress/bug1858982.js
index 574c4bfe24..c67ae317f1 100644
--- a/js/src/jit-test/tests/wasm/regress/bug1858982.js
+++ b/js/src/jit-test/tests/wasm/regress/bug1858982.js
@@ -1,4 +1,4 @@
-// |jit-test| --wasm-tail-calls; --wasm-gc; skip-if: !wasmGcEnabled() || !wasmTailCallsEnabled()
+// |jit-test| --setpref=wasm_tail_calls=true; --setpref=wasm_gc=true; skip-if: !wasmGcEnabled() || !wasmTailCallsEnabled()
// Tests if instance registers were restored properly when call_ref is used
// with tail calls.
diff --git a/js/src/jit-test/tests/wasm/regress/bug1878673.js b/js/src/jit-test/tests/wasm/regress/bug1878673.js
new file mode 100644
index 0000000000..edf2e67187
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/regress/bug1878673.js
@@ -0,0 +1,13 @@
+// Check proper handling of OOM in SIMD loads.
+
+oomTest(function () {
+ let x = wasmTextToBinary(`(module
+ (memory 1 1)
+ (func
+ i32.const 16
+ v128.load8x8_s
+ i16x8.abs
+ drop)
+ )`);
+ new WebAssembly.Module(x);
+});
diff --git a/js/src/jit-test/tests/wasm/regress/bug1880770.js b/js/src/jit-test/tests/wasm/regress/bug1880770.js
new file mode 100644
index 0000000000..0e748137d2
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/regress/bug1880770.js
@@ -0,0 +1,20 @@
+// Check proper handling of OOM during segments creation.
+
+var x = {};
+Object.defineProperty(x, "", {
+ enumerable: true,
+ get: function () {
+ new WebAssembly.Instance(
+ new WebAssembly.Module(
+ wasmTextToBinary(
+ '(func $f (result f32) f32.const 0)(table (export "g") 1 funcref) (elem (i32.const 0) $f)'
+ )
+ )
+ ).exports.g
+ .get(0)
+ .type(WebAssembly, "", WebAssembly.Module, {});
+ },
+});
+oomTest(function () {
+ Object.values(x);
+});
diff --git a/js/src/jit-test/tests/wasm/regress/oom-eval.js b/js/src/jit-test/tests/wasm/regress/oom-eval.js
index 1ce7c26df1..2f70bf8803 100644
--- a/js/src/jit-test/tests/wasm/regress/oom-eval.js
+++ b/js/src/jit-test/tests/wasm/regress/oom-eval.js
@@ -1,4 +1,4 @@
-// |jit-test| slow; allow-oom; skip-if: !wasmIsSupported() || !('oomTest' in this)
+// |jit-test| slow; allow-oom; skip-if: !wasmIsSupported()
function foo() {
var g = newGlobal({sameZoneAs: this});
diff --git a/js/src/jit-test/tests/wasm/regress/oom-init.js b/js/src/jit-test/tests/wasm/regress/oom-init.js
index f08b088107..4c6d0206b6 100644
--- a/js/src/jit-test/tests/wasm/regress/oom-init.js
+++ b/js/src/jit-test/tests/wasm/regress/oom-init.js
@@ -1,4 +1,4 @@
-// |jit-test| slow; allow-oom; skip-if: !wasmIsSupported() || !('oomTest' in this)
+// |jit-test| slow; allow-oom; skip-if: !wasmIsSupported() || !hasFunction.oomTest
Object.getOwnPropertyNames(this);
s = newGlobal();
diff --git a/js/src/jit-test/tests/wasm/regress/oom-masm-baseline.js b/js/src/jit-test/tests/wasm/regress/oom-masm-baseline.js
index 3f74666d0a..f2026a6788 100644
--- a/js/src/jit-test/tests/wasm/regress/oom-masm-baseline.js
+++ b/js/src/jit-test/tests/wasm/regress/oom-masm-baseline.js
@@ -1,4 +1,4 @@
-// |jit-test| slow; skip-if: !('oomTest' in this)
+// |jit-test| slow
// Test baseline compiler only.
if (typeof wasmCompileMode === 'undefined' || wasmCompileMode() != 'baseline')
diff --git a/js/src/jit-test/tests/wasm/regress/oom-wasm-streaming.js b/js/src/jit-test/tests/wasm/regress/oom-wasm-streaming.js
index 5d7e719912..8a3c8e65d4 100644
--- a/js/src/jit-test/tests/wasm/regress/oom-wasm-streaming.js
+++ b/js/src/jit-test/tests/wasm/regress/oom-wasm-streaming.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomAfterAllocations' in this)
-
ignoreUnhandledRejections();
try {
diff --git a/js/src/jit-test/tests/wasm/regress/oom-wasmtexttobinary-block.js b/js/src/jit-test/tests/wasm/regress/oom-wasmtexttobinary-block.js
index 6f3b666873..aaa7ff121f 100644
--- a/js/src/jit-test/tests/wasm/regress/oom-wasmtexttobinary-block.js
+++ b/js/src/jit-test/tests/wasm/regress/oom-wasmtexttobinary-block.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
try {
oomTest((function () {
wasmTextToBinary("(module(func(loop $label1 $label0)))");
diff --git a/js/src/jit-test/tests/wasm/regress/oom-wrong-argument-number-for-import-call.js b/js/src/jit-test/tests/wasm/regress/oom-wrong-argument-number-for-import-call.js
index 2d41b466ea..d4eb2da5a3 100644
--- a/js/src/jit-test/tests/wasm/regress/oom-wrong-argument-number-for-import-call.js
+++ b/js/src/jit-test/tests/wasm/regress/oom-wrong-argument-number-for-import-call.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
function f() {
// Too many results returned.
return [52, 10, 0, 0];
diff --git a/js/src/jit-test/tests/wasm/simd/directives.txt b/js/src/jit-test/tests/wasm/simd/directives.txt
index 651282ecb5..aced6c31a7 100644
--- a/js/src/jit-test/tests/wasm/simd/directives.txt
+++ b/js/src/jit-test/tests/wasm/simd/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=baseline; test-also=--wasm-compiler=optimizing; test-also=--wasm-test-serialization; test-also=--wasm-compiler=optimizing --no-avx; skip-variant-if: --wasm-compiler=optimizing --no-avx, !getBuildConfiguration("x86") && !getBuildConfiguration("x64") || getBuildConfiguration("simulator"); include:wasm.js
+|jit-test| test-also=--wasm-compiler=baseline; test-also=--wasm-compiler=optimizing; test-also=--setpref=wasm_test_serialization=true; test-also=--wasm-compiler=optimizing --no-avx; skip-variant-if: --wasm-compiler=optimizing --no-avx, !getBuildConfiguration("x86") && !getBuildConfiguration("x64") || getBuildConfiguration("simulator"); include:wasm.js
diff --git a/js/src/jit-test/tests/wasm/simd/experimental.js b/js/src/jit-test/tests/wasm/simd/experimental.js
index 3076cce80b..ded45928e6 100644
--- a/js/src/jit-test/tests/wasm/simd/experimental.js
+++ b/js/src/jit-test/tests/wasm/simd/experimental.js
@@ -1,4 +1,4 @@
-// |jit-test| --wasm-relaxed-simd; skip-if: !wasmRelaxedSimdEnabled()
+// |jit-test| --setpref=wasm_relaxed_simd=true; skip-if: !wasmRelaxedSimdEnabled()
// Experimental opcodes. We have no text parsing support for these yet. The
// tests will be cleaned up and moved into ad-hack.js if the opcodes are
@@ -77,7 +77,7 @@ for ( let [opcode, xs, ys, as, operator] of [[F32x4RelaxedMaddCode, fxs, fys, fa
body: [...V128StoreExpr(0, [...V128Load(16),
...V128Load(32),
...V128Load(48),
- SimdPrefix, varU32(opcode)])]})])]));
+ SimdPrefix, ...varU32(opcode)])]})])]));
var mem = new (k == 4 ? Float32Array : Float64Array)(ins.exports.mem.buffer);
set(mem, k, xs);
@@ -97,7 +97,7 @@ for ( let [opcode, xs, ys, as, operator] of [[F32x4RelaxedMaddCode, fxs, fys, fa
funcBody({locals:[],
body: [...V128StoreExpr(0, [...V128Load(0),
...V128Load(0),
- SimdPrefix, varU32(opcode)])]})])])));
+ SimdPrefix, ...varU32(opcode)])]})])])));
}
// Relaxed swizzle, https://github.com/WebAssembly/relaxed-simd/issues/22
@@ -112,7 +112,7 @@ var ins = wasmValidateAndEval(moduleWithSections([
funcBody({locals:[],
body: [...V128StoreExpr(0, [...V128Load(16),
...V128Load(32),
- SimdPrefix, varU32(I8x16RelaxedSwizzleCode)])]})])]));
+ SimdPrefix, ...varU32(I8x16RelaxedSwizzleCode)])]})])]));
var mem = new Uint8Array(ins.exports.mem.buffer);
var test = [1, 4, 3, 7, 123, 0, 8, 222];
set(mem, 16, test);
@@ -134,7 +134,7 @@ assertEq(false, WebAssembly.validate(moduleWithSections([
bodySection([
funcBody({locals:[],
body: [...V128StoreExpr(0, [...V128Load(16),
- SimdPrefix, varU32(I8x16RelaxedSwizzleCode)])]})])])));
+ SimdPrefix, ...varU32(I8x16RelaxedSwizzleCode)])]})])])));
// Relaxed MIN/MAX, https://github.com/WebAssembly/relaxed-simd/issues/33
@@ -164,11 +164,11 @@ for (let k of [4, 2]) {
funcBody({locals:[],
body: [...V128StoreExpr(0, [...V128Load(16),
...V128Load(32),
- SimdPrefix, varU32(minOpcode)])]}),
+ SimdPrefix, ...varU32(minOpcode)])]}),
funcBody({locals:[],
body: [...V128StoreExpr(0, [...V128Load(16),
...V128Load(32),
- SimdPrefix, varU32(maxOpcode)])]})])]));
+ SimdPrefix, ...varU32(maxOpcode)])]})])]));
for (let i = 0; i < minMaxTests.length; i++) {
var Ty = k == 4 ? Float32Array : Float64Array;
var mem = new Ty(ins.exports.mem.buffer);
@@ -198,7 +198,7 @@ for (let k of [4, 2]) {
bodySection([
funcBody({locals:[],
body: [...V128StoreExpr(0, [...V128Load(0),
- SimdPrefix, varU32(op)])]})])])));
+ SimdPrefix, ...varU32(op)])]})])])));
}
}
@@ -216,16 +216,16 @@ var ins = wasmValidateAndEval(moduleWithSections([
bodySection([
funcBody({locals:[],
body: [...V128StoreExpr(0, [...V128Load(16),
- SimdPrefix, varU32(I32x4RelaxedTruncSSatF32x4Code)])]}),
+ SimdPrefix, ...varU32(I32x4RelaxedTruncSSatF32x4Code)])]}),
funcBody({locals:[],
body: [...V128StoreExpr(0, [...V128Load(16),
- SimdPrefix, varU32(I32x4RelaxedTruncUSatF32x4Code)])]}),
+ SimdPrefix, ...varU32(I32x4RelaxedTruncUSatF32x4Code)])]}),
funcBody({locals:[],
body: [...V128StoreExpr(0, [...V128Load(16),
- SimdPrefix, varU32(I32x4RelaxedTruncSatF64x2SZeroCode)])]}),
+ SimdPrefix, ...varU32(I32x4RelaxedTruncSatF64x2SZeroCode)])]}),
funcBody({locals:[],
body: [...V128StoreExpr(0, [...V128Load(16),
- SimdPrefix, varU32(I32x4RelaxedTruncSatF64x2UZeroCode)])]})])]));
+ SimdPrefix, ...varU32(I32x4RelaxedTruncSatF64x2UZeroCode)])]})])]));
var mem = ins.exports.mem.buffer;
set(new Float32Array(mem), 4, [0, 2.3, -3.4, 100000]);
@@ -260,7 +260,7 @@ for (let op of [I32x4RelaxedTruncSSatF32x4Code, I32x4RelaxedTruncUSatF32x4Code,
exportSection([]),
bodySection([
funcBody({locals:[],
- body: [...V128StoreExpr(0, [SimdPrefix, varU32(op)])]})])])));
+ body: [...V128StoreExpr(0, [SimdPrefix, ...varU32(op)])]})])])));
}
// Relaxed blend / laneselect, https://github.com/WebAssembly/relaxed-simd/issues/17
@@ -281,7 +281,7 @@ for (let [k, opcode, AT] of [[1, I8x16RelaxedLaneSelectCode, Int8Array],
body: [...V128StoreExpr(0, [...V128Load(16),
...V128Load(32),
...V128Load(48),
- SimdPrefix, varU32(opcode)])]})])]));
+ SimdPrefix, ...varU32(opcode)])]})])]));
var mem = ins.exports.mem.buffer;
var mem8 = new Uint8Array(mem);
@@ -310,7 +310,7 @@ for (let [k, opcode, AT] of [[1, I8x16RelaxedLaneSelectCode, Int8Array],
funcBody({locals:[],
body: [...V128StoreExpr(0, [...V128Load(0),
...V128Load(0),
- SimdPrefix, varU32(opcode)])]})])])));
+ SimdPrefix, ...varU32(opcode)])]})])])));
}
@@ -325,7 +325,7 @@ var ins = wasmValidateAndEval(moduleWithSections([
funcBody({locals:[],
body: [...V128StoreExpr(0, [...V128Load(16),
...V128Load(32),
- SimdPrefix, varU32(I16x8RelaxedQ15MulrSCode)])]})])]));
+ SimdPrefix, ...varU32(I16x8RelaxedQ15MulrSCode)])]})])]));
var mem16 = new Int16Array(ins.exports.mem.buffer);
for (let [as, bs] of cross([
@@ -355,7 +355,7 @@ var ins = wasmValidateAndEval(moduleWithSections([
funcBody({locals:[],
body: [...V128StoreExpr(0, [...V128Load(16),
...V128Load(32),
- SimdPrefix, varU32(I16x8DotI8x16I7x16SCode)])]})])]));
+ SimdPrefix, ...varU32(I16x8DotI8x16I7x16SCode)])]})])]));
var mem8 = new Int8Array(ins.exports.mem.buffer);
var mem16 = new Int16Array(ins.exports.mem.buffer);
var test7bit = [1, 2, 3, 4, 5, 64, 65, 127, 127, 0, 0,
@@ -385,7 +385,7 @@ var ins = wasmValidateAndEval(moduleWithSections([
body: [...V128StoreExpr(0, [...V128Load(16),
...V128Load(32),
...V128Load(48),
- SimdPrefix, varU32(I32x4DotI8x16I7x16AddSCode)])]})])]));
+ SimdPrefix, ...varU32(I32x4DotI8x16I7x16AddSCode)])]})])]));
var mem8 = new Int8Array(ins.exports.mem.buffer);
var mem32 = new Int32Array(ins.exports.mem.buffer);
var test7bit = [1, 2, 3, 4, 5, 64, 65, 127, 127, 0, 0,
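The experimental.js hunks above change `SimdPrefix, varU32(opcode)` into `SimdPrefix, ...varU32(opcode)`: the harness's varU32 builds an unsigned LEB128 encoding, so it returns an array of bytes that has to be spread into the function body rather than nested inside it. A minimal sketch of such an encoder (illustrative only, not the harness implementation):

    function varU32(n) {
      // unsigned LEB128: 7 payload bits per byte, high bit marks continuation
      const bytes = [];
      do {
        let b = n & 0x7f;
        n >>>= 7;
        if (n !== 0) b |= 0x80;
        bytes.push(b);
      } while (n !== 0);
      return bytes;
    }
    // varU32(0x100) -> [0x80, 0x02], so [SimdPrefix, ...varU32(op)] yields flat bytes.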
diff --git a/js/src/jit-test/tests/wasm/spec/exception-handling/directives.txt b/js/src/jit-test/tests/wasm/spec/exception-handling/directives.txt
index 51f58354cb..77f4127908 100644
--- a/js/src/jit-test/tests/wasm/spec/exception-handling/directives.txt
+++ b/js/src/jit-test/tests/wasm/spec/exception-handling/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --wasm-exceptions; --wasm-exnref; skip-if: !wasmExceptionsEnabled() \ No newline at end of file
+|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --setpref=wasm_exnref=true; skip-if: !wasmExnRefEnabled() \ No newline at end of file
diff --git a/js/src/jit-test/tests/wasm/spec/extended-const/directives.txt b/js/src/jit-test/tests/wasm/spec/extended-const/directives.txt
index 5b3d5f6d83..e93d8c8df0 100644
--- a/js/src/jit-test/tests/wasm/spec/extended-const/directives.txt
+++ b/js/src/jit-test/tests/wasm/spec/extended-const/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --wasm-extended-const; --no-wasm-gc; skip-if: !wasmExtendedConstEnabled() \ No newline at end of file
+|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --setpref=wasm_gc=false \ No newline at end of file
diff --git a/js/src/jit-test/tests/wasm/spec/function-references/directives.txt b/js/src/jit-test/tests/wasm/spec/function-references/directives.txt
index bb76560525..a9cf6401e1 100644
--- a/js/src/jit-test/tests/wasm/spec/function-references/directives.txt
+++ b/js/src/jit-test/tests/wasm/spec/function-references/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --wasm-function-references; skip-if: !wasmFunctionReferencesEnabled() \ No newline at end of file
+|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --setpref=wasm_gc=true; skip-if: !wasmGcEnabled() \ No newline at end of file
diff --git a/js/src/jit-test/tests/wasm/spec/function-references/return_call_ref.wast.js b/js/src/jit-test/tests/wasm/spec/function-references/return_call_ref.wast.js
index 3ea51a8cb0..df24798146 100644
--- a/js/src/jit-test/tests/wasm/spec/function-references/return_call_ref.wast.js
+++ b/js/src/jit-test/tests/wasm/spec/function-references/return_call_ref.wast.js
@@ -1,4 +1,4 @@
-// |jit-test| --wasm-tail-calls; skip-if: !wasmTailCallsEnabled()
+// |jit-test| --setpref=wasm_tail_calls=true; skip-if: !wasmTailCallsEnabled()
/* Copyright 2021 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/js/src/jit-test/tests/wasm/spec/gc/directives.txt b/js/src/jit-test/tests/wasm/spec/gc/directives.txt
index c071d8b980..a9cf6401e1 100644
--- a/js/src/jit-test/tests/wasm/spec/gc/directives.txt
+++ b/js/src/jit-test/tests/wasm/spec/gc/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --wasm-gc; skip-if: !wasmGcEnabled() \ No newline at end of file
+|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --setpref=wasm_gc=true; skip-if: !wasmGcEnabled() \ No newline at end of file
diff --git a/js/src/jit-test/tests/wasm/spec/memory64/directives.txt b/js/src/jit-test/tests/wasm/spec/memory64/directives.txt
index bbc47fb788..740ef217b9 100644
--- a/js/src/jit-test/tests/wasm/spec/memory64/directives.txt
+++ b/js/src/jit-test/tests/wasm/spec/memory64/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; skip-if: !wasmMemory64Enabled() \ No newline at end of file
+|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --setpref=wasm_memory64=true; skip-if: !wasmMemory64Enabled() \ No newline at end of file
diff --git a/js/src/jit-test/tests/wasm/spec/memory64/memory64.wast.js b/js/src/jit-test/tests/wasm/spec/memory64/memory64.wast.js
index 341742ab86..0af2f021bb 100644
--- a/js/src/jit-test/tests/wasm/spec/memory64/memory64.wast.js
+++ b/js/src/jit-test/tests/wasm/spec/memory64/memory64.wast.js
@@ -27,17 +27,19 @@ let $2 = instantiate(`(module (memory i64 1 256))`);
// ./test/core/memory64.wast:6
let $3 = instantiate(`(module (memory i64 0 65536))`);
-// ./test/core/memory64.wast:8
-assert_invalid(
- () => instantiate(`(module (memory i64 0) (memory i64 0))`),
- `multiple memories`,
-);
-
-// ./test/core/memory64.wast:9
-assert_invalid(
- () => instantiate(`(module (memory (import "spectest" "memory") i64 0) (memory i64 0))`),
- `multiple memories`,
-);
+if (!wasmMultiMemoryEnabled()) {
+ // ./test/core/memory64.wast:8
+ assert_invalid(
+ () => instantiate(`(module (memory i64 0) (memory i64 0))`),
+ `multiple memories`,
+ );
+
+ // ./test/core/memory64.wast:9
+ assert_invalid(
+ () => instantiate(`(module (memory (import "spectest" "memory") i64 0) (memory i64 0))`),
+ `multiple memories`,
+ );
+}
// ./test/core/memory64.wast:11
let $4 = instantiate(`(module (memory i64 (data)) (func (export "memsize") (result i64) (memory.size)))`);
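The `multiple memories` assertions are gated on !wasmMultiMemoryEnabled() because a second memory is valid once the multi-memory proposal is enabled. The flip side, sketched with shell helpers that appear elsewhere in this patch:

    if (wasmMultiMemoryEnabled()) {
      const bin = wasmTextToBinary(`(module (memory 0) (memory 0))`);
      assertEq(WebAssembly.validate(bin), true);  // two memories validate
    }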
diff --git a/js/src/jit-test/tests/wasm/spec/multi-memory/directives.txt b/js/src/jit-test/tests/wasm/spec/multi-memory/directives.txt
index 9211583549..7c18d36751 100644
--- a/js/src/jit-test/tests/wasm/spec/multi-memory/directives.txt
+++ b/js/src/jit-test/tests/wasm/spec/multi-memory/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --wasm-multi-memory; skip-if: !wasmMultiMemoryEnabled() \ No newline at end of file
+|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --setpref=wasm_multi_memory=true; skip-if: !wasmMultiMemoryEnabled() \ No newline at end of file
diff --git a/js/src/jit-test/tests/wasm/spec/multi-memory/harness/harness.js b/js/src/jit-test/tests/wasm/spec/multi-memory/harness/harness.js
index a96781e8ed..e03b3f35db 100644
--- a/js/src/jit-test/tests/wasm/spec/multi-memory/harness/harness.js
+++ b/js/src/jit-test/tests/wasm/spec/multi-memory/harness/harness.js
@@ -19,6 +19,15 @@ if (!wasmIsSupported()) {
quit();
}
+function partialOobWriteMayWritePartialData() {
+ let arm_native = getBuildConfiguration("arm") && !getBuildConfiguration("arm-simulator");
+ let arm64_native = getBuildConfiguration("arm64") && !getBuildConfiguration("arm64-simulator");
+ return arm_native || arm64_native;
+}
+
+let native_arm = getBuildConfiguration("arm") && !getBuildConfiguration("arm-simulator");
+let native_arm64 = getBuildConfiguration("arm64") && !getBuildConfiguration("arm64-simulator");
+
function bytes(type, bytes) {
var typedBuffer = new Uint8Array(bytes);
return wasmGlobalFromArrayBuffer(type, typedBuffer.buffer);
diff --git a/js/src/jit-test/tests/wasm/spec/multi-memory/memory_trap1.wast.js b/js/src/jit-test/tests/wasm/spec/multi-memory/memory_trap1.wast.js
index ef68a1c0ec..35b8bd5a80 100644
--- a/js/src/jit-test/tests/wasm/spec/multi-memory/memory_trap1.wast.js
+++ b/js/src/jit-test/tests/wasm/spec/multi-memory/memory_trap1.wast.js
@@ -562,11 +562,15 @@ assert_trap(() => invoke($0, `i64.load32_u`, [-3]), `out of bounds memory access
// ./test/core/multi-memory/memory_trap1.wast:234
assert_trap(() => invoke($0, `i64.load32_u`, [-4]), `out of bounds memory access`);
-// ./test/core/multi-memory/memory_trap1.wast:237
-assert_return(() => invoke($0, `i64.load`, [65528]), [value("i64", 7523094288207667809n)]);
+// Bug 1842293 - do not observe the partial store caused by bug 1666747 on
+// some native platforms.
+if (!partialOobWriteMayWritePartialData()) {
+ // ./test/core/multi-memory/memory_trap1.wast:237
+ assert_return(() => invoke($0, `i64.load`, [65528]), [value("i64", 7523094288207667809n)]);
-// ./test/core/multi-memory/memory_trap1.wast:238
-assert_return(() => invoke($0, `i64.load`, [0]), [value("i64", 7523094288207667809n)]);
+ // ./test/core/multi-memory/memory_trap1.wast:238
+ assert_return(() => invoke($0, `i64.load`, [0]), [value("i64", 7523094288207667809n)]);
+}
// ./test/core/multi-memory/memory_trap1.wast:242
assert_return(() => invoke($0, `i64.store`, [65528, 0n]), []);
@@ -574,14 +578,18 @@ assert_return(() => invoke($0, `i64.store`, [65528, 0n]), []);
// ./test/core/multi-memory/memory_trap1.wast:243
assert_trap(() => invoke($0, `i32.store`, [65533, 305419896]), `out of bounds memory access`);
-// ./test/core/multi-memory/memory_trap1.wast:244
-assert_return(() => invoke($0, `i32.load`, [65532]), [value("i32", 0)]);
+if (!partialOobWriteMayWritePartialData()) {
+ // ./test/core/multi-memory/memory_trap1.wast:244
+ assert_return(() => invoke($0, `i32.load`, [65532]), [value("i32", 0)]);
+}
// ./test/core/multi-memory/memory_trap1.wast:245
assert_trap(() => invoke($0, `i64.store`, [65529, 1311768467294899695n]), `out of bounds memory access`);
-// ./test/core/multi-memory/memory_trap1.wast:246
-assert_return(() => invoke($0, `i64.load`, [65528]), [value("i64", 0n)]);
+if (!partialOobWriteMayWritePartialData()) {
+ // ./test/core/multi-memory/memory_trap1.wast:246
+ assert_return(() => invoke($0, `i64.load`, [65528]), [value("i64", 0n)]);
+}
// ./test/core/multi-memory/memory_trap1.wast:247
assert_trap(
@@ -589,8 +597,10 @@ assert_trap(
`out of bounds memory access`,
);
-// ./test/core/multi-memory/memory_trap1.wast:248
-assert_return(() => invoke($0, `f32.load`, [65532]), [value("f32", 0)]);
+if (!partialOobWriteMayWritePartialData()) {
+ // ./test/core/multi-memory/memory_trap1.wast:248
+ assert_return(() => invoke($0, `f32.load`, [65532]), [value("f32", 0)]);
+}
// ./test/core/multi-memory/memory_trap1.wast:249
assert_trap(
@@ -598,5 +608,7 @@ assert_trap(
`out of bounds memory access`,
);
-// ./test/core/multi-memory/memory_trap1.wast:250
-assert_return(() => invoke($0, `f64.load`, [65528]), [value("f64", 0)]);
+if (!partialOobWriteMayWritePartialData()) {
+ // ./test/core/multi-memory/memory_trap1.wast:250
+ assert_return(() => invoke($0, `f64.load`, [65528]), [value("f64", 0)]);
+}
diff --git a/js/src/jit-test/tests/wasm/spec/relaxed-simd/directives.txt b/js/src/jit-test/tests/wasm/spec/relaxed-simd/directives.txt
index 625758af79..bbeff405b1 100644
--- a/js/src/jit-test/tests/wasm/spec/relaxed-simd/directives.txt
+++ b/js/src/jit-test/tests/wasm/spec/relaxed-simd/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --wasm-relaxed-simd; skip-if: !wasmRelaxedSimdEnabled() \ No newline at end of file
+|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --setpref=wasm_relaxed_simd=true; skip-if: !wasmRelaxedSimdEnabled() \ No newline at end of file
diff --git a/js/src/jit-test/tests/wasm/spec/spec/directives.txt b/js/src/jit-test/tests/wasm/spec/spec/directives.txt
index 9fa4f75347..b26fb254a9 100644
--- a/js/src/jit-test/tests/wasm/spec/spec/directives.txt
+++ b/js/src/jit-test/tests/wasm/spec/spec/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; test-also=--no-avx; skip-variant-if: --no-avx, !getBuildConfiguration('x86') && !getBuildConfiguration('x64') || getBuildConfiguration('simulator') \ No newline at end of file
+|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; test-also=--no-avx; skip-variant-if: --no-avx, !getBuildConfiguration('x86') && !getBuildConfiguration('x64') || getBuildConfiguration('simulator') \ No newline at end of file
diff --git a/js/src/jit-test/tests/wasm/spec/spec/global.wast.js b/js/src/jit-test/tests/wasm/spec/spec/global.wast.js
index e3351d3421..7f2502cc27 100644
--- a/js/src/jit-test/tests/wasm/spec/spec/global.wast.js
+++ b/js/src/jit-test/tests/wasm/spec/spec/global.wast.js
@@ -1,4 +1,4 @@
-// |jit-test| --no-wasm-gc
+// |jit-test| --setpref=wasm_gc=false
/* Copyright 2021 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/js/src/jit-test/tests/wasm/spec/spec/memory.wast.js b/js/src/jit-test/tests/wasm/spec/spec/memory.wast.js
index 831bb26d69..211f610b69 100644
--- a/js/src/jit-test/tests/wasm/spec/spec/memory.wast.js
+++ b/js/src/jit-test/tests/wasm/spec/spec/memory.wast.js
@@ -33,14 +33,16 @@ let $4 = instantiate(`(module (memory 1 256))`);
// ./test/core/memory.wast:8
let $5 = instantiate(`(module (memory 0 65536))`);
-// ./test/core/memory.wast:10
-assert_invalid(() => instantiate(`(module (memory 0) (memory 0))`), `multiple memories`);
-
-// ./test/core/memory.wast:11
-assert_invalid(
- () => instantiate(`(module (memory (import "spectest" "memory") 0) (memory 0))`),
- `multiple memories`,
-);
+if (!wasmMultiMemoryEnabled()) {
+ // ./test/core/memory.wast:10
+ assert_invalid(() => instantiate(`(module (memory 0) (memory 0))`), `multiple memories`);
+
+ // ./test/core/memory.wast:11
+ assert_invalid(
+ () => instantiate(`(module (memory (import "spectest" "memory") 0) (memory 0))`),
+ `multiple memories`,
+ );
+}
// ./test/core/memory.wast:13
let $6 = instantiate(`(module (memory (data)) (func (export "memsize") (result i32) (memory.size)))`);
diff --git a/js/src/jit-test/tests/wasm/spec/tail-call/directives.txt b/js/src/jit-test/tests/wasm/spec/tail-call/directives.txt
index 223b6b843a..fbd3639abc 100644
--- a/js/src/jit-test/tests/wasm/spec/tail-call/directives.txt
+++ b/js/src/jit-test/tests/wasm/spec/tail-call/directives.txt
@@ -1 +1 @@
-|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--wasm-test-serialization; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --wasm-tail-calls; skip-if: !wasmTailCallsEnabled() \ No newline at end of file
+|jit-test| test-also=--wasm-compiler=optimizing; test-also=--wasm-compiler=baseline; test-also=--setpref=wasm_test_serialization=true; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemorySupported(); local-include:harness/harness.js; --setpref=wasm_tail_calls=true; skip-if: !wasmTailCallsEnabled() \ No newline at end of file
diff --git a/js/src/jit-test/tests/wasm/tail-calls/bug1862473.js b/js/src/jit-test/tests/wasm/tail-calls/bug1862473.js
index 57e5ea2118..fd44aa8f26 100644
--- a/js/src/jit-test/tests/wasm/tail-calls/bug1862473.js
+++ b/js/src/jit-test/tests/wasm/tail-calls/bug1862473.js
@@ -1,4 +1,4 @@
-// |jit-test| --wasm-gc; skip-if: !wasmGcEnabled()
+// |jit-test| --setpref=wasm_gc=true; skip-if: !wasmGcEnabled()
var ins = wasmEvalText(`(module
(func $func1)
diff --git a/js/src/jit-test/tests/wasm/tail-calls/bug1865044.js b/js/src/jit-test/tests/wasm/tail-calls/bug1865044.js
index 042e641f32..bf984e7b87 100644
--- a/js/src/jit-test/tests/wasm/tail-calls/bug1865044.js
+++ b/js/src/jit-test/tests/wasm/tail-calls/bug1865044.js
@@ -1,4 +1,4 @@
-// |jit-test| --more-compartments; skip-variant-if: --wasm-test-serialization, true; skip-variant-if: --wasm-compiler=ion, true; skip-if: !wasmGcEnabled() || !('Function' in WebAssembly)
+// |jit-test| --more-compartments; skip-variant-if: --setpref=wasm_test_serialization=true, true; skip-variant-if: --wasm-compiler=ion, true; skip-if: !wasmGcEnabled() || !('Function' in WebAssembly)
a = newGlobal();
a.b = this;
diff --git a/js/src/jit-test/tests/wasm/tail-calls/bug1871605.js b/js/src/jit-test/tests/wasm/tail-calls/bug1871605.js
index 84f37a3e42..3403cea102 100644
--- a/js/src/jit-test/tests/wasm/tail-calls/bug1871605.js
+++ b/js/src/jit-test/tests/wasm/tail-calls/bug1871605.js
@@ -1,4 +1,4 @@
-// |jit-test| --more-compartments; skip-variant-if: --wasm-test-serialization, true; skip-variant-if: --wasm-compiler=ion, true; skip-if: !wasmGcEnabled()
+// |jit-test| --more-compartments; skip-variant-if: --setpref=wasm_test_serialization=true, true; skip-variant-if: --wasm-compiler=ion, true; skip-if: !wasmGcEnabled()
var dbg = newGlobal()
dbg.parent = this
diff --git a/js/src/jit-test/tests/wasm/tail-calls/bug1871606.js b/js/src/jit-test/tests/wasm/tail-calls/bug1871606.js
index aa520d1f53..ec43665c62 100644
--- a/js/src/jit-test/tests/wasm/tail-calls/bug1871606.js
+++ b/js/src/jit-test/tests/wasm/tail-calls/bug1871606.js
@@ -1,4 +1,4 @@
-// |jit-test| --more-compartments; skip-variant-if: --wasm-test-serialization, true; skip-variant-if: --wasm-compiler=ion, true
+// |jit-test| --more-compartments; skip-variant-if: --setpref=wasm_test_serialization=true, true; skip-variant-if: --wasm-compiler=ion, true
dbg = newGlobal();
dbg.b = this;
dbg.eval("(" + function() {
diff --git a/js/src/jit-test/tests/wasm/tail-calls/bug1871951.js b/js/src/jit-test/tests/wasm/tail-calls/bug1871951.js
index 6b3e481d59..6dc8b69548 100644
--- a/js/src/jit-test/tests/wasm/tail-calls/bug1871951.js
+++ b/js/src/jit-test/tests/wasm/tail-calls/bug1871951.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-variant-if: --wasm-test-serialization, true; skip-if: !wasmGcEnabled()
+// |jit-test| skip-variant-if: --setpref=wasm_test_serialization=true, true; skip-if: !wasmGcEnabled()
gczeal(18)
function a(str, imports) {
diff --git a/js/src/jit-test/tests/wasm/tail-calls/directives.txt b/js/src/jit-test/tests/wasm/tail-calls/directives.txt
index 66e8161b9d..eac1ba242f 100644
--- a/js/src/jit-test/tests/wasm/tail-calls/directives.txt
+++ b/js/src/jit-test/tests/wasm/tail-calls/directives.txt
@@ -1 +1 @@
-|jit-test| --wasm-tail-calls; test-also=--wasm-compiler=baseline; test-also=--wasm-compiler=ion; test-also=--wasm-test-serialization; skip-if: !wasmTailCallsEnabled(); include:wasm.js
+|jit-test| --setpref=wasm_tail_calls=true; test-also=--wasm-compiler=baseline; test-also=--wasm-compiler=ion; test-also=--setpref=wasm_test_serialization=true; skip-if: !wasmTailCallsEnabled(); include:wasm.js
diff --git a/js/src/jit-test/tests/wasm/tail-calls/exceptions.js b/js/src/jit-test/tests/wasm/tail-calls/exceptions.js
index fd05f37e53..c4c774a4f3 100644
--- a/js/src/jit-test/tests/wasm/tail-calls/exceptions.js
+++ b/js/src/jit-test/tests/wasm/tail-calls/exceptions.js
@@ -1,5 +1,3 @@
-// |jit-test| --wasm-exceptions; skip-if: !wasmExceptionsEnabled()
-
// Simple test with return_call.
var ins = wasmEvalText(`(module
(tag $exn)
diff --git a/js/src/jit-test/tests/wasm/tail-calls/gc.js b/js/src/jit-test/tests/wasm/tail-calls/gc.js
index 10e5971e6d..7f4df4c69a 100644
--- a/js/src/jit-test/tests/wasm/tail-calls/gc.js
+++ b/js/src/jit-test/tests/wasm/tail-calls/gc.js
@@ -1,4 +1,4 @@
-// |jit-test| --wasm-function-references; --wasm-gc; skip-if: !wasmGcEnabled() || getBuildConfiguration("simulator")
+// |jit-test| --setpref=wasm_gc=true; skip-if: !wasmGcEnabled() || getBuildConfiguration("simulator")
// Tests GC references passed as arguments during return calls.
// Similar to js/src/jit-test/tests/wasm/gc/trailers-gc-stress.js
diff --git a/js/src/jit-test/tests/wasm/tail-calls/litmus3.js b/js/src/jit-test/tests/wasm/tail-calls/litmus3.js
index bdd0918717..4fe367fb29 100644
--- a/js/src/jit-test/tests/wasm/tail-calls/litmus3.js
+++ b/js/src/jit-test/tests/wasm/tail-calls/litmus3.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmTailCallsEnabled() || !wasmExceptionsEnabled()
+// |jit-test| skip-if: !wasmTailCallsEnabled()
// Mutually recursive functions implement a multi-entry loop using tail calls,
// with exception handling.
diff --git a/js/src/jit-test/tests/wasm/tail-calls/litmus4.js b/js/src/jit-test/tests/wasm/tail-calls/litmus4.js
index c16f712aac..6562f76686 100644
--- a/js/src/jit-test/tests/wasm/tail-calls/litmus4.js
+++ b/js/src/jit-test/tests/wasm/tail-calls/litmus4.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmTailCallsEnabled() || !wasmExceptionsEnabled()
+// |jit-test| skip-if: !wasmTailCallsEnabled()
// Mutually recursive functions implement a multi-entry loop using indirect tail
// calls, with exception handling.
diff --git a/js/src/jit-test/tests/wasm/tail-calls/litmus8.js b/js/src/jit-test/tests/wasm/tail-calls/litmus8.js
index 4602664488..126367a0a0 100644
--- a/js/src/jit-test/tests/wasm/tail-calls/litmus8.js
+++ b/js/src/jit-test/tests/wasm/tail-calls/litmus8.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmTailCallsEnabled() || !wasmExceptionsEnabled()
+// |jit-test| skip-if: !wasmTailCallsEnabled()
// Tail-call litmus test with multiple results
//
diff --git a/js/src/jit-test/tests/wasm/tail-calls/litmus9.js b/js/src/jit-test/tests/wasm/tail-calls/litmus9.js
index 3bbde27111..513e5c058e 100644
--- a/js/src/jit-test/tests/wasm/tail-calls/litmus9.js
+++ b/js/src/jit-test/tests/wasm/tail-calls/litmus9.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !wasmTailCallsEnabled() || !wasmExceptionsEnabled()
+// |jit-test| skip-if: !wasmTailCallsEnabled()
// Tail-call litmus test with multiple results
//
diff --git a/js/src/jit-test/tests/wasm/tail-calls/return_call_ref.js b/js/src/jit-test/tests/wasm/tail-calls/return_call_ref.js
index c5f250a41c..947da02ef5 100644
--- a/js/src/jit-test/tests/wasm/tail-calls/return_call_ref.js
+++ b/js/src/jit-test/tests/wasm/tail-calls/return_call_ref.js
@@ -1,4 +1,4 @@
-// |jit-test| --wasm-gc; skip-if: !wasmGcEnabled()
+// |jit-test| --setpref=wasm_gc=true; skip-if: !wasmGcEnabled()
var ins = wasmEvalText(`(module
(type $t (func (param i64 i64 funcref) (result i64)))
(elem declare func $fac-acc $fac-acc-broken)
diff --git a/js/src/jit-test/tests/wasm/testing/directives.txt b/js/src/jit-test/tests/wasm/testing/directives.txt
new file mode 100644
index 0000000000..01f722ba1e
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/testing/directives.txt
@@ -0,0 +1 @@
+|jit-test| include:wasm.js
diff --git a/js/src/jit-test/tests/wasm/testing/global-lossless-invoke.js b/js/src/jit-test/tests/wasm/testing/global-lossless-invoke.js
new file mode 100644
index 0000000000..66db0499d9
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/testing/global-lossless-invoke.js
@@ -0,0 +1,13 @@
+// |jit-test| skip-if: !('wasmLosslessInvoke' in this)
+
+let bytecode = wasmTextToBinary(`(module
+ (func (export "f") (result i32)
+ i32.const 1
+ )
+)`);
+let g = newGlobal({sameCompartmentAs: wasmLosslessInvoke});
+let m = new g.WebAssembly.Module(bytecode);
+let i = new g.WebAssembly.Instance(m);
+
+assertEq(i.exports.f(), 1);
+assertEq(wasmLosslessInvoke(i.exports.f).value, 1);
diff --git a/js/src/jit-test/tests/xdr/bug1390856.js b/js/src/jit-test/tests/xdr/bug1390856.js
index b1dbbffdfa..bb8940eae6 100644
--- a/js/src/jit-test/tests/xdr/bug1390856.js
+++ b/js/src/jit-test/tests/xdr/bug1390856.js
@@ -1,4 +1,4 @@
-// |jit-test| skip-if: !('oomTest' in this) || helperThreadCount() === 0
+// |jit-test| skip-if: helperThreadCount() === 0
// Test main thread encode/decode OOM
oomTest(function() {
diff --git a/js/src/jit-test/tests/xdr/bug1427860.js b/js/src/jit-test/tests/xdr/bug1427860.js
index fd6d6f0411..befa6782ae 100644
--- a/js/src/jit-test/tests/xdr/bug1427860.js
+++ b/js/src/jit-test/tests/xdr/bug1427860.js
@@ -1,4 +1,4 @@
-// |jit-test| --code-coverage; skip-if: !('oomAtAllocation' in this)
+// |jit-test| --code-coverage
let x = cacheEntry("function inner() { return 3; }; inner()");
evaluate(x, { saveIncrementalBytecode: true });
diff --git a/js/src/jit-test/tests/xdr/incremental-oom.js b/js/src/jit-test/tests/xdr/incremental-oom.js
index ef202d112d..a0e1cdb0a0 100644
--- a/js/src/jit-test/tests/xdr/incremental-oom.js
+++ b/js/src/jit-test/tests/xdr/incremental-oom.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
// Delazify a function while encoding bytecode.
oomTest(() => {
let code = cacheEntry(`
diff --git a/js/src/jit-test/tests/xdr/module-oom.js b/js/src/jit-test/tests/xdr/module-oom.js
index 14aef8e0af..951b6212a6 100644
--- a/js/src/jit-test/tests/xdr/module-oom.js
+++ b/js/src/jit-test/tests/xdr/module-oom.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
// OOM tests for xdr module parsing.
const sa =
diff --git a/js/src/jit-test/tests/xdr/stencil-oom.js b/js/src/jit-test/tests/xdr/stencil-oom.js
index f57e8f82f8..70b4398f21 100644
--- a/js/src/jit-test/tests/xdr/stencil-oom.js
+++ b/js/src/jit-test/tests/xdr/stencil-oom.js
@@ -1,5 +1,3 @@
-// |jit-test| skip-if: !('oomTest' in this)
-
const sa = `
function f(x, y) { return x + y }
let a = 10, b = 20;
diff --git a/js/src/jit/AtomicOp.h b/js/src/jit/AtomicOp.h
index 90edb631cb..ed9a6dd74c 100644
--- a/js/src/jit/AtomicOp.h
+++ b/js/src/jit/AtomicOp.h
@@ -7,17 +7,19 @@
#ifndef jit_AtomicOp_h
#define jit_AtomicOp_h
+#include <stdint.h>
+
namespace js {
namespace jit {
// Types of atomic operation, shared by MIR and LIR.
-enum AtomicOp {
- AtomicFetchAddOp,
- AtomicFetchSubOp,
- AtomicFetchAndOp,
- AtomicFetchOrOp,
- AtomicFetchXorOp
+enum class AtomicOp {
+ Add,
+ Sub,
+ And,
+ Or,
+ Xor,
};
// Memory barrier types, shared by MIR and LIR.
@@ -26,7 +28,7 @@ enum AtomicOp {
// distinction (DSB vs DMB on ARM, SYNC vs parameterized SYNC on MIPS)
// but there's been no reason to use it yet.
-enum MemoryBarrierBits {
+enum MemoryBarrierBits : uint8_t {
MembarLoadLoad = 1,
MembarLoadStore = 2,
MembarStoreStore = 4,
@@ -41,16 +43,16 @@ enum MemoryBarrierBits {
static inline constexpr MemoryBarrierBits operator|(MemoryBarrierBits a,
MemoryBarrierBits b) {
- return MemoryBarrierBits(int(a) | int(b));
+ return MemoryBarrierBits(static_cast<uint8_t>(a) | static_cast<uint8_t>(b));
}
static inline constexpr MemoryBarrierBits operator&(MemoryBarrierBits a,
MemoryBarrierBits b) {
- return MemoryBarrierBits(int(a) & int(b));
+ return MemoryBarrierBits(static_cast<uint8_t>(a) & static_cast<uint8_t>(b));
}
static inline constexpr MemoryBarrierBits operator~(MemoryBarrierBits a) {
- return MemoryBarrierBits(~int(a));
+ return MemoryBarrierBits(~static_cast<uint8_t>(a));
}
// Standard barrier bits for a full barrier.
diff --git a/js/src/jit/BaselineCacheIRCompiler.cpp b/js/src/jit/BaselineCacheIRCompiler.cpp
index 171771ed51..92490ef8b8 100644
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -2170,8 +2170,13 @@ void ShapeListObject::trace(JSTracer* trc, JSObject* obj) {
}
bool ShapeListObject::traceWeak(JSTracer* trc) {
+ uint32_t length = getDenseInitializedLength();
+ if (length == 0) {
+ return false; // Object may be uninitialized.
+ }
+
const HeapSlot* src = elements_;
- const HeapSlot* end = src + getDenseInitializedLength();
+ const HeapSlot* end = src + length;
HeapSlot* dst = elements_;
while (src != end) {
Shape* shape = static_cast<Shape*>(src->toPrivate());
@@ -2184,7 +2189,7 @@ bool ShapeListObject::traceWeak(JSTracer* trc) {
}
MOZ_ASSERT(dst <= end);
- size_t length = dst - elements_;
+ length = dst - elements_;
setDenseInitializedLength(length);
return length != 0;
@@ -3446,6 +3451,9 @@ void BaselineCacheIRCompiler::createThis(Register argcReg, Register calleeReg,
// Restore saved registers.
masm.PopRegsInMask(liveNonGCRegs);
+
+ // Restore ICStubReg. The stub might have been moved if CreateThisFromIC
+ // discarded JIT code.
Address stubAddr(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP);
masm.loadPtr(stubAddr, ICStubReg);
diff --git a/js/src/jit/CacheIR.cpp b/js/src/jit/CacheIR.cpp
index c2245e38b5..68dbd6bfee 100644
--- a/js/src/jit/CacheIR.cpp
+++ b/js/src/jit/CacheIR.cpp
@@ -520,9 +520,14 @@ enum class NativeGetPropKind {
static NativeGetPropKind IsCacheableGetPropCall(NativeObject* obj,
NativeObject* holder,
- PropertyInfo prop) {
+ PropertyInfo prop,
+ jsbytecode* pc = nullptr) {
MOZ_ASSERT(IsCacheableProtoChain(obj, holder));
+ if (pc && JSOp(*pc) == JSOp::GetBoundName) {
+ return NativeGetPropKind::None;
+ }
+
if (!prop.isAccessorProperty()) {
return NativeGetPropKind::None;
}
@@ -615,7 +620,7 @@ static NativeGetPropKind CanAttachNativeGetProp(JSContext* cx, JSObject* obj,
return NativeGetPropKind::Slot;
}
- return IsCacheableGetPropCall(nobj, *holder, propInfo->ref());
+ return IsCacheableGetPropCall(nobj, *holder, propInfo->ref(), pc);
}
if (!prop.isFound()) {
@@ -1975,6 +1980,46 @@ AttachDecision GetPropIRGenerator::tryAttachProxy(HandleObject obj,
MOZ_CRASH("Unexpected ProxyStubType");
}
+const JSClass* js::jit::ClassFor(GuardClassKind kind) {
+ switch (kind) {
+ case GuardClassKind::Array:
+ return &ArrayObject::class_;
+ case GuardClassKind::PlainObject:
+ return &PlainObject::class_;
+ case GuardClassKind::FixedLengthArrayBuffer:
+ return &FixedLengthArrayBufferObject::class_;
+ case GuardClassKind::ResizableArrayBuffer:
+ return &ResizableArrayBufferObject::class_;
+ case GuardClassKind::FixedLengthSharedArrayBuffer:
+ return &FixedLengthSharedArrayBufferObject::class_;
+ case GuardClassKind::GrowableSharedArrayBuffer:
+ return &GrowableSharedArrayBufferObject::class_;
+ case GuardClassKind::FixedLengthDataView:
+ return &FixedLengthDataViewObject::class_;
+ case GuardClassKind::ResizableDataView:
+ return &ResizableDataViewObject::class_;
+ case GuardClassKind::MappedArguments:
+ return &MappedArgumentsObject::class_;
+ case GuardClassKind::UnmappedArguments:
+ return &UnmappedArgumentsObject::class_;
+ case GuardClassKind::WindowProxy:
+ // Caller needs to handle this case, see
+ // JSRuntime::maybeWindowProxyClass().
+ break;
+ case GuardClassKind::JSFunction:
+ // Caller needs to handle this case. Can be either |js::FunctionClass| or
+ // |js::ExtendedFunctionClass|.
+ break;
+ case GuardClassKind::BoundFunction:
+ return &BoundFunctionObject::class_;
+ case GuardClassKind::Set:
+ return &SetObject::class_;
+ case GuardClassKind::Map:
+ return &MapObject::class_;
+ }
+ MOZ_CRASH("unexpected kind");
+}
+
// Guards the class of an object. Because shape implies class, and a shape guard
// is faster than a class guard, if this is our first time attaching a stub, we
// instead generate a shape guard.
@@ -1983,25 +2028,16 @@ void IRGenerator::emitOptimisticClassGuard(ObjOperandId objId, JSObject* obj,
#ifdef DEBUG
switch (kind) {
case GuardClassKind::Array:
- MOZ_ASSERT(obj->is<ArrayObject>());
- break;
case GuardClassKind::PlainObject:
- MOZ_ASSERT(obj->is<PlainObject>());
- break;
case GuardClassKind::FixedLengthArrayBuffer:
- MOZ_ASSERT(obj->is<FixedLengthArrayBufferObject>());
- break;
+ case GuardClassKind::ResizableArrayBuffer:
case GuardClassKind::FixedLengthSharedArrayBuffer:
- MOZ_ASSERT(obj->is<FixedLengthSharedArrayBufferObject>());
- break;
+ case GuardClassKind::GrowableSharedArrayBuffer:
case GuardClassKind::FixedLengthDataView:
- MOZ_ASSERT(obj->is<FixedLengthDataViewObject>());
- break;
+ case GuardClassKind::ResizableDataView:
case GuardClassKind::Set:
- MOZ_ASSERT(obj->is<SetObject>());
- break;
case GuardClassKind::Map:
- MOZ_ASSERT(obj->is<MapObject>());
+ MOZ_ASSERT(obj->hasClass(ClassFor(kind)));
break;
case GuardClassKind::MappedArguments:
@@ -2077,8 +2113,7 @@ AttachDecision GetPropIRGenerator::tryAttachObjectLength(HandleObject obj,
AttachDecision GetPropIRGenerator::tryAttachTypedArray(HandleObject obj,
ObjOperandId objId,
HandleId id) {
- // TODO: Support resizable typed arrays. (bug 1842999)
- if (!obj->is<FixedLengthTypedArrayObject>()) {
+ if (!obj->is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
@@ -2120,31 +2155,52 @@ AttachDecision GetPropIRGenerator::tryAttachTypedArray(HandleObject obj,
}
}
- auto* tarr = &obj->as<FixedLengthTypedArrayObject>();
+ auto* tarr = &obj->as<TypedArrayObject>();
maybeEmitIdGuard(id);
// Emit all the normal guards for calling this native, but specialize
// callNativeGetterResult.
EmitCallGetterResultGuards(writer, tarr, holder, id, *prop, objId, mode_);
if (isLength) {
- if (tarr->length() <= INT32_MAX) {
- writer.loadArrayBufferViewLengthInt32Result(objId);
+ size_t length = tarr->length().valueOr(0);
+ if (tarr->is<FixedLengthTypedArrayObject>()) {
+ if (length <= INT32_MAX) {
+ writer.loadArrayBufferViewLengthInt32Result(objId);
+ } else {
+ writer.loadArrayBufferViewLengthDoubleResult(objId);
+ }
} else {
- writer.loadArrayBufferViewLengthDoubleResult(objId);
+ if (length <= INT32_MAX) {
+ writer.resizableTypedArrayLengthInt32Result(objId);
+ } else {
+ writer.resizableTypedArrayLengthDoubleResult(objId);
+ }
}
trackAttached("GetProp.TypedArrayLength");
} else if (isByteOffset) {
- if (tarr->byteOffset() <= INT32_MAX) {
+ // byteOffset doesn't need to use different code paths for fixed-length and
+ // resizable TypedArrays.
+ size_t byteOffset = tarr->byteOffset().valueOr(0);
+ if (byteOffset <= INT32_MAX) {
writer.arrayBufferViewByteOffsetInt32Result(objId);
} else {
writer.arrayBufferViewByteOffsetDoubleResult(objId);
}
trackAttached("GetProp.TypedArrayByteOffset");
} else {
- if (tarr->byteLength() <= INT32_MAX) {
- writer.typedArrayByteLengthInt32Result(objId);
+ size_t byteLength = tarr->byteLength().valueOr(0);
+ if (tarr->is<FixedLengthTypedArrayObject>()) {
+ if (byteLength <= INT32_MAX) {
+ writer.typedArrayByteLengthInt32Result(objId);
+ } else {
+ writer.typedArrayByteLengthDoubleResult(objId);
+ }
} else {
- writer.typedArrayByteLengthDoubleResult(objId);
+ if (byteLength <= INT32_MAX) {
+ writer.resizableTypedArrayByteLengthInt32Result(objId);
+ } else {
+ writer.resizableTypedArrayByteLengthDoubleResult(objId);
+ }
}
trackAttached("GetProp.TypedArrayByteLength");
}
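The split between the FixedLength* and resizable result ops reflects that a view backed by a resizable buffer has no stable length. At the JS level (illustrative sketch, not part of the patch):

    const rab = new ArrayBuffer(8, { maxByteLength: 32 });  // resizable buffer
    const ta = new Int8Array(rab);                          // length-tracking view
    assertEq(ta.length, 8);
    rab.resize(32);
    assertEq(ta.length, 32);  // length follows the buffer, so the IC must recompute it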
@@ -2156,11 +2212,10 @@ AttachDecision GetPropIRGenerator::tryAttachTypedArray(HandleObject obj,
AttachDecision GetPropIRGenerator::tryAttachDataView(HandleObject obj,
ObjOperandId objId,
HandleId id) {
- // TODO: Support resizable dataviews. (bug 1842999)
- if (!obj->is<FixedLengthDataViewObject>()) {
+ if (!obj->is<DataViewObject>()) {
return AttachDecision::NoAction;
}
- auto* dv = &obj->as<FixedLengthDataViewObject>();
+ auto* dv = &obj->as<DataViewObject>();
if (mode_ != ICState::Mode::Specialized) {
return AttachDecision::NoAction;
@@ -2181,6 +2236,12 @@ AttachDecision GetPropIRGenerator::tryAttachDataView(HandleObject obj,
return AttachDecision::NoAction;
}
+ // byteOffset and byteLength both throw when the ArrayBuffer is out-of-bounds.
+ if (dv->is<ResizableDataViewObject>() &&
+ dv->as<ResizableDataViewObject>().isOutOfBounds()) {
+ return AttachDecision::NoAction;
+ }
+
NativeObject* holder = nullptr;
Maybe<PropertyInfo> prop;
NativeGetPropKind kind =
@@ -2205,18 +2266,33 @@ AttachDecision GetPropIRGenerator::tryAttachDataView(HandleObject obj,
// callNativeGetterResult.
EmitCallGetterResultGuards(writer, dv, holder, id, *prop, objId, mode_);
writer.guardHasAttachedArrayBuffer(objId);
+ if (dv->is<ResizableDataViewObject>()) {
+ writer.guardResizableArrayBufferViewInBounds(objId);
+ }
if (isByteOffset) {
- if (dv->byteOffset() <= INT32_MAX) {
+ // byteOffset doesn't need to use different code paths for fixed-length and
+ // resizable DataViews.
+ size_t byteOffset = dv->byteOffset().valueOr(0);
+ if (byteOffset <= INT32_MAX) {
writer.arrayBufferViewByteOffsetInt32Result(objId);
} else {
writer.arrayBufferViewByteOffsetDoubleResult(objId);
}
trackAttached("GetProp.DataViewByteOffset");
} else {
- if (dv->byteLength() <= INT32_MAX) {
- writer.loadArrayBufferViewLengthInt32Result(objId);
+ size_t byteLength = dv->byteLength().valueOr(0);
+ if (dv->is<FixedLengthDataViewObject>()) {
+ if (byteLength <= INT32_MAX) {
+ writer.loadArrayBufferViewLengthInt32Result(objId);
+ } else {
+ writer.loadArrayBufferViewLengthDoubleResult(objId);
+ }
} else {
- writer.loadArrayBufferViewLengthDoubleResult(objId);
+ if (byteLength <= INT32_MAX) {
+ writer.resizableDataViewByteLengthInt32Result(objId);
+ } else {
+ writer.resizableDataViewByteLengthDoubleResult(objId);
+ }
}
trackAttached("GetProp.DataViewByteLength");
}
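The guardResizableArrayBufferViewInBounds guard and the earlier isOutOfBounds() bail-out cover resizable-backed DataViews whose buffer has shrunk underneath them; in that state byteLength and byteOffset throw. Illustrative JS (a plain try/catch is used here rather than any particular assert helper):

    const buf = new ArrayBuffer(16, { maxByteLength: 16 });
    const dv = new DataView(buf, 8, 8);  // explicit length at offset 8
    buf.resize(4);                       // the view is now out of bounds
    let threw = false;
    try { dv.byteLength; } catch (e) { threw = e instanceof TypeError; }
    assertEq(threw, true);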
@@ -2232,11 +2308,6 @@ AttachDecision GetPropIRGenerator::tryAttachArrayBufferMaybeShared(
}
auto* buf = &obj->as<ArrayBufferObjectMaybeShared>();
- // TODO: Support resizable buffers. (bug 1842999)
- if (buf->isResizable()) {
- return AttachDecision::NoAction;
- }
-
if (mode_ != ICState::Mode::Specialized) {
return AttachDecision::NoAction;
}
@@ -2273,10 +2344,18 @@ AttachDecision GetPropIRGenerator::tryAttachArrayBufferMaybeShared(
// Emit all the normal guards for calling this native, but specialize
// callNativeGetterResult.
EmitCallGetterResultGuards(writer, buf, holder, id, *prop, objId, mode_);
- if (buf->byteLength() <= INT32_MAX) {
- writer.loadArrayBufferByteLengthInt32Result(objId);
+ if (!buf->is<GrowableSharedArrayBufferObject>()) {
+ if (buf->byteLength() <= INT32_MAX) {
+ writer.loadArrayBufferByteLengthInt32Result(objId);
+ } else {
+ writer.loadArrayBufferByteLengthDoubleResult(objId);
+ }
} else {
- writer.loadArrayBufferByteLengthDoubleResult(objId);
+ if (buf->byteLength() <= INT32_MAX) {
+ writer.growableSharedArrayBufferByteLengthInt32Result(objId);
+ } else {
+ writer.growableSharedArrayBufferByteLengthDoubleResult(objId);
+ }
}
writer.returnFromIC();
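The byteLength getter IC now distinguishes growable SharedArrayBuffers from the other buffer kinds; these are the JS-level objects involved (sketch, not part of the patch):

    const ab = new ArrayBuffer(8, { maxByteLength: 64 });         // resizable
    ab.resize(16);
    assertEq(ab.byteLength, 16);
    const sab = new SharedArrayBuffer(8, { maxByteLength: 64 });  // growable
    sab.grow(16);
    assertEq(sab.byteLength, 16);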
@@ -3044,9 +3123,8 @@ AttachDecision GetPropIRGenerator::tryAttachSparseElement(
// For Uint32Array we let the stub return an Int32 if we have not seen a
// double, to allow better codegen in Warp while avoiding bailout loops.
-static bool ForceDoubleForUint32Array(FixedLengthTypedArrayObject* tarr,
- uint64_t index) {
- MOZ_ASSERT(index < tarr->length());
+static bool ForceDoubleForUint32Array(TypedArrayObject* tarr, uint64_t index) {
+ MOZ_ASSERT(index < tarr->length().valueOr(0));
if (tarr->type() != Scalar::Type::Uint32) {
// Return value is only relevant for Uint32Array.
@@ -3059,10 +3137,27 @@ static bool ForceDoubleForUint32Array(FixedLengthTypedArrayObject* tarr,
return res.isDouble();
}
+static ArrayBufferViewKind ToArrayBufferViewKind(const TypedArrayObject* obj) {
+ if (obj->is<FixedLengthTypedArrayObject>()) {
+ return ArrayBufferViewKind::FixedLength;
+ }
+
+ MOZ_ASSERT(obj->is<ResizableTypedArrayObject>());
+ return ArrayBufferViewKind::Resizable;
+}
+
+static ArrayBufferViewKind ToArrayBufferViewKind(const DataViewObject* obj) {
+ if (obj->is<FixedLengthDataViewObject>()) {
+ return ArrayBufferViewKind::FixedLength;
+ }
+
+ MOZ_ASSERT(obj->is<ResizableDataViewObject>());
+ return ArrayBufferViewKind::Resizable;
+}
+
AttachDecision GetPropIRGenerator::tryAttachTypedArrayElement(
HandleObject obj, ObjOperandId objId) {
- // TODO: Support resizable typed arrays. (bug 1842999)
- if (!obj->is<FixedLengthTypedArrayObject>()) {
+ if (!obj->is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
@@ -3070,12 +3165,12 @@ AttachDecision GetPropIRGenerator::tryAttachTypedArrayElement(
return AttachDecision::NoAction;
}
- auto* tarr = &obj->as<FixedLengthTypedArrayObject>();
+ auto* tarr = &obj->as<TypedArrayObject>();
bool handleOOB = false;
int64_t indexInt64;
if (!ValueIsInt64Index(idVal_, &indexInt64) || indexInt64 < 0 ||
- uint64_t(indexInt64) >= tarr->length()) {
+ uint64_t(indexInt64) >= tarr->length().valueOr(0)) {
handleOOB = true;
}
@@ -3092,8 +3187,9 @@ AttachDecision GetPropIRGenerator::tryAttachTypedArrayElement(
ValOperandId keyId = getElemKeyValueId();
IntPtrOperandId intPtrIndexId = guardToIntPtrIndex(idVal_, keyId, handleOOB);
+ auto viewKind = ToArrayBufferViewKind(tarr);
writer.loadTypedArrayElementResult(objId, intPtrIndexId, tarr->type(),
- handleOOB, forceDoubleForUint32);
+ handleOOB, forceDoubleForUint32, viewKind);
writer.returnFromIC();
trackAttached("GetProp.TypedElement");
@@ -3376,7 +3472,7 @@ AttachDecision GetNameIRGenerator::tryAttachGlobalNameGetter(ObjOperandId objId,
GlobalObject* global = &globalLexical->global();
- NativeGetPropKind kind = IsCacheableGetPropCall(global, holder, *prop);
+ NativeGetPropKind kind = IsCacheableGetPropCall(global, holder, *prop, pc_);
if (kind != NativeGetPropKind::NativeGetter &&
kind != NativeGetPropKind::ScriptedGetter) {
return AttachDecision::NoAction;
@@ -3957,11 +4053,19 @@ AttachDecision HasPropIRGenerator::tryAttachNative(NativeObject* obj,
return AttachDecision::Attach;
}
+static void EmitGuardTypedArray(CacheIRWriter& writer, TypedArrayObject* obj,
+ ObjOperandId objId) {
+ if (obj->is<FixedLengthTypedArrayObject>()) {
+ writer.guardIsFixedLengthTypedArray(objId);
+ } else {
+ writer.guardIsResizableTypedArray(objId);
+ }
+}
+
AttachDecision HasPropIRGenerator::tryAttachTypedArray(HandleObject obj,
ObjOperandId objId,
ValOperandId keyId) {
- // TODO: Support resizable typed arrays. (bug 1842999)
- if (!obj->is<FixedLengthTypedArrayObject>()) {
+ if (!obj->is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
@@ -3970,10 +4074,14 @@ AttachDecision HasPropIRGenerator::tryAttachTypedArray(HandleObject obj,
return AttachDecision::NoAction;
}
- writer.guardIsFixedLengthTypedArray(objId);
+ auto* tarr = &obj->as<TypedArrayObject>();
+ EmitGuardTypedArray(writer, tarr, objId);
+
IntPtrOperandId intPtrIndexId =
guardToIntPtrIndex(idVal_, keyId, /* supportOOB = */ true);
- writer.loadTypedArrayElementExistsResult(objId, intPtrIndexId);
+
+ auto viewKind = ToArrayBufferViewKind(tarr);
+ writer.loadTypedArrayElementExistsResult(objId, intPtrIndexId, viewKind);
writer.returnFromIC();
trackAttached("HasProp.TypedArrayObject");
@@ -4940,15 +5048,14 @@ AttachDecision SetPropIRGenerator::tryAttachAddOrUpdateSparseElement(
AttachDecision SetPropIRGenerator::tryAttachSetTypedArrayElement(
HandleObject obj, ObjOperandId objId, ValOperandId rhsId) {
- // TODO: Support resizable typed arrays. (bug 1842999)
- if (!obj->is<FixedLengthTypedArrayObject>()) {
+ if (!obj->is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
if (!idVal_.isNumber()) {
return AttachDecision::NoAction;
}
- auto* tarr = &obj->as<FixedLengthTypedArrayObject>();
+ auto* tarr = &obj->as<TypedArrayObject>();
Scalar::Type elementType = tarr->type();
// Don't attach if the input type doesn't match the guard added below.
@@ -4959,7 +5066,7 @@ AttachDecision SetPropIRGenerator::tryAttachSetTypedArrayElement(
bool handleOOB = false;
int64_t indexInt64;
if (!ValueIsInt64Index(idVal_, &indexInt64) || indexInt64 < 0 ||
- uint64_t(indexInt64) >= tarr->length()) {
+ uint64_t(indexInt64) >= tarr->length().valueOr(0)) {
handleOOB = true;
}
@@ -4980,8 +5087,9 @@ AttachDecision SetPropIRGenerator::tryAttachSetTypedArrayElement(
ValOperandId keyId = setElemKeyValueId();
IntPtrOperandId indexId = guardToIntPtrIndex(idVal_, keyId, handleOOB);
+ auto viewKind = ToArrayBufferViewKind(tarr);
writer.storeTypedArrayElement(objId, elementType, indexId, rhsValId,
- handleOOB);
+ handleOOB, viewKind);
writer.returnFromIC();
trackAttached(handleOOB ? "SetTypedElementOOB" : "SetTypedElement");
@@ -6470,9 +6578,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachArrayIsArray() {
AttachDecision InlinableNativeIRGenerator::tryAttachDataViewGet(
Scalar::Type type) {
// Ensure |this| is a DataViewObject.
- // TODO: Support resizable dataviews. (bug 1842999)
- if (!thisval_.isObject() ||
- !thisval_.toObject().is<FixedLengthDataViewObject>()) {
+ if (!thisval_.isObject() || !thisval_.toObject().is<DataViewObject>()) {
return AttachDecision::NoAction;
}
@@ -6488,11 +6594,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewGet(
return AttachDecision::NoAction;
}
- auto* dv = &thisval_.toObject().as<FixedLengthDataViewObject>();
+ auto* dv = &thisval_.toObject().as<DataViewObject>();
// Bounds check the offset.
- if (offsetInt64 < 0 ||
- !dv->offsetIsInBounds(Scalar::byteSize(type), offsetInt64)) {
+ size_t byteLength = dv->byteLength().valueOr(0);
+ if (offsetInt64 < 0 || !DataViewObject::offsetIsInBounds(
+ Scalar::byteSize(type), offsetInt64, byteLength)) {
return AttachDecision::NoAction;
}
@@ -6501,7 +6608,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewGet(
bool forceDoubleForUint32 = false;
if (type == Scalar::Uint32) {
bool isLittleEndian = argc_ > 1 && args_[1].toBoolean();
- uint32_t res = dv->read<uint32_t>(offsetInt64, isLittleEndian);
+ uint32_t res = dv->read<uint32_t>(offsetInt64, byteLength, isLittleEndian);
forceDoubleForUint32 = res >= INT32_MAX;
}
@@ -6515,8 +6622,14 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewGet(
ValOperandId thisValId =
writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
ObjOperandId objId = writer.guardToObject(thisValId);
- emitOptimisticClassGuard(objId, &thisval_.toObject(),
- GuardClassKind::FixedLengthDataView);
+
+ if (dv->is<FixedLengthDataViewObject>()) {
+ emitOptimisticClassGuard(objId, &thisval_.toObject(),
+ GuardClassKind::FixedLengthDataView);
+ } else {
+ emitOptimisticClassGuard(objId, &thisval_.toObject(),
+ GuardClassKind::ResizableDataView);
+ }
// Convert offset to intPtr.
ValOperandId offsetId =
@@ -6533,8 +6646,10 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewGet(
boolLittleEndianId = writer.loadBooleanConstant(false);
}
+ auto viewKind = ToArrayBufferViewKind(dv);
writer.loadDataViewValueResult(objId, intPtrOffsetId, boolLittleEndianId,
- type, forceDoubleForUint32);
+ type, forceDoubleForUint32, viewKind);
+
writer.returnFromIC();
trackAttached("DataViewGet");
@@ -6544,9 +6659,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewGet(
AttachDecision InlinableNativeIRGenerator::tryAttachDataViewSet(
Scalar::Type type) {
// Ensure |this| is a DataViewObject.
- // TODO: Support resizable dataviews. (bug 1842999)
- if (!thisval_.isObject() ||
- !thisval_.toObject().is<FixedLengthDataViewObject>()) {
+ if (!thisval_.isObject() || !thisval_.toObject().is<DataViewObject>()) {
return AttachDecision::NoAction;
}
@@ -6565,11 +6678,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewSet(
return AttachDecision::NoAction;
}
- auto* dv = &thisval_.toObject().as<FixedLengthDataViewObject>();
+ auto* dv = &thisval_.toObject().as<DataViewObject>();
// Bounds check the offset.
- if (offsetInt64 < 0 ||
- !dv->offsetIsInBounds(Scalar::byteSize(type), offsetInt64)) {
+ size_t byteLength = dv->byteLength().valueOr(0);
+ if (offsetInt64 < 0 || !DataViewObject::offsetIsInBounds(
+ Scalar::byteSize(type), offsetInt64, byteLength)) {
return AttachDecision::NoAction;
}
@@ -6583,8 +6697,14 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewSet(
ValOperandId thisValId =
writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
ObjOperandId objId = writer.guardToObject(thisValId);
- emitOptimisticClassGuard(objId, &thisval_.toObject(),
- GuardClassKind::FixedLengthDataView);
+
+ if (dv->is<FixedLengthDataViewObject>()) {
+ emitOptimisticClassGuard(objId, &thisval_.toObject(),
+ GuardClassKind::FixedLengthDataView);
+ } else {
+ emitOptimisticClassGuard(objId, &thisval_.toObject(),
+ GuardClassKind::ResizableDataView);
+ }
// Convert offset to intPtr.
ValOperandId offsetId =
@@ -6606,8 +6726,10 @@ AttachDecision InlinableNativeIRGenerator::tryAttachDataViewSet(
boolLittleEndianId = writer.loadBooleanConstant(false);
}
+ auto viewKind = ToArrayBufferViewKind(dv);
writer.storeDataViewValueResult(objId, intPtrOffsetId, numericValueId,
- boolLittleEndianId, type);
+ boolLittleEndianId, type, viewKind);
+
writer.returnFromIC();
trackAttached("DataViewSet");
@@ -6949,19 +7071,84 @@ AttachDecision InlinableNativeIRGenerator::tryAttachGuardToClass(
return AttachDecision::Attach;
}
+AttachDecision InlinableNativeIRGenerator::tryAttachGuardToClass(
+ GuardClassKind kind) {
+ // Self-hosted code calls this with an object argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+
+ // Class must match.
+ const JSClass* clasp = ClassFor(kind);
+ if (args_[0].toObject().getClass() != clasp) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Guard that the argument is an object.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(argId);
+
+ // Guard that the object has the correct class.
+ writer.guardClass(objId, kind);
+
+ // Return the object.
+ writer.loadObjectResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("GuardToClass");
+ return AttachDecision::Attach;
+}
+
+AttachDecision InlinableNativeIRGenerator::tryAttachGuardToEitherClass(
+ GuardClassKind kind1, GuardClassKind kind2) {
+ MOZ_ASSERT(kind1 != kind2,
+ "prefer tryAttachGuardToClass for the same class case");
+
+ // Self-hosted code calls this with an object argument.
+ MOZ_ASSERT(argc_ == 1);
+ MOZ_ASSERT(args_[0].isObject());
+
+ // Class must match.
+ const JSClass* clasp1 = ClassFor(kind1);
+ const JSClass* clasp2 = ClassFor(kind2);
+ const JSClass* objClass = args_[0].toObject().getClass();
+ if (objClass != clasp1 && objClass != clasp2) {
+ return AttachDecision::NoAction;
+ }
+
+ // Initialize the input operand.
+ initializeInputOperand();
+
+ // Note: we don't need to call emitNativeCalleeGuard for intrinsics.
+
+ // Guard that the argument is an object.
+ ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+ ObjOperandId objId = writer.guardToObject(argId);
+
+ // Guard that the object has the correct class.
+ writer.guardEitherClass(objId, kind1, kind2);
+
+ // Return the object.
+ writer.loadObjectResult(objId);
+ writer.returnFromIC();
+
+ trackAttached("GuardToEitherClass");
+ return AttachDecision::Attach;
+}
+
AttachDecision InlinableNativeIRGenerator::tryAttachGuardToArrayBuffer() {
- // TODO: Support resizable ArrayBuffers (bug 1842999), for now simply
- // pass through to tryAttachGuardToClass which guards on
- // FixedLengthArrayBufferObject.
- return tryAttachGuardToClass(InlinableNative::IntrinsicGuardToArrayBuffer);
+ return tryAttachGuardToEitherClass(GuardClassKind::FixedLengthArrayBuffer,
+ GuardClassKind::ResizableArrayBuffer);
}
AttachDecision InlinableNativeIRGenerator::tryAttachGuardToSharedArrayBuffer() {
- // TODO: Support resizable SharedArrayBuffers (bug 1842999), for now simply
- // pass through to tryAttachGuardToClass which guards on
- // FixedLengthSharedArrayBufferObject.
- return tryAttachGuardToClass(
- InlinableNative::IntrinsicGuardToSharedArrayBuffer);
+ return tryAttachGuardToEitherClass(
+ GuardClassKind::FixedLengthSharedArrayBuffer,
+ GuardClassKind::GrowableSharedArrayBuffer);
}
AttachDecision InlinableNativeIRGenerator::tryAttachHasClass(
@@ -8924,7 +9111,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachReflectGetPrototypeOf() {
return AttachDecision::Attach;
}
-static bool AtomicsMeetsPreconditions(FixedLengthTypedArrayObject* typedArray,
+static bool AtomicsMeetsPreconditions(TypedArrayObject* typedArray,
const Value& index) {
switch (typedArray->type()) {
case Scalar::Int8:
@@ -8954,7 +9141,8 @@ static bool AtomicsMeetsPreconditions(FixedLengthTypedArrayObject* typedArray,
if (!ValueIsInt64Index(index, &indexInt64)) {
return false;
}
- if (indexInt64 < 0 || uint64_t(indexInt64) >= typedArray->length()) {
+ if (indexInt64 < 0 ||
+ uint64_t(indexInt64) >= typedArray->length().valueOr(0)) {
return false;
}
@@ -8971,17 +9159,15 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsCompareExchange() {
return AttachDecision::NoAction;
}
- // TODO: Support resizable typed arrays. (bug 1842999)
// Arguments: typedArray, index (number), expected, replacement.
- if (!args_[0].isObject() ||
- !args_[0].toObject().is<FixedLengthTypedArrayObject>()) {
+ if (!args_[0].isObject() || !args_[0].toObject().is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
if (!args_[1].isNumber()) {
return AttachDecision::NoAction;
}
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
if (!AtomicsMeetsPreconditions(typedArray, args_[1])) {
return AttachDecision::NoAction;
}
@@ -9022,8 +9208,10 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsCompareExchange() {
OperandId numericReplacementId =
emitNumericGuard(replacementId, args_[3], elementType);
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsCompareExchangeResult(objId, intPtrIndexId, numericExpectedId,
- numericReplacementId, typedArray->type());
+ numericReplacementId, typedArray->type(),
+ viewKind);
writer.returnFromIC();
trackAttached("AtomicsCompareExchange");
@@ -9040,17 +9228,15 @@ bool InlinableNativeIRGenerator::canAttachAtomicsReadWriteModify() {
return false;
}
- // TODO: Support resizable typed arrays. (bug 1842999)
// Arguments: typedArray, index (number), value.
- if (!args_[0].isObject() ||
- !args_[0].toObject().is<FixedLengthTypedArrayObject>()) {
+ if (!args_[0].isObject() || !args_[0].toObject().is<TypedArrayObject>()) {
return false;
}
if (!args_[1].isNumber()) {
return false;
}
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
if (!AtomicsMeetsPreconditions(typedArray, args_[1])) {
return false;
}
@@ -9064,7 +9250,7 @@ InlinableNativeIRGenerator::AtomicsReadWriteModifyOperands
InlinableNativeIRGenerator::emitAtomicsReadWriteModifyOperands() {
MOZ_ASSERT(canAttachAtomicsReadWriteModify());
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
// Initialize the input operand.
initializeInputOperand();
@@ -9099,10 +9285,11 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsExchange() {
auto [objId, intPtrIndexId, numericValueId] =
emitAtomicsReadWriteModifyOperands();
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsExchangeResult(objId, intPtrIndexId, numericValueId,
- typedArray->type());
+ typedArray->type(), viewKind);
writer.returnFromIC();
trackAttached("AtomicsExchange");
@@ -9117,11 +9304,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsAdd() {
auto [objId, intPtrIndexId, numericValueId] =
emitAtomicsReadWriteModifyOperands();
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
bool forEffect = ignoresResult();
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsAddResult(objId, intPtrIndexId, numericValueId,
- typedArray->type(), forEffect);
+ typedArray->type(), forEffect, viewKind);
writer.returnFromIC();
trackAttached("AtomicsAdd");
@@ -9136,11 +9324,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsSub() {
auto [objId, intPtrIndexId, numericValueId] =
emitAtomicsReadWriteModifyOperands();
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
bool forEffect = ignoresResult();
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsSubResult(objId, intPtrIndexId, numericValueId,
- typedArray->type(), forEffect);
+ typedArray->type(), forEffect, viewKind);
writer.returnFromIC();
trackAttached("AtomicsSub");
@@ -9155,11 +9344,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsAnd() {
auto [objId, intPtrIndexId, numericValueId] =
emitAtomicsReadWriteModifyOperands();
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
bool forEffect = ignoresResult();
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsAndResult(objId, intPtrIndexId, numericValueId,
- typedArray->type(), forEffect);
+ typedArray->type(), forEffect, viewKind);
writer.returnFromIC();
trackAttached("AtomicsAnd");
@@ -9174,11 +9364,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsOr() {
auto [objId, intPtrIndexId, numericValueId] =
emitAtomicsReadWriteModifyOperands();
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
bool forEffect = ignoresResult();
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsOrResult(objId, intPtrIndexId, numericValueId,
- typedArray->type(), forEffect);
+ typedArray->type(), forEffect, viewKind);
writer.returnFromIC();
trackAttached("AtomicsOr");
@@ -9193,11 +9384,12 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsXor() {
auto [objId, intPtrIndexId, numericValueId] =
emitAtomicsReadWriteModifyOperands();
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
bool forEffect = ignoresResult();
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsXorResult(objId, intPtrIndexId, numericValueId,
- typedArray->type(), forEffect);
+ typedArray->type(), forEffect, viewKind);
writer.returnFromIC();
trackAttached("AtomicsXor");
@@ -9214,17 +9406,15 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsLoad() {
return AttachDecision::NoAction;
}
- // TODO: Support resizable typed arrays. (bug 1842999)
// Arguments: typedArray, index (number).
- if (!args_[0].isObject() ||
- !args_[0].toObject().is<FixedLengthTypedArrayObject>()) {
+ if (!args_[0].isObject() || !args_[0].toObject().is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
if (!args_[1].isNumber()) {
return AttachDecision::NoAction;
}
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
if (!AtomicsMeetsPreconditions(typedArray, args_[1])) {
return AttachDecision::NoAction;
}
@@ -9245,7 +9435,8 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsLoad() {
IntPtrOperandId intPtrIndexId =
guardToIntPtrIndex(args_[1], indexId, /* supportOOB = */ false);
- writer.atomicsLoadResult(objId, intPtrIndexId, typedArray->type());
+ auto viewKind = ToArrayBufferViewKind(typedArray);
+ writer.atomicsLoadResult(objId, intPtrIndexId, typedArray->type(), viewKind);
writer.returnFromIC();
trackAttached("AtomicsLoad");
@@ -9271,17 +9462,15 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsStore() {
// obviously unused or if the argument is already Int32 and thus requires no
// conversion.
- // TODO: Support resizable typed arrays. (bug 1842999)
// Arguments: typedArray, index (number), value.
- if (!args_[0].isObject() ||
- !args_[0].toObject().is<FixedLengthTypedArrayObject>()) {
+ if (!args_[0].isObject() || !args_[0].toObject().is<TypedArrayObject>()) {
return AttachDecision::NoAction;
}
if (!args_[1].isNumber()) {
return AttachDecision::NoAction;
}
- auto* typedArray = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* typedArray = &args_[0].toObject().as<TypedArrayObject>();
if (!AtomicsMeetsPreconditions(typedArray, args_[1])) {
return AttachDecision::NoAction;
}
@@ -9323,8 +9512,9 @@ AttachDecision InlinableNativeIRGenerator::tryAttachAtomicsStore() {
numericValueId = emitNumericGuard(valueId, args_[2], elementType);
}
+ auto viewKind = ToArrayBufferViewKind(typedArray);
writer.atomicsStoreResult(objId, intPtrIndexId, numericValueId,
- typedArray->type());
+ typedArray->type(), viewKind);
writer.returnFromIC();
trackAttached("AtomicsStore");
@@ -10182,12 +10372,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayByteOffset() {
MOZ_ASSERT(args_[0].isObject());
MOZ_ASSERT(args_[0].toObject().is<TypedArrayObject>());
- // TODO: Support resizable typed arrays. (bug 1842999)
- if (!args_[0].toObject().is<FixedLengthTypedArrayObject>()) {
- return AttachDecision::NoAction;
- }
-
- auto* tarr = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ auto* tarr = &args_[0].toObject().as<TypedArrayObject>();
// Initialize the input operand.
initializeInputOperand();
@@ -10196,12 +10381,25 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayByteOffset() {
ValOperandId argId = writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
ObjOperandId objArgId = writer.guardToObject(argId);
- writer.guardIsFixedLengthTypedArray(objArgId);
- if (tarr->byteOffset() <= INT32_MAX) {
- writer.arrayBufferViewByteOffsetInt32Result(objArgId);
+
+ EmitGuardTypedArray(writer, tarr, objArgId);
+
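+  // Resizable views can report an out-of-bounds byte offset after their buffer
+  // shrinks, so read it via byteOffsetMaybeOutOfBounds() and pick the result
+  // op per view kind below.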
+ size_t byteOffset = tarr->byteOffsetMaybeOutOfBounds();
+ if (tarr->is<FixedLengthTypedArrayObject>()) {
+ if (byteOffset <= INT32_MAX) {
+ writer.arrayBufferViewByteOffsetInt32Result(objArgId);
+ } else {
+ writer.arrayBufferViewByteOffsetDoubleResult(objArgId);
+ }
} else {
- writer.arrayBufferViewByteOffsetDoubleResult(objArgId);
+ if (byteOffset <= INT32_MAX) {
+ writer.resizableTypedArrayByteOffsetMaybeOutOfBoundsInt32Result(objArgId);
+ } else {
+ writer.resizableTypedArrayByteOffsetMaybeOutOfBoundsDoubleResult(
+ objArgId);
+ }
}
+
writer.returnFromIC();
trackAttached("IntrinsicTypedArrayByteOffset");
@@ -10229,7 +10427,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayElementSize() {
}
AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayLength(
- bool isPossiblyWrapped) {
+ bool isPossiblyWrapped, bool allowOutOfBounds) {
// Self-hosted code calls this with a single, possibly wrapped,
// TypedArrayObject argument.
MOZ_ASSERT(argc_ == 1);
@@ -10242,12 +10440,19 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayLength(
MOZ_ASSERT(args_[0].toObject().is<TypedArrayObject>());
- // TODO: Support resizable typed arrays. (bug 1842999)
- if (!args_[0].toObject().is<FixedLengthTypedArrayObject>()) {
- return AttachDecision::NoAction;
- }
+ auto* tarr = &args_[0].toObject().as<TypedArrayObject>();
- auto* tarr = &args_[0].toObject().as<FixedLengthTypedArrayObject>();
+ // Don't optimize when a resizable TypedArray is out-of-bounds and
+ // out-of-bounds isn't allowed.
+ auto length = tarr->length();
+ if (length.isNothing() && !tarr->hasDetachedBuffer()) {
+ MOZ_ASSERT(tarr->is<ResizableTypedArrayObject>());
+ MOZ_ASSERT(tarr->isOutOfBounds());
+
+ if (!allowOutOfBounds) {
+ return AttachDecision::NoAction;
+ }
+ }
// Initialize the input operand.
initializeInputOperand();
@@ -10261,11 +10466,24 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayLength(
writer.guardIsNotProxy(objArgId);
}
- writer.guardIsFixedLengthTypedArray(objArgId);
- if (tarr->length() <= INT32_MAX) {
- writer.loadArrayBufferViewLengthInt32Result(objArgId);
+ EmitGuardTypedArray(writer, tarr, objArgId);
+
+ if (tarr->is<FixedLengthTypedArrayObject>()) {
+ if (length.valueOr(0) <= INT32_MAX) {
+ writer.loadArrayBufferViewLengthInt32Result(objArgId);
+ } else {
+ writer.loadArrayBufferViewLengthDoubleResult(objArgId);
+ }
} else {
- writer.loadArrayBufferViewLengthDoubleResult(objArgId);
+ if (!allowOutOfBounds) {
+ writer.guardResizableArrayBufferViewInBoundsOrDetached(objArgId);
+ }
+
+ if (length.valueOr(0) <= INT32_MAX) {
+ writer.resizableTypedArrayLengthInt32Result(objArgId);
+ } else {
+ writer.resizableTypedArrayLengthDoubleResult(objArgId);
+ }
}
writer.returnFromIC();
@@ -10273,13 +10491,6 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayLength(
return AttachDecision::Attach;
}
-AttachDecision
-InlinableNativeIRGenerator::tryAttachTypedArrayLengthZeroOnOutOfBounds() {
- // We don't yet inline resizable buffers, so this operation is equivalent to
- // the inline code path for tryAttachTypedArrayLength().
- return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ false);
-}
-
AttachDecision InlinableNativeIRGenerator::tryAttachArrayBufferByteLength(
bool isPossiblyWrapped) {
// Self-hosted code calls this with a single, possibly wrapped,
@@ -10296,11 +10507,6 @@ AttachDecision InlinableNativeIRGenerator::tryAttachArrayBufferByteLength(
auto* buffer = &args_[0].toObject().as<ArrayBufferObject>();
- // TODO: Support resizable buffers. (bug 1842999)
- if (buffer->isResizable()) {
- return AttachDecision::NoAction;
- }
-
// Initialize the input operand.
initializeInputOperand();
@@ -10662,14 +10868,6 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayConstructor() {
if (args_[0].isObject() && args_[0].toObject().is<ProxyObject>()) {
return AttachDecision::NoAction;
}
- if (args_[0].isObject() &&
- args_[0].toObject().is<ResizableArrayBufferObject>()) {
- return AttachDecision::NoAction;
- }
- if (args_[0].isObject() &&
- args_[0].toObject().is<GrowableSharedArrayBufferObject>()) {
- return AttachDecision::NoAction;
- }
#ifdef JS_CODEGEN_X86
// Unfortunately NewTypedArrayFromArrayBufferResult needs more registers than
@@ -10714,9 +10912,13 @@ AttachDecision InlinableNativeIRGenerator::tryAttachTypedArrayConstructor() {
// From ArrayBuffer.
if (obj->is<FixedLengthArrayBufferObject>()) {
writer.guardClass(objId, GuardClassKind::FixedLengthArrayBuffer);
- } else {
- MOZ_ASSERT(obj->is<FixedLengthSharedArrayBufferObject>());
+ } else if (obj->is<FixedLengthSharedArrayBufferObject>()) {
writer.guardClass(objId, GuardClassKind::FixedLengthSharedArrayBuffer);
+ } else if (obj->is<ResizableArrayBufferObject>()) {
+ writer.guardClass(objId, GuardClassKind::ResizableArrayBuffer);
+ } else {
+ MOZ_ASSERT(obj->is<GrowableSharedArrayBufferObject>());
+ writer.guardClass(objId, GuardClassKind::GrowableSharedArrayBuffer);
}
ValOperandId byteOffsetId;
if (argc_ > 1) {
@@ -11584,7 +11786,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachStub() {
// Map intrinsics.
case InlinableNative::IntrinsicGuardToMapObject:
- return tryAttachGuardToClass(native);
+ return tryAttachGuardToClass(GuardClassKind::Map);
case InlinableNative::IntrinsicGetNextMapEntryForIterator:
return tryAttachGetNextMapSetEntryForIterator(/* isMap = */ true);
@@ -11612,7 +11814,7 @@ AttachDecision InlinableNativeIRGenerator::tryAttachStub() {
// Set intrinsics.
case InlinableNative::IntrinsicGuardToSetObject:
- return tryAttachGuardToClass(native);
+ return tryAttachGuardToClass(GuardClassKind::Set);
case InlinableNative::IntrinsicGetNextSetEntryForIterator:
return tryAttachGetNextMapSetEntryForIterator(/* isMap = */ false);
@@ -11642,11 +11844,14 @@ AttachDecision InlinableNativeIRGenerator::tryAttachStub() {
case InlinableNative::IntrinsicTypedArrayElementSize:
return tryAttachTypedArrayElementSize();
case InlinableNative::IntrinsicTypedArrayLength:
- return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ false);
+ return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ false,
+ /* allowOutOfBounds = */ false);
case InlinableNative::IntrinsicTypedArrayLengthZeroOnOutOfBounds:
- return tryAttachTypedArrayLengthZeroOnOutOfBounds();
+ return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ false,
+ /* allowOutOfBounds = */ true);
case InlinableNative::IntrinsicPossiblyWrappedTypedArrayLength:
- return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ true);
+ return tryAttachTypedArrayLength(/* isPossiblyWrapped = */ true,
+ /* allowOutOfBounds = */ false);
// Reflect natives.
case InlinableNative::ReflectGetPrototypeOf:
diff --git a/js/src/jit/CacheIR.h b/js/src/jit/CacheIR.h
index b483257d12..9bedbb7ddc 100644
--- a/js/src/jit/CacheIR.h
+++ b/js/src/jit/CacheIR.h
@@ -515,8 +515,11 @@ enum class GuardClassKind : uint8_t {
Array,
PlainObject,
FixedLengthArrayBuffer,
+ ResizableArrayBuffer,
FixedLengthSharedArrayBuffer,
+ GrowableSharedArrayBuffer,
FixedLengthDataView,
+ ResizableDataView,
MappedArguments,
UnmappedArguments,
WindowProxy,
@@ -526,6 +529,13 @@ enum class GuardClassKind : uint8_t {
Map,
};
+const JSClass* ClassFor(GuardClassKind kind);
+
+enum class ArrayBufferViewKind : uint8_t {
+ FixedLength,
+ Resizable,
+};
+
} // namespace jit
} // namespace js
diff --git a/js/src/jit/CacheIRCompiler.cpp b/js/src/jit/CacheIRCompiler.cpp
index 73f3831371..1467cebe08 100644
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -18,7 +18,6 @@
#include "jsmath.h"
#include "builtin/DataViewObject.h"
-#include "builtin/MapObject.h"
#include "builtin/Object.h"
#include "gc/GCEnum.h"
#include "gc/SweepingAPI.h" // js::gc::AutoLockStoreBuffer
@@ -1365,6 +1364,8 @@ bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
const CacheIRStubInfo* stubInfo) {
using Type = StubField::Type;
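+  // Keep tracing the remaining weak fields even after finding a dead edge so
+  // every edge gets updated; return false at the end if any edge died.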
+ bool isDead = false;
+
uint32_t field = 0;
size_t offset = 0;
while (true) {
@@ -1375,7 +1376,7 @@ bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
auto r = TraceWeakEdge(trc, &shapeField, "cacheir-weak-shape");
if (r.isDead()) {
- return false;
+ isDead = true;
}
break;
}
@@ -1384,7 +1385,7 @@ bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
stubInfo->getStubField<T, Type::WeakObject>(stub, offset);
auto r = TraceWeakEdge(trc, &objectField, "cacheir-weak-object");
if (r.isDead()) {
- return false;
+ isDead = true;
}
break;
}
@@ -1393,7 +1394,7 @@ bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset);
auto r = TraceWeakEdge(trc, &scriptField, "cacheir-weak-script");
if (r.isDead()) {
- return false;
+ isDead = true;
}
break;
}
@@ -1403,12 +1404,13 @@ bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
auto r = TraceWeakEdge(trc, &getterSetterField,
"cacheir-weak-getter-setter");
if (r.isDead()) {
- return false;
+ isDead = true;
}
break;
}
case Type::Limit:
- return true; // Done.
+ // Done.
+ return !isDead;
case Type::RawInt32:
case Type::RawPointer:
case Type::Shape:
@@ -2148,6 +2150,30 @@ bool CacheIRCompiler::emitGuardNonDoubleType(ValOperandId inputId,
return true;
}
+static const JSClass* ClassFor(JSContext* cx, GuardClassKind kind) {
+ switch (kind) {
+ case GuardClassKind::Array:
+ case GuardClassKind::PlainObject:
+ case GuardClassKind::FixedLengthArrayBuffer:
+ case GuardClassKind::ResizableArrayBuffer:
+ case GuardClassKind::FixedLengthSharedArrayBuffer:
+ case GuardClassKind::GrowableSharedArrayBuffer:
+ case GuardClassKind::FixedLengthDataView:
+ case GuardClassKind::ResizableDataView:
+ case GuardClassKind::MappedArguments:
+ case GuardClassKind::UnmappedArguments:
+ case GuardClassKind::Set:
+ case GuardClassKind::Map:
+ case GuardClassKind::BoundFunction:
+ return ClassFor(kind);
+ case GuardClassKind::WindowProxy:
+ return cx->runtime()->maybeWindowProxyClass();
+ case GuardClassKind::JSFunction:
+ MOZ_CRASH("must be handled by caller");
+ }
+ MOZ_CRASH("unexpected kind");
+}
+
bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
@@ -2169,44 +2195,7 @@ bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
return true;
}
- const JSClass* clasp = nullptr;
- switch (kind) {
- case GuardClassKind::Array:
- clasp = &ArrayObject::class_;
- break;
- case GuardClassKind::PlainObject:
- clasp = &PlainObject::class_;
- break;
- case GuardClassKind::FixedLengthArrayBuffer:
- clasp = &FixedLengthArrayBufferObject::class_;
- break;
- case GuardClassKind::FixedLengthSharedArrayBuffer:
- clasp = &FixedLengthSharedArrayBufferObject::class_;
- break;
- case GuardClassKind::FixedLengthDataView:
- clasp = &FixedLengthDataViewObject::class_;
- break;
- case GuardClassKind::MappedArguments:
- clasp = &MappedArgumentsObject::class_;
- break;
- case GuardClassKind::UnmappedArguments:
- clasp = &UnmappedArgumentsObject::class_;
- break;
- case GuardClassKind::WindowProxy:
- clasp = cx_->runtime()->maybeWindowProxyClass();
- break;
- case GuardClassKind::Set:
- clasp = &SetObject::class_;
- break;
- case GuardClassKind::Map:
- clasp = &MapObject::class_;
- break;
- case GuardClassKind::BoundFunction:
- clasp = &BoundFunctionObject::class_;
- break;
- case GuardClassKind::JSFunction:
- MOZ_CRASH("JSFunction handled before switch");
- }
+ const JSClass* clasp = ClassFor(cx_, kind);
MOZ_ASSERT(clasp);
if (objectGuardNeedsSpectreMitigations(objId)) {
@@ -2220,6 +2209,39 @@ bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
return true;
}
+bool CacheIRCompiler::emitGuardEitherClass(ObjOperandId objId,
+ GuardClassKind kind1,
+ GuardClassKind kind2) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+  // Guarding on JSFunction isn't needed here yet, so it's unsupported for now.
+ MOZ_ASSERT(kind1 != GuardClassKind::JSFunction &&
+ kind2 != GuardClassKind::JSFunction);
+
+ const JSClass* clasp1 = ClassFor(cx_, kind1);
+ MOZ_ASSERT(clasp1);
+
+ const JSClass* clasp2 = ClassFor(cx_, kind2);
+ MOZ_ASSERT(clasp2);
+
+ if (objectGuardNeedsSpectreMitigations(objId)) {
+ masm.branchTestObjClass(Assembler::NotEqual, obj, {clasp1, clasp2}, scratch,
+ obj, failure->label());
+ } else {
+ masm.branchTestObjClassNoSpectreMitigations(
+ Assembler::NotEqual, obj, {clasp1, clasp2}, scratch, failure->label());
+ }
+
+ return true;
+}
+
bool CacheIRCompiler::emitGuardNullProto(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
@@ -2569,6 +2591,22 @@ bool CacheIRCompiler::emitGuardIsFixedLengthTypedArray(ObjOperandId objId) {
return true;
}
+bool CacheIRCompiler::emitGuardIsResizableTypedArray(ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegister scratch(allocator, masm);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadObjClassUnsafe(obj, scratch);
+ masm.branchIfClassIsNotResizableTypedArray(scratch, failure->label());
+ return true;
+}
+
bool CacheIRCompiler::emitGuardIsNotDOMProxy(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
@@ -2796,7 +2834,7 @@ bool CacheIRCompiler::emitStringToAtom(StringOperandId stringId) {
masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), &done);
- masm.lookupStringInAtomCacheLastLookups(str, scratch, &vmCall);
+ masm.lookupStringInAtomCacheLastLookups(str, scratch, str, &vmCall);
masm.jump(&done);
masm.bind(&vmCall);
@@ -4760,17 +4798,30 @@ bool CacheIRCompiler::emitLoadDenseElementHoleResult(ObjOperandId objId,
}
bool CacheIRCompiler::emitLoadTypedArrayElementExistsResult(
- ObjOperandId objId, IntPtrOperandId indexId) {
+ ObjOperandId objId, IntPtrOperandId indexId, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Maybe<AutoScratchRegister> scratch2;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
+ scratch2.emplace(allocator, masm);
+ }
Label outOfBounds, done;
// Bounds check.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ if (viewKind == ArrayBufferViewKind::FixedLength) {
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ } else {
+ // Bounds check doesn't require synchronization. See IsValidIntegerIndex
+ // abstract operation which reads the underlying buffer byte length using
+ // "unordered" memory order.
+ auto sync = Synchronization::None();
+
+ masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch, *scratch2);
+ }
masm.branchPtr(Assembler::BelowOrEqual, scratch, index, &outOfBounds);
EmitStoreBoolean(masm, true, output);
masm.jump(&done);
@@ -5039,6 +5090,46 @@ bool CacheIRCompiler::emitArrayBufferViewByteOffsetDoubleResult(
return true;
}
+bool CacheIRCompiler::
+ emitResizableTypedArrayByteOffsetMaybeOutOfBoundsInt32Result(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, scratch1,
+ scratch2);
+ masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
+ masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::
+ emitResizableTypedArrayByteOffsetMaybeOutOfBoundsDoubleResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ Register obj = allocator.useRegister(masm, objId);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, scratch1,
+ scratch2);
+ masm.convertIntPtrToDouble(scratch1, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
bool CacheIRCompiler::emitTypedArrayByteLengthInt32Result(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
@@ -5081,6 +5172,100 @@ bool CacheIRCompiler::emitTypedArrayByteLengthDoubleResult(ObjOperandId objId) {
return true;
}
+bool CacheIRCompiler::emitResizableTypedArrayByteLengthInt32Result(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
+ masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
+ masm.typedArrayElementSize(obj, scratch2);
+
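+  // byteLength = length * elementSize; jump to the failure path on Int32
+  // overflow.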
+ masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
+ failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitResizableTypedArrayByteLengthDoubleResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
+ masm.typedArrayElementSize(obj, scratch2);
+ masm.mulPtr(scratch2, scratch1);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertIntPtrToDouble(scratch1, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
+bool CacheIRCompiler::emitResizableTypedArrayLengthInt32Result(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Explicit |length| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
+ masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitResizableTypedArrayLengthDoubleResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ // Explicit |length| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertIntPtrToDouble(scratch1, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
bool CacheIRCompiler::emitTypedArrayElementSizeResult(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
@@ -5093,6 +5278,92 @@ bool CacheIRCompiler::emitTypedArrayElementSizeResult(ObjOperandId objId) {
return true;
}
+bool CacheIRCompiler::emitResizableDataViewByteLengthInt32Result(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch1, scratch2);
+ masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitResizableDataViewByteLengthDoubleResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+ AutoScratchRegister scratch2(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch1, scratch2);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertIntPtrToDouble(scratch1, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
+bool CacheIRCompiler::emitGrowableSharedArrayBufferByteLengthInt32Result(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, scratch);
+ masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
+
+ masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
+ return true;
+}
+
+bool CacheIRCompiler::emitGrowableSharedArrayBufferByteLengthDoubleResult(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoOutputRegister output(*this);
+ AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Register obj = allocator.useRegister(masm, objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, scratch);
+
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertIntPtrToDouble(scratch, fpscratch);
+ masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
+ return true;
+}
+
bool CacheIRCompiler::emitGuardHasAttachedArrayBuffer(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
@@ -5108,6 +5379,42 @@ bool CacheIRCompiler::emitGuardHasAttachedArrayBuffer(ObjOperandId objId) {
return true;
}
+bool CacheIRCompiler::emitGuardResizableArrayBufferViewInBounds(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoScratchRegister scratch(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.branchIfResizableArrayBufferViewOutOfBounds(obj, scratch,
+ failure->label());
+ return true;
+}
+
+bool CacheIRCompiler::emitGuardResizableArrayBufferViewInBoundsOrDetached(
+ ObjOperandId objId) {
+ JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+ AutoScratchRegister scratch(allocator, masm);
+ Register obj = allocator.useRegister(masm, objId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ Label done;
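+  // In-bounds views pass immediately; out-of-bounds views only pass when their
+  // buffer has been detached.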
+ masm.branchIfResizableArrayBufferViewInBounds(obj, scratch, &done);
+ masm.branchIfHasAttachedArrayBuffer(obj, scratch, failure->label());
+ masm.bind(&done);
+ return true;
+}
+
bool CacheIRCompiler::emitIsTypedArrayConstructorResult(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
@@ -6220,8 +6527,8 @@ bool CacheIRCompiler::emitArrayPush(ObjOperandId objId, ValOperandId rhsId) {
bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
Scalar::Type elementType,
IntPtrOperandId indexId,
- uint32_t rhsId,
- bool handleOOB) {
+ uint32_t rhsId, bool handleOOB,
+ ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
@@ -6261,7 +6568,8 @@ bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
AutoScratchRegister scratch1(allocator, masm);
Maybe<AutoScratchRegister> scratch2;
Maybe<AutoSpectreBoundsScratchRegister> spectreScratch;
- if (Scalar::isBigIntType(elementType)) {
+ if (Scalar::isBigIntType(elementType) ||
+ viewKind == ArrayBufferViewKind::Resizable) {
scratch2.emplace(allocator, masm);
} else {
spectreScratch.emplace(allocator, masm);
@@ -6276,10 +6584,9 @@ bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
// Bounds check.
Label done;
- Register spectreTemp = scratch2 ? scratch2->get() : spectreScratch->get();
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
- masm.spectreBoundsCheckPtr(index, scratch1, spectreTemp,
- handleOOB ? &done : failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch1, scratch2,
+ spectreScratch,
+ handleOOB ? &done : failure->label());
// Load the elements vector.
masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);
@@ -6348,9 +6655,61 @@ static void EmitAllocateBigInt(MacroAssembler& masm, Register result,
masm.bind(&done);
}
+void CacheIRCompiler::emitTypedArrayBoundsCheck(ArrayBufferViewKind viewKind,
+ Register obj, Register index,
+ Register scratch,
+ Register maybeScratch,
+ Register spectreScratch,
+ Label* fail) {
+ // |index| must not alias any scratch register.
+ MOZ_ASSERT(index != scratch);
+ MOZ_ASSERT(index != maybeScratch);
+ MOZ_ASSERT(index != spectreScratch);
+
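+  // Fixed-length views store their length in a slot; resizable views recompute
+  // it from the buffer, which needs a second scratch register.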
+ if (viewKind == ArrayBufferViewKind::FixedLength) {
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ masm.spectreBoundsCheckPtr(index, scratch, spectreScratch, fail);
+ } else {
+ if (maybeScratch == InvalidReg) {
+ // Spill |index| to use it as an additional scratch register.
+ masm.push(index);
+
+ maybeScratch = index;
+ } else {
+      // Use |maybeScratch| as the Spectre scratch when no explicit
+      // |spectreScratch| is present.
+ if (spectreScratch == InvalidReg) {
+ spectreScratch = maybeScratch;
+ }
+ }
+
+ // Bounds check doesn't require synchronization. See IsValidIntegerIndex
+ // abstract operation which reads the underlying buffer byte length using
+ // "unordered" memory order.
+ auto sync = Synchronization::None();
+
+ masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch, maybeScratch);
+
+ if (maybeScratch == index) {
+ // Restore |index|.
+ masm.pop(index);
+ }
+
+ masm.spectreBoundsCheckPtr(index, scratch, spectreScratch, fail);
+ }
+}
+
+void CacheIRCompiler::emitTypedArrayBoundsCheck(
+ ArrayBufferViewKind viewKind, Register obj, Register index,
+ Register scratch, mozilla::Maybe<Register> maybeScratch,
+ mozilla::Maybe<Register> spectreScratch, Label* fail) {
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch,
+ maybeScratch.valueOr(InvalidReg),
+ spectreScratch.valueOr(InvalidReg), fail);
+}
+
bool CacheIRCompiler::emitLoadTypedArrayElementResult(
ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
- bool handleOOB, bool forceDoubleForUint32) {
+ bool handleOOB, bool forceDoubleForUint32, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
@@ -6372,9 +6731,8 @@ bool CacheIRCompiler::emitLoadTypedArrayElementResult(
// Bounds check.
Label outOfBounds;
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
- masm.spectreBoundsCheckPtr(index, scratch1, scratch2,
- handleOOB ? &outOfBounds : failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch1, scratch2, scratch2,
+ handleOOB ? &outOfBounds : failure->label());
// Allocate BigInt if needed. The code after this should be infallible.
Maybe<Register> bigInt;
@@ -6437,11 +6795,40 @@ bool CacheIRCompiler::emitLoadTypedArrayElementResult(
return true;
}
-static void EmitDataViewBoundsCheck(MacroAssembler& masm, size_t byteSize,
- Register obj, Register offset,
- Register scratch, Label* fail) {
+void CacheIRCompiler::emitDataViewBoundsCheck(ArrayBufferViewKind viewKind,
+ size_t byteSize, Register obj,
+ Register offset, Register scratch,
+ Register maybeScratch,
+ Label* fail) {
+ // |offset| must not alias any scratch register.
+ MOZ_ASSERT(offset != scratch);
+ MOZ_ASSERT(offset != maybeScratch);
+
+ if (viewKind == ArrayBufferViewKind::FixedLength) {
+ masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
+ } else {
+ if (maybeScratch == InvalidReg) {
+ // Spill |offset| to use it as an additional scratch register.
+ masm.push(offset);
+
+ maybeScratch = offset;
+ }
+
+ // Bounds check doesn't require synchronization. See GetViewValue and
+ // SetViewValue abstract operations which read the underlying buffer byte
+ // length using "unordered" memory order.
+ auto sync = Synchronization::None();
+
+ masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch,
+ maybeScratch);
+
+ if (maybeScratch == offset) {
+ // Restore |offset|.
+ masm.pop(offset);
+ }
+ }
+
// Ensure both offset < length and offset + (byteSize - 1) < length.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
if (byteSize == 1) {
masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
} else {
@@ -6456,7 +6843,7 @@ static void EmitDataViewBoundsCheck(MacroAssembler& masm, size_t byteSize,
bool CacheIRCompiler::emitLoadDataViewValueResult(
ObjOperandId objId, IntPtrOperandId offsetId,
BooleanOperandId littleEndianId, Scalar::Type elementType,
- bool forceDoubleForUint32) {
+ bool forceDoubleForUint32, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
@@ -6469,6 +6856,18 @@ bool CacheIRCompiler::emitLoadDataViewValueResult(
Register64 outputReg64 = output.valueReg().toRegister64();
Register outputScratch = outputReg64.scratchReg();
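+  // Resizable views need an extra register to load the byte length for the
+  // bounds check.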
+ Register boundsCheckScratch;
+#ifndef JS_CODEGEN_X86
+ Maybe<AutoScratchRegister> maybeBoundsCheckScratch;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
+ maybeBoundsCheckScratch.emplace(allocator, masm);
+ boundsCheckScratch = *maybeBoundsCheckScratch;
+ }
+#else
+ // Not enough registers on x86, so use the other part of outputReg64.
+ boundsCheckScratch = outputReg64.secondScratchReg();
+#endif
+
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
@@ -6476,8 +6875,8 @@ bool CacheIRCompiler::emitLoadDataViewValueResult(
const size_t byteSize = Scalar::byteSize(elementType);
- EmitDataViewBoundsCheck(masm, byteSize, obj, offset, outputScratch,
- failure->label());
+ emitDataViewBoundsCheck(viewKind, byteSize, obj, offset, outputScratch,
+ boundsCheckScratch, failure->label());
masm.loadPtr(Address(obj, DataViewObject::dataOffset()), outputScratch);
@@ -6612,7 +7011,8 @@ bool CacheIRCompiler::emitLoadDataViewValueResult(
bool CacheIRCompiler::emitStoreDataViewValueResult(
ObjOperandId objId, IntPtrOperandId offsetId, uint32_t valueId,
- BooleanOperandId littleEndianId, Scalar::Type elementType) {
+ BooleanOperandId littleEndianId, Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
@@ -6686,6 +7086,24 @@ bool CacheIRCompiler::emitStoreDataViewValueResult(
}
#endif
+ Register boundsCheckScratch;
+#ifndef JS_CODEGEN_X86
+ Maybe<AutoScratchRegister> maybeBoundsCheckScratch;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
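+    // Reuse an already-allocated 32- or 64-bit scratch register when one is
+    // available; otherwise allocate a fresh one.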
+ if (scratch2.constructed<AutoScratchRegister>()) {
+ boundsCheckScratch = scratch2.ref<AutoScratchRegister>().get();
+ } else if (scratch2.constructed<AutoScratchRegister64>()) {
+ boundsCheckScratch =
+ scratch2.ref<AutoScratchRegister64>().get().scratchReg();
+ } else {
+ maybeBoundsCheckScratch.emplace(allocator, masm);
+ boundsCheckScratch = *maybeBoundsCheckScratch;
+ }
+ }
+#else
+ // Not enough registers on x86.
+#endif
+
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
@@ -6693,8 +7111,8 @@ bool CacheIRCompiler::emitStoreDataViewValueResult(
const size_t byteSize = Scalar::byteSize(elementType);
- EmitDataViewBoundsCheck(masm, byteSize, obj, offset, scratch1,
- failure->label());
+ emitDataViewBoundsCheck(viewKind, byteSize, obj, offset, scratch1,
+ boundsCheckScratch, failure->label());
masm.loadPtr(Address(obj, DataViewObject::dataOffset()), scratch1);
BaseIndex dest(scratch1, offset, TimesOne);
@@ -8903,7 +9321,8 @@ bool CacheIRCompiler::emitGetFirstDollarIndexResult(StringOperandId strId) {
bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
ObjOperandId objId, IntPtrOperandId indexId, uint32_t expectedId,
- uint32_t replacementId, Scalar::Type elementType) {
+ uint32_t replacementId, Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Maybe<AutoOutputRegister> output;
@@ -8936,8 +9355,17 @@ bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
: callvm->outputValueReg().scratchReg();
MOZ_ASSERT(scratch != obj, "scratchReg must not be typeReg");
+ Maybe<AutoScratchRegister> scratch2;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
+#ifdef JS_CODEGEN_X86
+ // Not enough spare registers on x86.
+#else
+ scratch2.emplace(allocator, masm);
+#endif
+ }
+
// Not enough registers on X86.
- Register spectreTemp = Register::Invalid();
+ constexpr auto spectreTemp = mozilla::Nothing{};
FailurePath* failure;
if (!addFailurePath(&failure)) {
@@ -8950,8 +9378,8 @@ bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
// Bounds check.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
- masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
+ spectreTemp, failure->label());
// Atomic operations are highly platform-dependent, for example x86/x64 has
// specific requirements on which registers are used; MIPS needs multiple
@@ -8966,8 +9394,8 @@ bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
masm.Push(index);
masm.Push(obj);
- using Fn = BigInt* (*)(JSContext*, FixedLengthTypedArrayObject*, size_t,
- const BigInt*, const BigInt*);
+ using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t, const BigInt*,
+ const BigInt*);
callvm->call<Fn, jit::AtomicsCompareExchange64>();
return true;
}
@@ -9004,15 +9432,20 @@ bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
- Scalar::Type elementType, AtomicsReadWriteModifyFn fn) {
+ Scalar::Type elementType, ArrayBufferViewKind viewKind,
+ AtomicsReadWriteModifyFn fn) {
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
Register value = allocator.useRegister(masm, Int32OperandId(valueId));
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Maybe<AutoScratchRegisterMaybeOutputType> scratch2;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
+ scratch2.emplace(allocator, masm, output);
+ }
// Not enough registers on X86.
- Register spectreTemp = Register::Invalid();
+ constexpr auto spectreTemp = mozilla::Nothing{};
FailurePath* failure;
if (!addFailurePath(&failure)) {
@@ -9020,8 +9453,8 @@ bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
}
// Bounds check.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
- masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
+ spectreTemp, failure->label());
// See comment in emitAtomicsCompareExchange for why we use an ABI call.
{
@@ -9054,15 +9487,20 @@ bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
template <CacheIRCompiler::AtomicsReadWriteModify64Fn fn>
bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
- ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId) {
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ ArrayBufferViewKind viewKind) {
AutoCallVM callvm(masm, this, allocator);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
Register value = allocator.useRegister(masm, BigIntOperandId(valueId));
AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());
+ Maybe<AutoScratchRegisterMaybeOutputType> scratch2;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
+ scratch2.emplace(allocator, masm, callvm.output());
+ }
// Not enough registers on X86.
- Register spectreTemp = Register::Invalid();
+ constexpr auto spectreTemp = mozilla::Nothing{};
FailurePath* failure;
if (!addFailurePath(&failure)) {
@@ -9075,8 +9513,8 @@ bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
// Bounds check.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
- masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
+ spectreTemp, failure->label());
// See comment in emitAtomicsCompareExchange for why we use a VM call.
@@ -9093,95 +9531,88 @@ bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
bool CacheIRCompiler::emitAtomicsExchangeResult(ObjOperandId objId,
IntPtrOperandId indexId,
uint32_t valueId,
- Scalar::Type elementType) {
+ Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (Scalar::isBigIntType(elementType)) {
return emitAtomicsReadModifyWriteResult64<jit::AtomicsExchange64>(
- objId, indexId, valueId);
+ objId, indexId, valueId, viewKind);
}
return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
+ viewKind,
AtomicsExchange(elementType));
}
-bool CacheIRCompiler::emitAtomicsAddResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool CacheIRCompiler::emitAtomicsAddResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (Scalar::isBigIntType(elementType)) {
- return emitAtomicsReadModifyWriteResult64<jit::AtomicsAdd64>(objId, indexId,
- valueId);
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsAdd64>(
+ objId, indexId, valueId, viewKind);
}
return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
- AtomicsAdd(elementType));
+ viewKind, AtomicsAdd(elementType));
}
-bool CacheIRCompiler::emitAtomicsSubResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool CacheIRCompiler::emitAtomicsSubResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (Scalar::isBigIntType(elementType)) {
- return emitAtomicsReadModifyWriteResult64<jit::AtomicsSub64>(objId, indexId,
- valueId);
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsSub64>(
+ objId, indexId, valueId, viewKind);
}
return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
- AtomicsSub(elementType));
+ viewKind, AtomicsSub(elementType));
}
-bool CacheIRCompiler::emitAtomicsAndResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool CacheIRCompiler::emitAtomicsAndResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (Scalar::isBigIntType(elementType)) {
- return emitAtomicsReadModifyWriteResult64<jit::AtomicsAnd64>(objId, indexId,
- valueId);
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsAnd64>(
+ objId, indexId, valueId, viewKind);
}
return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
- AtomicsAnd(elementType));
+ viewKind, AtomicsAnd(elementType));
}
-bool CacheIRCompiler::emitAtomicsOrResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool CacheIRCompiler::emitAtomicsOrResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (Scalar::isBigIntType(elementType)) {
- return emitAtomicsReadModifyWriteResult64<jit::AtomicsOr64>(objId, indexId,
- valueId);
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsOr64>(
+ objId, indexId, valueId, viewKind);
}
return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
- AtomicsOr(elementType));
+ viewKind, AtomicsOr(elementType));
}
-bool CacheIRCompiler::emitAtomicsXorResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool CacheIRCompiler::emitAtomicsXorResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (Scalar::isBigIntType(elementType)) {
- return emitAtomicsReadModifyWriteResult64<jit::AtomicsXor64>(objId, indexId,
- valueId);
+ return emitAtomicsReadModifyWriteResult64<jit::AtomicsXor64>(
+ objId, indexId, valueId, viewKind);
}
return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
- AtomicsXor(elementType));
+ viewKind, AtomicsXor(elementType));
}
bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
IntPtrOperandId indexId,
- Scalar::Type elementType) {
+ Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Maybe<AutoOutputRegister> output;
@@ -9195,7 +9626,13 @@ bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm,
output ? *output : callvm->output());
- AutoSpectreBoundsScratchRegister spectreTemp(allocator, masm);
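+  // Fixed-length views only need a Spectre bounds-check scratch; resizable
+  // views need a full scratch register to load the length.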
+ Maybe<AutoSpectreBoundsScratchRegister> spectreTemp;
+ Maybe<AutoScratchRegister> scratch2;
+ if (viewKind == ArrayBufferViewKind::FixedLength) {
+ spectreTemp.emplace(allocator, masm);
+ } else {
+ scratch2.emplace(allocator, masm);
+ }
AutoAvailableFloatRegister floatReg(*this, FloatReg0);
FailurePath* failure;
@@ -9209,8 +9646,8 @@ bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");
// Bounds check.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
- masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
+ spectreTemp, failure->label());
// Atomic operations are highly platform-dependent, for example x86/arm32 has
// specific requirements on which registers are used. Therefore we're using a
@@ -9221,7 +9658,7 @@ bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
masm.Push(index);
masm.Push(obj);
- using Fn = BigInt* (*)(JSContext*, FixedLengthTypedArrayObject*, size_t);
+ using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t);
callvm->call<Fn, jit::AtomicsLoad64>();
return true;
}
@@ -9250,7 +9687,8 @@ bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
IntPtrOperandId indexId,
uint32_t valueId,
- Scalar::Type elementType) {
+ Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
@@ -9264,9 +9702,13 @@ bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
valueBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
}
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+ Maybe<AutoScratchRegisterMaybeOutputType> scratch2;
+ if (viewKind == ArrayBufferViewKind::Resizable) {
+ scratch2.emplace(allocator, masm, output);
+ }
// Not enough registers on X86.
- Register spectreTemp = Register::Invalid();
+ constexpr auto spectreTemp = mozilla::Nothing{};
FailurePath* failure;
if (!addFailurePath(&failure)) {
@@ -9274,8 +9716,8 @@ bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
}
// Bounds check.
- masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
- masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());
+ emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
+ spectreTemp, failure->label());
if (!Scalar::isBigIntType(elementType)) {
// Load the elements vector.
@@ -9302,7 +9744,7 @@ bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
volatileRegs.takeUnchecked(scratch);
masm.PushRegsInMask(volatileRegs);
- using Fn = void (*)(FixedLengthTypedArrayObject*, size_t, const BigInt*);
+ using Fn = void (*)(TypedArrayObject*, size_t, const BigInt*);
masm.setupUnalignedABICall(scratch);
masm.passABIArg(obj);
masm.passABIArg(index);
diff --git a/js/src/jit/CacheIRCompiler.h b/js/src/jit/CacheIRCompiler.h
index 3b8941e242..69b1dd34ac 100644
--- a/js/src/jit/CacheIRCompiler.h
+++ b/js/src/jit/CacheIRCompiler.h
@@ -25,7 +25,6 @@ class BigInt;
namespace js {
-class FixedLengthTypedArrayObject;
class TypedArrayObject;
enum class UnaryMathFunction : uint8_t;
@@ -846,21 +845,37 @@ class MOZ_RAII CacheIRCompiler {
bool emitDoubleIncDecResult(bool isInc, NumberOperandId inputId);
- using AtomicsReadWriteModifyFn = int32_t (*)(FixedLengthTypedArrayObject*,
- size_t, int32_t);
+ void emitTypedArrayBoundsCheck(ArrayBufferViewKind viewKind, Register obj,
+ Register index, Register scratch,
+ Register maybeScratch, Register spectreScratch,
+ Label* fail);
+
+ void emitTypedArrayBoundsCheck(ArrayBufferViewKind viewKind, Register obj,
+ Register index, Register scratch,
+ mozilla::Maybe<Register> maybeScratch,
+ mozilla::Maybe<Register> spectreScratch,
+ Label* fail);
+
+ void emitDataViewBoundsCheck(ArrayBufferViewKind viewKind, size_t byteSize,
+ Register obj, Register offset, Register scratch,
+ Register maybeScratch, Label* fail);
+
+ using AtomicsReadWriteModifyFn = int32_t (*)(TypedArrayObject*, size_t,
+ int32_t);
[[nodiscard]] bool emitAtomicsReadModifyWriteResult(
ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
- Scalar::Type elementType, AtomicsReadWriteModifyFn fn);
+ Scalar::Type elementType, ArrayBufferViewKind viewKind,
+ AtomicsReadWriteModifyFn fn);
- using AtomicsReadWriteModify64Fn =
- JS::BigInt* (*)(JSContext*, FixedLengthTypedArrayObject*, size_t,
- const JS::BigInt*);
+ using AtomicsReadWriteModify64Fn = JS::BigInt* (*)(JSContext*,
+ TypedArrayObject*, size_t,
+ const JS::BigInt*);
template <AtomicsReadWriteModify64Fn fn>
- [[nodiscard]] bool emitAtomicsReadModifyWriteResult64(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId);
+ [[nodiscard]] bool emitAtomicsReadModifyWriteResult64(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ ArrayBufferViewKind viewKind);
void emitActivateIterator(Register objBeingIterated, Register iterObject,
Register nativeIter, Register scratch,
diff --git a/js/src/jit/CacheIRGenerator.h b/js/src/jit/CacheIRGenerator.h
index 9880b82b71..2e15b2d8a6 100644
--- a/js/src/jit/CacheIRGenerator.h
+++ b/js/src/jit/CacheIRGenerator.h
@@ -636,6 +636,9 @@ class MOZ_RAII InlinableNativeIRGenerator {
AttachDecision tryAttachIsConstructor();
AttachDecision tryAttachIsCrossRealmArrayConstructor();
AttachDecision tryAttachGuardToClass(InlinableNative native);
+ AttachDecision tryAttachGuardToClass(GuardClassKind kind);
+ AttachDecision tryAttachGuardToEitherClass(GuardClassKind kind1,
+ GuardClassKind kind2);
AttachDecision tryAttachGuardToArrayBuffer();
AttachDecision tryAttachGuardToSharedArrayBuffer();
AttachDecision tryAttachHasClass(const JSClass* clasp,
@@ -693,8 +696,8 @@ class MOZ_RAII InlinableNativeIRGenerator {
AttachDecision tryAttachIsTypedArrayConstructor();
AttachDecision tryAttachTypedArrayByteOffset();
AttachDecision tryAttachTypedArrayElementSize();
- AttachDecision tryAttachTypedArrayLength(bool isPossiblyWrapped);
- AttachDecision tryAttachTypedArrayLengthZeroOnOutOfBounds();
+ AttachDecision tryAttachTypedArrayLength(bool isPossiblyWrapped,
+ bool allowOutOfBounds);
AttachDecision tryAttachArrayBufferByteLength(bool isPossiblyWrapped);
AttachDecision tryAttachIsConstructing();
AttachDecision tryAttachGetNextMapSetEntryForIterator(bool isMap);
diff --git a/js/src/jit/CacheIROps.yaml b/js/src/jit/CacheIROps.yaml
index ccaf64d924..974404d5c0 100644
--- a/js/src/jit/CacheIROps.yaml
+++ b/js/src/jit/CacheIROps.yaml
@@ -272,6 +272,16 @@
obj: ObjId
kind: GuardClassKindImm
+# Guard on either of two GuardClassKinds.
+- name: GuardEitherClass
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+ kind1: GuardClassKindImm
+ kind2: GuardClassKindImm
+
# Guard on a realm fuse.
- name: GuardFuse
shared: true
@@ -472,6 +482,13 @@
args:
obj: ObjId
+- name: GuardIsResizableTypedArray
+ shared: true
+ transpile: true
+ cost_estimate: 1
+ args:
+ obj: ObjId
+
- name: GuardHasProxyHandler
shared: false
transpile: true
@@ -1189,6 +1206,20 @@
args:
obj: ObjId
+- name: ResizableTypedArrayByteOffsetMaybeOutOfBoundsInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: ResizableTypedArrayByteOffsetMaybeOutOfBoundsDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
- name: TypedArrayByteLengthInt32Result
shared: true
transpile: true
@@ -1203,6 +1234,34 @@
args:
obj: ObjId
+- name: ResizableTypedArrayByteLengthInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: ResizableTypedArrayByteLengthDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: ResizableTypedArrayLengthInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: ResizableTypedArrayLengthDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
- name: TypedArrayElementSizeResult
shared: true
transpile: true
@@ -1210,6 +1269,34 @@
args:
obj: ObjId
+- name: ResizableDataViewByteLengthInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: ResizableDataViewByteLengthDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: GrowableSharedArrayBufferByteLengthInt32Result
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: GrowableSharedArrayBufferByteLengthDoubleResult
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
- name: GuardHasAttachedArrayBuffer
shared: true
transpile: true
@@ -1217,6 +1304,20 @@
args:
obj: ObjId
+- name: GuardResizableArrayBufferViewInBounds
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
+- name: GuardResizableArrayBufferViewInBoundsOrDetached
+ shared: true
+ transpile: true
+ cost_estimate: 2
+ args:
+ obj: ObjId
+
- name: NewArrayIteratorResult
shared: true
transpile: true
@@ -1615,6 +1716,7 @@
index: IntPtrId
rhs: RawId
handleOOB: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsCompareExchangeResult
shared: true
@@ -1626,6 +1728,7 @@
expected: RawId
replacement: RawId
elementType: ScalarTypeImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsExchangeResult
shared: true
@@ -1636,6 +1739,7 @@
index: IntPtrId
value: RawId
elementType: ScalarTypeImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsAddResult
shared: true
@@ -1647,6 +1751,7 @@
value: RawId
elementType: ScalarTypeImm
forEffect: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsSubResult
shared: true
@@ -1658,6 +1763,7 @@
value: RawId
elementType: ScalarTypeImm
forEffect: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsAndResult
shared: true
@@ -1669,6 +1775,7 @@
value: RawId
elementType: ScalarTypeImm
forEffect: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsOrResult
shared: true
@@ -1680,6 +1787,7 @@
value: RawId
elementType: ScalarTypeImm
forEffect: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsXorResult
shared: true
@@ -1691,6 +1799,7 @@
value: RawId
elementType: ScalarTypeImm
forEffect: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsLoadResult
shared: true
@@ -1700,6 +1809,7 @@
obj: ObjId
index: IntPtrId
elementType: ScalarTypeImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsStoreResult
shared: true
@@ -1710,6 +1820,7 @@
index: IntPtrId
value: RawId
elementType: ScalarTypeImm
+ viewKind: ArrayBufferViewKindImm
- name: AtomicsIsLockFreeResult
shared: true
@@ -2051,6 +2162,7 @@
args:
obj: ObjId
index: IntPtrId
+ viewKind: ArrayBufferViewKindImm
- name: LoadDenseElementHoleExistsResult
shared: true
@@ -2070,6 +2182,7 @@
elementType: ScalarTypeImm
handleOOB: BoolImm
forceDoubleForUint32: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: LoadDataViewValueResult
shared: true
@@ -2081,6 +2194,7 @@
littleEndian: BooleanId
elementType: ScalarTypeImm
forceDoubleForUint32: BoolImm
+ viewKind: ArrayBufferViewKindImm
- name: StoreDataViewValueResult
shared: true
@@ -2092,6 +2206,7 @@
value: RawId
littleEndian: BooleanId
elementType: ScalarTypeImm
+ viewKind: ArrayBufferViewKindImm
- name: LoadInt32ArrayLengthResult
shared: true
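
Each viewKind: ArrayBufferViewKindImm line added above contributes one byte to the op encoding and one argument to the corresponding emit method. The dispatch produced by GenerateCacheIRFiles.py for one of the extended ops plausibly reads it back like this (a sketch, not the actual generated file):

case CacheOp::AtomicsLoadResult: {
  ObjOperandId objId = reader.objOperandId();
  IntPtrOperandId indexId = reader.intPtrOperandId();
  Scalar::Type elementType = reader.scalarType();
  ArrayBufferViewKind viewKind = reader.arrayBufferViewKind();
  if (!emitAtomicsLoadResult(objId, indexId, elementType, viewKind)) {
    return false;
  }
  break;
}
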
diff --git a/js/src/jit/CacheIRReader.h b/js/src/jit/CacheIRReader.h
index affefdac01..54b298c999 100644
--- a/js/src/jit/CacheIRReader.h
+++ b/js/src/jit/CacheIRReader.h
@@ -96,6 +96,9 @@ class MOZ_RAII CacheIRReader {
uint32_t stubOffset() { return buffer_.readByte() * sizeof(uintptr_t); }
GuardClassKind guardClassKind() { return GuardClassKind(buffer_.readByte()); }
+ ArrayBufferViewKind arrayBufferViewKind() {
+ return ArrayBufferViewKind(buffer_.readByte());
+ }
ValueType valueType() { return ValueType(buffer_.readByte()); }
wasm::ValType::Kind wasmValType() {
return wasm::ValType::Kind(buffer_.readByte());
diff --git a/js/src/jit/CacheIRSpewer.cpp b/js/src/jit/CacheIRSpewer.cpp
index 921da75d61..613e0f7d85 100644
--- a/js/src/jit/CacheIRSpewer.cpp
+++ b/js/src/jit/CacheIRSpewer.cpp
@@ -106,6 +106,9 @@ class MOZ_RAII CacheIROpsJitSpewer {
void spewGuardClassKindImm(const char* name, GuardClassKind kind) {
out_.printf("%s GuardClassKind(%u)", name, unsigned(kind));
}
+ void spewArrayBufferViewKindImm(const char* name, ArrayBufferViewKind kind) {
+ out_.printf("%s ArrayBufferViewKind(%u)", name, unsigned(kind));
+ }
void spewWasmValTypeImm(const char* name, wasm::ValType::Kind kind) {
out_.printf("%s WasmValTypeKind(%u)", name, unsigned(kind));
}
@@ -251,6 +254,9 @@ class MOZ_RAII CacheIROpsJSONSpewer {
void spewGuardClassKindImm(const char* name, GuardClassKind kind) {
spewArgImpl(name, "Imm", unsigned(kind));
}
+ void spewArrayBufferViewKindImm(const char* name, ArrayBufferViewKind kind) {
+ spewArgImpl(name, "Imm", unsigned(kind));
+ }
void spewRealmFuseIndexImm(const char* name, RealmFuses::FuseIndex kind) {
spewArgImpl(name, "Imm", unsigned(kind));
}
diff --git a/js/src/jit/CacheIRWriter.h b/js/src/jit/CacheIRWriter.h
index 454a1b2511..6a32885d7c 100644
--- a/js/src/jit/CacheIRWriter.h
+++ b/js/src/jit/CacheIRWriter.h
@@ -262,6 +262,11 @@ class MOZ_RAII CacheIRWriter : public JS::CustomAutoRooter {
"GuardClassKind must fit in a byte");
buffer_.writeByte(uint8_t(kind));
}
+ void writeArrayBufferViewKindImm(ArrayBufferViewKind kind) {
+ static_assert(sizeof(ArrayBufferViewKind) == sizeof(uint8_t),
+ "ArrayBufferViewKind must fit in a byte");
+ buffer_.writeByte(uint8_t(kind));
+ }
void writeValueTypeImm(ValueType type) {
static_assert(sizeof(ValueType) == sizeof(uint8_t),
"ValueType must fit in uint8_t");
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
index 2c41acc736..10a69f0cb3 100644
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -2167,8 +2167,8 @@ class CreateDependentString {
NotInlineString,
Count
};
- mozilla::EnumeratedArray<FallbackKind, FallbackKind::Count, Label> fallbacks_,
- joins_;
+ mozilla::EnumeratedArray<FallbackKind, Label, size_t(FallbackKind::Count)>
+ fallbacks_, joins_;
public:
CreateDependentString(CharEncoding encoding, Register string, Register temp1,
@@ -4632,6 +4632,17 @@ void CodeGenerator::visitGuardIsFixedLengthTypedArray(
bailoutFrom(&bail, guard->snapshot());
}
+void CodeGenerator::visitGuardIsResizableTypedArray(
+ LGuardIsResizableTypedArray* guard) {
+ Register obj = ToRegister(guard->input());
+ Register temp = ToRegister(guard->temp0());
+
+ Label bail;
+ masm.loadObjClassUnsafe(obj, temp);
+ masm.branchIfClassIsNotResizableTypedArray(temp, &bail);
+ bailoutFrom(&bail, guard->snapshot());
+}
+
void CodeGenerator::visitGuardHasProxyHandler(LGuardHasProxyHandler* guard) {
Register obj = ToRegister(guard->input());
@@ -9660,6 +9671,68 @@ void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
masm.typedArrayElementSize(obj, out);
}
+void CodeGenerator::visitResizableTypedArrayByteOffsetMaybeOutOfBounds(
+ LResizableTypedArrayByteOffsetMaybeOutOfBounds* lir) {
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, out, temp);
+}
+
+void CodeGenerator::visitResizableTypedArrayLength(
+ LResizableTypedArrayLength* lir) {
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ masm.loadResizableTypedArrayLengthIntPtr(lir->synchronization(), obj, out,
+ temp);
+}
+
+void CodeGenerator::visitResizableDataViewByteLength(
+ LResizableDataViewByteLength* lir) {
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+ Register temp = ToRegister(lir->temp0());
+
+ masm.loadResizableDataViewByteLengthIntPtr(lir->synchronization(), obj, out,
+ temp);
+}
+
+void CodeGenerator::visitGrowableSharedArrayBufferByteLength(
+ LGrowableSharedArrayBufferByteLength* lir) {
+ Register obj = ToRegister(lir->object());
+ Register out = ToRegister(lir->output());
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto sync = Synchronization::Load();
+
+ masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, out);
+}
+
+void CodeGenerator::visitGuardResizableArrayBufferViewInBounds(
+ LGuardResizableArrayBufferViewInBounds* lir) {
+ Register obj = ToRegister(lir->object());
+ Register temp = ToRegister(lir->temp0());
+
+ Label bail;
+ masm.branchIfResizableArrayBufferViewOutOfBounds(obj, temp, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
+void CodeGenerator::visitGuardResizableArrayBufferViewInBoundsOrDetached(
+ LGuardResizableArrayBufferViewInBoundsOrDetached* lir) {
+ Register obj = ToRegister(lir->object());
+ Register temp = ToRegister(lir->temp0());
+
+ Label done, bail;
+ masm.branchIfResizableArrayBufferViewInBounds(obj, temp, &done);
+ masm.branchIfHasAttachedArrayBuffer(obj, temp, &bail);
+ masm.bind(&done);
+ bailoutFrom(&bail, lir->snapshot());
+}
+
void CodeGenerator::visitGuardHasAttachedArrayBuffer(
LGuardHasAttachedArrayBuffer* lir) {
Register obj = ToRegister(lir->object());
@@ -15039,15 +15112,19 @@ static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
// REG DUMP AREA, if any.
size_t regDumpWords = 0;
const LiveGeneralRegisterSet wasmAnyRefRegs = safepoint.wasmAnyRefRegs();
- GeneralRegisterForwardIterator wasmAnyRefRegsIter(wasmAnyRefRegs);
+ const LiveGeneralRegisterSet slotsOrElementsRegs =
+ safepoint.slotsOrElementsRegs();
+ const LiveGeneralRegisterSet refRegs(GeneralRegisterSet::Union(
+ wasmAnyRefRegs.set(), slotsOrElementsRegs.set()));
+ GeneralRegisterForwardIterator refRegsIter(refRegs);
switch (safepoint.wasmSafepointKind()) {
case WasmSafepointKind::LirCall:
case WasmSafepointKind::CodegenCall: {
size_t spilledNumWords = nRegisterDumpBytes / sizeof(void*);
regDumpWords += spilledNumWords;
- for (; wasmAnyRefRegsIter.more(); ++wasmAnyRefRegsIter) {
- Register reg = *wasmAnyRefRegsIter;
+ for (; refRegsIter.more(); ++refRegsIter) {
+ Register reg = *refRegsIter;
size_t offsetFromSpillBase =
safepoint.liveRegs().gprs().offsetOfPushedRegister(reg) /
sizeof(void*);
@@ -15055,9 +15132,13 @@ static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
offsetFromSpillBase <= spilledNumWords);
size_t index = spilledNumWords - offsetFromSpillBase;
- stackMap->set(index, wasm::StackMap::AnyRef);
+ if (wasmAnyRefRegs.has(reg)) {
+ stackMap->set(index, wasm::StackMap::AnyRef);
+ } else {
+ MOZ_ASSERT(slotsOrElementsRegs.has(reg));
+ stackMap->set(index, wasm::StackMap::ArrayDataPointer);
+ }
}
-
// Float and vector registers do not have to be handled; they cannot
// contain wasm anyrefs, and they are spilled after general-purpose
// registers. Gprs are therefore closest to the spill base and thus their
@@ -15066,8 +15147,8 @@ static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
case WasmSafepointKind::Trap: {
regDumpWords += trapExitLayoutNumWords;
- for (; wasmAnyRefRegsIter.more(); ++wasmAnyRefRegsIter) {
- Register reg = *wasmAnyRefRegsIter;
+ for (; refRegsIter.more(); ++refRegsIter) {
+ Register reg = *refRegsIter;
size_t offsetFromTop = trapExitLayout.getOffset(reg);
// If this doesn't hold, the associated register wasn't saved by
@@ -15080,7 +15161,12 @@ static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
// offset up from the bottom of the (integer register) save area.
size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;
- stackMap->set(offsetFromBottom, wasm::StackMap::AnyRef);
+ if (wasmAnyRefRegs.has(reg)) {
+ stackMap->set(offsetFromBottom, wasm::StackMap::AnyRef);
+ } else {
+ MOZ_ASSERT(slotsOrElementsRegs.has(reg));
+ stackMap->set(offsetFromBottom, wasm::StackMap::ArrayDataPointer);
+ }
}
} break;
default:
@@ -17263,25 +17349,20 @@ void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
void CodeGenerator::visitLoadTypedArrayElementHole(
LLoadTypedArrayElementHole* lir) {
- Register object = ToRegister(lir->object());
+ Register elements = ToRegister(lir->elements());
+ Register index = ToRegister(lir->index());
+ Register length = ToRegister(lir->length());
const ValueOperand out = ToOutValue(lir);
- // Load the length.
Register scratch = out.scratchReg();
- Register scratch2 = ToRegister(lir->temp0());
- Register index = ToRegister(lir->index());
- masm.loadArrayBufferViewLengthIntPtr(object, scratch);
// Load undefined if index >= length.
Label outOfBounds, done;
- masm.spectreBoundsCheckPtr(index, scratch, scratch2, &outOfBounds);
-
- // Load the elements vector.
- masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);
+ masm.spectreBoundsCheckPtr(index, length, scratch, &outOfBounds);
Scalar::Type arrayType = lir->mir()->arrayType();
Label fail;
- BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
+ BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
MacroAssembler::Uint32Mode uint32Mode =
lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
: MacroAssembler::Uint32Mode::FailOnDouble;
@@ -17301,37 +17382,38 @@ void CodeGenerator::visitLoadTypedArrayElementHole(
void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
LLoadTypedArrayElementHoleBigInt* lir) {
- Register object = ToRegister(lir->object());
+ Register elements = ToRegister(lir->elements());
+ Register index = ToRegister(lir->index());
+ Register length = ToRegister(lir->length());
const ValueOperand out = ToOutValue(lir);
- // On x86 there are not enough registers. In that case reuse the output's
- // type register as temporary.
+ Register temp = ToRegister(lir->temp());
+
+ // On x86 there are not enough registers. In that case reuse the output
+ // registers as temporaries.
#ifdef JS_CODEGEN_X86
- MOZ_ASSERT(lir->temp()->isBogusTemp());
- Register temp = out.typeReg();
+ MOZ_ASSERT(lir->temp64().isBogusTemp());
+ Register64 temp64 = out.toRegister64();
#else
- Register temp = ToRegister(lir->temp());
-#endif
Register64 temp64 = ToRegister64(lir->temp64());
-
- // Load the length.
- Register scratch = out.scratchReg();
- Register index = ToRegister(lir->index());
- masm.loadArrayBufferViewLengthIntPtr(object, scratch);
+#endif
// Load undefined if index >= length.
Label outOfBounds, done;
- masm.spectreBoundsCheckPtr(index, scratch, temp, &outOfBounds);
-
- // Load the elements vector.
- masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);
+ masm.spectreBoundsCheckPtr(index, length, temp, &outOfBounds);
Scalar::Type arrayType = lir->mir()->arrayType();
- BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
+ BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
masm.load64(source, temp64);
+#ifdef JS_CODEGEN_X86
+ Register bigInt = temp;
+ Register maybeTemp = InvalidReg;
+#else
Register bigInt = out.scratchReg();
- emitCreateBigInt(lir, arrayType, temp64, bigInt, temp);
+ Register maybeTemp = temp;
+#endif
+ emitCreateBigInt(lir, arrayType, temp64, bigInt, maybeTemp);
masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
masm.jump(&done);
@@ -17679,6 +17761,10 @@ void CodeGenerator::visitStoreTypedArrayElementHoleBigInt(
masm.bind(&skip);
}
+void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
+ masm.memoryBarrier(ins->type());
+}
+
void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
Register value = ToRegister(lir->value());
Register output = ToRegister(lir->output());
@@ -18453,6 +18539,24 @@ void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
bailoutFrom(&notEqual, ins->snapshot());
}
+void CodeGenerator::visitGuardToEitherClass(LGuardToEitherClass* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register temp = ToRegister(ins->temp0());
+
+ // branchTestObjClass may zero the object register on speculative paths
+ // (we should have a defineReuseInput allocation in this case).
+ Register spectreRegToZero = lhs;
+
+ Label notEqual;
+
+ masm.branchTestObjClass(Assembler::NotEqual, lhs,
+ {ins->mir()->getClass1(), ins->mir()->getClass2()},
+ temp, spectreRegToZero, &notEqual);
+
+ // Can't signal failure by returning null here, so bail.
+ bailoutFrom(&notEqual, ins->snapshot());
+}
+
void CodeGenerator::visitGuardToFunction(LGuardToFunction* ins) {
Register lhs = ToRegister(ins->lhs());
Register temp = ToRegister(ins->temp0());
@@ -20133,7 +20237,8 @@ void CodeGenerator::visitToHashableString(LToHashableString* ins) {
Address(input, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), &isAtom);
- masm.lookupStringInAtomCacheLastLookups(input, output, ool->entry());
+ masm.lookupStringInAtomCacheLastLookups(input, output, output, ool->entry());
+ masm.jump(ool->rejoin());
masm.bind(&isAtom);
masm.movePtr(input, output);
masm.bind(ool->rejoin());
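
The two stackmap hunks above make the same per-register decision; stitched together, with the word-index computation elided, the classification is:

for (GeneralRegisterForwardIterator it(refRegs); it.more(); ++it) {
  Register reg = *it;
  size_t index = 0;  // word index of |reg| in the register dump area (elided)
  if (wasmAnyRefRegs.has(reg)) {
    // The register holds a wasm anyref.
    stackMap->set(index, wasm::StackMap::AnyRef);
  } else {
    // The register holds a raw pointer into an object's slots or elements.
    MOZ_ASSERT(slotsOrElementsRegs.has(reg));
    stackMap->set(index, wasm::StackMap::ArrayDataPointer);
  }
}
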
diff --git a/js/src/jit/Disassemble.cpp b/js/src/jit/Disassemble.cpp
index 652c381ce7..df768d4fd1 100644
--- a/js/src/jit/Disassemble.cpp
+++ b/js/src/jit/Disassemble.cpp
@@ -22,6 +22,8 @@
# include "jit/arm64/vixl/Instructions-vixl.h" // vixl::Instruction
# elif defined(JS_CODEGEN_ARM)
# include "jit/arm/disasm/Disasm-arm.h" // js::jit::disasm::*
+# elif defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/disasm/Disasm-riscv64.h" // js::jit::disasm::*
# endif
#endif
@@ -99,6 +101,31 @@ void Disassemble(uint8_t* code, size_t length, InstrCallback callback) {
}
}
+#elif defined(JS_JITSPEW) && defined(JS_CODEGEN_RISCV64)
+
+bool HasDisassembler() { return true; }
+
+void Disassemble(uint8_t* code, size_t length, InstrCallback callback) {
+ disasm::NameConverter converter;
+ disasm::Disassembler d(converter);
+
+ uint8_t* instr = code;
+ uint8_t* end = code + length;
+
+ while (instr < end) {
+ EmbeddedVector<char, ReasonableBufferSize> buffer;
+ buffer[0] = '\0';
+ uint8_t* next_instr = instr + d.InstructionDecode(buffer, instr);
+
+ JS::UniqueChars formatted =
+ JS_smprintf("0x%p %08x %s", instr, *reinterpret_cast<int32_t*>(instr),
+ buffer.start());
+ callback(formatted.get());
+
+ instr = next_instr;
+ }
+}
+
#else
bool HasDisassembler() { return false; }
diff --git a/js/src/jit/ExecutableAllocator.h b/js/src/jit/ExecutableAllocator.h
index 85c01562c3..02c8727e85 100644
--- a/js/src/jit/ExecutableAllocator.h
+++ b/js/src/jit/ExecutableAllocator.h
@@ -72,7 +72,8 @@ class ExecutablePool {
bool m_mark : 1;
// Number of bytes currently allocated for each CodeKind.
- mozilla::EnumeratedArray<CodeKind, CodeKind::Count, size_t> m_codeBytes;
+ mozilla::EnumeratedArray<CodeKind, size_t, size_t(CodeKind::Count)>
+ m_codeBytes;
public:
void release(bool willDestroy = false);
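
This hunk, like the ones in CodeGenerator.cpp above and in IonOptimizationLevels.h, JitRuntime.h and JitZone.h below, is a mechanical update for the changed mozilla::EnumeratedArray template: the value type now comes second and the size is a size_t non-type parameter instead of an enumerator. A minimal usage sketch under that assumption, with an illustrative enum:

#include <cstddef>
#include <cstdint>
#include "mozilla/EnumeratedArray.h"

enum class Channel : uint8_t { Audio, Video, Count };  // illustrative only

// Before: mozilla::EnumeratedArray<Channel, Channel::Count, size_t>
// After:  mozilla::EnumeratedArray<Channel, size_t, size_t(Channel::Count)>
mozilla::EnumeratedArray<Channel, size_t, size_t(Channel::Count)> bytesUsed;

void bump(Channel c, size_t n) { bytesUsed[c] += n; }  // indexing is unchanged
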
diff --git a/js/src/jit/GenerateAtomicOperations.py b/js/src/jit/GenerateAtomicOperations.py
index 8e37e5dcd6..9194b8b685 100644
--- a/js/src/jit/GenerateAtomicOperations.py
+++ b/js/src/jit/GenerateAtomicOperations.py
@@ -50,8 +50,6 @@ def gen_load(fun_name, cpp_type, size, barrier):
# - MacroAssembler::wasmLoad
if cpu_arch in ("x86", "x86_64"):
insns = ""
- if barrier:
- insns += fmt_insn("mfence")
if size == 8:
insns += fmt_insn("movb (%[arg]), %[res]")
elif size == 16:
@@ -61,8 +59,6 @@ def gen_load(fun_name, cpp_type, size, barrier):
else:
assert size == 64
insns += fmt_insn("movq (%[arg]), %[res]")
- if barrier:
- insns += fmt_insn("mfence")
return """
INLINE_ATTR %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
%(cpp_type)s res;
@@ -78,8 +74,6 @@ def gen_load(fun_name, cpp_type, size, barrier):
}
if cpu_arch == "aarch64":
insns = ""
- if barrier:
- insns += fmt_insn("dmb ish")
if size == 8:
insns += fmt_insn("ldrb %w[res], [%x[arg]]")
elif size == 16:
@@ -106,8 +100,6 @@ def gen_load(fun_name, cpp_type, size, barrier):
}
if cpu_arch == "arm":
insns = ""
- if barrier:
- insns += fmt_insn("dmb sy")
if size == 8:
insns += fmt_insn("ldrb %[res], [%[arg]]")
elif size == 16:
@@ -141,8 +133,6 @@ def gen_store(fun_name, cpp_type, size, barrier):
# - MacroAssembler::wasmStore
if cpu_arch in ("x86", "x86_64"):
insns = ""
- if barrier:
- insns += fmt_insn("mfence")
if size == 8:
insns += fmt_insn("movb %[val], (%[addr])")
elif size == 16:
diff --git a/js/src/jit/GenerateCacheIRFiles.py b/js/src/jit/GenerateCacheIRFiles.py
index 5cecf82e64..d71c70b753 100644
--- a/js/src/jit/GenerateCacheIRFiles.py
+++ b/js/src/jit/GenerateCacheIRFiles.py
@@ -82,6 +82,7 @@ arg_writer_info = {
"BoolImm": ("bool", "writeBoolImm"),
"ByteImm": ("uint32_t", "writeByteImm"), # uint32_t to enable fits-in-byte asserts.
"GuardClassKindImm": ("GuardClassKind", "writeGuardClassKindImm"),
+ "ArrayBufferViewKindImm": ("ArrayBufferViewKind", "writeArrayBufferViewKindImm"),
"ValueTypeImm": ("ValueType", "writeValueTypeImm"),
"JSWhyMagicImm": ("JSWhyMagic", "writeJSWhyMagicImm"),
"CallFlagsImm": ("CallFlags", "writeCallFlagsImm"),
@@ -184,6 +185,11 @@ arg_reader_info = {
"BoolImm": ("bool", "", "reader.readBool()"),
"ByteImm": ("uint8_t", "", "reader.readByte()"),
"GuardClassKindImm": ("GuardClassKind", "", "reader.guardClassKind()"),
+ "ArrayBufferViewKindImm": (
+ "ArrayBufferViewKind",
+ "",
+ "reader.arrayBufferViewKind()",
+ ),
"ValueTypeImm": ("ValueType", "", "reader.valueType()"),
"JSWhyMagicImm": ("JSWhyMagic", "", "reader.whyMagic()"),
"CallFlagsImm": ("CallFlags", "", "reader.callFlags()"),
@@ -272,6 +278,7 @@ arg_spewer_method = {
"BoolImm": "spewBoolImm",
"ByteImm": "spewByteImm",
"GuardClassKindImm": "spewGuardClassKindImm",
+ "ArrayBufferViewKindImm": "spewArrayBufferViewKindImm",
"ValueTypeImm": "spewValueTypeImm",
"JSWhyMagicImm": "spewJSWhyMagicImm",
"CallFlagsImm": "spewCallFlagsImm",
@@ -415,6 +422,7 @@ arg_length = {
"JSOpImm": 1,
"ValueTypeImm": 1,
"GuardClassKindImm": 1,
+ "ArrayBufferViewKindImm": 1,
"JSWhyMagicImm": 1,
"WasmValTypeImm": 1,
"Int32Imm": 4,
diff --git a/js/src/jit/IonAnalysis.cpp b/js/src/jit/IonAnalysis.cpp
index a0c9a51c39..543ed0eb83 100644
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -747,13 +747,13 @@ static bool IsDiamondPattern(MBasicBlock* initialBlock) {
MTest* initialTest = ins->toTest();
MBasicBlock* trueBranch = initialTest->ifTrue();
- if (trueBranch->numPredecessors() != 1 || trueBranch->numSuccessors() != 1) {
+ if (trueBranch->numPredecessors() != 1 || !trueBranch->lastIns()->isGoto()) {
return false;
}
MBasicBlock* falseBranch = initialTest->ifFalse();
if (falseBranch->numPredecessors() != 1 ||
- falseBranch->numSuccessors() != 1) {
+ !falseBranch->lastIns()->isGoto()) {
return false;
}
@@ -2228,6 +2228,7 @@ bool TypeAnalyzer::adjustPhiInputs(MPhi* phi) {
phi->replaceOperand(i, in->toBox()->input());
} else {
MInstruction* replacement;
+ MBasicBlock* predecessor = phi->block()->getPredecessor(i);
if (phiType == MIRType::Double && IsFloatType(in->type())) {
// Convert int32 operands to double.
@@ -2239,14 +2240,14 @@ bool TypeAnalyzer::adjustPhiInputs(MPhi* phi) {
// See comment below
if (in->type() != MIRType::Value) {
MBox* box = MBox::New(alloc(), in);
- in->block()->insertBefore(in->block()->lastIns(), box);
+ predecessor->insertAtEnd(box);
in = box;
}
MUnbox* unbox =
MUnbox::New(alloc(), in, MIRType::Double, MUnbox::Fallible);
unbox->setBailoutKind(BailoutKind::SpeculativePhi);
- in->block()->insertBefore(in->block()->lastIns(), unbox);
+ predecessor->insertAtEnd(unbox);
replacement = MToFloat32::New(alloc(), in);
}
} else {
@@ -2255,7 +2256,7 @@ bool TypeAnalyzer::adjustPhiInputs(MPhi* phi) {
// below.
if (in->type() != MIRType::Value) {
MBox* box = MBox::New(alloc(), in);
- in->block()->insertBefore(in->block()->lastIns(), box);
+ predecessor->insertAtEnd(box);
in = box;
}
@@ -2265,7 +2266,7 @@ bool TypeAnalyzer::adjustPhiInputs(MPhi* phi) {
}
replacement->setBailoutKind(BailoutKind::SpeculativePhi);
- in->block()->insertBefore(in->block()->lastIns(), replacement);
+ predecessor->insertAtEnd(replacement);
phi->replaceOperand(i, replacement);
}
}
@@ -4452,6 +4453,10 @@ static bool NeedsKeepAlive(MInstruction* slotsOrElements, MInstruction* use) {
if (use->type() == MIRType::BigInt) {
return true;
}
+ if (use->isLoadTypedArrayElementHole() &&
+ Scalar::isBigIntType(use->toLoadTypedArrayElementHole()->arrayType())) {
+ return true;
+ }
MBasicBlock* block = use->block();
MInstructionIterator iter(block->begin(slotsOrElements));
diff --git a/js/src/jit/IonOptimizationLevels.h b/js/src/jit/IonOptimizationLevels.h
index e68dfaa124..92e4586131 100644
--- a/js/src/jit/IonOptimizationLevels.h
+++ b/js/src/jit/IonOptimizationLevels.h
@@ -181,8 +181,8 @@ class OptimizationInfo {
class OptimizationLevelInfo {
private:
- mozilla::EnumeratedArray<OptimizationLevel, OptimizationLevel::Count,
- OptimizationInfo>
+ mozilla::EnumeratedArray<OptimizationLevel, OptimizationInfo,
+ size_t(OptimizationLevel::Count)>
infos_;
public:
diff --git a/js/src/jit/JitFrames.cpp b/js/src/jit/JitFrames.cpp
index 7b3cb1184e..176b988e05 100644
--- a/js/src/jit/JitFrames.cpp
+++ b/js/src/jit/JitFrames.cpp
@@ -20,7 +20,6 @@
#include "jit/JitRuntime.h"
#include "jit/JitSpewer.h"
#include "jit/LIR.h"
-#include "jit/PcScriptCache.h"
#include "jit/Recover.h"
#include "jit/Safepoints.h"
#include "jit/ScriptFromCalleeToken.h"
@@ -922,32 +921,32 @@ static void TraceThisAndArguments(JSTracer* trc, const JSJitFrameIter& frame,
return;
}
- size_t nargs = layout->numActualArgs();
- size_t nformals = 0;
-
JSFunction* fun = CalleeTokenToFunction(layout->calleeToken());
+
+ size_t numFormals = fun->nargs();
+ size_t numArgs = std::max(layout->numActualArgs(), numFormals);
+ size_t firstArg = 0;
+
if (frame.type() != FrameType::JSJitToWasm &&
!frame.isExitFrameLayout<CalledFromJitExitFrameLayout>() &&
!fun->nonLazyScript()->mayReadFrameArgsDirectly()) {
- nformals = fun->nargs();
+ firstArg = numFormals;
}
- size_t newTargetOffset = std::max(nargs, fun->nargs());
-
Value* argv = layout->thisAndActualArgs();
// Trace |this|.
TraceRoot(trc, argv, "ion-thisv");
- // Trace actual arguments beyond the formals. Note + 1 for thisv.
- for (size_t i = nformals + 1; i < nargs + 1; i++) {
- TraceRoot(trc, &argv[i], "ion-argv");
+ // Trace arguments. Note + 1 for thisv.
+ for (size_t i = firstArg; i < numArgs; i++) {
+ TraceRoot(trc, &argv[i + 1], "ion-argv");
}
// Always trace the new.target from the frame. It's not in the snapshots.
// +1 to pass |this|
if (CalleeTokenIsConstructing(layout->calleeToken())) {
- TraceRoot(trc, &argv[1 + newTargetOffset], "ion-newTarget");
+ TraceRoot(trc, &argv[1 + numArgs], "ion-newTarget");
}
}
@@ -1539,90 +1538,6 @@ JSScript* GetTopJitJSScript(JSContext* cx) {
return frame.script();
}
-void GetPcScript(JSContext* cx, JSScript** scriptRes, jsbytecode** pcRes) {
- JitSpew(JitSpew_IonSnapshots, "Recover PC & Script from the last frame.");
-
- // Recover the return address so that we can look it up in the
- // PcScriptCache, as script/pc computation is expensive.
- JitActivationIterator actIter(cx);
- OnlyJSJitFrameIter it(actIter);
- uint8_t* retAddr;
- if (it.frame().isExitFrame()) {
- ++it;
-
- // Skip baseline interpreter entry frames.
- // Can exist before rectifier frames.
- if (it.frame().isBaselineInterpreterEntry()) {
- ++it;
- }
-
- // Skip rectifier frames.
- if (it.frame().isRectifier()) {
- ++it;
- MOZ_ASSERT(it.frame().isBaselineStub() || it.frame().isBaselineJS() ||
- it.frame().isIonJS());
- }
-
- // Skip Baseline/Ion stub and IC call frames.
- if (it.frame().isBaselineStub()) {
- ++it;
- MOZ_ASSERT(it.frame().isBaselineJS());
- } else if (it.frame().isIonICCall()) {
- ++it;
- MOZ_ASSERT(it.frame().isIonJS());
- }
-
- MOZ_ASSERT(it.frame().isBaselineJS() || it.frame().isIonJS());
-
- // Don't use the return address and the cache if the BaselineFrame is
- // running in the Baseline Interpreter. In this case the bytecode pc is
- // cheap to get, so we won't benefit from the cache, and the return address
- // does not map to a single bytecode pc.
- if (it.frame().isBaselineJS() &&
- it.frame().baselineFrame()->runningInInterpreter()) {
- it.frame().baselineScriptAndPc(scriptRes, pcRes);
- return;
- }
-
- retAddr = it.frame().resumePCinCurrentFrame();
- } else {
- MOZ_ASSERT(it.frame().isBailoutJS());
- retAddr = it.frame().returnAddress();
- }
-
- MOZ_ASSERT(retAddr);
-
- uint32_t hash = PcScriptCache::Hash(retAddr);
-
- // Lazily initialize the cache. The allocation may safely fail and will not
- // GC.
- if (MOZ_UNLIKELY(cx->ionPcScriptCache == nullptr)) {
- cx->ionPcScriptCache =
- MakeUnique<PcScriptCache>(cx->runtime()->gc.gcNumber());
- }
-
- if (cx->ionPcScriptCache.ref() &&
- cx->ionPcScriptCache->get(cx->runtime(), hash, retAddr, scriptRes,
- pcRes)) {
- return;
- }
-
- // Lookup failed: undertake expensive process to determine script and pc.
- if (it.frame().isIonJS() || it.frame().isBailoutJS()) {
- InlineFrameIterator ifi(cx, &it.frame());
- *scriptRes = ifi.script();
- *pcRes = ifi.pc();
- } else {
- MOZ_ASSERT(it.frame().isBaselineJS());
- it.frame().baselineScriptAndPc(scriptRes, pcRes);
- }
-
- // Add entry to cache.
- if (cx->ionPcScriptCache.ref()) {
- cx->ionPcScriptCache->add(hash, retAddr, *pcRes, *scriptRes);
- }
-}
-
RInstructionResults::RInstructionResults(JitFrameLayout* fp)
: results_(nullptr), fp_(fp), initialized_(false) {}
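
A compact picture of the argument layout the rewritten TraceThisAndArguments loop assumes, reconstructed from the indices used above (sketch only):

// With numFormals = fun->nargs() and
//      numArgs    = std::max(layout->numActualArgs(), numFormals):
//
//   argv[0]                    |this|       always traced
//   argv[1] .. argv[numArgs]   arguments    traced starting at firstArg, where
//                                           firstArg == numFormals when the
//                                           script cannot read its frame args
//                                           directly, and 0 otherwise
//   argv[1 + numArgs]          new.target   traced only for constructing frames
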
diff --git a/js/src/jit/JitFrames.h b/js/src/jit/JitFrames.h
index fe9b2942d3..ab882e7986 100644
--- a/js/src/jit/JitFrames.h
+++ b/js/src/jit/JitFrames.h
@@ -771,8 +771,6 @@ class InvalidationBailoutStack {
void checkInvariants() const;
};
-void GetPcScript(JSContext* cx, JSScript** scriptRes, jsbytecode** pcRes);
-
// Baseline requires one slot for this/argument type checks.
static const uint32_t MinJITStackSize = 1;
diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp
index f8cdbef8ba..e9d389cf60 100644
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -376,6 +376,10 @@ DefaultJitOptions::DefaultJitOptions() {
// ***** Irregexp shim flags *****
+ // Whether the stage 3 regexp modifiers proposal is enabled.
+ SET_DEFAULT(js_regexp_modifiers, false);
+ // Whether the stage 3 duplicate named capture groups proposal is enabled.
+ SET_DEFAULT(js_regexp_duplicate_named_groups, false);
// V8 uses this for differential fuzzing to handle stack overflows.
// We address the same problem in StackLimitCheck::HasOverflowed.
SET_DEFAULT(correctness_fuzzer_suppressions, false);
diff --git a/js/src/jit/JitOptions.h b/js/src/jit/JitOptions.h
index fd5a9726ed..d1fcae081c 100644
--- a/js/src/jit/JitOptions.h
+++ b/js/src/jit/JitOptions.h
@@ -143,6 +143,8 @@ struct DefaultJitOptions {
// Irregexp shim flags
bool correctness_fuzzer_suppressions;
bool enable_regexp_unaligned_accesses;
+ bool js_regexp_modifiers;
+ bool js_regexp_duplicate_named_groups;
bool regexp_possessive_quantifier;
bool regexp_optimization;
bool regexp_peephole_optimization;
diff --git a/js/src/jit/JitRuntime.h b/js/src/jit/JitRuntime.h
index d0ce8422de..7d038ed0e2 100644
--- a/js/src/jit/JitRuntime.h
+++ b/js/src/jit/JitRuntime.h
@@ -75,15 +75,15 @@ enum class BailoutReturnKind {
class BaselineICFallbackCode {
JitCode* code_ = nullptr;
using OffsetArray =
- mozilla::EnumeratedArray<BaselineICFallbackKind,
- BaselineICFallbackKind::Count, uint32_t>;
+ mozilla::EnumeratedArray<BaselineICFallbackKind, uint32_t,
+ size_t(BaselineICFallbackKind::Count)>;
OffsetArray offsets_ = {};
// Keep track of offset into various baseline stubs' code at return
// point from called script.
using BailoutReturnArray =
- mozilla::EnumeratedArray<BailoutReturnKind, BailoutReturnKind::Count,
- uint32_t>;
+ mozilla::EnumeratedArray<BailoutReturnKind, uint32_t,
+ size_t(BailoutReturnKind::Count)>;
BailoutReturnArray bailoutReturnOffsets_ = {};
public:
@@ -175,13 +175,13 @@ class JitRuntime {
WriteOnceData<uint32_t> doubleToInt32ValueStubOffset_{0};
// Thunk to do a generic call from Ion.
- mozilla::EnumeratedArray<IonGenericCallKind, IonGenericCallKind::Count,
- WriteOnceData<uint32_t>>
+ mozilla::EnumeratedArray<IonGenericCallKind, WriteOnceData<uint32_t>,
+ size_t(IonGenericCallKind::Count)>
ionGenericCallStubOffset_;
// Thunk used by the debugger for breakpoint and step mode.
- mozilla::EnumeratedArray<DebugTrapHandlerKind, DebugTrapHandlerKind::Count,
- WriteOnceData<JitCode*>>
+ mozilla::EnumeratedArray<DebugTrapHandlerKind, WriteOnceData<JitCode*>,
+ size_t(DebugTrapHandlerKind::Count)>
debugTrapHandlers_;
// BaselineInterpreter state.
diff --git a/js/src/jit/JitScript.cpp b/js/src/jit/JitScript.cpp
index f2f6ee2c25..62a14a70b6 100644
--- a/js/src/jit/JitScript.cpp
+++ b/js/src/jit/JitScript.cpp
@@ -517,7 +517,13 @@ void ICScript::purgeStubs(Zone* zone, ICStubSpace& newStubSpace) {
if (fallback->trialInliningState() == TrialInliningState::Inlined &&
hasInlinedChild(fallback->pcOffset())) {
MOZ_ASSERT(active());
- MOZ_ASSERT(findInlinedChild(fallback->pcOffset())->active());
+#ifdef DEBUG
+ // The callee script must be active. Also assert its bytecode size field
+ // is valid, because this helps catch memory safety issues (bug 1871947).
+ ICScript* callee = findInlinedChild(fallback->pcOffset());
+ MOZ_ASSERT(callee->active());
+ MOZ_ASSERT(callee->bytecodeSize() < inliningRoot()->totalBytecodeSize());
+#endif
JSRuntime* rt = zone->runtimeFromMainThread();
ICCacheIRStub* prev = nullptr;
@@ -718,6 +724,9 @@ static void MarkActiveICScriptsAndCopyStubs(
ICCacheIRStub* newStub = stub->clone(cx->runtime(), newStubSpace);
layout->setStubPtr(newStub);
+ // If this is a trial-inlining call site, also preserve the callee
+ // ICScript. Inlined constructor calls invoke CreateThisFromIC (which
+ // can trigger GC) before using the inlined ICScript.
JSJitFrameIter parentFrame(frame);
++parentFrame;
BaselineFrame* blFrame = parentFrame.baselineFrame();
diff --git a/js/src/jit/JitSpewer.cpp b/js/src/jit/JitSpewer.cpp
index 6fcd25d6e3..11e3165240 100644
--- a/js/src/jit/JitSpewer.cpp
+++ b/js/src/jit/JitSpewer.cpp
@@ -369,7 +369,6 @@ static void PrintHelpAndExit(int status = 0) {
"compiled functions only).\n"
" profiling Profiling-related information\n"
" dump-mir-expr Dump the MIR expressions\n"
- " scriptstats Tracelogger summary stats\n"
" warp-snapshots WarpSnapshots created by WarpOracle\n"
" warp-transpiler Warp CacheIR transpiler\n"
" warp-trial-inlining Trial inlining for Warp\n"
@@ -475,8 +474,6 @@ void jit::CheckLogging() {
EnableChannel(JitSpew_Profiling);
} else if (IsFlag(found, "dump-mir-expr")) {
EnableChannel(JitSpew_MIRExpressions);
- } else if (IsFlag(found, "scriptstats")) {
- EnableChannel(JitSpew_ScriptStats);
} else if (IsFlag(found, "warp-snapshots")) {
EnableChannel(JitSpew_WarpSnapshots);
} else if (IsFlag(found, "warp-transpiler")) {
diff --git a/js/src/jit/JitSpewer.h b/js/src/jit/JitSpewer.h
index 2cc56d9cf7..bfc92c74f2 100644
--- a/js/src/jit/JitSpewer.h
+++ b/js/src/jit/JitSpewer.h
@@ -69,8 +69,6 @@ namespace jit {
_(MarkLoadsUsedAsPropertyKeys) \
/* Output a list of MIR expressions */ \
_(MIRExpressions) \
- /* Spew Tracelogger summary stats */ \
- _(ScriptStats) \
\
/* BASELINE COMPILER SPEW */ \
\
diff --git a/js/src/jit/JitZone.h b/js/src/jit/JitZone.h
index a17b73c20e..d4f2350b8d 100644
--- a/js/src/jit/JitZone.h
+++ b/js/src/jit/JitZone.h
@@ -141,7 +141,8 @@ class JitZone {
Count
};
- mozilla::EnumeratedArray<StubIndex, StubIndex::Count, WeakHeapPtr<JitCode*>>
+ mozilla::EnumeratedArray<StubIndex, WeakHeapPtr<JitCode*>,
+ size_t(StubIndex::Count)>
stubs_;
mozilla::Maybe<IonCompilationId> currentCompilationId_;
diff --git a/js/src/jit/LIROps.yaml b/js/src/jit/LIROps.yaml
index 44ef48a4d8..f13c4b0745 100644
--- a/js/src/jit/LIROps.yaml
+++ b/js/src/jit/LIROps.yaml
@@ -1875,6 +1875,49 @@
operands:
object: WordSized
+# Read the length of a resizable typed array.
+- name: ResizableTypedArrayLength
+ result_type: WordSized
+ operands:
+ object: WordSized
+ arguments:
+ synchronization: js::jit::Synchronization
+ num_temps: 1
+
+# Read the possibly out-of-bounds byteOffset of a resizable typed array.
+- name: ResizableTypedArrayByteOffsetMaybeOutOfBounds
+ result_type: WordSized
+ operands:
+ object: WordSized
+ num_temps: 1
+
+# Read the byte length of a resizable data view.
+- name: ResizableDataViewByteLength
+ result_type: WordSized
+ operands:
+ object: WordSized
+ arguments:
+ synchronization: js::jit::Synchronization
+ num_temps: 1
+
+# Read the byte length of a growable shared array buffer.
+- name: GrowableSharedArrayBufferByteLength
+ result_type: WordSized
+ operands:
+ object: WordSized
+
+# Guard a resizable array buffer view is in-bounds.
+- name: GuardResizableArrayBufferViewInBounds
+ operands:
+ object: WordSized
+ num_temps: 1
+
+# Guard a resizable array buffer view is in-bounds or its buffer is detached.
+- name: GuardResizableArrayBufferViewInBoundsOrDetached
+ operands:
+ object: WordSized
+ num_temps: 1
+
- name: GuardHasAttachedArrayBuffer
operands:
object: WordSized
@@ -2052,9 +2095,9 @@
- name: LoadTypedArrayElementHole
result_type: BoxedValue
operands:
- object: WordSized
+ elements: WordSized
index: WordSized
- num_temps: 1
+ length: WordSized
mir_op: true
- name: LoadTypedArrayElementHoleBigInt
@@ -2941,6 +2984,11 @@
object: WordSized
num_temps: 1
+- name: GuardIsResizableTypedArray
+ operands:
+ object: WordSized
+ num_temps: 1
+
- name: GuardHasProxyHandler
operands:
object: WordSized
@@ -3069,6 +3117,13 @@
num_temps: 1
mir_op: true
+- name: GuardToEitherClass
+ result_type: WordSized
+ operands:
+ lhs: WordSized
+ num_temps: 1
+ mir_op: true
+
- name: GuardToFunction
result_type: WordSized
operands:
diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp
index 8a28ea123c..b0007a114d 100644
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -3828,6 +3828,20 @@ void LIRGenerator::visitGetNextEntryForIterator(MGetNextEntryForIterator* ins) {
define(lir, ins);
}
+static auto SynchronizeLoad(MemoryBarrierRequirement requiresBarrier) {
+ if (requiresBarrier == MemoryBarrierRequirement::Required) {
+ return Synchronization::Load();
+ }
+ return Synchronization::None();
+}
+
+static auto SynchronizeStore(MemoryBarrierRequirement requiresBarrier) {
+ if (requiresBarrier == MemoryBarrierRequirement::Required) {
+ return Synchronization::Store();
+ }
+ return Synchronization::None();
+}
+
void LIRGenerator::visitArrayBufferByteLength(MArrayBufferByteLength* ins) {
MOZ_ASSERT(ins->object()->type() == MIRType::Object);
MOZ_ASSERT(ins->type() == MIRType::IntPtr);
@@ -3870,6 +3884,70 @@ void LIRGenerator::visitTypedArrayElementSize(MTypedArrayElementSize* ins) {
ins);
}
+void LIRGenerator::visitResizableTypedArrayLength(
+ MResizableTypedArrayLength* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::IntPtr);
+
+ auto sync = SynchronizeLoad(ins->requiresMemoryBarrier());
+ auto* lir = new (alloc())
+ LResizableTypedArrayLength(useRegister(ins->object()), temp(), sync);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitResizableTypedArrayByteOffsetMaybeOutOfBounds(
+ MResizableTypedArrayByteOffsetMaybeOutOfBounds* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::IntPtr);
+
+ auto* lir = new (alloc()) LResizableTypedArrayByteOffsetMaybeOutOfBounds(
+ useRegister(ins->object()), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitResizableDataViewByteLength(
+ MResizableDataViewByteLength* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::IntPtr);
+
+ auto sync = SynchronizeLoad(ins->requiresMemoryBarrier());
+ auto* lir = new (alloc())
+ LResizableDataViewByteLength(useRegister(ins->object()), temp(), sync);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitGrowableSharedArrayBufferByteLength(
+ MGrowableSharedArrayBufferByteLength* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::IntPtr);
+
+ auto* lir = new (alloc())
+ LGrowableSharedArrayBufferByteLength(useRegisterAtStart(ins->object()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitGuardResizableArrayBufferViewInBounds(
+ MGuardResizableArrayBufferViewInBounds* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LGuardResizableArrayBufferViewInBounds(
+ useRegister(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
+void LIRGenerator::visitGuardResizableArrayBufferViewInBoundsOrDetached(
+ MGuardResizableArrayBufferViewInBoundsOrDetached* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir = new (alloc()) LGuardResizableArrayBufferViewInBoundsOrDetached(
+ useRegister(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
void LIRGenerator::visitGuardHasAttachedArrayBuffer(
MGuardHasAttachedArrayBuffer* ins) {
MOZ_ASSERT(ins->object()->type() == MIRType::Object);
@@ -4298,8 +4376,9 @@ void LIRGenerator::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins) {
MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
MOZ_ASSERT(IsNumericType(ins->type()) || ins->type() == MIRType::Boolean);
- if (Scalar::isBigIntType(ins->storageType()) &&
- ins->requiresMemoryBarrier()) {
+ auto sync = SynchronizeLoad(ins->requiresMemoryBarrier());
+
+ if (Scalar::isBigIntType(ins->storageType()) && !sync.isNone()) {
lowerAtomicLoad64(ins);
return;
}
@@ -4310,8 +4389,7 @@ void LIRGenerator::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins) {
// NOTE: the generated code must match the assembly code in gen_load in
// GenerateAtomicOperations.py
- Synchronization sync = Synchronization::Load();
- if (ins->requiresMemoryBarrier()) {
+ if (!sync.isNone()) {
LMemoryBarrier* fence = new (alloc()) LMemoryBarrier(sync.barrierBefore);
add(fence, ins);
}
@@ -4338,7 +4416,7 @@ void LIRGenerator::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins) {
assignSafepoint(lir, ins);
}
- if (ins->requiresMemoryBarrier()) {
+ if (!sync.isNone()) {
LMemoryBarrier* fence = new (alloc()) LMemoryBarrier(sync.barrierAfter);
add(fence, ins);
}
@@ -4431,29 +4509,32 @@ void LIRGenerator::visitClampToUint8(MClampToUint8* ins) {
void LIRGenerator::visitLoadTypedArrayElementHole(
MLoadTypedArrayElementHole* ins) {
- MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+ MOZ_ASSERT(ins->length()->type() == MIRType::IntPtr);
MOZ_ASSERT(ins->type() == MIRType::Value);
- const LUse object = useRegister(ins->object());
+ const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegister(ins->index());
+ const LAllocation length = useRegister(ins->length());
if (!Scalar::isBigIntType(ins->arrayType())) {
- auto* lir = new (alloc()) LLoadTypedArrayElementHole(object, index, temp());
+ auto* lir =
+ new (alloc()) LLoadTypedArrayElementHole(elements, index, length);
if (ins->fallible()) {
assignSnapshot(lir, ins->bailoutKind());
}
defineBox(lir, ins);
} else {
#ifdef JS_CODEGEN_X86
- LDefinition tmp = LDefinition::BogusTemp();
+ LInt64Definition temp64 = LInt64Definition::BogusTemp();
#else
- LDefinition tmp = temp();
+ LInt64Definition temp64 = tempInt64();
#endif
- auto* lir = new (alloc())
- LLoadTypedArrayElementHoleBigInt(object, index, tmp, tempInt64());
+ auto* lir = new (alloc()) LLoadTypedArrayElementHoleBigInt(
+ elements, index, length, temp(), temp64);
defineBox(lir, ins);
assignSafepoint(lir, ins);
}
@@ -4474,7 +4555,9 @@ void LIRGenerator::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins) {
MOZ_ASSERT(ins->value()->type() == MIRType::Int32);
}
- if (ins->isBigIntWrite() && ins->requiresMemoryBarrier()) {
+ auto sync = SynchronizeStore(ins->requiresMemoryBarrier());
+
+ if (ins->isBigIntWrite() && !sync.isNone()) {
lowerAtomicStore64(ins);
return;
}
@@ -4500,8 +4583,7 @@ void LIRGenerator::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins) {
//
// NOTE: the generated code must match the assembly code in gen_store in
// GenerateAtomicOperations.py
- Synchronization sync = Synchronization::Store();
- if (ins->requiresMemoryBarrier()) {
+ if (!sync.isNone()) {
LMemoryBarrier* fence = new (alloc()) LMemoryBarrier(sync.barrierBefore);
add(fence, ins);
}
@@ -4511,7 +4593,7 @@ void LIRGenerator::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins) {
add(new (alloc()) LStoreUnboxedBigInt(elements, index, value, tempInt64()),
ins);
}
- if (ins->requiresMemoryBarrier()) {
+ if (!sync.isNone()) {
LMemoryBarrier* fence = new (alloc()) LMemoryBarrier(sync.barrierAfter);
add(fence, ins);
}
@@ -5154,6 +5236,17 @@ void LIRGenerator::visitGuardIsFixedLengthTypedArray(
redefine(ins, ins->object());
}
+void LIRGenerator::visitGuardIsResizableTypedArray(
+ MGuardIsResizableTypedArray* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ auto* lir = new (alloc())
+ LGuardIsResizableTypedArray(useRegister(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ add(lir, ins);
+ redefine(ins, ins->object());
+}
+
void LIRGenerator::visitGuardHasProxyHandler(MGuardHasProxyHandler* ins) {
MOZ_ASSERT(ins->object()->type() == MIRType::Object);
@@ -5694,6 +5787,15 @@ void LIRGenerator::visitGuardToClass(MGuardToClass* ins) {
defineReuseInput(lir, ins, 0);
}
+void LIRGenerator::visitGuardToEitherClass(MGuardToEitherClass* ins) {
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+ MOZ_ASSERT(ins->type() == MIRType::Object);
+ auto* lir = new (alloc())
+ LGuardToEitherClass(useRegisterAtStart(ins->object()), temp());
+ assignSnapshot(lir, ins->bailoutKind());
+ defineReuseInput(lir, ins, 0);
+}
+
void LIRGenerator::visitGuardToFunction(MGuardToFunction* ins) {
MOZ_ASSERT(ins->object()->type() == MIRType::Object);
MOZ_ASSERT(ins->type() == MIRType::Object);
@@ -7018,6 +7120,11 @@ void LIRGenerator::visitMapObjectSize(MMapObjectSize* ins) {
define(lir, ins);
}
+void LIRGenerator::visitPostIntPtrConversion(MPostIntPtrConversion* ins) {
+ // This operation is a no-op.
+ redefine(ins, ins->input());
+}
+
void LIRGenerator::visitConstant(MConstant* ins) {
if (!IsFloatingPointType(ins->type()) && ins->canEmitAtUses()) {
emitAtUses(ins);
diff --git a/js/src/jit/MIR.cpp b/js/src/jit/MIR.cpp
index dbaa73c9dd..c6daecb166 100644
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -6365,6 +6365,81 @@ AliasSet MGuardHasAttachedArrayBuffer::getAliasSet() const {
return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot);
}
+AliasSet MResizableTypedArrayByteOffsetMaybeOutOfBounds::getAliasSet() const {
+ // Loads the byteOffset and additionally checks for detached buffers, so the
+ // alias set also has to include |ObjectFields| and |FixedSlot|.
+ return AliasSet::Load(AliasSet::ArrayBufferViewLengthOrOffset |
+ AliasSet::ObjectFields | AliasSet::FixedSlot);
+}
+
+AliasSet MResizableTypedArrayLength::getAliasSet() const {
+ // Loads the length and byteOffset slots, the shared-elements flag, the
+ // auto-length fixed slot, and the shared raw-buffer length.
+ auto flags = AliasSet::ArrayBufferViewLengthOrOffset |
+ AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::SharedArrayRawBufferLength;
+
+ // When a barrier is needed make the instruction effectful by giving it a
+ // "store" effect. Also prevent reordering LoadUnboxedScalar before this
+ // instruction by including |UnboxedElement| in the alias set.
+ if (requiresMemoryBarrier() == MemoryBarrierRequirement::Required) {
+ return AliasSet::Store(flags | AliasSet::UnboxedElement);
+ }
+ return AliasSet::Load(flags);
+}
+
+bool MResizableTypedArrayLength::congruentTo(const MDefinition* ins) const {
+ if (requiresMemoryBarrier() == MemoryBarrierRequirement::Required) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+AliasSet MResizableDataViewByteLength::getAliasSet() const {
+ // Loads the length and byteOffset slots, the shared-elements flag, the
+ // auto-length fixed slot, and the shared raw-buffer length.
+ auto flags = AliasSet::ArrayBufferViewLengthOrOffset |
+ AliasSet::ObjectFields | AliasSet::FixedSlot |
+ AliasSet::SharedArrayRawBufferLength;
+
+ // When a barrier is needed make the instruction effectful by giving it a
+ // "store" effect. Also prevent reordering LoadUnboxedScalar before this
+ // instruction by including |UnboxedElement| in the alias set.
+ if (requiresMemoryBarrier() == MemoryBarrierRequirement::Required) {
+ return AliasSet::Store(flags | AliasSet::UnboxedElement);
+ }
+ return AliasSet::Load(flags);
+}
+
+bool MResizableDataViewByteLength::congruentTo(const MDefinition* ins) const {
+ if (requiresMemoryBarrier() == MemoryBarrierRequirement::Required) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
+AliasSet MGrowableSharedArrayBufferByteLength::getAliasSet() const {
+ // Requires a barrier, so make the instruction effectful by giving it a
+ // "store" effect. Also prevent reordering LoadUnboxedScalar before this
+ // instruction by including |UnboxedElement| in the alias set.
+ return AliasSet::Store(AliasSet::FixedSlot |
+ AliasSet::SharedArrayRawBufferLength |
+ AliasSet::UnboxedElement);
+}
+
+AliasSet MGuardResizableArrayBufferViewInBounds::getAliasSet() const {
+ // Additionally reads the |initialLength| and |initialByteOffset| slots, but
+ // since these can't change after construction, we don't need to track them.
+ return AliasSet::Load(AliasSet::ArrayBufferViewLengthOrOffset);
+}
+
+AliasSet MGuardResizableArrayBufferViewInBoundsOrDetached::getAliasSet() const {
+ // Loads the byteOffset and additionally checks for detached buffers, so the
+ // alias set also has to include |ObjectFields| and |FixedSlot|.
+ return AliasSet::Load(AliasSet::ArrayBufferViewLengthOrOffset |
+ AliasSet::ObjectFields | AliasSet::FixedSlot);
+}
+
AliasSet MArrayPush::getAliasSet() const {
return AliasSet::Store(AliasSet::ObjectFields | AliasSet::Element);
}
@@ -6882,6 +6957,16 @@ MDefinition* MGuardToClass::foldsTo(TempAllocator& alloc) {
return object();
}
+MDefinition* MGuardToEitherClass::foldsTo(TempAllocator& alloc) {
+ const JSClass* clasp = GetObjectKnownJSClass(object());
+ if (!clasp || (getClass1() != clasp && getClass2() != clasp)) {
+ return this;
+ }
+
+ AssertKnownClass(alloc, this, object());
+ return object();
+}
+
MDefinition* MGuardToFunction::foldsTo(TempAllocator& alloc) {
if (GetObjectKnownClass(object()) != KnownClass::Function) {
return this;
diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
index 07701847eb..d882665a65 100644
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -421,10 +421,13 @@ class AliasSet {
// The generation counter associated with the global object
GlobalGenerationCounter = 1 << 26,
- Last = GlobalGenerationCounter,
+ // The SharedArrayRawBuffer::length field.
+ SharedArrayRawBufferLength = 1 << 27,
+
+ Last = SharedArrayRawBufferLength,
Any = Last | (Last - 1),
- NumCategories = 27,
+ NumCategories = 28,
// Indicates load or store.
Store_ = 1 << 31
@@ -657,7 +660,13 @@ class MDefinition : public MNode {
virtual HashNumber valueHash() const;
virtual bool congruentTo(const MDefinition* ins) const { return false; }
const MDefinition* skipObjectGuards() const;
+
+ // Note that when `congruentIfOperandsEqual(ins)` is called from within some
+ // class MFoo and returns `true`, `ins` is guaranteed to also be an MFoo, so
+ // it is safe to call `ins->toMFoo()` without first checking `ins->isMFoo()`.
bool congruentIfOperandsEqual(const MDefinition* ins) const;
+
virtual MDefinition* foldsTo(TempAllocator& alloc);
virtual void analyzeEdgeCasesForward();
virtual void analyzeEdgeCasesBackward();
@@ -1277,6 +1286,35 @@ class MVariadicT : public T {
// initializes the operands_ array and must be checked for OOM.
using MVariadicInstruction = MVariadicT<MInstruction>;
+// All barriered operations:
+// - MCompareExchangeTypedArrayElement
+// - MExchangeTypedArrayElement
+// - MAtomicTypedArrayElementBinop
+// - MGrowableSharedArrayBufferByteLength
+//
+// And operations which are optionally barriered:
+// - MLoadUnboxedScalar
+// - MStoreUnboxedScalar
+// - MResizableTypedArrayLength
+// - MResizableDataViewByteLength
+//
+// Must have the following attributes:
+//
+// - Not movable
+// - Not removable
+// - Not congruent with any other instruction
+// - Effectful (they alias every TypedArray store)
+//
+// The intended effect of those constraints is to prevent all loads and stores
+// preceding the barriered operation from being moved to after the barriered
+// operation, and vice versa, and to prevent the barriered operation from being
+// removed or hoisted.
+
+enum class MemoryBarrierRequirement : bool {
+ NotRequired,
+ Required,
+};
+
MIR_OPCODE_CLASS_GENERATED
// Truncation barrier. This is intended for protecting its input against
@@ -7040,44 +7078,22 @@ class MArrayPopShift : public MUnaryInstruction,
ALLOW_CLONE(MArrayPopShift)
};
-// All barriered operations - MCompareExchangeTypedArrayElement,
-// MExchangeTypedArrayElement, and MAtomicTypedArrayElementBinop, as
-// well as MLoadUnboxedScalar and MStoreUnboxedScalar when they are
-// marked as requiring a memory barrer - have the following
-// attributes:
-//
-// - Not movable
-// - Not removable
-// - Not congruent with any other instruction
-// - Effectful (they alias every TypedArray store)
-//
-// The intended effect of those constraints is to prevent all loads
-// and stores preceding the barriered operation from being moved to
-// after the barriered operation, and vice versa, and to prevent the
-// barriered operation from being removed or hoisted.
-
-enum MemoryBarrierRequirement {
- DoesNotRequireMemoryBarrier,
- DoesRequireMemoryBarrier
-};
-
-// Also see comments at MMemoryBarrierRequirement, above.
-
// Load an unboxed scalar value from an array buffer view or other object.
class MLoadUnboxedScalar : public MBinaryInstruction,
public NoTypePolicy::Data {
int32_t offsetAdjustment_ = 0;
Scalar::Type storageType_;
- bool requiresBarrier_;
+ MemoryBarrierRequirement requiresBarrier_;
- MLoadUnboxedScalar(
- MDefinition* elements, MDefinition* index, Scalar::Type storageType,
- MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier)
+ MLoadUnboxedScalar(MDefinition* elements, MDefinition* index,
+ Scalar::Type storageType,
+ MemoryBarrierRequirement requiresBarrier =
+ MemoryBarrierRequirement::NotRequired)
: MBinaryInstruction(classOpcode, elements, index),
storageType_(storageType),
- requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier) {
+ requiresBarrier_(requiresBarrier) {
setResultType(MIRType::Value);
- if (requiresBarrier_) {
+ if (requiresBarrier_ == MemoryBarrierRequirement::Required) {
setGuard(); // Not removable or movable
} else {
setMovable();
@@ -7097,7 +7113,7 @@ class MLoadUnboxedScalar : public MBinaryInstruction,
// Bailout if the result does not fit in an int32.
return storageType_ == Scalar::Uint32 && type() == MIRType::Int32;
}
- bool requiresMemoryBarrier() const { return requiresBarrier_; }
+ auto requiresMemoryBarrier() const { return requiresBarrier_; }
int32_t offsetAdjustment() const { return offsetAdjustment_; }
void setOffsetAdjustment(int32_t offsetAdjustment) {
offsetAdjustment_ = offsetAdjustment;
@@ -7105,14 +7121,14 @@ class MLoadUnboxedScalar : public MBinaryInstruction,
AliasSet getAliasSet() const override {
// When a barrier is needed make the instruction effectful by
// giving it a "store" effect.
- if (requiresBarrier_) {
+ if (requiresBarrier_ == MemoryBarrierRequirement::Required) {
return AliasSet::Store(AliasSet::UnboxedElement);
}
return AliasSet::Load(AliasSet::UnboxedElement);
}
bool congruentTo(const MDefinition* ins) const override {
- if (requiresBarrier_) {
+ if (requiresBarrier_ == MemoryBarrierRequirement::Required) {
return false;
}
if (!ins->isLoadUnboxedScalar()) {
@@ -7198,26 +7214,29 @@ class MLoadDataViewElement : public MTernaryInstruction,
};
// Load a value from a typed array. Out-of-bounds accesses are handled in-line.
-class MLoadTypedArrayElementHole : public MBinaryInstruction,
- public SingleObjectPolicy::Data {
+class MLoadTypedArrayElementHole : public MTernaryInstruction,
+ public NoTypePolicy::Data {
Scalar::Type arrayType_;
bool forceDouble_;
- MLoadTypedArrayElementHole(MDefinition* object, MDefinition* index,
- Scalar::Type arrayType, bool forceDouble)
- : MBinaryInstruction(classOpcode, object, index),
+ MLoadTypedArrayElementHole(MDefinition* elements, MDefinition* index,
+ MDefinition* length, Scalar::Type arrayType,
+ bool forceDouble)
+ : MTernaryInstruction(classOpcode, elements, index, length),
arrayType_(arrayType),
forceDouble_(forceDouble) {
setResultType(MIRType::Value);
setMovable();
+ MOZ_ASSERT(elements->type() == MIRType::Elements);
MOZ_ASSERT(index->type() == MIRType::IntPtr);
+ MOZ_ASSERT(length->type() == MIRType::IntPtr);
MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::MaxTypedArrayViewType);
}
public:
INSTRUCTION_HEADER(LoadTypedArrayElementHole)
TRIVIAL_NEW_WRAPPERS
- NAMED_OPERANDS((0, object), (1, index))
+ NAMED_OPERANDS((0, elements), (1, index), (2, length))
Scalar::Type arrayType() const { return arrayType_; }
bool forceDouble() const { return forceDouble_; }
@@ -7239,8 +7258,7 @@ class MLoadTypedArrayElementHole : public MBinaryInstruction,
return congruentIfOperandsEqual(other);
}
AliasSet getAliasSet() const override {
- return AliasSet::Load(AliasSet::UnboxedElement | AliasSet::ObjectFields |
- AliasSet::ArrayBufferViewLengthOrOffset);
+ return AliasSet::Load(AliasSet::UnboxedElement);
}
bool canProduceFloat32() const override {
return arrayType_ == Scalar::Float32;
@@ -7280,16 +7298,16 @@ class StoreUnboxedScalarBase {
class MStoreUnboxedScalar : public MTernaryInstruction,
public StoreUnboxedScalarBase,
public StoreUnboxedScalarPolicy::Data {
- bool requiresBarrier_;
+ MemoryBarrierRequirement requiresBarrier_;
- MStoreUnboxedScalar(
- MDefinition* elements, MDefinition* index, MDefinition* value,
- Scalar::Type storageType,
- MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier)
+ MStoreUnboxedScalar(MDefinition* elements, MDefinition* index,
+ MDefinition* value, Scalar::Type storageType,
+ MemoryBarrierRequirement requiresBarrier =
+ MemoryBarrierRequirement::NotRequired)
: MTernaryInstruction(classOpcode, elements, index, value),
StoreUnboxedScalarBase(storageType),
- requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier) {
- if (requiresBarrier_) {
+ requiresBarrier_(requiresBarrier) {
+ if (requiresBarrier_ == MemoryBarrierRequirement::Required) {
setGuard(); // Not removable or movable
}
MOZ_ASSERT(elements->type() == MIRType::Elements);
@@ -7305,7 +7323,7 @@ class MStoreUnboxedScalar : public MTernaryInstruction,
AliasSet getAliasSet() const override {
return AliasSet::Store(AliasSet::UnboxedElement);
}
- bool requiresMemoryBarrier() const { return requiresBarrier_; }
+ auto requiresMemoryBarrier() const { return requiresBarrier_; }
TruncateKind operandTruncateKind(size_t index) const override;
bool canConsumeFloat32(MUse* use) const override {
@@ -8997,6 +9015,55 @@ class MGuardToClass : public MUnaryInstruction,
}
};
+class MGuardToEitherClass : public MUnaryInstruction,
+ public SingleObjectPolicy::Data {
+ const JSClass* class1_;
+ const JSClass* class2_;
+
+ MGuardToEitherClass(MDefinition* object, const JSClass* clasp1,
+ const JSClass* clasp2)
+ : MUnaryInstruction(classOpcode, object),
+ class1_(clasp1),
+ class2_(clasp2) {
+ MOZ_ASSERT(object->type() == MIRType::Object);
+ MOZ_ASSERT(clasp1 != clasp2, "Use MGuardToClass instead");
+ MOZ_ASSERT(!clasp1->isJSFunction(), "Use MGuardToFunction instead");
+ MOZ_ASSERT(!clasp2->isJSFunction(), "Use MGuardToFunction instead");
+ setResultType(MIRType::Object);
+ setMovable();
+
+ // We will bail out if the class type is incorrect, so we need to ensure we
+ // don't eliminate this instruction
+ setGuard();
+ }
+
+ public:
+ INSTRUCTION_HEADER(GuardToEitherClass)
+ TRIVIAL_NEW_WRAPPERS
+ NAMED_OPERANDS((0, object))
+
+ const JSClass* getClass1() const { return class1_; }
+ const JSClass* getClass2() const { return class2_; }
+
+ MDefinition* foldsTo(TempAllocator& alloc) override;
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+ bool congruentTo(const MDefinition* ins) const override {
+ if (!ins->isGuardToEitherClass()) {
+ return false;
+ }
+ const auto* other = ins->toGuardToEitherClass();
+ if (getClass1() != other->getClass1() &&
+ getClass1() != other->getClass2()) {
+ return false;
+ }
+ if (getClass2() != other->getClass1() &&
+ getClass2() != other->getClass2()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+ }
+};
+
class MGuardToFunction : public MUnaryInstruction,
public SingleObjectPolicy::Data {
explicit MGuardToFunction(MDefinition* object)
@@ -9337,6 +9404,23 @@ class MObjectToIterator : public MUnaryInstruction,
void setWantsIndices(bool value) { wantsIndices_ = value; }
};
+class MPostIntPtrConversion : public MUnaryInstruction,
+ public NoTypePolicy::Data {
+ explicit MPostIntPtrConversion(MDefinition* input)
+ : MUnaryInstruction(classOpcode, input) {
+ // Passes through the input.
+ setResultType(input->type());
+
+ // Note: Must be non-movable so we can attach a resume point.
+ }
+
+ public:
+ INSTRUCTION_HEADER(PostIntPtrConversion)
+ TRIVIAL_NEW_WRAPPERS
+
+ AliasSet getAliasSet() const override { return AliasSet::None(); }
+};
+
// Flips the input's sign bit, independently of the rest of the number's
// payload. Note this is different from multiplying by minus-one, which has
// side-effects for e.g. NaNs.
@@ -10808,6 +10892,8 @@ class MWasmReinterpret : public MUnaryInstruction, public NoTypePolicy::Data {
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
+ // No need to check type() here, because congruentIfOperandsEqual will
+ // check it.
return congruentIfOperandsEqual(ins);
}
@@ -10867,7 +10953,8 @@ class MWasmTernarySimd128 : public MTernaryInstruction,
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
- return congruentIfOperandsEqual(ins);
+ return congruentIfOperandsEqual(ins) &&
+ simdOp() == ins->toWasmTernarySimd128()->simdOp();
}
#ifdef ENABLE_WASM_SIMD
MDefinition* foldsTo(TempAllocator& alloc) override;
@@ -10908,8 +10995,8 @@ class MWasmBinarySimd128 : public MBinaryInstruction,
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
- return ins->toWasmBinarySimd128()->simdOp() == simdOp_ &&
- congruentIfOperandsEqual(ins);
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmBinarySimd128()->simdOp() == simdOp_;
}
#ifdef ENABLE_WASM_SIMD
MDefinition* foldsTo(TempAllocator& alloc) override;
@@ -10945,8 +11032,8 @@ class MWasmBinarySimd128WithConstant : public MUnaryInstruction,
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
- return ins->toWasmBinarySimd128WithConstant()->simdOp() == simdOp_ &&
- congruentIfOperandsEqual(ins) &&
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmBinarySimd128WithConstant()->simdOp() == simdOp_ &&
rhs_.bitwiseEqual(ins->toWasmBinarySimd128WithConstant()->rhs());
}
@@ -10978,9 +11065,9 @@ class MWasmReplaceLaneSimd128 : public MBinaryInstruction,
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
- return ins->toWasmReplaceLaneSimd128()->simdOp() == simdOp_ &&
- ins->toWasmReplaceLaneSimd128()->laneIndex() == laneIndex_ &&
- congruentIfOperandsEqual(ins);
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmReplaceLaneSimd128()->simdOp() == simdOp_ &&
+ ins->toWasmReplaceLaneSimd128()->laneIndex() == laneIndex_;
}
uint32_t laneIndex() const { return laneIndex_; }
@@ -11006,8 +11093,8 @@ class MWasmScalarToSimd128 : public MUnaryInstruction,
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
- return ins->toWasmScalarToSimd128()->simdOp() == simdOp_ &&
- congruentIfOperandsEqual(ins);
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmScalarToSimd128()->simdOp() == simdOp_;
}
#ifdef ENABLE_WASM_SIMD
MDefinition* foldsTo(TempAllocator& alloc) override;
@@ -11036,9 +11123,9 @@ class MWasmReduceSimd128 : public MUnaryInstruction, public NoTypePolicy::Data {
AliasSet getAliasSet() const override { return AliasSet::None(); }
bool congruentTo(const MDefinition* ins) const override {
- return ins->toWasmReduceSimd128()->simdOp() == simdOp_ &&
- ins->toWasmReduceSimd128()->imm() == imm_ &&
- congruentIfOperandsEqual(ins);
+ return congruentIfOperandsEqual(ins) &&
+ ins->toWasmReduceSimd128()->simdOp() == simdOp_ &&
+ ins->toWasmReduceSimd128()->imm() == imm_;
}
#ifdef ENABLE_WASM_SIMD
MDefinition* foldsTo(TempAllocator& alloc) override;
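
The congruentTo reorderings at the end of this file rely on the contract documented on congruentIfOperandsEqual above: when it returns true, the other instruction is already known to have the same opcode, so the field comparison can follow the call without an explicit is-check. A sketch with a hypothetical MFoo (MFoo, toMFoo and flavor_ are invented for illustration):

bool MFoo::congruentTo(const MDefinition* ins) const {
  // congruentIfOperandsEqual(ins) returning true guarantees ins is an MFoo,
  // so the cast below needs no preceding ins->isMFoo() check.
  return congruentIfOperandsEqual(ins) &&
         ins->toMFoo()->flavor() == flavor_;
}
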
diff --git a/js/src/jit/MIROps.yaml b/js/src/jit/MIROps.yaml
index 565c3c9c2b..7f0df52742 100644
--- a/js/src/jit/MIROps.yaml
+++ b/js/src/jit/MIROps.yaml
@@ -1331,7 +1331,6 @@
folds_to: custom
congruent_to: if_operands_equal
alias_set: none
- movable: true
can_recover: true
- name: ModuleMetadata
@@ -1492,6 +1491,54 @@
alias_set: custom
clone: true
+# Implements the TypedArrayByteOffset intrinsic for resizable typed arrays,
+# which calls TypedArrayObject::byteOffsetMaybeOutOfBounds().
+- name: ResizableTypedArrayByteOffsetMaybeOutOfBounds
+ operands:
+ object: Object
+ result_type: IntPtr
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+ compute_range: custom
+
+# Read the length of a resizable typed array.
+- name: ResizableTypedArrayLength
+ operands:
+ object: Object
+ arguments:
+ requiresMemoryBarrier: MemoryBarrierRequirement
+ result_type: IntPtr
+ # Not removable or movable when a barrier is needed.
+ guard: true
+ movable: false
+ congruent_to: custom
+ alias_set: custom
+ compute_range: custom
+
+# Read the byteLength of a resizable dataview.
+- name: ResizableDataViewByteLength
+ operands:
+ object: Object
+ arguments:
+ requiresMemoryBarrier: MemoryBarrierRequirement
+ result_type: IntPtr
+ # Not removable or movable when a barrier is needed.
+ guard: true
+ movable: false
+ congruent_to: custom
+ alias_set: custom
+ compute_range: custom
+
+# Read the byte length of a growable shared array buffer as IntPtr.
+- name: GrowableSharedArrayBufferByteLength
+ operands:
+ object: Object
+ result_type: IntPtr
+ guard: true
+ movable: false
+ alias_set: custom
+
# Return the element size of a typed array.
- name: TypedArrayElementSize
operands:
@@ -1513,6 +1560,26 @@
congruent_to: if_operands_equal
alias_set: custom
+# Guard that a resizable typed array is in-bounds.
+- name: GuardResizableArrayBufferViewInBounds
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
+# Guard that a resizable typed array is in-bounds or detached.
+- name: GuardResizableArrayBufferViewInBoundsOrDetached
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: custom
+
- name: GuardNumberToIntPtrIndex
gen_boilerplate: false
@@ -1951,6 +2018,15 @@
congruent_to: if_operands_equal
alias_set: none
+- name: GuardIsResizableTypedArray
+ operands:
+ object: Object
+ result_type: Object
+ guard: true
+ movable: true
+ congruent_to: if_operands_equal
+ alias_set: none
+
- name: GuardHasProxyHandler
operands:
object: Object
@@ -2510,6 +2586,9 @@
- name: GuardToClass
gen_boilerplate: false
+- name: GuardToEitherClass
+ gen_boilerplate: false
+
- name: GuardToFunction
gen_boilerplate: false
@@ -3106,6 +3185,9 @@
congruent_to: if_operands_equal
alias_set: custom
+- name: PostIntPtrConversion
+ gen_boilerplate: false
+
- name: WasmNeg
gen_boilerplate: false
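
The requiresMemoryBarrier arguments declared above take the MemoryBarrierRequirement enum introduced in MIR.h instead of a bare bool. A caller-side sketch of the intent (illustrative only; forAtomics, alloc, elements and index are assumed locals, and MLoadUnboxedScalar::New comes from its TRIVIAL_NEW_WRAPPERS):

// Atomics-backed accesses request a barrier, which makes the node a guard
// with a store-like alias set; ordinary accesses stay movable.
auto barrier = forAtomics ? MemoryBarrierRequirement::Required
                          : MemoryBarrierRequirement::NotRequired;
auto* load = MLoadUnboxedScalar::New(alloc, elements, index,
                                     Scalar::Int32, barrier);
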
diff --git a/js/src/jit/MacroAssembler-inl.h b/js/src/jit/MacroAssembler-inl.h
index beba576a22..e1df31eff9 100644
--- a/js/src/jit/MacroAssembler-inl.h
+++ b/js/src/jit/MacroAssembler-inl.h
@@ -606,9 +606,7 @@ void MacroAssembler::branchTestObjClass(Condition cond, Register obj,
MOZ_ASSERT(obj != scratch);
MOZ_ASSERT(scratch != spectreRegToZero);
- loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
- loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
- loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
+ loadObjClassUnsafe(obj, scratch);
branchPtr(cond, clasp, scratch, label);
if (JitOptions.spectreObjectMitigations) {
@@ -620,9 +618,7 @@ void MacroAssembler::branchTestObjClassNoSpectreMitigations(
Condition cond, Register obj, const Address& clasp, Register scratch,
Label* label) {
MOZ_ASSERT(obj != scratch);
- loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
- loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
- loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
+ loadObjClassUnsafe(obj, scratch);
branchPtr(cond, clasp, scratch, label);
}
@@ -633,9 +629,7 @@ void MacroAssembler::branchTestObjClass(Condition cond, Register obj,
MOZ_ASSERT(obj != scratch);
MOZ_ASSERT(scratch != spectreRegToZero);
- loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
- loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
- loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
+ loadObjClassUnsafe(obj, scratch);
branchPtr(cond, clasp, scratch, label);
if (JitOptions.spectreObjectMitigations) {
@@ -643,20 +637,51 @@ void MacroAssembler::branchTestObjClass(Condition cond, Register obj,
}
}
-void MacroAssembler::branchTestClassIsFunction(Condition cond, Register clasp,
- Label* label) {
+void MacroAssembler::branchTestClass(
+ Condition cond, Register clasp,
+ std::pair<const JSClass*, const JSClass*> classes, Label* label) {
MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
if (cond == Assembler::Equal) {
- branchPtr(Assembler::Equal, clasp, ImmPtr(&FunctionClass), label);
- branchPtr(Assembler::Equal, clasp, ImmPtr(&ExtendedFunctionClass), label);
+ branchPtr(Assembler::Equal, clasp, ImmPtr(classes.first), label);
+ branchPtr(Assembler::Equal, clasp, ImmPtr(classes.second), label);
return;
}
- Label isFunction;
- branchPtr(Assembler::Equal, clasp, ImmPtr(&FunctionClass), &isFunction);
- branchPtr(Assembler::NotEqual, clasp, ImmPtr(&ExtendedFunctionClass), label);
- bind(&isFunction);
+ Label isClass;
+ branchPtr(Assembler::Equal, clasp, ImmPtr(classes.first), &isClass);
+ branchPtr(Assembler::NotEqual, clasp, ImmPtr(classes.second), label);
+ bind(&isClass);
+}
+
+void MacroAssembler::branchTestObjClass(
+ Condition cond, Register obj,
+ std::pair<const JSClass*, const JSClass*> classes, Register scratch,
+ Register spectreRegToZero, Label* label) {
+ MOZ_ASSERT(scratch != spectreRegToZero);
+
+ branchTestObjClassNoSpectreMitigations(cond, obj, classes, scratch, label);
+
+ if (JitOptions.spectreObjectMitigations) {
+ spectreZeroRegister(cond, scratch, spectreRegToZero);
+ }
+}
+
+void MacroAssembler::branchTestObjClassNoSpectreMitigations(
+ Condition cond, Register obj,
+ std::pair<const JSClass*, const JSClass*> classes, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(obj != scratch);
+
+ loadObjClassUnsafe(obj, scratch);
+ branchTestClass(cond, scratch, classes, label);
+}
+
+void MacroAssembler::branchTestClassIsFunction(Condition cond, Register clasp,
+ Label* label) {
+ return branchTestClass(cond, clasp, {&FunctionClass, &ExtendedFunctionClass},
+ label);
}
void MacroAssembler::branchTestObjIsFunction(Condition cond, Register obj,
@@ -677,9 +702,7 @@ void MacroAssembler::branchTestObjIsFunctionNoSpectreMitigations(
MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
MOZ_ASSERT(obj != scratch);
- loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
- loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
- loadPtr(Address(scratch, BaseShape::offsetOfClasp()), scratch);
+ loadObjClassUnsafe(obj, scratch);
branchTestClassIsFunction(cond, scratch, label);
}
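
branchTestClass above generalizes the old two-branch function-class test to an arbitrary pair of classes. Written out as a plain predicate (a readability sketch; the NotEqual case is what the isClass fall-through label encodes in assembly):

static bool ClassPairMatches(const JSClass* clasp, const JSClass* first,
                             const JSClass* second) {
  return clasp == first || clasp == second;
}
// branchTestClass(Equal, clasp, {first, second}, label) jumps iff the
// predicate holds; the NotEqual form jumps iff it does not.
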
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
index 54da676014..3b094d49dc 100644
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -2661,6 +2661,7 @@ void MacroAssembler::loadMegamorphicSetPropCache(Register dest) {
void MacroAssembler::lookupStringInAtomCacheLastLookups(Register str,
Register scratch,
+ Register output,
Label* fail) {
Label found;
@@ -2680,7 +2681,7 @@ void MacroAssembler::lookupStringInAtomCacheLastLookups(Register str,
// and jump back up to our usual atom handling code
bind(&found);
size_t atomOffset = StringToAtomCache::LastLookup::offsetOfAtom();
- loadPtr(Address(scratch, atomOffset), str);
+ loadPtr(Address(scratch, atomOffset), output);
}
void MacroAssembler::loadAtomHash(Register id, Register outHash, Label* done) {
@@ -2740,7 +2741,7 @@ void MacroAssembler::loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
loadAtomHash(outId, outHash, &done);
bind(&nonAtom);
- lookupStringInAtomCacheLastLookups(outId, outHash, cacheMiss);
+ lookupStringInAtomCacheLastLookups(outId, outHash, outId, cacheMiss);
jump(&atom);
bind(&done);
@@ -3255,6 +3256,95 @@ void MacroAssembler::loadArrayBufferViewLengthIntPtr(Register obj,
loadPrivate(slotAddr, output);
}
+void MacroAssembler::loadGrowableSharedArrayBufferByteLengthIntPtr(
+ Synchronization sync, Register obj, Register output) {
+ // Load the SharedArrayRawBuffer.
+ loadPrivate(Address(obj, SharedArrayBufferObject::rawBufferOffset()), output);
+
+ memoryBarrierBefore(sync);
+
+ // Load the byteLength of the SharedArrayRawBuffer into |output|.
+ static_assert(sizeof(mozilla::Atomic<size_t>) == sizeof(size_t));
+ loadPtr(Address(output, SharedArrayRawBuffer::offsetOfByteLength()), output);
+
+ memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::loadResizableArrayBufferViewLengthIntPtr(
+ ResizableArrayBufferView view, Synchronization sync, Register obj,
+ Register output, Register scratch) {
+ // Inline implementation of ArrayBufferViewObject::length(), when the input is
+ // guaranteed to be a resizable arraybuffer view object.
+
+ loadArrayBufferViewLengthIntPtr(obj, output);
+
+ Label done;
+ branchPtr(Assembler::NotEqual, output, ImmWord(0), &done);
+
+ // Load obj->elements in |scratch|.
+ loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+ // If backed by non-shared memory, detached and out-of-bounds both return
+ // zero, so we're done here.
+ branchTest32(Assembler::Zero,
+ Address(scratch, ObjectElements::offsetOfFlags()),
+ Imm32(ObjectElements::SHARED_MEMORY), &done);
+
+ // Load the auto-length slot.
+ unboxBoolean(Address(obj, ArrayBufferViewObject::autoLengthOffset()),
+ scratch);
+
+ // If non-auto length, there's nothing to do.
+ branchTest32(Assembler::Zero, scratch, scratch, &done);
+
+ // Load bufferByteLength into |output|.
+ {
+ // Resizable TypedArrays are guaranteed to have an ArrayBuffer.
+ unboxObject(Address(obj, ArrayBufferViewObject::bufferOffset()), output);
+
+ // Load the byte length from the raw-buffer of growable SharedArrayBuffers.
+ loadGrowableSharedArrayBufferByteLengthIntPtr(sync, output, output);
+ }
+
+ // Load the byteOffset into |scratch|.
+ loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
+
+ // Compute the accessible byte length |bufferByteLength - byteOffset|.
+ subPtr(scratch, output);
+
+ if (view == ResizableArrayBufferView::TypedArray) {
+ // Compute the array length from the byte length.
+ resizableTypedArrayElementShiftBy(obj, output, scratch);
+ }
+
+ bind(&done);
+}
+
+void MacroAssembler::loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(
+ Register obj, Register output, Register scratch) {
+ // Inline implementation of TypedArrayObject::byteOffsetMaybeOutOfBounds(),
+ // when the input is guaranteed to be a resizable typed array object.
+
+ loadArrayBufferViewByteOffsetIntPtr(obj, output);
+
+  // TypedArray is neither detached nor out-of-bounds when byteOffset is
+  // non-zero.
+ Label done;
+ branchPtr(Assembler::NotEqual, output, ImmWord(0), &done);
+
+ // We're done when the initial byteOffset is zero.
+ loadPrivate(Address(obj, ArrayBufferViewObject::initialByteOffsetOffset()),
+ output);
+ branchPtr(Assembler::Equal, output, ImmWord(0), &done);
+
+ // If the buffer is attached, return initialByteOffset.
+ branchIfHasAttachedArrayBuffer(obj, scratch, &done);
+
+ // Otherwise return zero to match the result for fixed-length TypedArrays.
+ movePtr(ImmWord(0), output);
+
+ bind(&done);
+}
+
void MacroAssembler::loadDOMExpandoValueGuardGeneration(
Register obj, ValueOperand output,
JS::ExpandoAndGeneration* expandoAndGeneration, uint64_t generation,
@@ -7433,11 +7523,11 @@ void MacroAssembler::debugAssertCanonicalInt32(Register r) {
}
#endif
-void MacroAssembler::memoryBarrierBefore(const Synchronization& sync) {
+void MacroAssembler::memoryBarrierBefore(Synchronization sync) {
memoryBarrier(sync.barrierBefore);
}
-void MacroAssembler::memoryBarrierAfter(const Synchronization& sync) {
+void MacroAssembler::memoryBarrierAfter(Synchronization sync) {
memoryBarrier(sync.barrierAfter);
}
@@ -7834,6 +7924,74 @@ void MacroAssembler::typedArrayElementSize(Register obj, Register output) {
bind(&done);
}
+void MacroAssembler::resizableTypedArrayElementShiftBy(Register obj,
+ Register output,
+ Register scratch) {
+ loadObjClassUnsafe(obj, scratch);
+
+#ifdef DEBUG
+ Label invalidClass, validClass;
+ branchPtr(Assembler::Below, scratch,
+ ImmPtr(std::begin(TypedArrayObject::resizableClasses)),
+ &invalidClass);
+ branchPtr(Assembler::Below, scratch,
+ ImmPtr(std::end(TypedArrayObject::resizableClasses)), &validClass);
+ bind(&invalidClass);
+ assumeUnreachable("value isn't a valid ResizableLengthTypedArray class");
+ bind(&validClass);
+#endif
+
+ auto classForType = [](Scalar::Type type) {
+ MOZ_ASSERT(type < Scalar::MaxTypedArrayViewType);
+ return &TypedArrayObject::resizableClasses[type];
+ };
+
+ Label zero, one, two, three;
+
+ static_assert(ValidateSizeRange(Scalar::Int8, Scalar::Int16),
+ "element shift is zero in [Int8, Int16)");
+ branchPtr(Assembler::Below, scratch, ImmPtr(classForType(Scalar::Int16)),
+ &zero);
+
+ static_assert(ValidateSizeRange(Scalar::Int16, Scalar::Int32),
+ "element shift is one in [Int16, Int32)");
+ branchPtr(Assembler::Below, scratch, ImmPtr(classForType(Scalar::Int32)),
+ &one);
+
+ static_assert(ValidateSizeRange(Scalar::Int32, Scalar::Float64),
+ "element shift is two in [Int32, Float64)");
+ branchPtr(Assembler::Below, scratch, ImmPtr(classForType(Scalar::Float64)),
+ &two);
+
+ static_assert(ValidateSizeRange(Scalar::Float64, Scalar::Uint8Clamped),
+ "element shift is three in [Float64, Uint8Clamped)");
+ branchPtr(Assembler::Below, scratch,
+ ImmPtr(classForType(Scalar::Uint8Clamped)), &three);
+
+ static_assert(ValidateSizeRange(Scalar::Uint8Clamped, Scalar::BigInt64),
+ "element shift is zero in [Uint8Clamped, BigInt64)");
+ branchPtr(Assembler::Below, scratch, ImmPtr(classForType(Scalar::BigInt64)),
+ &zero);
+
+ static_assert(
+ ValidateSizeRange(Scalar::BigInt64, Scalar::MaxTypedArrayViewType),
+ "element shift is three in [BigInt64, MaxTypedArrayViewType)");
+ // Fall through for BigInt64 and BigUint64
+
+ bind(&three);
+ rshiftPtr(Imm32(3), output);
+ jump(&zero);
+
+ bind(&two);
+ rshiftPtr(Imm32(2), output);
+ jump(&zero);
+
+ bind(&one);
+ rshiftPtr(Imm32(1), output);
+
+ bind(&zero);
+}
+
void MacroAssembler::branchIfClassIsNotTypedArray(Register clasp,
Label* notTypedArray) {
// Inline implementation of IsTypedArrayClass().
@@ -7867,28 +8025,103 @@ void MacroAssembler::branchIfClassIsNotFixedLengthTypedArray(
notTypedArray);
}
-void MacroAssembler::branchIfHasDetachedArrayBuffer(Register obj, Register temp,
+void MacroAssembler::branchIfClassIsNotResizableTypedArray(
+ Register clasp, Label* notTypedArray) {
+ // Inline implementation of IsResizableTypedArrayClass().
+
+ const auto* firstTypedArrayClass =
+ std::begin(TypedArrayObject::resizableClasses);
+ const auto* lastTypedArrayClass =
+ std::prev(std::end(TypedArrayObject::resizableClasses));
+
+ branchPtr(Assembler::Below, clasp, ImmPtr(firstTypedArrayClass),
+ notTypedArray);
+ branchPtr(Assembler::Above, clasp, ImmPtr(lastTypedArrayClass),
+ notTypedArray);
+}
+
+void MacroAssembler::branchIfHasDetachedArrayBuffer(BranchIfDetached branchIf,
+ Register obj, Register temp,
Label* label) {
// Inline implementation of ArrayBufferViewObject::hasDetachedBuffer().
+ // TODO: The data-slot of detached views is set to undefined, which would be
+ // a faster way to detect detached buffers.
+
+ // auto cond = branchIf == BranchIfDetached::Yes ? Assembler::Equal
+ // : Assembler::NotEqual;
+ // branchTestUndefined(cond, Address(obj,
+ // ArrayBufferViewObject::dataOffset()), label);
+
+ Label done;
+ Label* ifNotDetached = branchIf == BranchIfDetached::Yes ? &done : label;
+ Condition detachedCond =
+ branchIf == BranchIfDetached::Yes ? Assembler::NonZero : Assembler::Zero;
+
// Load obj->elements in temp.
loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);
// Shared buffers can't be detached.
- Label done;
branchTest32(Assembler::NonZero,
Address(temp, ObjectElements::offsetOfFlags()),
- Imm32(ObjectElements::SHARED_MEMORY), &done);
+ Imm32(ObjectElements::SHARED_MEMORY), ifNotDetached);
// An ArrayBufferView with a null/true buffer has never had its buffer
// exposed, so nothing can possibly detach it.
fallibleUnboxObject(Address(obj, ArrayBufferViewObject::bufferOffset()), temp,
- &done);
+ ifNotDetached);
- // Load the ArrayBuffer flags and branch if the detached flag is set.
+ // Load the ArrayBuffer flags and branch if the detached flag is (not) set.
unboxInt32(Address(temp, ArrayBufferObject::offsetOfFlagsSlot()), temp);
- branchTest32(Assembler::NonZero, temp, Imm32(ArrayBufferObject::DETACHED),
- label);
+ branchTest32(detachedCond, temp, Imm32(ArrayBufferObject::DETACHED), label);
+
+ if (branchIf == BranchIfDetached::Yes) {
+ bind(&done);
+ }
+}
+
+void MacroAssembler::branchIfResizableArrayBufferViewOutOfBounds(Register obj,
+ Register temp,
+ Label* label) {
+ // Implementation of ArrayBufferViewObject::isOutOfBounds().
+
+ Label done;
+
+ loadArrayBufferViewLengthIntPtr(obj, temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), &done);
+
+ loadArrayBufferViewByteOffsetIntPtr(obj, temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), &done);
+
+ loadPrivate(Address(obj, ArrayBufferViewObject::initialLengthOffset()), temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), label);
+
+ loadPrivate(Address(obj, ArrayBufferViewObject::initialByteOffsetOffset()),
+ temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchIfResizableArrayBufferViewInBounds(Register obj,
+ Register temp,
+ Label* label) {
+ // Implementation of ArrayBufferViewObject::isOutOfBounds().
+
+ Label done;
+
+ loadArrayBufferViewLengthIntPtr(obj, temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), label);
+
+ loadArrayBufferViewByteOffsetIntPtr(obj, temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), label);
+
+ loadPrivate(Address(obj, ArrayBufferViewObject::initialLengthOffset()), temp);
+ branchPtr(Assembler::NotEqual, temp, ImmWord(0), &done);
+
+ loadPrivate(Address(obj, ArrayBufferViewObject::initialByteOffsetOffset()),
+ temp);
+ branchPtr(Assembler::Equal, temp, ImmWord(0), label);
bind(&done);
}
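
The two helpers above express ArrayBufferViewObject::isOutOfBounds() as branches over the view's slots. The equivalent predicate, as a sketch (slot names follow the loads in the code above):

#include <cstddef>

// A resizable view is out of bounds when its current length and byteOffset
// have been cleared to zero even though it was created with a non-zero
// initial length or initial byteOffset. The InBounds helper branches on the
// negation of this predicate.
static bool ResizableViewIsOutOfBounds(size_t length, size_t byteOffset,
                                       size_t initialLength,
                                       size_t initialByteOffset) {
  if (length != 0 || byteOffset != 0) {
    return false;
  }
  return initialLength != 0 || initialByteOffset != 0;
}
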
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
index 43974a6ccc..361de3ac5f 100644
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -13,6 +13,8 @@
#include "mozilla/Maybe.h"
#include "mozilla/Variant.h"
+#include <utility>
+
#if defined(JS_CODEGEN_X86)
# include "jit/x86/MacroAssembler-x86.h"
#elif defined(JS_CODEGEN_X64)
@@ -1784,6 +1786,21 @@ class MacroAssembler : public MacroAssemblerSpecific {
Register scratch, Register spectreRegToZero,
Label* label);
+ private:
+ inline void branchTestClass(Condition cond, Register clasp,
+ std::pair<const JSClass*, const JSClass*> classes,
+ Label* label);
+
+ public:
+ inline void branchTestObjClass(
+ Condition cond, Register obj,
+ std::pair<const JSClass*, const JSClass*> classes, Register scratch,
+ Register spectreRegToZero, Label* label);
+ inline void branchTestObjClassNoSpectreMitigations(
+ Condition cond, Register obj,
+ std::pair<const JSClass*, const JSClass*> classes, Register scratch,
+ Label* label);
+
inline void branchTestObjShape(Condition cond, Register obj,
const Shape* shape, Register scratch,
Register spectreRegToZero, Label* label);
@@ -4191,23 +4208,23 @@ class MacroAssembler : public MacroAssemblerSpecific {
// MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
// and 16-bit wide operations.
- void compareExchange(Scalar::Type type, const Synchronization& sync,
+ void compareExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register expected,
Register replacement, Register output)
DEFINED_ON(arm, arm64, x86_shared);
- void compareExchange(Scalar::Type type, const Synchronization& sync,
+ void compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register expected,
Register replacement, Register output)
DEFINED_ON(arm, arm64, x86_shared);
- void compareExchange(Scalar::Type type, const Synchronization& sync,
+ void compareExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register expected,
Register replacement, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void compareExchange(Scalar::Type type, const Synchronization& sync,
+ void compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register expected,
Register replacement, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
@@ -4218,12 +4235,12 @@ class MacroAssembler : public MacroAssemblerSpecific {
// ARM: Registers must be distinct; `replacement` and `output` must be
// (even,odd) pairs.
- void compareExchange64(const Synchronization& sync, const Address& mem,
+ void compareExchange64(Synchronization sync, const Address& mem,
Register64 expected, Register64 replacement,
Register64 output)
DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
- void compareExchange64(const Synchronization& sync, const BaseIndex& mem,
+ void compareExchange64(Synchronization sync, const BaseIndex& mem,
Register64 expected, Register64 replacement,
Register64 output)
DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
@@ -4232,20 +4249,20 @@ class MacroAssembler : public MacroAssemblerSpecific {
// MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
// and 16-bit wide operations.
- void atomicExchange(Scalar::Type type, const Synchronization& sync,
+ void atomicExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register value, Register output)
DEFINED_ON(arm, arm64, x86_shared);
- void atomicExchange(Scalar::Type type, const Synchronization& sync,
+ void atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register value, Register output)
DEFINED_ON(arm, arm64, x86_shared);
- void atomicExchange(Scalar::Type type, const Synchronization& sync,
+ void atomicExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicExchange(Scalar::Type type, const Synchronization& sync,
+ void atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
DEFINED_ON(mips_shared, loong64, riscv64);
@@ -4254,11 +4271,11 @@ class MacroAssembler : public MacroAssemblerSpecific {
// ARM: `value` and `output` must be distinct and (even,odd) pairs.
// ARM64: `value` and `output` must be distinct.
- void atomicExchange64(const Synchronization& sync, const Address& mem,
+ void atomicExchange64(Synchronization sync, const Address& mem,
Register64 value, Register64 output)
DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
- void atomicExchange64(const Synchronization& sync, const BaseIndex& mem,
+ void atomicExchange64(Synchronization sync, const BaseIndex& mem,
Register64 value, Register64 output)
DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
@@ -4275,33 +4292,31 @@ class MacroAssembler : public MacroAssemblerSpecific {
// MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
// and 16-bit wide operations; `value` and `output` must differ.
- void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
- AtomicOp op, Register value, const Address& mem,
- Register temp, Register output)
- DEFINED_ON(arm, arm64, x86_shared);
+ void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
+ Register value, const Address& mem, Register temp,
+ Register output) DEFINED_ON(arm, arm64, x86_shared);
- void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
- AtomicOp op, Imm32 value, const Address& mem,
- Register temp, Register output) DEFINED_ON(x86_shared);
+ void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
+ Imm32 value, const Address& mem, Register temp,
+ Register output) DEFINED_ON(x86_shared);
- void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
- AtomicOp op, Register value, const BaseIndex& mem,
- Register temp, Register output)
- DEFINED_ON(arm, arm64, x86_shared);
+ void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
+ Register value, const BaseIndex& mem, Register temp,
+ Register output) DEFINED_ON(arm, arm64, x86_shared);
- void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
- AtomicOp op, Imm32 value, const BaseIndex& mem,
- Register temp, Register output) DEFINED_ON(x86_shared);
+ void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
+ Imm32 value, const BaseIndex& mem, Register temp,
+ Register output) DEFINED_ON(x86_shared);
- void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
- AtomicOp op, Register value, const Address& mem,
- Register valueTemp, Register offsetTemp, Register maskTemp,
- Register output) DEFINED_ON(mips_shared, loong64, riscv64);
+ void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
+ Register value, const Address& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
- AtomicOp op, Register value, const BaseIndex& mem,
- Register valueTemp, Register offsetTemp, Register maskTemp,
- Register output) DEFINED_ON(mips_shared, loong64, riscv64);
+ void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
+ Register value, const BaseIndex& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ DEFINED_ON(mips_shared, loong64, riscv64);
// x86:
// `temp` must be ecx:ebx; `output` must be edx:eax.
@@ -4313,23 +4328,21 @@ class MacroAssembler : public MacroAssemblerSpecific {
// ARM64:
// Registers `value`, `temp`, and `output` must all differ.
- void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
- Register64 value, const Address& mem, Register64 temp,
- Register64 output)
+ void atomicFetchOp64(Synchronization sync, AtomicOp op, Register64 value,
+ const Address& mem, Register64 temp, Register64 output)
DEFINED_ON(arm, arm64, x64, mips64, loong64, riscv64);
- void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
- const Address& value, const Address& mem,
- Register64 temp, Register64 output) DEFINED_ON(x86);
+ void atomicFetchOp64(Synchronization sync, AtomicOp op, const Address& value,
+ const Address& mem, Register64 temp, Register64 output)
+ DEFINED_ON(x86);
- void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
- Register64 value, const BaseIndex& mem, Register64 temp,
- Register64 output)
+ void atomicFetchOp64(Synchronization sync, AtomicOp op, Register64 value,
+ const BaseIndex& mem, Register64 temp, Register64 output)
DEFINED_ON(arm, arm64, x64, mips64, loong64, riscv64);
- void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
- const Address& value, const BaseIndex& mem,
- Register64 temp, Register64 output) DEFINED_ON(x86);
+ void atomicFetchOp64(Synchronization sync, AtomicOp op, const Address& value,
+ const BaseIndex& mem, Register64 temp, Register64 output)
+ DEFINED_ON(x86);
// x64:
// `value` can be any register.
@@ -4338,18 +4351,18 @@ class MacroAssembler : public MacroAssemblerSpecific {
// ARM64:
// Registers `value` and `temp` must differ.
- void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
- Register64 value, const Address& mem) DEFINED_ON(x64);
+ void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
+ const Address& mem) DEFINED_ON(x64);
- void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
- Register64 value, const Address& mem, Register64 temp)
+ void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
+ const Address& mem, Register64 temp)
DEFINED_ON(arm, arm64, mips64, loong64, riscv64);
- void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
- Register64 value, const BaseIndex& mem) DEFINED_ON(x64);
+ void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
+ const BaseIndex& mem) DEFINED_ON(x64);
- void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
- Register64 value, const BaseIndex& mem, Register64 temp)
+ void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
+ const BaseIndex& mem, Register64 temp)
DEFINED_ON(arm, arm64, mips64, loong64, riscv64);
// 64-bit atomic load. On 64-bit systems, use regular load with
@@ -4358,16 +4371,16 @@ class MacroAssembler : public MacroAssemblerSpecific {
// x86: `temp` must be ecx:ebx; `output` must be edx:eax.
// ARM: `output` must be (even,odd) pair.
- void atomicLoad64(const Synchronization& sync, const Address& mem,
- Register64 temp, Register64 output) DEFINED_ON(x86);
+ void atomicLoad64(Synchronization sync, const Address& mem, Register64 temp,
+ Register64 output) DEFINED_ON(x86);
- void atomicLoad64(const Synchronization& sync, const BaseIndex& mem,
- Register64 temp, Register64 output) DEFINED_ON(x86);
+ void atomicLoad64(Synchronization sync, const BaseIndex& mem, Register64 temp,
+ Register64 output) DEFINED_ON(x86);
- void atomicLoad64(const Synchronization& sync, const Address& mem,
- Register64 output) DEFINED_ON(arm);
+ void atomicLoad64(Synchronization sync, const Address& mem, Register64 output)
+ DEFINED_ON(arm);
- void atomicLoad64(const Synchronization& sync, const BaseIndex& mem,
+ void atomicLoad64(Synchronization sync, const BaseIndex& mem,
Register64 output) DEFINED_ON(arm);
// 64-bit atomic store. On 64-bit systems, use regular store with
@@ -4376,10 +4389,10 @@ class MacroAssembler : public MacroAssemblerSpecific {
// x86: `value` must be ecx:ebx; `temp` must be edx:eax.
// ARM: `value` and `temp` must be (even,odd) pairs.
- void atomicStore64(const Synchronization& sync, const Address& mem,
- Register64 value, Register64 temp) DEFINED_ON(x86, arm);
+ void atomicStore64(Synchronization sync, const Address& mem, Register64 value,
+ Register64 temp) DEFINED_ON(x86, arm);
- void atomicStore64(const Synchronization& sync, const BaseIndex& mem,
+ void atomicStore64(Synchronization sync, const BaseIndex& mem,
Register64 value, Register64 temp) DEFINED_ON(x86, arm);
// ========================================================================
@@ -4594,105 +4607,105 @@ class MacroAssembler : public MacroAssemblerSpecific {
// For additional register constraints, see the primitive 32-bit operations
// and/or wasm operations above.
- void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
const Address& mem, Register expected,
Register replacement, Register temp,
AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
- void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
const BaseIndex& mem, Register expected,
Register replacement, Register temp,
AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
- void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
const Address& mem, Register expected,
Register replacement, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
const BaseIndex& mem, Register expected,
Register replacement, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
const Address& mem, Register value, Register temp,
AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
- void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
const BaseIndex& mem, Register value, Register temp,
AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
- void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
const Address& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp, AnyRegister output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const Address& mem,
Register temp1, Register temp2, AnyRegister output)
DEFINED_ON(arm, arm64, x86_shared);
- void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const BaseIndex& mem,
Register temp1, Register temp2, AnyRegister output)
DEFINED_ON(arm, arm64, x86_shared);
- void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Imm32 value, const Address& mem,
Register temp1, Register temp2, AnyRegister output)
DEFINED_ON(x86_shared);
- void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Imm32 value, const BaseIndex& mem,
Register temp1, Register temp2, AnyRegister output)
DEFINED_ON(x86_shared);
- void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp, AnyRegister output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp, AnyRegister output)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const Address& mem,
Register temp) DEFINED_ON(arm, arm64, x86_shared);
- void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const BaseIndex& mem,
Register temp) DEFINED_ON(arm, arm64, x86_shared);
- void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Imm32 value, const Address& mem,
Register temp) DEFINED_ON(x86_shared);
- void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Imm32 value, const BaseIndex& mem,
Register temp) DEFINED_ON(x86_shared);
- void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp)
DEFINED_ON(mips_shared, loong64, riscv64);
- void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
+ void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
AtomicOp op, Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp)
@@ -5245,8 +5258,8 @@ class MacroAssembler : public MacroAssemblerSpecific {
void storeToTypedBigIntArray(Scalar::Type arrayType, Register64 value,
const Address& dest);
- void memoryBarrierBefore(const Synchronization& sync);
- void memoryBarrierAfter(const Synchronization& sync);
+ void memoryBarrierBefore(Synchronization sync);
+ void memoryBarrierAfter(Synchronization sync);
void debugAssertIsObject(const ValueOperand& val);
void debugAssertObjHasFixedSlots(Register obj, Register scratch);
@@ -5284,12 +5297,41 @@ class MacroAssembler : public MacroAssemblerSpecific {
Label* label);
void typedArrayElementSize(Register obj, Register output);
+
+ private:
+ // Shift |output| by the element shift of the ResizableTypedArray in |obj|.
+ void resizableTypedArrayElementShiftBy(Register obj, Register output,
+ Register scratch);
+
+ public:
void branchIfClassIsNotTypedArray(Register clasp, Label* notTypedArray);
void branchIfClassIsNotFixedLengthTypedArray(Register clasp,
Label* notTypedArray);
+ void branchIfClassIsNotResizableTypedArray(Register clasp,
+ Label* notTypedArray);
+
+ private:
+ enum class BranchIfDetached { No, Yes };
+
+ void branchIfHasDetachedArrayBuffer(BranchIfDetached branchIf, Register obj,
+ Register temp, Label* label);
+ public:
void branchIfHasDetachedArrayBuffer(Register obj, Register temp,
- Label* label);
+ Label* label) {
+ branchIfHasDetachedArrayBuffer(BranchIfDetached::Yes, obj, temp, label);
+ }
+
+ void branchIfHasAttachedArrayBuffer(Register obj, Register temp,
+ Label* label) {
+ branchIfHasDetachedArrayBuffer(BranchIfDetached::No, obj, temp, label);
+ }
+
+ void branchIfResizableArrayBufferViewOutOfBounds(Register obj, Register temp,
+ Label* label);
+
+ void branchIfResizableArrayBufferViewInBounds(Register obj, Register temp,
+ Label* label);
void branchIfNativeIteratorNotReusable(Register ni, Label* notReusable);
void branchNativeIteratorIndices(Condition cond, Register ni, Register temp,
@@ -5560,7 +5602,7 @@ class MacroAssembler : public MacroAssemblerSpecific {
void loadMegamorphicCache(Register dest);
void lookupStringInAtomCacheLastLookups(Register str, Register scratch,
- Label* fail);
+ Register output, Label* fail);
void loadMegamorphicSetPropCache(Register dest);
void loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
@@ -5627,6 +5669,35 @@ class MacroAssembler : public MacroAssemblerSpecific {
void loadArrayBufferViewByteOffsetIntPtr(Register obj, Register output);
void loadArrayBufferViewLengthIntPtr(Register obj, Register output);
+ void loadGrowableSharedArrayBufferByteLengthIntPtr(Synchronization sync,
+ Register obj,
+ Register output);
+
+ private:
+ enum class ResizableArrayBufferView { TypedArray, DataView };
+
+ void loadResizableArrayBufferViewLengthIntPtr(ResizableArrayBufferView view,
+ Synchronization sync,
+ Register obj, Register output,
+ Register scratch);
+
+ public:
+ void loadResizableTypedArrayLengthIntPtr(Synchronization sync, Register obj,
+ Register output, Register scratch) {
+ loadResizableArrayBufferViewLengthIntPtr(
+ ResizableArrayBufferView::TypedArray, sync, obj, output, scratch);
+ }
+
+ void loadResizableDataViewByteLengthIntPtr(Synchronization sync, Register obj,
+ Register output,
+ Register scratch) {
+ loadResizableArrayBufferViewLengthIntPtr(ResizableArrayBufferView::DataView,
+ sync, obj, output, scratch);
+ }
+
+ void loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(
+ Register obj, Register output, Register scratch);
+
private:
void isCallableOrConstructor(bool isCallable, Register obj, Register output,
Label* isProxy);
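
The resizable-length helpers declared above are implemented earlier in MacroAssembler.cpp; for a length-tracking view over a growable shared buffer they reduce to a subtraction plus an optional shift. A sketch of the computed value (illustrative helper, not part of the patch):

#include <cstddef>

// Visible length of an auto-length view: the buffer's current byte length
// minus the view's byte offset, scaled down by the element size for typed
// arrays (DataViews report the raw byte length).
static size_t AutoTrackingViewLength(size_t bufferByteLength,
                                     size_t byteOffset, unsigned elementShift,
                                     bool isTypedArray) {
  size_t byteLength = bufferByteLength - byteOffset;
  return isTypedArray ? (byteLength >> elementShift) : byteLength;
}
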
diff --git a/js/src/jit/PcScriptCache.h b/js/src/jit/PcScriptCache.h
deleted file mode 100644
index c83c479c85..0000000000
--- a/js/src/jit/PcScriptCache.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
- * vim: set ts=8 sts=2 et sw=2 tw=80:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef jit_PcScriptCache_h
-#define jit_PcScriptCache_h
-
-#include "mozilla/Array.h"
-#include "js/TypeDecls.h"
-#include "vm/Runtime.h"
-
-// Defines a fixed-size hash table solely for the purpose of caching
-// jit::GetPcScript(). One cache is attached to each JSRuntime; it functions as
-// if cleared on GC.
-
-namespace js {
-namespace jit {
-
-struct PcScriptCacheEntry {
- uint8_t* returnAddress; // Key into the hash table.
- jsbytecode* pc; // Cached PC.
- JSScript* script; // Cached script.
-};
-
-struct PcScriptCache {
- private:
- static const uint32_t Length = 73;
-
- // GC number at the time the cache was filled or created.
- // Storing and checking against this number allows us to not bother
- // clearing this cache on every GC -- only when actually necessary.
- uint64_t gcNumber;
-
- // List of cache entries.
- mozilla::Array<PcScriptCacheEntry, Length> entries;
-
- public:
- explicit PcScriptCache(uint64_t gcNumber) { clear(gcNumber); }
-
- void clear(uint64_t gcNumber) {
- for (uint32_t i = 0; i < Length; i++) {
- entries[i].returnAddress = nullptr;
- }
- this->gcNumber = gcNumber;
- }
-
- // Get a value from the cache. May perform lazy allocation.
- [[nodiscard]] bool get(JSRuntime* rt, uint32_t hash, uint8_t* addr,
- JSScript** scriptRes, jsbytecode** pcRes) {
- // If a GC occurred, lazily clear the cache now.
- if (gcNumber != rt->gc.gcNumber()) {
- clear(rt->gc.gcNumber());
- return false;
- }
-
- if (entries[hash].returnAddress != addr) {
- return false;
- }
-
- *scriptRes = entries[hash].script;
- if (pcRes) {
- *pcRes = entries[hash].pc;
- }
-
- return true;
- }
-
- void add(uint32_t hash, uint8_t* addr, jsbytecode* pc, JSScript* script) {
- MOZ_ASSERT(addr);
- MOZ_ASSERT(pc);
- MOZ_ASSERT(script);
- entries[hash].returnAddress = addr;
- entries[hash].pc = pc;
- entries[hash].script = script;
- }
-
- static uint32_t Hash(uint8_t* addr) {
- uint32_t key = (uint32_t)((uintptr_t)addr);
- return ((key >> 3) * 2654435761u) % Length;
- }
-};
-
-} // namespace jit
-} // namespace js
-
-#endif /* jit_PcScriptCache_h */
diff --git a/js/src/jit/RangeAnalysis.cpp b/js/src/jit/RangeAnalysis.cpp
index 4ed15daabb..bd8380a690 100644
--- a/js/src/jit/RangeAnalysis.cpp
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -1802,6 +1802,25 @@ void MArrayBufferViewByteOffset::computeRange(TempAllocator& alloc) {
}
}
+void MResizableTypedArrayByteOffsetMaybeOutOfBounds::computeRange(
+ TempAllocator& alloc) {
+ if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
+ setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
+ }
+}
+
+void MResizableTypedArrayLength::computeRange(TempAllocator& alloc) {
+ if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
+ setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
+ }
+}
+
+void MResizableDataViewByteLength::computeRange(TempAllocator& alloc) {
+ if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
+ setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
+ }
+}
+
void MTypedArrayElementSize::computeRange(TempAllocator& alloc) {
constexpr auto MaxTypedArraySize = sizeof(double);
diff --git a/js/src/jit/Recover.cpp b/js/src/jit/Recover.cpp
index e70722bffe..220ffe7bb2 100644
--- a/js/src/jit/Recover.cpp
+++ b/js/src/jit/Recover.cpp
@@ -11,6 +11,7 @@
#include "builtin/Object.h"
#include "builtin/RegExp.h"
#include "builtin/String.h"
+#include "jit/AtomicOperations.h"
#include "jit/Bailouts.h"
#include "jit/CompileInfo.h"
#include "jit/Ion.h"
@@ -2013,15 +2014,11 @@ bool MAtomicIsLockFree::writeRecoverData(CompactBufferWriter& writer) const {
RAtomicIsLockFree::RAtomicIsLockFree(CompactBufferReader& reader) {}
bool RAtomicIsLockFree::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue operand(cx, iter.read());
+ Value operand = iter.read();
MOZ_ASSERT(operand.isInt32());
- int32_t result;
- if (!js::AtomicIsLockFree(cx, operand, &result)) {
- return false;
- }
-
- iter.storeInstructionResult(Int32Value(result));
+ bool result = AtomicOperations::isLockfreeJS(operand.toInt32());
+ iter.storeInstructionResult(BooleanValue(result));
return true;
}
diff --git a/js/src/jit/Registers.h b/js/src/jit/Registers.h
index 1ae9c1954c..43e94e6bff 100644
--- a/js/src/jit/Registers.h
+++ b/js/src/jit/Registers.h
@@ -205,6 +205,7 @@ struct Register64 {
return high != other.high || low != other.low;
}
Register scratchReg() { return high; }
+ Register secondScratchReg() { return low; }
static Register64 Invalid() {
return Register64(Register::Invalid(), Register::Invalid());
}
diff --git a/js/src/jit/VMFunctions.cpp b/js/src/jit/VMFunctions.cpp
index 29777d08c7..ed3f63c88c 100644
--- a/js/src/jit/VMFunctions.cpp
+++ b/js/src/jit/VMFunctions.cpp
@@ -2557,13 +2557,14 @@ BigInt* BigIntAsUintN(JSContext* cx, HandleBigInt x, int32_t bits) {
}
template <typename T>
-static int32_t AtomicsCompareExchange(FixedLengthTypedArrayObject* typedArray,
+static int32_t AtomicsCompareExchange(TypedArrayObject* typedArray,
size_t index, int32_t expected,
int32_t replacement) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::compareExchangeSeqCst(addr + index, T(expected),
@@ -2590,12 +2591,13 @@ AtomicsCompareExchangeFn AtomicsCompareExchange(Scalar::Type elementType) {
}
template <typename T>
-static int32_t AtomicsExchange(FixedLengthTypedArrayObject* typedArray,
- size_t index, int32_t value) {
+static int32_t AtomicsExchange(TypedArrayObject* typedArray, size_t index,
+ int32_t value) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::exchangeSeqCst(addr + index, T(value));
@@ -2621,12 +2623,13 @@ AtomicsReadWriteModifyFn AtomicsExchange(Scalar::Type elementType) {
}
template <typename T>
-static int32_t AtomicsAdd(FixedLengthTypedArrayObject* typedArray, size_t index,
+static int32_t AtomicsAdd(TypedArrayObject* typedArray, size_t index,
int32_t value) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::fetchAddSeqCst(addr + index, T(value));
@@ -2652,12 +2655,13 @@ AtomicsReadWriteModifyFn AtomicsAdd(Scalar::Type elementType) {
}
template <typename T>
-static int32_t AtomicsSub(FixedLengthTypedArrayObject* typedArray, size_t index,
+static int32_t AtomicsSub(TypedArrayObject* typedArray, size_t index,
int32_t value) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::fetchSubSeqCst(addr + index, T(value));
@@ -2683,12 +2687,13 @@ AtomicsReadWriteModifyFn AtomicsSub(Scalar::Type elementType) {
}
template <typename T>
-static int32_t AtomicsAnd(FixedLengthTypedArrayObject* typedArray, size_t index,
+static int32_t AtomicsAnd(TypedArrayObject* typedArray, size_t index,
int32_t value) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::fetchAndSeqCst(addr + index, T(value));
@@ -2714,12 +2719,13 @@ AtomicsReadWriteModifyFn AtomicsAnd(Scalar::Type elementType) {
}
template <typename T>
-static int32_t AtomicsOr(FixedLengthTypedArrayObject* typedArray, size_t index,
+static int32_t AtomicsOr(TypedArrayObject* typedArray, size_t index,
int32_t value) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::fetchOrSeqCst(addr + index, T(value));
@@ -2745,12 +2751,13 @@ AtomicsReadWriteModifyFn AtomicsOr(Scalar::Type elementType) {
}
template <typename T>
-static int32_t AtomicsXor(FixedLengthTypedArrayObject* typedArray, size_t index,
+static int32_t AtomicsXor(TypedArrayObject* typedArray, size_t index,
int32_t value) {
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
SharedMem<T*> addr = typedArray->dataPointerEither().cast<T*>();
return jit::AtomicOperations::fetchXorSeqCst(addr + index, T(value));
@@ -2776,12 +2783,12 @@ AtomicsReadWriteModifyFn AtomicsXor(Scalar::Type elementType) {
}
template <typename AtomicOp, typename... Args>
-static BigInt* AtomicAccess64(JSContext* cx,
- FixedLengthTypedArrayObject* typedArray,
+static BigInt* AtomicAccess64(JSContext* cx, TypedArrayObject* typedArray,
size_t index, AtomicOp op, Args... args) {
MOZ_ASSERT(Scalar::isBigIntType(typedArray->type()));
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
if (typedArray->type() == Scalar::BigInt64) {
SharedMem<int64_t*> addr = typedArray->dataPointerEither().cast<int64_t*>();
@@ -2795,11 +2802,12 @@ static BigInt* AtomicAccess64(JSContext* cx,
}
template <typename AtomicOp, typename... Args>
-static auto AtomicAccess64(FixedLengthTypedArrayObject* typedArray,
- size_t index, AtomicOp op, Args... args) {
+static auto AtomicAccess64(TypedArrayObject* typedArray, size_t index,
+ AtomicOp op, Args... args) {
MOZ_ASSERT(Scalar::isBigIntType(typedArray->type()));
MOZ_ASSERT(!typedArray->hasDetachedBuffer());
- MOZ_ASSERT(index < typedArray->length());
+ MOZ_ASSERT_IF(typedArray->hasResizableBuffer(), !typedArray->isOutOfBounds());
+ MOZ_ASSERT(index < typedArray->length().valueOr(0));
if (typedArray->type() == Scalar::BigInt64) {
SharedMem<int64_t*> addr = typedArray->dataPointerEither().cast<int64_t*>();
@@ -2810,14 +2818,14 @@ static auto AtomicAccess64(FixedLengthTypedArrayObject* typedArray,
return op(addr + index, BigInt::toUint64(args)...);
}
-BigInt* AtomicsLoad64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
+BigInt* AtomicsLoad64(JSContext* cx, TypedArrayObject* typedArray,
size_t index) {
return AtomicAccess64(cx, typedArray, index, [](auto addr) {
return jit::AtomicOperations::loadSeqCst(addr);
});
}
-void AtomicsStore64(FixedLengthTypedArrayObject* typedArray, size_t index,
+void AtomicsStore64(TypedArrayObject* typedArray, size_t index,
const BigInt* value) {
AutoUnsafeCallWithABI unsafe;
@@ -2829,8 +2837,7 @@ void AtomicsStore64(FixedLengthTypedArrayObject* typedArray, size_t index,
value);
}
-BigInt* AtomicsCompareExchange64(JSContext* cx,
- FixedLengthTypedArrayObject* typedArray,
+BigInt* AtomicsCompareExchange64(JSContext* cx, TypedArrayObject* typedArray,
size_t index, const BigInt* expected,
const BigInt* replacement) {
return AtomicAccess64(
@@ -2842,9 +2849,8 @@ BigInt* AtomicsCompareExchange64(JSContext* cx,
expected, replacement);
}
-BigInt* AtomicsExchange64(JSContext* cx,
- FixedLengthTypedArrayObject* typedArray, size_t index,
- const BigInt* value) {
+BigInt* AtomicsExchange64(JSContext* cx, TypedArrayObject* typedArray,
+ size_t index, const BigInt* value) {
return AtomicAccess64(
cx, typedArray, index,
[](auto addr, auto val) {
@@ -2853,8 +2859,8 @@ BigInt* AtomicsExchange64(JSContext* cx,
value);
}
-BigInt* AtomicsAdd64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value) {
+BigInt* AtomicsAdd64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
return AtomicAccess64(
cx, typedArray, index,
[](auto addr, auto val) {
@@ -2863,8 +2869,8 @@ BigInt* AtomicsAdd64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
value);
}
-BigInt* AtomicsAnd64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value) {
+BigInt* AtomicsAnd64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
return AtomicAccess64(
cx, typedArray, index,
[](auto addr, auto val) {
@@ -2873,8 +2879,8 @@ BigInt* AtomicsAnd64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
value);
}
-BigInt* AtomicsOr64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value) {
+BigInt* AtomicsOr64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
return AtomicAccess64(
cx, typedArray, index,
[](auto addr, auto val) {
@@ -2883,8 +2889,8 @@ BigInt* AtomicsOr64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
value);
}
-BigInt* AtomicsSub64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value) {
+BigInt* AtomicsSub64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
return AtomicAccess64(
cx, typedArray, index,
[](auto addr, auto val) {
@@ -2893,8 +2899,8 @@ BigInt* AtomicsSub64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
value);
}
-BigInt* AtomicsXor64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value) {
+BigInt* AtomicsXor64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value) {
return AtomicAccess64(
cx, typedArray, index,
[](auto addr, auto val) {
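The VMFunctions.cpp hunks above switch the Atomics helpers from FixedLengthTypedArrayObject to TypedArrayObject, so a view backed by a resizable buffer may report no length at all; the assertions therefore read length() as a Maybe and compare against valueOr(0). A standalone sketch of that pattern, with std::optional standing in for mozilla::Maybe and a made-up ViewSketch type in place of TypedArrayObject:

// Sketch only; ViewSketch is a made-up type, not TypedArrayObject.
#include <cassert>
#include <cstddef>
#include <optional>

struct ViewSketch {
  bool outOfBounds = false;        // can become true after the buffer shrinks
  std::size_t currentLength = 0;

  std::optional<std::size_t> length() const {
    if (outOfBounds) {
      return std::nullopt;  // a resizable-backed view can have no length
    }
    return currentLength;
  }
};

void assertInBounds(const ViewSketch& view, std::size_t index) {
  // Mirrors: MOZ_ASSERT(index < typedArray->length().valueOr(0));
  // An out-of-bounds view yields 0, so any index fails the assertion.
  assert(index < view.length().value_or(0));
}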
diff --git a/js/src/jit/VMFunctions.h b/js/src/jit/VMFunctions.h
index cfce36caaa..a68dd8279f 100644
--- a/js/src/jit/VMFunctions.h
+++ b/js/src/jit/VMFunctions.h
@@ -27,7 +27,6 @@ namespace js {
class AbstractGeneratorObject;
class ArrayObject;
-class FixedLengthTypedArrayObject;
class GlobalObject;
class InterpreterFrame;
class LexicalScope;
@@ -640,11 +639,11 @@ bool StringBigIntCompare(JSContext* cx, HandleString x, HandleBigInt y,
BigInt* BigIntAsIntN(JSContext* cx, HandleBigInt x, int32_t bits);
BigInt* BigIntAsUintN(JSContext* cx, HandleBigInt x, int32_t bits);
-using AtomicsCompareExchangeFn = int32_t (*)(FixedLengthTypedArrayObject*,
- size_t, int32_t, int32_t);
+using AtomicsCompareExchangeFn = int32_t (*)(TypedArrayObject*, size_t, int32_t,
+ int32_t);
-using AtomicsReadWriteModifyFn = int32_t (*)(FixedLengthTypedArrayObject*,
- size_t, int32_t);
+using AtomicsReadWriteModifyFn = int32_t (*)(TypedArrayObject*, size_t,
+ int32_t);
AtomicsCompareExchangeFn AtomicsCompareExchange(Scalar::Type elementType);
AtomicsReadWriteModifyFn AtomicsExchange(Scalar::Type elementType);
@@ -654,31 +653,29 @@ AtomicsReadWriteModifyFn AtomicsAnd(Scalar::Type elementType);
AtomicsReadWriteModifyFn AtomicsOr(Scalar::Type elementType);
AtomicsReadWriteModifyFn AtomicsXor(Scalar::Type elementType);
-BigInt* AtomicsLoad64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
+BigInt* AtomicsLoad64(JSContext* cx, TypedArrayObject* typedArray,
size_t index);
-void AtomicsStore64(FixedLengthTypedArrayObject* typedArray, size_t index,
+void AtomicsStore64(TypedArrayObject* typedArray, size_t index,
const BigInt* value);
-BigInt* AtomicsCompareExchange64(JSContext* cx,
- FixedLengthTypedArrayObject* typedArray,
+BigInt* AtomicsCompareExchange64(JSContext* cx, TypedArrayObject* typedArray,
size_t index, const BigInt* expected,
const BigInt* replacement);
-BigInt* AtomicsExchange64(JSContext* cx,
- FixedLengthTypedArrayObject* typedArray, size_t index,
- const BigInt* value);
-
-BigInt* AtomicsAdd64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value);
-BigInt* AtomicsAnd64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value);
-BigInt* AtomicsOr64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value);
-BigInt* AtomicsSub64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value);
-BigInt* AtomicsXor64(JSContext* cx, FixedLengthTypedArrayObject* typedArray,
- size_t index, const BigInt* value);
+BigInt* AtomicsExchange64(JSContext* cx, TypedArrayObject* typedArray,
+ size_t index, const BigInt* value);
+
+BigInt* AtomicsAdd64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+BigInt* AtomicsAnd64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+BigInt* AtomicsOr64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+BigInt* AtomicsSub64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
+BigInt* AtomicsXor64(JSContext* cx, TypedArrayObject* typedArray, size_t index,
+ const BigInt* value);
JSAtom* AtomizeStringNoGC(JSContext* cx, JSString* str);
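The typedefs above keep their previous shape, only retargeted at TypedArrayObject: one templated implementation per element type is selected up front and handed to jitted code as a plain function pointer. A simplified, self-contained sketch of that dispatch, using stand-in types rather than SpiderMonkey's real signatures:

// Simplified dispatch sketch; the enum, pointer type, and helper names are
// stand-ins for Scalar::Type and the real VM functions.
#include <atomic>
#include <cstddef>
#include <cstdint>

enum class ScalarKind { Int8, Int32 };

using ReadWriteModifyFn = int32_t (*)(void* elements, size_t index,
                                      int32_t value);

template <typename T>
static int32_t AtomicsAddImpl(void* elements, size_t index, int32_t value) {
  // |elements| is assumed to point at an array of std::atomic<T>.
  auto* data = static_cast<std::atomic<T>*>(elements);
  return int32_t(data[index].fetch_add(T(value), std::memory_order_seq_cst));
}

ReadWriteModifyFn AtomicsAddFor(ScalarKind kind) {
  switch (kind) {
    case ScalarKind::Int8:
      return AtomicsAddImpl<int8_t>;
    case ScalarKind::Int32:
      return AtomicsAddImpl<int32_t>;
  }
  return nullptr;  // unreachable for valid enumerators
}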
diff --git a/js/src/jit/WarpBuilderShared.cpp b/js/src/jit/WarpBuilderShared.cpp
index 89e93d6150..e04984690c 100644
--- a/js/src/jit/WarpBuilderShared.cpp
+++ b/js/src/jit/WarpBuilderShared.cpp
@@ -22,9 +22,12 @@ WarpBuilderShared::WarpBuilderShared(WarpSnapshot& snapshot,
bool WarpBuilderShared::resumeAfter(MInstruction* ins, BytecodeLocation loc) {
// resumeAfter should only be used with effectful instructions. The only
- // exception is MInt64ToBigInt, it's used to convert the result of a call into
- // Wasm code so we attach the resume point to that instead of to the call.
- MOZ_ASSERT(ins->isEffectful() || ins->isInt64ToBigInt());
+ // exceptions are:
+ // 1. MInt64ToBigInt, which is used to convert the result of a call into Wasm
+ // code so we attach the resume point to that instead of to the call.
+  //  2. MPostIntPtrConversion, which is used after a conversion from IntPtr.

+ MOZ_ASSERT(ins->isEffectful() || ins->isInt64ToBigInt() ||
+ ins->isPostIntPtrConversion());
MOZ_ASSERT(!ins->isMovable());
MResumePoint* resumePoint = MResumePoint::New(
diff --git a/js/src/jit/WarpCacheIRTranspiler.cpp b/js/src/jit/WarpCacheIRTranspiler.cpp
index 7654232ecd..9a99e0f5c3 100644
--- a/js/src/jit/WarpCacheIRTranspiler.cpp
+++ b/js/src/jit/WarpCacheIRTranspiler.cpp
@@ -11,8 +11,6 @@
#include "jsmath.h"
-#include "builtin/DataViewObject.h"
-#include "builtin/MapObject.h"
#include "jit/AtomicOp.h"
#include "jit/CacheIR.h"
#include "jit/CacheIRCompiler.h"
@@ -26,7 +24,6 @@
#include "jit/WarpBuilderShared.h"
#include "jit/WarpSnapshot.h"
#include "js/ScalarType.h" // js::Scalar::Type
-#include "vm/ArgumentsObject.h"
#include "vm/BytecodeLocation.h"
#include "wasm/WasmCode.h"
@@ -52,7 +49,8 @@ class MOZ_RAII WarpCacheIRTranspiler : public WarpBuilderShared {
// Array mapping call arguments to OperandId.
using ArgumentKindArray =
- mozilla::EnumeratedArray<ArgumentKind, ArgumentKind::NumKinds, OperandId>;
+ mozilla::EnumeratedArray<ArgumentKind, OperandId,
+ size_t(ArgumentKind::NumKinds)>;
ArgumentKindArray argumentOperandIds_;
void setArgumentId(ArgumentKind kind, OperandId id) {
@@ -255,14 +253,20 @@ class MOZ_RAII WarpCacheIRTranspiler : public WarpBuilderShared {
ObjOperandId objId, uint32_t offsetOffset,
ValOperandId rhsId, uint32_t newShapeOffset);
- void addDataViewData(MDefinition* obj, Scalar::Type type,
- MDefinition** offset, MInstruction** elements);
+ MInstruction* emitTypedArrayLength(ArrayBufferViewKind viewKind,
+ MDefinition* obj);
- [[nodiscard]] bool emitAtomicsBinaryOp(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect, AtomicOp op);
+ MInstruction* emitDataViewLength(ArrayBufferViewKind viewKind,
+ MDefinition* obj);
+
+ void addDataViewData(ArrayBufferViewKind viewKind, MDefinition* obj,
+ Scalar::Type type, MDefinition** offset,
+ MInstruction** elements);
+
+ [[nodiscard]] bool emitAtomicsBinaryOp(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind,
+ AtomicOp op);
[[nodiscard]] bool emitLoadArgumentSlot(ValOperandId resultId,
uint32_t slotIndex);
@@ -347,9 +351,14 @@ bool WarpCacheIRTranspiler::transpile(
// Effectful instructions should have a resume point. MIonToWasmCall is an
// exception: we can attach the resume point to the MInt64ToBigInt instruction
- // instead.
+  // instead. Other exceptions are MResizableTypedArrayLength,
+  // MResizableDataViewByteLength, and MGrowableSharedArrayBufferByteLength,
+  // which attach the resume point to MPostIntPtrConversion.
MOZ_ASSERT_IF(effectful_,
- effectful_->resumePoint() || effectful_->isIonToWasmCall());
+ effectful_->resumePoint() || effectful_->isIonToWasmCall() ||
+ effectful_->isResizableTypedArrayLength() ||
+ effectful_->isResizableDataViewByteLength() ||
+ effectful_->isGrowableSharedArrayBufferByteLength());
return true;
}
@@ -385,31 +394,44 @@ bool WarpCacheIRTranspiler::emitGuardClass(ObjOperandId objId,
return true;
}
+bool WarpCacheIRTranspiler::emitGuardEitherClass(ObjOperandId objId,
+ GuardClassKind kind1,
+ GuardClassKind kind2) {
+ MDefinition* def = getOperand(objId);
+
+ // We don't yet need this case, so it's unsupported for now.
+ MOZ_ASSERT(kind1 != GuardClassKind::JSFunction &&
+ kind2 != GuardClassKind::JSFunction);
+
+ const JSClass* classp1 = classForGuardClassKind(kind1);
+ const JSClass* classp2 = classForGuardClassKind(kind2);
+ auto* ins = MGuardToEitherClass::New(alloc(), def, classp1, classp2);
+
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
const JSClass* WarpCacheIRTranspiler::classForGuardClassKind(
GuardClassKind kind) {
switch (kind) {
case GuardClassKind::Array:
- return &ArrayObject::class_;
case GuardClassKind::PlainObject:
- return &PlainObject::class_;
case GuardClassKind::FixedLengthArrayBuffer:
- return &FixedLengthArrayBufferObject::class_;
+ case GuardClassKind::ResizableArrayBuffer:
case GuardClassKind::FixedLengthSharedArrayBuffer:
- return &FixedLengthSharedArrayBufferObject::class_;
+ case GuardClassKind::GrowableSharedArrayBuffer:
case GuardClassKind::FixedLengthDataView:
- return &FixedLengthDataViewObject::class_;
+ case GuardClassKind::ResizableDataView:
case GuardClassKind::MappedArguments:
- return &MappedArgumentsObject::class_;
case GuardClassKind::UnmappedArguments:
- return &UnmappedArgumentsObject::class_;
- case GuardClassKind::WindowProxy:
- return mirGen().runtime->maybeWindowProxyClass();
case GuardClassKind::Set:
- return &SetObject::class_;
case GuardClassKind::Map:
- return &MapObject::class_;
case GuardClassKind::BoundFunction:
- return &BoundFunctionObject::class_;
+ return ClassFor(kind);
+ case GuardClassKind::WindowProxy:
+ return mirGen().runtime->maybeWindowProxyClass();
case GuardClassKind::JSFunction:
break;
}
@@ -830,6 +852,16 @@ bool WarpCacheIRTranspiler::emitGuardIsFixedLengthTypedArray(
return true;
}
+bool WarpCacheIRTranspiler::emitGuardIsResizableTypedArray(ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardIsResizableTypedArray::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
bool WarpCacheIRTranspiler::emitGuardHasProxyHandler(ObjOperandId objId,
uint32_t handlerOffset) {
MDefinition* obj = getOperand(objId);
@@ -2121,13 +2153,35 @@ bool WarpCacheIRTranspiler::emitCallObjectHasSparseElementResult(
return true;
}
+MInstruction* WarpCacheIRTranspiler::emitTypedArrayLength(
+ ArrayBufferViewKind viewKind, MDefinition* obj) {
+ if (viewKind == ArrayBufferViewKind::FixedLength) {
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ return length;
+ }
+
+ // Bounds check doesn't require a memory barrier. See IsValidIntegerIndex
+ // abstract operation which reads the underlying buffer byte length using
+ // "unordered" memory order.
+ auto barrier = MemoryBarrierRequirement::NotRequired;
+
+ // Movable and removable because no memory barrier is needed.
+ auto* length = MResizableTypedArrayLength::New(alloc(), obj, barrier);
+ length->setMovable();
+ length->setNotGuard();
+ add(length);
+
+ return length;
+}
+
bool WarpCacheIRTranspiler::emitLoadTypedArrayElementExistsResult(
- ObjOperandId objId, IntPtrOperandId indexId) {
+ ObjOperandId objId, IntPtrOperandId indexId, ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
// Unsigned comparison to catch negative indices.
auto* ins = MCompare::New(alloc(), index, length, JSOp::Lt,
@@ -2165,27 +2219,29 @@ static MIRType MIRTypeForArrayBufferViewRead(Scalar::Type arrayType,
bool WarpCacheIRTranspiler::emitLoadTypedArrayElementResult(
ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
- bool handleOOB, bool forceDoubleForUint32) {
+ bool handleOOB, bool forceDoubleForUint32, ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
+ auto* length = emitTypedArrayLength(viewKind, obj);
+
+ if (!handleOOB) {
+ // MLoadTypedArrayElementHole does the bounds checking.
+ index = addBoundsCheck(index, length);
+ }
+
+ auto* elements = MArrayBufferViewElements::New(alloc(), obj);
+ add(elements);
+
if (handleOOB) {
auto* load = MLoadTypedArrayElementHole::New(
- alloc(), obj, index, elementType, forceDoubleForUint32);
+ alloc(), elements, index, length, elementType, forceDoubleForUint32);
add(load);
pushResult(load);
return true;
}
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
-
- index = addBoundsCheck(index, length);
-
- auto* elements = MArrayBufferViewElements::New(alloc(), obj);
- add(elements);
-
auto* load = MLoadUnboxedScalar::New(alloc(), elements, index, elementType);
load->setResultType(
MIRTypeForArrayBufferViewRead(elementType, forceDoubleForUint32));
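emitTypedArrayLength above distinguishes two ways of reading a resizable view's length: explicit length accesses elsewhere in this file are emitted as effectful, sequentially consistent loads, while bounds checks may use an "unordered" read, as the comment notes. A standalone sketch of that distinction, with std::atomic standing in for the shared byte-length slot of a growable/resizable buffer:

// Sketch only; std::atomic stands in for the buffer's shared length slot.
#include <atomic>
#include <cstddef>

struct SharedLengthSketch {
  std::atomic<std::size_t> byteLength{0};

  // Explicit |length| / |byteLength| accessor: seq-cst atomic load.
  std::size_t explicitLength() const {
    return byteLength.load(std::memory_order_seq_cst);
  }

  // Bounds check: no ordering needed, any recent value is acceptable.
  bool inBounds(std::size_t index) const {
    return index < byteLength.load(std::memory_order_relaxed);
  }
};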
@@ -2717,17 +2773,14 @@ bool WarpCacheIRTranspiler::emitStoreDenseElementHole(ObjOperandId objId,
return resumeAfter(store);
}
-bool WarpCacheIRTranspiler::emitStoreTypedArrayElement(ObjOperandId objId,
- Scalar::Type elementType,
- IntPtrOperandId indexId,
- uint32_t rhsId,
- bool handleOOB) {
+bool WarpCacheIRTranspiler::emitStoreTypedArrayElement(
+ ObjOperandId objId, Scalar::Type elementType, IntPtrOperandId indexId,
+ uint32_t rhsId, bool handleOOB, ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
MDefinition* rhs = getOperand(ValOperandId(rhsId));
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
if (!handleOOB) {
// MStoreTypedArrayElementHole does the bounds checking.
@@ -2749,11 +2802,34 @@ bool WarpCacheIRTranspiler::emitStoreTypedArrayElement(ObjOperandId objId,
return resumeAfter(store);
}
-void WarpCacheIRTranspiler::addDataViewData(MDefinition* obj, Scalar::Type type,
+MInstruction* WarpCacheIRTranspiler::emitDataViewLength(
+ ArrayBufferViewKind viewKind, MDefinition* obj) {
+ if (viewKind == ArrayBufferViewKind::FixedLength) {
+ auto* length = MArrayBufferViewLength::New(alloc(), obj);
+ add(length);
+
+ return length;
+ }
+
+ // Bounds check doesn't require a memory barrier. See GetViewValue and
+ // SetViewValue abstract operations which read the underlying buffer byte
+ // length using "unordered" memory order.
+ auto barrier = MemoryBarrierRequirement::NotRequired;
+
+ // Movable and removable because no memory barrier is needed.
+ auto* length = MResizableDataViewByteLength::New(alloc(), obj, barrier);
+ length->setMovable();
+ length->setNotGuard();
+ add(length);
+
+ return length;
+}
+
+void WarpCacheIRTranspiler::addDataViewData(ArrayBufferViewKind viewKind,
+ MDefinition* obj, Scalar::Type type,
MDefinition** offset,
MInstruction** elements) {
- MInstruction* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitDataViewLength(viewKind, obj);
// Adjust the length to account for accesses near the end of the dataview.
if (size_t byteSize = Scalar::byteSize(type); byteSize > 1) {
@@ -2773,14 +2849,14 @@ void WarpCacheIRTranspiler::addDataViewData(MDefinition* obj, Scalar::Type type,
bool WarpCacheIRTranspiler::emitLoadDataViewValueResult(
ObjOperandId objId, IntPtrOperandId offsetId,
BooleanOperandId littleEndianId, Scalar::Type elementType,
- bool forceDoubleForUint32) {
+ bool forceDoubleForUint32, ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* offset = getOperand(offsetId);
MDefinition* littleEndian = getOperand(littleEndianId);
// Add bounds check and get the DataViewObject's elements.
MInstruction* elements;
- addDataViewData(obj, elementType, &offset, &elements);
+ addDataViewData(viewKind, obj, elementType, &offset, &elements);
// Load the element.
MInstruction* load;
@@ -2802,7 +2878,8 @@ bool WarpCacheIRTranspiler::emitLoadDataViewValueResult(
bool WarpCacheIRTranspiler::emitStoreDataViewValueResult(
ObjOperandId objId, IntPtrOperandId offsetId, uint32_t valueId,
- BooleanOperandId littleEndianId, Scalar::Type elementType) {
+ BooleanOperandId littleEndianId, Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* offset = getOperand(offsetId);
MDefinition* value = getOperand(ValOperandId(valueId));
@@ -2810,7 +2887,7 @@ bool WarpCacheIRTranspiler::emitStoreDataViewValueResult(
// Add bounds check and get the DataViewObject's elements.
MInstruction* elements;
- addDataViewData(obj, elementType, &offset, &elements);
+ addDataViewData(viewKind, obj, elementType, &offset, &elements);
// Store the element.
MInstruction* store;
@@ -4067,6 +4144,78 @@ bool WarpCacheIRTranspiler::emitArrayBufferViewByteOffsetDoubleResult(
return true;
}
+bool WarpCacheIRTranspiler::
+ emitResizableTypedArrayByteOffsetMaybeOutOfBoundsInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* byteOffset =
+ MResizableTypedArrayByteOffsetMaybeOutOfBounds::New(alloc(), obj);
+ add(byteOffset);
+
+ auto* byteOffsetInt32 = MNonNegativeIntPtrToInt32::New(alloc(), byteOffset);
+ add(byteOffsetInt32);
+
+ pushResult(byteOffsetInt32);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::
+ emitResizableTypedArrayByteOffsetMaybeOutOfBoundsDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* byteOffset =
+ MResizableTypedArrayByteOffsetMaybeOutOfBounds::New(alloc(), obj);
+ add(byteOffset);
+
+ auto* byteOffsetDouble = MIntPtrToDouble::New(alloc(), byteOffset);
+ add(byteOffsetDouble);
+
+ pushResult(byteOffsetDouble);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitResizableTypedArrayLengthInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ // Explicit |length| accesses are seq-consistent atomic loads.
+ auto barrier = MemoryBarrierRequirement::Required;
+
+ auto* length = MResizableTypedArrayLength::New(alloc(), obj, barrier);
+ addEffectful(length);
+
+ auto* lengthInt32 = MNonNegativeIntPtrToInt32::New(alloc(), length);
+ add(lengthInt32);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), lengthInt32);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
+bool WarpCacheIRTranspiler::emitResizableTypedArrayLengthDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ // Explicit |length| accesses are seq-consistent atomic loads.
+ auto barrier = MemoryBarrierRequirement::Required;
+
+ auto* length = MResizableTypedArrayLength::New(alloc(), obj, barrier);
+ addEffectful(length);
+
+ auto* lengthDouble = MIntPtrToDouble::New(alloc(), length);
+ add(lengthDouble);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), lengthDouble);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
bool WarpCacheIRTranspiler::emitTypedArrayByteLengthInt32Result(
ObjOperandId objId) {
MDefinition* obj = getOperand(objId);
@@ -4112,6 +4261,63 @@ bool WarpCacheIRTranspiler::emitTypedArrayByteLengthDoubleResult(
return true;
}
+bool WarpCacheIRTranspiler::emitResizableTypedArrayByteLengthInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto barrier = MemoryBarrierRequirement::Required;
+
+ auto* length = MResizableTypedArrayLength::New(alloc(), obj, barrier);
+ addEffectful(length);
+
+ auto* lengthInt32 = MNonNegativeIntPtrToInt32::New(alloc(), length);
+ add(lengthInt32);
+
+ auto* size = MTypedArrayElementSize::New(alloc(), obj);
+ add(size);
+
+ auto* mul = MMul::New(alloc(), lengthInt32, size, MIRType::Int32);
+ mul->setCanBeNegativeZero(false);
+ add(mul);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), mul);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
+bool WarpCacheIRTranspiler::emitResizableTypedArrayByteLengthDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto barrier = MemoryBarrierRequirement::Required;
+
+ auto* length = MResizableTypedArrayLength::New(alloc(), obj, barrier);
+ addEffectful(length);
+
+ auto* lengthDouble = MIntPtrToDouble::New(alloc(), length);
+ add(lengthDouble);
+
+ auto* size = MTypedArrayElementSize::New(alloc(), obj);
+ add(size);
+
+ auto* sizeDouble = MToDouble::New(alloc(), size);
+ add(sizeDouble);
+
+ auto* mul = MMul::New(alloc(), lengthDouble, sizeDouble, MIRType::Double);
+ mul->setCanBeNegativeZero(false);
+ add(mul);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), mul);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
bool WarpCacheIRTranspiler::emitTypedArrayElementSizeResult(
ObjOperandId objId) {
MDefinition* obj = getOperand(objId);
@@ -4123,6 +4329,80 @@ bool WarpCacheIRTranspiler::emitTypedArrayElementSizeResult(
return true;
}
+bool WarpCacheIRTranspiler::emitResizableDataViewByteLengthInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto barrier = MemoryBarrierRequirement::Required;
+
+ auto* length = MResizableDataViewByteLength::New(alloc(), obj, barrier);
+ addEffectful(length);
+
+ auto* lengthInt32 = MNonNegativeIntPtrToInt32::New(alloc(), length);
+ add(lengthInt32);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), lengthInt32);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
+bool WarpCacheIRTranspiler::emitResizableDataViewByteLengthDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ // Explicit |byteLength| accesses are seq-consistent atomic loads.
+ auto barrier = MemoryBarrierRequirement::Required;
+
+ auto* length = MResizableDataViewByteLength::New(alloc(), obj, barrier);
+ addEffectful(length);
+
+ auto* lengthDouble = MIntPtrToDouble::New(alloc(), length);
+ add(lengthDouble);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), lengthDouble);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
+bool WarpCacheIRTranspiler::emitGrowableSharedArrayBufferByteLengthInt32Result(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* length = MGrowableSharedArrayBufferByteLength::New(alloc(), obj);
+ addEffectful(length);
+
+ auto* lengthInt32 = MNonNegativeIntPtrToInt32::New(alloc(), length);
+ add(lengthInt32);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), lengthInt32);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
+bool WarpCacheIRTranspiler::emitGrowableSharedArrayBufferByteLengthDoubleResult(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* length = MGrowableSharedArrayBufferByteLength::New(alloc(), obj);
+ addEffectful(length);
+
+ auto* lengthDouble = MIntPtrToDouble::New(alloc(), length);
+ add(lengthDouble);
+
+ auto* postConversion = MPostIntPtrConversion::New(alloc(), lengthDouble);
+ add(postConversion);
+
+ pushResult(postConversion);
+ return resumeAfterUnchecked(postConversion);
+}
+
bool WarpCacheIRTranspiler::emitGuardHasAttachedArrayBuffer(
ObjOperandId objId) {
MDefinition* obj = getOperand(objId);
@@ -4134,6 +4414,29 @@ bool WarpCacheIRTranspiler::emitGuardHasAttachedArrayBuffer(
return true;
}
+bool WarpCacheIRTranspiler::emitGuardResizableArrayBufferViewInBounds(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins = MGuardResizableArrayBufferViewInBounds::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
+bool WarpCacheIRTranspiler::emitGuardResizableArrayBufferViewInBoundsOrDetached(
+ ObjOperandId objId) {
+ MDefinition* obj = getOperand(objId);
+
+ auto* ins =
+ MGuardResizableArrayBufferViewInBoundsOrDetached::New(alloc(), obj);
+ add(ins);
+
+ setOperand(objId, ins);
+ return true;
+}
+
bool WarpCacheIRTranspiler::emitIsTypedArrayConstructorResult(
ObjOperandId objId) {
MDefinition* obj = getOperand(objId);
@@ -4318,14 +4621,14 @@ bool WarpCacheIRTranspiler::emitNewTypedArrayFromArrayResult(
bool WarpCacheIRTranspiler::emitAtomicsCompareExchangeResult(
ObjOperandId objId, IntPtrOperandId indexId, uint32_t expectedId,
- uint32_t replacementId, Scalar::Type elementType) {
+ uint32_t replacementId, Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
MDefinition* expected = getOperand(ValOperandId(expectedId));
MDefinition* replacement = getOperand(ValOperandId(replacementId));
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
index = addBoundsCheck(index, length);
@@ -4347,13 +4650,12 @@ bool WarpCacheIRTranspiler::emitAtomicsCompareExchangeResult(
bool WarpCacheIRTranspiler::emitAtomicsExchangeResult(
ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
- Scalar::Type elementType) {
+ Scalar::Type elementType, ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
MDefinition* value = getOperand(ValOperandId(valueId));
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
index = addBoundsCheck(index, length);
@@ -4373,17 +4675,15 @@ bool WarpCacheIRTranspiler::emitAtomicsExchangeResult(
return resumeAfter(exchange);
}
-bool WarpCacheIRTranspiler::emitAtomicsBinaryOp(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect, AtomicOp op) {
+bool WarpCacheIRTranspiler::emitAtomicsBinaryOp(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind,
+ AtomicOp op) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
MDefinition* value = getOperand(ValOperandId(valueId));
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
index = addBoundsCheck(index, length);
@@ -4409,59 +4709,48 @@ bool WarpCacheIRTranspiler::emitAtomicsBinaryOp(ObjOperandId objId,
return resumeAfter(binop);
}
-bool WarpCacheIRTranspiler::emitAtomicsAddResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool WarpCacheIRTranspiler::emitAtomicsAddResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
- AtomicFetchAddOp);
+ viewKind, AtomicOp::Add);
}
-bool WarpCacheIRTranspiler::emitAtomicsSubResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool WarpCacheIRTranspiler::emitAtomicsSubResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
- AtomicFetchSubOp);
+ viewKind, AtomicOp::Sub);
}
-bool WarpCacheIRTranspiler::emitAtomicsAndResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool WarpCacheIRTranspiler::emitAtomicsAndResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
- AtomicFetchAndOp);
+ viewKind, AtomicOp::And);
}
-bool WarpCacheIRTranspiler::emitAtomicsOrResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool WarpCacheIRTranspiler::emitAtomicsOrResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
- AtomicFetchOrOp);
+ viewKind, AtomicOp::Or);
}
-bool WarpCacheIRTranspiler::emitAtomicsXorResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType,
- bool forEffect) {
+bool WarpCacheIRTranspiler::emitAtomicsXorResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
return emitAtomicsBinaryOp(objId, indexId, valueId, elementType, forEffect,
- AtomicFetchXorOp);
+ viewKind, AtomicOp::Xor);
}
-bool WarpCacheIRTranspiler::emitAtomicsLoadResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- Scalar::Type elementType) {
+bool WarpCacheIRTranspiler::emitAtomicsLoadResult(
+ ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
+ ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
index = addBoundsCheck(index, length);
@@ -4473,7 +4762,7 @@ bool WarpCacheIRTranspiler::emitAtomicsLoadResult(ObjOperandId objId,
MIRTypeForArrayBufferViewRead(elementType, forceDoubleForUint32);
auto* load = MLoadUnboxedScalar::New(alloc(), elements, index, elementType,
- DoesRequireMemoryBarrier);
+ MemoryBarrierRequirement::Required);
load->setResultType(knownType);
addEffectful(load);
@@ -4481,24 +4770,23 @@ bool WarpCacheIRTranspiler::emitAtomicsLoadResult(ObjOperandId objId,
return resumeAfter(load);
}
-bool WarpCacheIRTranspiler::emitAtomicsStoreResult(ObjOperandId objId,
- IntPtrOperandId indexId,
- uint32_t valueId,
- Scalar::Type elementType) {
+bool WarpCacheIRTranspiler::emitAtomicsStoreResult(
+ ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
+ Scalar::Type elementType, ArrayBufferViewKind viewKind) {
MDefinition* obj = getOperand(objId);
MDefinition* index = getOperand(indexId);
MDefinition* value = getOperand(ValOperandId(valueId));
- auto* length = MArrayBufferViewLength::New(alloc(), obj);
- add(length);
+ auto* length = emitTypedArrayLength(viewKind, obj);
index = addBoundsCheck(index, length);
auto* elements = MArrayBufferViewElements::New(alloc(), obj);
add(elements);
- auto* store = MStoreUnboxedScalar::New(alloc(), elements, index, value,
- elementType, DoesRequireMemoryBarrier);
+ auto* store =
+ MStoreUnboxedScalar::New(alloc(), elements, index, value, elementType,
+ MemoryBarrierRequirement::Required);
addEffectful(store);
pushResult(value);
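Earlier in this file, addDataViewData shrinks the length before the bounds check so that a multi-byte DataView access near the end of the buffer is rejected: an access of byteSize bytes at offset is valid only when offset + byteSize <= byteLength. A standalone sketch of that adjustment, with plain integers in place of the MIR nodes:

// Sketch only; plain integers stand in for the MIR length/offset nodes.
#include <cstddef>

bool dataViewAccessInBounds(std::size_t byteLength, std::size_t offset,
                            std::size_t byteSize) {
  if (byteSize > byteLength) {
    return false;  // even offset 0 cannot fit an element of this size
  }
  std::size_t limit = byteLength - (byteSize - 1);  // adjusted length
  return offset < limit;  // equivalent to offset + byteSize <= byteLength
}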
diff --git a/js/src/jit/arm/CodeGenerator-arm.cpp b/js/src/jit/arm/CodeGenerator-arm.cpp
index 0c35309c7e..98675164a9 100644
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -2404,10 +2404,6 @@ void CodeGenerator::visitNegF(LNegF* ins) {
masm.ma_vneg_f32(input, ToFloatRegister(ins->output()));
}
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
- masm.memoryBarrier(ins->type());
-}
-
void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
auto input = ToFloatRegister(lir->input());
auto output = ToRegister(lir->output());
diff --git a/js/src/jit/arm/MacroAssembler-arm.cpp b/js/src/jit/arm/MacroAssembler-arm.cpp
index 50d5d6645c..be8348a1fc 100644
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -5008,7 +5008,7 @@ static Register ComputePointerForAtomic(MacroAssembler& masm,
template <typename T>
static void CompareExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register oldval, Register newval,
Register output) {
bool signExtend = Scalar::isSignedIntType(type);
@@ -5087,15 +5087,13 @@ static void CompareExchange(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const Address& address, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, sync, address, oldval, newval, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& address, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, sync, address, oldval, newval, output);
@@ -5118,7 +5116,7 @@ void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register value, Register output) {
bool signExtend = Scalar::isSignedIntType(type);
unsigned nbytes = Scalar::byteSize(type);
@@ -5175,15 +5173,13 @@ static void AtomicExchange(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const Address& address, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, sync, address, value, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& address, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, sync, address, value, output);
@@ -5225,8 +5221,8 @@ void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicFetchOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const Register& value, const T& mem,
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const Register& value, const T& mem,
Register flagTemp, Register output) {
bool signExtend = Scalar::isSignedIntType(type);
unsigned nbytes = Scalar::byteSize(type);
@@ -5274,19 +5270,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
}
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add(scratch, output, O2Reg(value));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub(scratch, output, O2Reg(value));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(scratch, output, O2Reg(value));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_orr(scratch, output, O2Reg(value));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_eor(scratch, output, O2Reg(value));
break;
default:
@@ -5312,17 +5308,17 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const Address& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, value, mem, temp, output);
}
@@ -5357,8 +5353,8 @@ void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicEffectOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const Register& value, const T& mem,
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const Register& value, const T& mem,
Register flagTemp) {
unsigned nbytes = Scalar::byteSize(type);
@@ -5396,19 +5392,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
}
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add(scratch, scratch, O2Reg(value));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub(scratch, scratch, O2Reg(value));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(scratch, scratch, O2Reg(value));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_orr(scratch, scratch, O2Reg(value));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_eor(scratch, scratch, O2Reg(value));
break;
default:
@@ -5451,7 +5447,7 @@ void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicLoad64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 output) {
MOZ_ASSERT((output.low.code() & 1) == 0);
MOZ_ASSERT(output.low.code() + 1 == output.high.code());
@@ -5495,7 +5491,7 @@ void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
template <typename T>
static void CompareExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 expect, Register64 replace,
Register64 output) {
MOZ_ASSERT(expect != replace && replace != output && output != expect);
@@ -5556,13 +5552,13 @@ void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
- const Address& mem, Register64 expect,
- Register64 replace, Register64 output) {
+void MacroAssembler::compareExchange64(Synchronization sync, const Address& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
+void MacroAssembler::compareExchange64(Synchronization sync,
const BaseIndex& mem, Register64 expect,
Register64 replace, Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
@@ -5571,7 +5567,7 @@ void MacroAssembler::compareExchange64(const Synchronization& sync,
template <typename T>
static void AtomicExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 value, Register64 output) {
MOZ_ASSERT(output != value);
@@ -5624,13 +5620,12 @@ void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
WasmAtomicExchange64(*this, access, mem, value, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization sync, const Address& mem,
+ Register64 value, Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
+void MacroAssembler::atomicExchange64(Synchronization sync,
const BaseIndex& mem, Register64 value,
Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
@@ -5639,9 +5634,8 @@ void MacroAssembler::atomicExchange64(const Synchronization& sync,
template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, AtomicOp op,
- Register64 value, const T& mem, Register64 temp,
- Register64 output) {
+ Synchronization sync, AtomicOp op, Register64 value,
+ const T& mem, Register64 temp, Register64 output) {
MOZ_ASSERT(temp.low != InvalidReg && temp.high != InvalidReg);
MOZ_ASSERT(output != value);
MOZ_ASSERT(temp != value);
@@ -5671,23 +5665,23 @@ static void AtomicFetchOp64(MacroAssembler& masm,
FaultingCodeOffset(load.getOffset()));
}
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add(temp.low, output.low, O2Reg(value.low), SetCC);
masm.as_adc(temp.high, output.high, O2Reg(value.high));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub(temp.low, output.low, O2Reg(value.low), SetCC);
masm.as_sbc(temp.high, output.high, O2Reg(value.high));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(temp.low, output.low, O2Reg(value.low));
masm.as_and(temp.high, output.high, O2Reg(value.high));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_orr(temp.low, output.low, O2Reg(value.low));
masm.as_orr(temp.high, output.high, O2Reg(value.high));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_eor(temp.low, output.low, O2Reg(value.low));
masm.as_eor(temp.high, output.high, O2Reg(value.high));
break;
@@ -5725,25 +5719,25 @@ void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
WasmAtomicFetchOp64(*this, access, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
@@ -5754,7 +5748,7 @@ void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register oldval, Register newval, Register temp,
AnyRegister output) {
if (arrayType == Scalar::Uint32) {
@@ -5766,15 +5760,14 @@ static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register oldval,
- Register newval, Register temp,
- AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register oldval, Register newval,
+ Register temp, AnyRegister output) {
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register temp,
AnyRegister output) {
@@ -5783,9 +5776,8 @@ void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
- Register value, Register temp,
- AnyRegister output) {
+ Synchronization sync, const T& mem, Register value,
+ Register temp, AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
@@ -5795,14 +5787,14 @@ static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register value,
- Register temp, AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register value, Register temp,
+ AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register value,
Register temp, AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
@@ -5810,9 +5802,9 @@ void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Register value, const T& mem, Register temp1,
- Register temp2, AnyRegister output) {
+ Synchronization sync, AtomicOp op, Register value,
+ const T& mem, Register temp1, Register temp2,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
@@ -5822,7 +5814,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -5830,7 +5822,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -5838,14 +5830,14 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register temp) {
AtomicEffectOp(*this, nullptr, arrayType, sync, op, value, mem, temp);
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register temp) {
AtomicEffectOp(*this, nullptr, arrayType, sync, op, value, mem, temp);
@@ -5854,25 +5846,23 @@ void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
// ========================================================================
// Primitive atomic operations.
-void MacroAssembler::atomicLoad64(const Synchronization& sync,
- const Address& mem, Register64 output) {
+void MacroAssembler::atomicLoad64(Synchronization sync, const Address& mem,
+ Register64 output) {
AtomicLoad64(*this, nullptr, sync, mem, output);
}
-void MacroAssembler::atomicLoad64(const Synchronization& sync,
- const BaseIndex& mem, Register64 output) {
+void MacroAssembler::atomicLoad64(Synchronization sync, const BaseIndex& mem,
+ Register64 output) {
AtomicLoad64(*this, nullptr, sync, mem, output);
}
-void MacroAssembler::atomicStore64(const Synchronization& sync,
- const Address& mem, Register64 value,
- Register64 temp) {
+void MacroAssembler::atomicStore64(Synchronization sync, const Address& mem,
+ Register64 value, Register64 temp) {
AtomicExchange64(*this, nullptr, sync, mem, value, temp);
}
-void MacroAssembler::atomicStore64(const Synchronization& sync,
- const BaseIndex& mem, Register64 value,
- Register64 temp) {
+void MacroAssembler::atomicStore64(Synchronization sync, const BaseIndex& mem,
+ Register64 value, Register64 temp) {
AtomicExchange64(*this, nullptr, sync, mem, value, temp);
}
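Throughout these hunks (and the matching ones in the other back ends below) the atomic helpers switch from taking Synchronization by const reference to taking it by value. A minimal sketch of the difference at the signature level, assuming Synchronization is a small, trivially copyable descriptor of the barriers required around the access (that definition is an assumption here, not part of this patch):

    // Sketch only; the members of Synchronization are placeholders.
    struct Synchronization {
      bool barrierBefore;
      bool barrierAfter;
    };

    // Before: the callee receives a reference and dereferences it on use.
    void atomicLoad32Old(const Synchronization& sync /*, ... */);

    // After: the descriptor is copied into the callee, which for a type this
    // small costs no more than passing a pointer and removes an indirection.
    void atomicLoad32New(Synchronization sync /*, ... */);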
diff --git a/js/src/jit/arm64/CodeGenerator-arm64.cpp b/js/src/jit/arm64/CodeGenerator-arm64.cpp
index ff3ea96a7d..a232135419 100644
--- a/js/src/jit/arm64/CodeGenerator-arm64.cpp
+++ b/js/src/jit/arm64/CodeGenerator-arm64.cpp
@@ -2563,10 +2563,6 @@ void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
ToRegister(lir->memoryBase()), ToRegister(lir->ptr()));
}
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
- masm.memoryBarrier(ins->type());
-}
-
void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
MWasmAddOffset* mir = lir->mir();
Register base = ToRegister(lir->base());
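The only change to this code generator is the deletion of its visitMemoryBarrier override; the loong64, mips-shared and riscv64 code generators lose the identical override in the hunks further down. The removed per-architecture body was simply the following, so presumably a single shared definition now covers all of these back ends (that shared definition is not visible in this section):

    // Body deleted from each per-architecture CodeGenerator in this patch.
    void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
      masm.memoryBarrier(ins->type());
    }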
diff --git a/js/src/jit/arm64/MacroAssembler-arm64.cpp b/js/src/jit/arm64/MacroAssembler-arm64.cpp
index 682f69df59..e3ec2494ff 100644
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -2324,8 +2324,8 @@ template <typename T>
static void CompareExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
Scalar::Type type, Width targetWidth,
- const Synchronization& sync, const T& mem,
- Register oldval, Register newval, Register output) {
+ Synchronization sync, const T& mem, Register oldval,
+ Register newval, Register output) {
MOZ_ASSERT(oldval != output && newval != output);
vixl::UseScratchRegisterScope temps(&masm);
@@ -2395,8 +2395,8 @@ template <typename T>
static void AtomicExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
Scalar::Type type, Width targetWidth,
- const Synchronization& sync, const T& mem,
- Register value, Register output) {
+ Synchronization sync, const T& mem, Register value,
+ Register output) {
MOZ_ASSERT(value != output);
vixl::UseScratchRegisterScope temps(&masm);
@@ -2458,9 +2458,8 @@ template <bool wantResult, typename T>
static void AtomicFetchOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
Scalar::Type type, Width targetWidth,
- const Synchronization& sync, AtomicOp op,
- const T& mem, Register value, Register temp,
- Register output) {
+ Synchronization sync, AtomicOp op, const T& mem,
+ Register value, Register temp, Register output) {
MOZ_ASSERT(value != output);
MOZ_ASSERT(value != temp);
MOZ_ASSERT_IF(wantResult, output != temp);
@@ -2514,25 +2513,25 @@ static void AtomicFetchOp(MacroAssembler& masm,
}
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
FETCH_OP_CASE(add, value);
break;
- case AtomicFetchSubOp: {
+ case AtomicOp::Sub: {
Register scratch = temps.AcquireX().asUnsized();
masm.Neg(X(scratch), X(value));
FETCH_OP_CASE(add, scratch);
break;
}
- case AtomicFetchAndOp: {
+ case AtomicOp::And: {
Register scratch = temps.AcquireX().asUnsized();
masm.Eor(X(scratch), X(value), Operand(~0));
FETCH_OP_CASE(clr, scratch);
break;
}
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
FETCH_OP_CASE(set, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
FETCH_OP_CASE(eor, value);
break;
}
@@ -2558,19 +2557,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.bind(&again);
LoadExclusive(masm, access, type, targetWidth, ptr, output);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.Add(X(temp), X(output), X(value));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.Sub(X(temp), X(output), X(value));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.And(X(temp), X(output), X(value));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.Orr(X(temp), X(output), X(value));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.Eor(X(temp), X(output), X(value));
break;
}
@@ -2583,72 +2582,69 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, Width::_32, sync, mem, oldval, newval,
output);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, Width::_32, sync, mem, oldval, newval,
output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
- const Address& mem, Register64 expect,
- Register64 replace, Register64 output) {
+void MacroAssembler::compareExchange64(Synchronization sync, const Address& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
CompareExchange(*this, nullptr, Scalar::Int64, Width::_64, sync, mem,
expect.reg, replace.reg, output.reg);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
+void MacroAssembler::compareExchange64(Synchronization sync,
const BaseIndex& mem, Register64 expect,
Register64 replace, Register64 output) {
CompareExchange(*this, nullptr, Scalar::Int64, Width::_64, sync, mem,
expect.reg, replace.reg, output.reg);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization sync, const Address& mem,
+ Register64 value, Register64 output) {
AtomicExchange(*this, nullptr, Scalar::Int64, Width::_64, sync, mem,
value.reg, output.reg);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
+void MacroAssembler::atomicExchange64(Synchronization sync,
const BaseIndex& mem, Register64 value,
Register64 output) {
AtomicExchange(*this, nullptr, Scalar::Int64, Width::_64, sync, mem,
value.reg, output.reg);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp<true>(*this, nullptr, Scalar::Int64, Width::_64, sync, op, mem,
value.reg, temp.reg, output.reg);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp<true>(*this, nullptr, Scalar::Int64, Width::_64, sync, op, mem,
value.reg, temp.reg, output.reg);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp) {
AtomicFetchOp<false>(*this, nullptr, Scalar::Int64, Width::_64, sync, op, mem,
value.reg, temp.reg, temp.reg);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp) {
AtomicFetchOp<false>(*this, nullptr, Scalar::Int64, Width::_64, sync, op, mem,
@@ -2669,15 +2665,13 @@ void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
oldval, newval, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, Width::_32, sync, mem, value, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, Width::_32, sync, mem, value, output);
@@ -2697,18 +2691,18 @@ void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
value, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const Address& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp,
+ Register output) {
AtomicFetchOp<true>(*this, nullptr, type, Width::_32, sync, op, mem, value,
temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
AtomicFetchOp<true>(*this, nullptr, type, Width::_32, sync, op, mem, value,
temp, output);
}
@@ -2804,7 +2798,7 @@ void MacroAssembler::wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access,
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register oldval, Register newval, Register temp,
AnyRegister output) {
if (arrayType == Scalar::Uint32) {
@@ -2816,15 +2810,14 @@ static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register oldval,
- Register newval, Register temp,
- AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register oldval, Register newval,
+ Register temp, AnyRegister output) {
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register temp,
AnyRegister output) {
@@ -2833,9 +2826,8 @@ void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
- Register value, Register temp,
- AnyRegister output) {
+ Synchronization sync, const T& mem, Register value,
+ Register temp, AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
@@ -2845,14 +2837,14 @@ static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register value,
- Register temp, AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register value, Register temp,
+ AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register value,
Register temp, AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
@@ -2860,9 +2852,9 @@ void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Register value, const T& mem, Register temp1,
- Register temp2, AnyRegister output) {
+ Synchronization sync, AtomicOp op, Register value,
+ const T& mem, Register temp1, Register temp2,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
@@ -2872,7 +2864,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -2880,7 +2872,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -2888,7 +2880,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register temp) {
AtomicFetchOp<false>(*this, nullptr, arrayType, Width::_32, sync, op, mem,
@@ -2896,7 +2888,7 @@ void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register temp) {
AtomicFetchOp<false>(*this, nullptr, arrayType, Width::_32, sync, op, mem,
diff --git a/js/src/jit/loong64/Assembler-loong64.cpp b/js/src/jit/loong64/Assembler-loong64.cpp
index 6c7a5f53da..07dac546c1 100644
--- a/js/src/jit/loong64/Assembler-loong64.cpp
+++ b/js/src/jit/loong64/Assembler-loong64.cpp
@@ -103,15 +103,15 @@ uint32_t js::jit::SA3(uint32_t value) {
}
Register js::jit::toRK(Instruction& i) {
- return Register::FromCode((i.encode() & RKMask) >> RKShift);
+ return Register::FromCode(((i.encode() >> RKShift) & RKMask));
}
Register js::jit::toRJ(Instruction& i) {
- return Register::FromCode((i.encode() & RJMask) >> RJShift);
+ return Register::FromCode(((i.encode() >> RJShift) & RJMask));
}
Register js::jit::toRD(Instruction& i) {
- return Register::FromCode((i.encode() & RDMask) >> RDShift);
+ return Register::FromCode(((i.encode() >> RDShift) & RDMask));
}
Register js::jit::toR(Instruction& i) {
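The toRK/toRJ/toRD change above is a real extraction fix rather than a rewrite for style: RKMask and friends are defined relative to bit 0 (see the header diff below), so masking the raw encoding before shifting kept only the low bits of the instruction word and then shifted them away. Shifting the field down first and masking afterwards yields the intended register number. A minimal illustration with a hypothetical 5-bit field at shift 10:

    // Hypothetical field at bits [14:10]: shift = 10, mask = (1 << 5) - 1.
    uint32_t insn  = 0x5C00;              // field value 0b10111 = 23
    uint32_t wrong = (insn & 0x1F) >> 10; // masks bits [4:0] first -> 0
    uint32_t right = (insn >> 10) & 0x1F; // shift, then mask -> 23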
diff --git a/js/src/jit/loong64/Assembler-loong64.h b/js/src/jit/loong64/Assembler-loong64.h
index 4e0b8d6b66..a385d71f5f 100644
--- a/js/src/jit/loong64/Assembler-loong64.h
+++ b/js/src/jit/loong64/Assembler-loong64.h
@@ -309,6 +309,7 @@ static const uint32_t Imm26Shift = 0;
static const uint32_t Imm26Bits = 26;
static const uint32_t CODEShift = 0;
static const uint32_t CODEBits = 15;
+static const uint32_t HINTBits = 5;
// LoongArch instruction field bit masks.
static const uint32_t RJMask = (1 << RJBits) - 1;
@@ -316,7 +317,9 @@ static const uint32_t RKMask = (1 << RKBits) - 1;
static const uint32_t RDMask = (1 << RDBits) - 1;
static const uint32_t SA2Mask = (1 << SA2Bits) - 1;
static const uint32_t SA3Mask = (1 << SA3Bits) - 1;
+static const uint32_t CDMask = (1 << CDBits) - 1;
static const uint32_t CONDMask = (1 << CONDBits) - 1;
+static const uint32_t HINTMask = (1 << HINTBits) - 1;
static const uint32_t LSBWMask = (1 << LSBWBits) - 1;
static const uint32_t LSBDMask = (1 << LSBDBits) - 1;
static const uint32_t MSBWMask = (1 << MSBWBits) - 1;
@@ -1611,7 +1614,7 @@ class InstReg : public Instruction {
InstReg(OpcodeField op, int32_t cond, FloatRegister fk, FloatRegister fj,
AssemblerLOONG64::FPConditionBit cd)
: Instruction(op | (cond & CONDMask) << CONDShift | FK(fk) | FJ(fj) |
- (cd & RDMask)) {
+ (cd & CDMask)) {
MOZ_ASSERT(is_uintN(cond, 5));
}
@@ -1700,7 +1703,7 @@ class InstImm : public Instruction {
}
InstImm(OpcodeField op, int32_t si12, Register rj, int32_t hint)
: Instruction(op | (si12 & Imm12Mask) << Imm12Shift | RJ(rj) |
- (hint & RDMask)) {
+ (hint & HINTMask)) {
MOZ_ASSERT(op == op_preld);
}
InstImm(OpcodeField op, int32_t msb, int32_t lsb, Register rj, Register rd,
@@ -1738,7 +1741,9 @@ class InstImm : public Instruction {
uint32_t extractRJ() {
return extractBitField(RJShift + RJBits - 1, RJShift);
}
- void setRJ(uint32_t rj) { data = (data & ~RJMask) | (rj << RJShift); }
+ void setRJ(uint32_t rj) {
+ data = (data & ~(RJMask << RJShift)) | (rj << RJShift);
+ }
uint32_t extractRD() {
return extractBitField(RDShift + RDBits - 1, RDShift);
}
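The header hunks are companion fixes for the same family of bugs: setRJ cleared data & ~RJMask, i.e. the low RJBits bits of the word rather than the RJ field at RJShift, and the preld and FP-condition-bit constructors masked their operands with RDMask instead of the newly added HINTMask and the field-appropriate CDMask. The corrected setRJ follows the usual replace-a-field-in-place pattern; a generic sketch with placeholder names:

    // Generic "replace field" pattern (constants are placeholders).
    constexpr uint32_t FieldShift = 5;
    constexpr uint32_t FieldBits  = 5;
    constexpr uint32_t FieldMask  = (1u << FieldBits) - 1;

    uint32_t setField(uint32_t word, uint32_t value) {
      return (word & ~(FieldMask << FieldShift))   // clear the field in place
           | ((value & FieldMask) << FieldShift);  // insert the new value
    }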
diff --git a/js/src/jit/loong64/CodeGenerator-loong64.cpp b/js/src/jit/loong64/CodeGenerator-loong64.cpp
index 4c4dfd18ff..76d3047680 100644
--- a/js/src/jit/loong64/CodeGenerator-loong64.cpp
+++ b/js/src/jit/loong64/CodeGenerator-loong64.cpp
@@ -1988,10 +1988,6 @@ void CodeGenerator::visitNotF(LNotF* ins) {
Assembler::DoubleEqualOrUnordered);
}
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
- masm.memoryBarrier(ins->type());
-}
-
void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
diff --git a/js/src/jit/loong64/MacroAssembler-loong64.cpp b/js/src/jit/loong64/MacroAssembler-loong64.cpp
index 528c120058..1c07f7f91a 100644
--- a/js/src/jit/loong64/MacroAssembler-loong64.cpp
+++ b/js/src/jit/loong64/MacroAssembler-loong64.cpp
@@ -3357,7 +3357,7 @@ void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
template <typename T>
static void CompareExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register oldval, Register newval,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -3463,7 +3463,7 @@ static void CompareExchange(MacroAssembler& masm,
template <typename T>
static void CompareExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 expect, Register64 replace,
Register64 output) {
MOZ_ASSERT(expect != output && replace != output);
@@ -3499,7 +3499,7 @@ static void CompareExchange64(MacroAssembler& masm,
template <typename T>
static void AtomicExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp,
Register output) {
@@ -3602,7 +3602,7 @@ static void AtomicExchange(MacroAssembler& masm,
template <typename T>
static void AtomicExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 value, Register64 output) {
MOZ_ASSERT(value != output);
ScratchRegisterScope scratch(masm);
@@ -3633,10 +3633,10 @@ static void AtomicExchange64(MacroAssembler& masm,
template <typename T>
static void AtomicFetchOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const T& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
ScratchRegisterScope scratch(masm);
SecondScratchRegisterScope scratch2(masm);
bool signExtend = Scalar::isSignedIntType(type);
@@ -3671,19 +3671,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.as_ll_w(output, scratch, 0);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add_w(scratch2, output, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub_w(scratch2, output, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(scratch2, output, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(scratch2, output, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(scratch2, output, value);
break;
default:
@@ -3718,19 +3718,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.as_srl_w(output, scratch2, offsetTemp);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add_w(valueTemp, output, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub_w(valueTemp, output, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(valueTemp, output, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(valueTemp, output, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(valueTemp, output, value);
break;
default:
@@ -3778,9 +3778,8 @@ static void AtomicFetchOp(MacroAssembler& masm,
template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, AtomicOp op,
- Register64 value, const T& mem, Register64 temp,
- Register64 output) {
+ Synchronization sync, AtomicOp op, Register64 value,
+ const T& mem, Register64 temp, Register64 output) {
MOZ_ASSERT(value != output);
MOZ_ASSERT(value != temp);
ScratchRegisterScope scratch(masm);
@@ -3801,19 +3800,19 @@ static void AtomicFetchOp64(MacroAssembler& masm,
masm.as_ll_d(output.reg, scratch, 0);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add_d(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub_d(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(temp.reg, output.reg, value.reg);
break;
default:
@@ -3826,8 +3825,7 @@ static void AtomicFetchOp64(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -3836,8 +3834,7 @@ void MacroAssembler::compareExchange(Scalar::Type type,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -3846,13 +3843,13 @@ void MacroAssembler::compareExchange(Scalar::Type type,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
- const Address& mem, Register64 expect,
- Register64 replace, Register64 output) {
+void MacroAssembler::compareExchange64(Synchronization sync, const Address& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
+void MacroAssembler::compareExchange64(Synchronization sync,
const BaseIndex& mem, Register64 expect,
Register64 replace, Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
@@ -3894,8 +3891,7 @@ void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -3903,8 +3899,7 @@ void MacroAssembler::atomicExchange(Scalar::Type type,
maskTemp, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -3912,13 +3907,12 @@ void MacroAssembler::atomicExchange(Scalar::Type type,
maskTemp, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization sync, const Address& mem,
+ Register64 value, Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
+void MacroAssembler::atomicExchange64(Synchronization sync,
const BaseIndex& mem, Register64 value,
Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
@@ -3940,43 +3934,43 @@ void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
valueTemp, offsetTemp, maskTemp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const Address& mem,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
@@ -4003,10 +3997,9 @@ void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicEffectOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const T& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp) {
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp) {
ScratchRegisterScope scratch(masm);
SecondScratchRegisterScope scratch2(masm);
unsigned nbytes = Scalar::byteSize(type);
@@ -4040,19 +4033,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
masm.as_ll_w(scratch2, scratch, 0);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add_w(scratch2, scratch2, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub_w(scratch2, scratch2, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(scratch2, scratch2, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(scratch2, scratch2, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(scratch2, scratch2, value);
break;
default:
@@ -4087,19 +4080,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
masm.as_srl_w(valueTemp, scratch2, offsetTemp);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_add_w(valueTemp, valueTemp, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sub_w(valueTemp, valueTemp, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(valueTemp, valueTemp, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(valueTemp, valueTemp, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(valueTemp, valueTemp, value);
break;
default:
@@ -4184,7 +4177,7 @@ void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register oldval, Register newval,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -4201,10 +4194,10 @@ static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
- Register value, Register valueTemp,
- Register offsetTemp, Register maskTemp,
- Register temp, AnyRegister output) {
+ Synchronization sync, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
maskTemp, temp);
@@ -4217,8 +4210,8 @@ static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Register value, const T& mem, Register valueTemp,
+ Synchronization sync, AtomicOp op, Register value,
+ const T& mem, Register valueTemp,
Register offsetTemp, Register maskTemp,
Register temp, AnyRegister output) {
if (arrayType == Scalar::Uint32) {
@@ -4232,17 +4225,17 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register oldval,
- Register newval, Register valueTemp,
- Register offsetTemp, Register maskTemp,
- Register temp, AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
offsetTemp, maskTemp, temp, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -4252,17 +4245,16 @@ void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register temp,
- AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
maskTemp, temp, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -4272,7 +4264,7 @@ void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -4282,7 +4274,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -4292,7 +4284,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp) {
@@ -4301,7 +4293,7 @@ void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp) {
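All of the loong64 and mips helpers in these files wrap the per-operation arithmetic selected by the switch on AtomicOp in the same load-linked/store-conditional retry loop (as_ll_w/as_ll followed by the matching store-conditional and a branch back on failure, not fully shown in these hunks). A portable analogue of that loop, using std::atomic's weak compare-exchange to play the role of the store-conditional that may fail and force a retry (illustrative only, not SpiderMonkey code):

    #include <atomic>
    #include <cstdint>

    // Fetch-add built as an LL/SC-style retry loop: keep re-reading and
    // re-trying until the conditional update succeeds, then return the
    // value observed before the update.
    uint32_t atomicFetchAdd(std::atomic<uint32_t>& cell, uint32_t v) {
      uint32_t old = cell.load(std::memory_order_relaxed);
      while (!cell.compare_exchange_weak(old, old + v,
                                         std::memory_order_seq_cst,
                                         std::memory_order_relaxed)) {
        // compare_exchange_weak reloaded `old` with the current value; retry.
      }
      return old;
    }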
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
index 424ddab061..f7f1d7a16d 100644
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -1401,10 +1401,6 @@ void CodeGenerator::visitNotF(LNotF* ins) {
Assembler::DoubleEqualOrUnordered);
}
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
- masm.memoryBarrier(ins->type());
-}
-
void CodeGeneratorMIPSShared::generateInvalidateEpilogue() {
// Ensure that there is enough space in the buffer for the OsiPoint
// patching to occur. Otherwise, we could overwrite the invalidation
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
index 052c76ba0f..284bbe0a12 100644
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -2262,7 +2262,7 @@ void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
template <typename T>
static void CompareExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register oldval, Register newval,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -2366,8 +2366,7 @@ static void CompareExchange(MacroAssembler& masm,
masm.bind(&end);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -2376,8 +2375,7 @@ void MacroAssembler::compareExchange(Scalar::Type type,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -2407,7 +2405,7 @@ void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp,
Register output) {
@@ -2508,8 +2506,7 @@ static void AtomicExchange(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -2517,8 +2514,7 @@ void MacroAssembler::atomicExchange(Scalar::Type type,
maskTemp, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -2545,10 +2541,10 @@ void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicFetchOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const T& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
bool signExtend = Scalar::isSignedIntType(type);
unsigned nbytes = Scalar::byteSize(type);
@@ -2580,19 +2576,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.as_ll(output, SecondScratchReg, 0);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_addu(ScratchRegister, output, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_subu(ScratchRegister, output, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(ScratchRegister, output, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(ScratchRegister, output, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(ScratchRegister, output, value);
break;
default:
@@ -2630,19 +2626,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.as_srlv(output, ScratchRegister, offsetTemp);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_addu(valueTemp, output, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_subu(valueTemp, output, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(valueTemp, output, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(valueTemp, output, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(valueTemp, output, value);
break;
default:
@@ -2688,20 +2684,20 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.memoryBarrierAfter(sync);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const Address& mem,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp, output);
}
@@ -2727,10 +2723,9 @@ void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
template <typename T>
static void AtomicEffectOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const T& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp) {
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp) {
unsigned nbytes = Scalar::byteSize(type);
switch (nbytes) {
@@ -2761,19 +2756,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
masm.as_ll(ScratchRegister, SecondScratchReg, 0);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_addu(ScratchRegister, ScratchRegister, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_subu(ScratchRegister, ScratchRegister, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(ScratchRegister, ScratchRegister, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(ScratchRegister, ScratchRegister, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(ScratchRegister, ScratchRegister, value);
break;
default:
@@ -2811,19 +2806,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
masm.as_srlv(valueTemp, ScratchRegister, offsetTemp);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_addu(valueTemp, valueTemp, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_subu(valueTemp, valueTemp, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(valueTemp, valueTemp, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(valueTemp, valueTemp, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(valueTemp, valueTemp, value);
break;
default:
@@ -2875,7 +2870,7 @@ void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register oldval, Register newval,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2891,17 +2886,17 @@ static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register oldval,
- Register newval, Register valueTemp,
- Register offsetTemp, Register maskTemp,
- Register temp, AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
offsetTemp, maskTemp, temp, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -2912,10 +2907,10 @@ void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
- Register value, Register valueTemp,
- Register offsetTemp, Register maskTemp,
- Register temp, AnyRegister output) {
+ Synchronization sync, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
maskTemp, temp);
@@ -2927,17 +2922,16 @@ static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register temp,
- AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
maskTemp, temp, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2948,8 +2942,8 @@ void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Register value, const T& mem, Register valueTemp,
+ Synchronization sync, AtomicOp op, Register value,
+ const T& mem, Register valueTemp,
Register offsetTemp, Register maskTemp,
Register temp, AnyRegister output) {
if (arrayType == Scalar::Uint32) {
@@ -2963,7 +2957,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2973,7 +2967,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2983,7 +2977,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp) {
@@ -2992,7 +2986,7 @@ void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp) {
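A detail every back end shares: the *JS helpers special-case Scalar::Uint32 by computing into an integer temp and then calling convertUInt32ToDouble into the FP output register. The reason is that a uint32 result can exceed INT32_MAX and therefore cannot be handed back as an int32-tagged JS value, while a double represents every uint32 exactly. A one-line C++ restatement of that conversion (illustrative only):

    #include <cstdint>

    // Why Uint32 elements go through the FP path: the raw result may exceed
    // INT32_MAX, and double holds every uint32_t value exactly.
    double uint32ElementToNumber(uint32_t raw) {
      return static_cast<double>(raw);
    }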
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.cpp b/js/src/jit/mips32/MacroAssembler-mips32.cpp
index f4b3d557a5..747db53799 100644
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -2745,27 +2745,27 @@ static void AtomicFetchOp64(MacroAssembler& masm,
masm.load64(Address(SecondScratchReg, 0), output);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_addu(temp.low, output.low, value.low);
masm.as_sltu(temp.high, temp.low, output.low);
masm.as_addu(temp.high, temp.high, output.high);
masm.as_addu(temp.high, temp.high, value.high);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_sltu(temp.high, output.low, value.low);
masm.as_subu(temp.high, output.high, temp.high);
masm.as_subu(temp.low, output.low, value.low);
masm.as_subu(temp.high, temp.high, value.high);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(temp.low, output.low, value.low);
masm.as_and(temp.high, output.high, value.high);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(temp.low, output.low, value.low);
masm.as_or(temp.high, output.high, value.high);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(temp.low, output.low, value.low);
masm.as_xor(temp.high, output.high, value.high);
break;
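The mips32 hunk performs the 64-bit operations on 32-bit register pairs, and the AtomicOp::Add / AtomicOp::Sub arms derive the carry and borrow with sltu: after lo = a.lo + b.lo, the comparison lo < a.lo is exactly the carry out of the low word (and a.lo < b.lo is the borrow for the subtraction). The same computation in portable C++, with variable names of my own choosing:

    #include <cstdint>

    // 64-bit add from 32-bit halves, mirroring the sltu-based carry in the
    // AtomicOp::Add case above.
    void add64(uint32_t aLo, uint32_t aHi, uint32_t bLo, uint32_t bHi,
               uint32_t* outLo, uint32_t* outHi) {
      uint32_t lo = aLo + bLo;      // as_addu temp.low, output.low, value.low
      uint32_t carry = (lo < aLo);  // as_sltu: 1 iff the low-word add wrapped
      *outLo = lo;
      *outHi = aHi + bHi + carry;   // the two remaining as_addu instructions
    }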
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.cpp b/js/src/jit/mips64/MacroAssembler-mips64.cpp
index cbf66ccac4..1530bfcbc8 100644
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -2611,7 +2611,7 @@ void MacroAssemblerMIPS64Compat::wasmStoreI64Impl(
template <typename T>
static void CompareExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 expect, Register64 replace,
Register64 output) {
MOZ_ASSERT(expect != output && replace != output);
@@ -2658,13 +2658,13 @@ void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
- const Address& mem, Register64 expect,
- Register64 replace, Register64 output) {
+void MacroAssembler::compareExchange64(Synchronization sync, const Address& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
+void MacroAssembler::compareExchange64(Synchronization sync,
const BaseIndex& mem, Register64 expect,
Register64 replace, Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
@@ -2673,7 +2673,7 @@ void MacroAssembler::compareExchange64(const Synchronization& sync,
template <typename T>
static void AtomicExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 value, Register64 output) {
MOZ_ASSERT(value != output);
masm.computeEffectiveAddress(mem, SecondScratchReg);
@@ -2717,13 +2717,12 @@ void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
WasmAtomicExchange64(*this, access, mem, src, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization sync, const Address& mem,
+ Register64 value, Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
+void MacroAssembler::atomicExchange64(Synchronization sync,
const BaseIndex& mem, Register64 value,
Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
@@ -2732,9 +2731,8 @@ void MacroAssembler::atomicExchange64(const Synchronization& sync,
template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, AtomicOp op,
- Register64 value, const T& mem, Register64 temp,
- Register64 output) {
+ Synchronization sync, AtomicOp op, Register64 value,
+ const T& mem, Register64 temp, Register64 output) {
MOZ_ASSERT(value != output);
MOZ_ASSERT(value != temp);
masm.computeEffectiveAddress(mem, SecondScratchReg);
@@ -2751,19 +2749,19 @@ static void AtomicFetchOp64(MacroAssembler& masm,
masm.as_lld(output.reg, SecondScratchReg, 0);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.as_daddu(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.as_dsubu(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.as_and(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.as_or(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.as_xor(temp.reg, output.reg, value.reg);
break;
default:
@@ -2790,25 +2788,25 @@ void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
diff --git a/js/src/jit/riscv64/CodeGenerator-riscv64.cpp b/js/src/jit/riscv64/CodeGenerator-riscv64.cpp
index 1c890799ed..3cfb91a036 100644
--- a/js/src/jit/riscv64/CodeGenerator-riscv64.cpp
+++ b/js/src/jit/riscv64/CodeGenerator-riscv64.cpp
@@ -1986,10 +1986,6 @@ void CodeGenerator::visitNotF(LNotF* ins) {
masm.ma_compareF32(dest, Assembler::DoubleEqualOrUnordered, in, fpscratch);
}
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
- masm.memoryBarrier(ins->type());
-}
-
void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
diff --git a/js/src/jit/riscv64/MacroAssembler-riscv64.cpp b/js/src/jit/riscv64/MacroAssembler-riscv64.cpp
index 93ccf1cc27..dc5721ea9f 100644
--- a/js/src/jit/riscv64/MacroAssembler-riscv64.cpp
+++ b/js/src/jit/riscv64/MacroAssembler-riscv64.cpp
@@ -2243,7 +2243,7 @@ uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
template <typename T>
static void AtomicExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp,
Register output) {
@@ -2352,7 +2352,7 @@ static void AtomicExchange(MacroAssembler& masm,
template <typename T>
static void AtomicExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 value, Register64 output) {
MOZ_ASSERT(value != output);
UseScratchRegisterScope temps(&masm);
@@ -2382,9 +2382,8 @@ static void AtomicExchange64(MacroAssembler& masm,
template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, AtomicOp op,
- Register64 value, const T& mem, Register64 temp,
- Register64 output) {
+ Synchronization sync, AtomicOp op, Register64 value,
+ const T& mem, Register64 temp, Register64 output) {
MOZ_ASSERT(value != output);
MOZ_ASSERT(value != temp);
UseScratchRegisterScope temps(&masm);
@@ -2405,19 +2404,19 @@ static void AtomicFetchOp64(MacroAssembler& masm,
masm.lr_d(true, true, output.reg, SecondScratchReg);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.add(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.sub(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.and_(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.or_(temp.reg, output.reg, value.reg);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.xor_(temp.reg, output.reg, value.reg);
break;
default:
@@ -2433,10 +2432,9 @@ static void AtomicFetchOp64(MacroAssembler& masm,
template <typename T>
static void AtomicEffectOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const T& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp) {
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp) {
ScratchRegisterScope scratch(masm);
UseScratchRegisterScope temps(&masm);
Register scratch2 = temps.Acquire();
@@ -2471,19 +2469,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
masm.lr_w(true, true, scratch2, scratch);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.addw(scratch2, scratch2, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.subw(scratch2, scratch2, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.and_(scratch2, scratch2, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.or_(scratch2, scratch2, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.xor_(scratch2, scratch2, value);
break;
default:
@@ -2519,19 +2517,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
masm.srlw(valueTemp, scratch2, offsetTemp);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.addw(valueTemp, valueTemp, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.subw(valueTemp, valueTemp, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.and_(valueTemp, valueTemp, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.or_(valueTemp, valueTemp, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.xor_(valueTemp, valueTemp, value);
break;
default:
@@ -2563,10 +2561,10 @@ static void AtomicEffectOp(MacroAssembler& masm,
template <typename T>
static void AtomicFetchOp(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
- AtomicOp op, const T& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+ Scalar::Type type, Synchronization sync, AtomicOp op,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
ScratchRegisterScope scratch(masm);
UseScratchRegisterScope temps(&masm);
Register scratch2 = temps.Acquire();
@@ -2602,19 +2600,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.lr_w(true, true, output, scratch);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.addw(scratch2, output, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.subw(scratch2, output, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.and_(scratch2, output, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.or_(scratch2, output, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.xor_(scratch2, output, value);
break;
default:
@@ -2650,19 +2648,19 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.srlw(output, scratch2, offsetTemp);
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.addw(valueTemp, output, value);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.subw(valueTemp, output, value);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.and_(valueTemp, output, value);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.or_(valueTemp, output, value);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.xor_(valueTemp, output, value);
break;
default:
@@ -2715,7 +2713,7 @@ static void AtomicFetchOp(MacroAssembler& masm,
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register oldval, Register newval,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2732,10 +2730,10 @@ static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
- Register value, Register valueTemp,
- Register offsetTemp, Register maskTemp,
- Register temp, AnyRegister output) {
+ Synchronization sync, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
maskTemp, temp);
@@ -2748,8 +2746,8 @@ static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Register value, const T& mem, Register valueTemp,
+ Synchronization sync, AtomicOp op, Register value,
+ const T& mem, Register valueTemp,
Register offsetTemp, Register maskTemp,
Register temp, AnyRegister output) {
if (arrayType == Scalar::Uint32) {
@@ -2763,7 +2761,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp) {
@@ -2772,37 +2770,35 @@ void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp) {
AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization sync, const Address& mem,
+ Register64 value, Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization& sync,
+void MacroAssembler::atomicExchange64(Synchronization sync,
const BaseIndex& mem, Register64 value,
Register64 output) {
AtomicExchange64(*this, nullptr, sync, mem, value, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register value,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register temp,
- AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
maskTemp, temp, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2811,8 +2807,7 @@ void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
maskTemp, temp, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -2820,8 +2815,7 @@ void MacroAssembler::atomicExchange(Scalar::Type type,
maskTemp, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register value,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -2830,7 +2824,7 @@ void MacroAssembler::atomicExchange(Scalar::Type type,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2840,7 +2834,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp,
@@ -2849,20 +2843,20 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
maskTemp, temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const Address& mem,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type type,
- const Synchronization& sync, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register valueTemp, Register offsetTemp,
- Register maskTemp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
offsetTemp, maskTemp, output);
}
@@ -3058,7 +3052,7 @@ void MacroAssembler::comment(const char* msg) { Assembler::comment(msg); }
template <typename T>
static void CompareExchange64(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register64 expect, Register64 replace,
Register64 output) {
MOZ_ASSERT(expect != output && replace != output);
@@ -3092,30 +3086,30 @@ static void CompareExchange64(MacroAssembler& masm,
masm.bind(&exit);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
- const Address& mem, Register64 expect,
- Register64 replace, Register64 output) {
+void MacroAssembler::compareExchange64(Synchronization sync, const Address& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
}
-void MacroAssembler::compareExchange64(const Synchronization& sync,
+void MacroAssembler::compareExchange64(Synchronization sync,
const BaseIndex& mem, Register64 expect,
Register64 replace, Register64 output) {
CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register oldval,
- Register newval, Register valueTemp,
- Register offsetTemp, Register maskTemp,
- Register temp, AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
offsetTemp, maskTemp, temp, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -3947,25 +3941,25 @@ void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp) {
AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
@@ -4034,7 +4028,7 @@ void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
template <typename T>
static void CompareExchange(MacroAssembler& masm,
const wasm::MemoryAccessDesc* access,
- Scalar::Type type, const Synchronization& sync,
+ Scalar::Type type, Synchronization sync,
const T& mem, Register oldval, Register newval,
Register valueTemp, Register offsetTemp,
Register maskTemp, Register output) {
@@ -4140,8 +4134,7 @@ static void CompareExchange(MacroAssembler& masm,
masm.bind(&end);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const Address& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
@@ -4150,8 +4143,7 @@ void MacroAssembler::compareExchange(Scalar::Type type,
offsetTemp, maskTemp, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type,
- const Synchronization& sync,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp,
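Note: the riscv64 hunks above (and the x64/x86 hunks below) replace the old unscoped AtomicFetchAddOp-style constants with the scoped AtomicOp enumeration. A minimal standalone sketch of the same dispatch shape, using simplified stand-in types rather than SpiderMonkey's own definitions (no atomicity is implied here):

#include <cstdint>

// Stand-in for jit::AtomicOp; the enumerator names match the ones used above.
enum class AtomicOp { Add, Sub, And, Or, Xor };

// Apply |op| to |*mem| and return the previous value, mirroring the
// fetch-op switch shape in the hunks above (plain memory, not atomic).
uint32_t FetchOp(AtomicOp op, uint32_t* mem, uint32_t value) {
  uint32_t old = *mem;
  switch (op) {
    case AtomicOp::Add: *mem = old + value; break;
    case AtomicOp::Sub: *mem = old - value; break;
    case AtomicOp::And: *mem = old & value; break;
    case AtomicOp::Or:  *mem = old | value; break;
    case AtomicOp::Xor: *mem = old ^ value; break;
  }
  return old;
}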
diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h
index 8abf68504b..6cdf76981b 100644
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -552,7 +552,7 @@ class MemoryAccessDesc {
explicit MemoryAccessDesc(
uint32_t memoryIndex, Scalar::Type type, uint32_t align, uint64_t offset,
BytecodeOffset trapOffset, mozilla::DebugOnly<bool> hugeMemory,
- const jit::Synchronization& sync = jit::Synchronization::None())
+ jit::Synchronization sync = jit::Synchronization::None())
: memoryIndex_(memoryIndex),
offset64_(offset),
align_(align),
@@ -592,7 +592,7 @@ class MemoryAccessDesc {
uint32_t align() const { return align_; }
Scalar::Type type() const { return type_; }
unsigned byteSize() const { return Scalar::byteSize(type()); }
- const jit::Synchronization& sync() const { return sync_; }
+ jit::Synchronization sync() const { return sync_; }
BytecodeOffset trapOffset() const { return trapOffset_; }
wasm::SimdOp widenSimdOp() const {
MOZ_ASSERT(isWidenSimd128Load());
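Note: the MemoryAccessDesc hunk above switches Synchronization from const-reference to by-value passing, and the MacroAssembler signatures in this patch follow suit. A hedged, self-contained sketch of the idea for a small trivially copyable type (the struct below is a stand-in, not the real jit::Synchronization):

#include <cstdint>

// Stand-in for a small, trivially copyable descriptor: cheap to copy, so
// passing it by value avoids an indirection and lets it live in registers.
struct Sync {
  uint8_t barrierBefore = 0;
  uint8_t barrierAfter = 0;
};

// By value: the callee receives its own copy rather than a pointer to it.
bool IsFullBarrier(Sync sync) {
  return sync.barrierBefore != 0 && sync.barrierAfter != 0;
}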
diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h
index 1a838f78c3..d8b5693d85 100644
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -2185,25 +2185,28 @@ class LLoadDataViewElement : public LInstructionHelper<1, 3, 1 + INT64_PIECES> {
};
class LLoadTypedArrayElementHoleBigInt
- : public LInstructionHelper<BOX_PIECES, 2, 1 + INT64_PIECES> {
+ : public LInstructionHelper<BOX_PIECES, 3, 1 + INT64_PIECES> {
public:
LIR_HEADER(LoadTypedArrayElementHoleBigInt)
- LLoadTypedArrayElementHoleBigInt(const LAllocation& object,
+ LLoadTypedArrayElementHoleBigInt(const LAllocation& elements,
const LAllocation& index,
+ const LAllocation& length,
const LDefinition& temp,
const LInt64Definition& temp64)
: LInstructionHelper(classOpcode) {
- setOperand(0, object);
+ setOperand(0, elements);
setOperand(1, index);
+ setOperand(2, length);
setTemp(0, temp);
setInt64Temp(1, temp64);
}
const MLoadTypedArrayElementHole* mir() const {
return mir_->toLoadTypedArrayElementHole();
}
- const LAllocation* object() { return getOperand(0); }
+ const LAllocation* elements() { return getOperand(0); }
const LAllocation* index() { return getOperand(1); }
+ const LAllocation* length() { return getOperand(2); }
const LDefinition* temp() { return getTemp(0); }
const LInt64Definition temp64() { return getInt64Temp(1); }
};
diff --git a/js/src/jit/wasm32/CodeGenerator-wasm32.cpp b/js/src/jit/wasm32/CodeGenerator-wasm32.cpp
index 923297a0c1..4c27637203 100644
--- a/js/src/jit/wasm32/CodeGenerator-wasm32.cpp
+++ b/js/src/jit/wasm32/CodeGenerator-wasm32.cpp
@@ -175,7 +175,6 @@ void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
}
void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) { MOZ_CRASH(); }
void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) { MOZ_CRASH(); }
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) { MOZ_CRASH(); }
void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH(); }
void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
MOZ_CRASH();
diff --git a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp
index 9e5319842b..86d4bca0e0 100644
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -432,7 +432,7 @@ void CodeGenerator::visitAtomicTypedArrayElementBinop64(
// Add and Sub don't need |fetchTemp| and can save a `mov` when the value and
// output register are equal to each other.
- if (atomicOp == AtomicFetchAddOp || atomicOp == AtomicFetchSubOp) {
+ if (atomicOp == AtomicOp::Add || atomicOp == AtomicOp::Sub) {
fetchTemp = Register64::Invalid();
fetchOut = temp1;
createTemp = temp2.reg;
diff --git a/js/src/jit/x64/Lowering-x64.cpp b/js/src/jit/x64/Lowering-x64.cpp
index 55d83e3f05..9f9b1713c2 100644
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -208,8 +208,8 @@ void LIRGenerator::visitAtomicTypedArrayElementBinop(
//
// For AND/OR/XOR we need to use a CMPXCHG loop with rax as a temp register.
- bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
- ins->operation() == AtomicFetchSubOp);
+ bool bitOp = !(ins->operation() == AtomicOp::Add ||
+ ins->operation() == AtomicOp::Sub);
LInt64Definition temp1 = tempInt64();
LInt64Definition temp2;
@@ -427,8 +427,8 @@ void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
// *mem does not have the expected value, so reloading it at the
// top of the loop would be redundant.
- bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
- ins->operation() == AtomicFetchSubOp);
+ bool bitOp =
+ !(ins->operation() == AtomicOp::Add || ins->operation() == AtomicOp::Sub);
bool reuseInput = false;
LAllocation value;
diff --git a/js/src/jit/x64/MacroAssembler-x64.cpp b/js/src/jit/x64/MacroAssembler-x64.cpp
index 5106e7e382..ebc8c91eaa 100644
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -1459,7 +1459,7 @@ static void AtomicFetchOp64(MacroAssembler& masm,
Register output) {
// NOTE: the generated code must match the assembly code in gen_fetchop in
// GenerateAtomicOperations.py
- if (op == AtomicFetchAddOp) {
+ if (op == AtomicOp::Add) {
if (value != output) {
masm.movq(value, output);
}
@@ -1468,7 +1468,7 @@ static void AtomicFetchOp64(MacroAssembler& masm,
FaultingCodeOffset(masm.currentOffset()));
}
masm.lock_xaddq(output, Operand(mem));
- } else if (op == AtomicFetchSubOp) {
+ } else if (op == AtomicOp::Sub) {
if (value != output) {
masm.movq(value, output);
}
@@ -1492,13 +1492,13 @@ static void AtomicFetchOp64(MacroAssembler& masm,
masm.bind(&again);
masm.movq(rax, temp);
switch (op) {
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.andq(value, temp);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.orq(value, temp);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.xorq(value, temp);
break;
default:
@@ -1532,19 +1532,19 @@ static void AtomicEffectOp64(MacroAssembler& masm,
FaultingCodeOffset(masm.currentOffset()));
}
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.lock_addq(value, Operand(mem));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.lock_subq(value, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.lock_andq(value, Operand(mem));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.lock_orq(value, Operand(mem));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.lock_xorq(value, Operand(mem));
break;
default:
@@ -1558,8 +1558,8 @@ void MacroAssembler::wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access,
AtomicEffectOp64(*this, &access, op, value.reg, mem);
}
-void MacroAssembler::compareExchange64(const Synchronization&,
- const Address& mem, Register64 expected,
+void MacroAssembler::compareExchange64(Synchronization, const Address& mem,
+ Register64 expected,
Register64 replacement,
Register64 output) {
// NOTE: the generated code must match the assembly code in gen_cmpxchg in
@@ -1571,8 +1571,7 @@ void MacroAssembler::compareExchange64(const Synchronization&,
lock_cmpxchgq(replacement.reg, Operand(mem));
}
-void MacroAssembler::compareExchange64(const Synchronization&,
- const BaseIndex& mem,
+void MacroAssembler::compareExchange64(Synchronization, const BaseIndex& mem,
Register64 expected,
Register64 replacement,
Register64 output) {
@@ -1583,9 +1582,8 @@ void MacroAssembler::compareExchange64(const Synchronization&,
lock_cmpxchgq(replacement.reg, Operand(mem));
}
-void MacroAssembler::atomicExchange64(const Synchronization&,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization, const Address& mem,
+ Register64 value, Register64 output) {
// NOTE: the generated code must match the assembly code in gen_exchange in
// GenerateAtomicOperations.py
if (value != output) {
@@ -1594,33 +1592,32 @@ void MacroAssembler::atomicExchange64(const Synchronization&,
xchgq(output.reg, Operand(mem));
}
-void MacroAssembler::atomicExchange64(const Synchronization&,
- const BaseIndex& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization, const BaseIndex& mem,
+ Register64 value, Register64 output) {
if (value != output) {
movq(value.reg, output.reg);
}
xchgq(output.reg, Operand(mem));
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, op, value.reg, mem, temp.reg, output.reg);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, op, value.reg, mem, temp.reg, output.reg);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const Address& mem) {
AtomicEffectOp64(*this, nullptr, op, value.reg, mem);
}
-void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
Register64 value, const BaseIndex& mem) {
AtomicEffectOp64(*this, nullptr, op, value.reg, mem);
}
diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
index 434a54669b..692e884f06 100644
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -2078,12 +2078,6 @@ void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
}
}
-void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
- if (ins->type() & MembarStoreLoad) {
- masm.storeLoadFence();
- }
-}
-
void CodeGeneratorX86Shared::visitOutOfLineWasmTruncateCheck(
OutOfLineWasmTruncateCheck* ool) {
FloatRegister input = ool->input();
diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.cpp b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
index bd5986298d..6d90f2f96b 100644
--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -732,8 +732,8 @@ void LIRGeneratorX86Shared::lowerAtomicTypedArrayElementBinop(
// There are optimization opportunities:
// - better register allocation in the x86 8-bit case, Bug #1077036.
- bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
- ins->operation() == AtomicFetchSubOp);
+ bool bitOp =
+ !(ins->operation() == AtomicOp::Add || ins->operation() == AtomicOp::Sub);
bool fixedOutput = true;
bool reuseInput = false;
LDefinition tempDef1 = LDefinition::BogusTemp();
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
index 9848086e7f..8ce3f68224 100644
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
@@ -1246,7 +1246,8 @@ template FaultingCodeOffset MacroAssembler::storeFloat32(FloatRegister src,
void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
if (barrier & MembarStoreLoad) {
- storeLoadFence();
+ // This implementation follows Linux.
+ masm.mfence();
}
}
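Note: the barrier hunk above emits mfence directly for StoreLoad ordering instead of calling the storeLoadFence() helper that a later hunk removes from MacroAssembler-x86-shared.h. As a rough portable analogy only (not the JIT's code path), a sequentially consistent fence is what standard C++ offers for the same StoreLoad requirement, and compilers typically lower it to mfence or a lock-prefixed instruction on x86-64:

#include <atomic>

// Rough analogy: keep an earlier store from being reordered past a later
// load. On x86-64 this fence commonly compiles to mfence (or a locked op).
void StoreThenLoad(std::atomic<int>& a, std::atomic<int>& b, int& out) {
  a.store(1, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // StoreLoad barrier
  out = b.load(std::memory_order_relaxed);
}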
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
index e474f83530..1520321260 100644
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
@@ -1143,13 +1143,13 @@ static void CompareExchange(MacroAssembler& masm,
ExtendTo32(masm, type, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type, const Synchronization&,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization,
const Address& mem, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, mem, oldval, newval, output);
}
-void MacroAssembler::compareExchange(Scalar::Type type, const Synchronization&,
+void MacroAssembler::compareExchange(Scalar::Type type, Synchronization,
const BaseIndex& mem, Register oldval,
Register newval, Register output) {
CompareExchange(*this, nullptr, type, mem, oldval, newval, output);
@@ -1201,13 +1201,13 @@ static void AtomicExchange(MacroAssembler& masm,
ExtendTo32(masm, type, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization&,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization,
const Address& mem, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, mem, value, output);
}
-void MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization&,
+void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization,
const BaseIndex& mem, Register value,
Register output) {
AtomicExchange(*this, nullptr, type, mem, value, output);
@@ -1227,7 +1227,7 @@ void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
static void SetupValue(MacroAssembler& masm, AtomicOp op, Imm32 src,
Register output) {
- if (op == AtomicFetchSubOp) {
+ if (op == AtomicOp::Sub) {
masm.movl(Imm32(-src.value), output);
} else {
masm.movl(src, output);
@@ -1239,7 +1239,7 @@ static void SetupValue(MacroAssembler& masm, AtomicOp op, Register src,
if (src != output) {
masm.movl(src, output);
}
- if (op == AtomicFetchSubOp) {
+ if (op == AtomicOp::Sub) {
masm.negl(output);
}
}
@@ -1269,15 +1269,14 @@ static void AtomicFetchOp(MacroAssembler& masm,
masm.j(MacroAssembler::NonZero, &again); \
} while (0)
- MOZ_ASSERT_IF(op == AtomicFetchAddOp || op == AtomicFetchSubOp,
- temp == InvalidReg);
+ MOZ_ASSERT_IF(op == AtomicOp::Add || op == AtomicOp::Sub, temp == InvalidReg);
switch (Scalar::byteSize(arrayType)) {
case 1:
CheckBytereg(output);
switch (op) {
- case AtomicFetchAddOp:
- case AtomicFetchSubOp:
+ case AtomicOp::Add:
+ case AtomicOp::Sub:
CheckBytereg(value); // But not for the bitwise ops
SetupValue(masm, op, value, output);
if (access) {
@@ -1286,17 +1285,17 @@ static void AtomicFetchOp(MacroAssembler& masm,
}
masm.lock_xaddb(output, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
CheckBytereg(temp);
ATOMIC_BITOP_BODY(movb, wasm::TrapMachineInsn::Load8, andl,
lock_cmpxchgb);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
CheckBytereg(temp);
ATOMIC_BITOP_BODY(movb, wasm::TrapMachineInsn::Load8, orl,
lock_cmpxchgb);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
CheckBytereg(temp);
ATOMIC_BITOP_BODY(movb, wasm::TrapMachineInsn::Load8, xorl,
lock_cmpxchgb);
@@ -1307,8 +1306,8 @@ static void AtomicFetchOp(MacroAssembler& masm,
break;
case 2:
switch (op) {
- case AtomicFetchAddOp:
- case AtomicFetchSubOp:
+ case AtomicOp::Add:
+ case AtomicOp::Sub:
SetupValue(masm, op, value, output);
if (access) {
masm.append(*access, wasm::TrapMachineInsn::Atomic,
@@ -1316,15 +1315,15 @@ static void AtomicFetchOp(MacroAssembler& masm,
}
masm.lock_xaddw(output, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
ATOMIC_BITOP_BODY(movw, wasm::TrapMachineInsn::Load16, andl,
lock_cmpxchgw);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
ATOMIC_BITOP_BODY(movw, wasm::TrapMachineInsn::Load16, orl,
lock_cmpxchgw);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
ATOMIC_BITOP_BODY(movw, wasm::TrapMachineInsn::Load16, xorl,
lock_cmpxchgw);
break;
@@ -1334,8 +1333,8 @@ static void AtomicFetchOp(MacroAssembler& masm,
break;
case 4:
switch (op) {
- case AtomicFetchAddOp:
- case AtomicFetchSubOp:
+ case AtomicOp::Add:
+ case AtomicOp::Sub:
SetupValue(masm, op, value, output);
if (access) {
masm.append(*access, wasm::TrapMachineInsn::Atomic,
@@ -1343,15 +1342,15 @@ static void AtomicFetchOp(MacroAssembler& masm,
}
masm.lock_xaddl(output, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
ATOMIC_BITOP_BODY(movl, wasm::TrapMachineInsn::Load32, andl,
lock_cmpxchgl);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
ATOMIC_BITOP_BODY(movl, wasm::TrapMachineInsn::Load32, orl,
lock_cmpxchgl);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
ATOMIC_BITOP_BODY(movl, wasm::TrapMachineInsn::Load32, xorl,
lock_cmpxchgl);
break;
@@ -1367,30 +1366,29 @@ static void AtomicFetchOp(MacroAssembler& masm,
#undef ATOMIC_BITOP_BODY
}
-void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Register value, const Address& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp,
+ Register output) {
AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Imm32 value, const BaseIndex& mem,
- Register temp, Register output) {
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Imm32 value,
+ const BaseIndex& mem, Register temp,
+ Register output) {
AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Imm32 value, const Address& mem,
+void MacroAssembler::atomicFetchOp(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Imm32 value, const Address& mem,
Register temp, Register output) {
AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}
@@ -1436,19 +1434,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
switch (Scalar::byteSize(arrayType)) {
case 1:
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.lock_addb(value, Operand(mem));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.lock_subb(value, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.lock_andb(value, Operand(mem));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.lock_orb(value, Operand(mem));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.lock_xorb(value, Operand(mem));
break;
default:
@@ -1457,19 +1455,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
break;
case 2:
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.lock_addw(value, Operand(mem));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.lock_subw(value, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.lock_andw(value, Operand(mem));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.lock_orw(value, Operand(mem));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.lock_xorw(value, Operand(mem));
break;
default:
@@ -1478,19 +1476,19 @@ static void AtomicEffectOp(MacroAssembler& masm,
break;
case 4:
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
masm.lock_addl(value, Operand(mem));
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
masm.lock_subl(value, Operand(mem));
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
masm.lock_andl(value, Operand(mem));
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
masm.lock_orl(value, Operand(mem));
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
masm.lock_xorl(value, Operand(mem));
break;
default:
@@ -1535,7 +1533,7 @@ void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
+ Synchronization sync, const T& mem,
Register oldval, Register newval, Register temp,
AnyRegister output) {
if (arrayType == Scalar::Uint32) {
@@ -1547,15 +1545,14 @@ static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register oldval,
- Register newval, Register temp,
- AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register oldval, Register newval,
+ Register temp, AnyRegister output) {
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register oldval,
Register newval, Register temp,
AnyRegister output) {
@@ -1564,9 +1561,8 @@ void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, const T& mem,
- Register value, Register temp,
- AnyRegister output) {
+ Synchronization sync, const T& mem, Register value,
+ Register temp, AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
@@ -1576,14 +1572,14 @@ static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
- const Address& mem, Register value,
- Register temp, AnyRegister output) {
+ Synchronization sync, const Address& mem,
+ Register value, Register temp,
+ AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
- const Synchronization& sync,
+ Synchronization sync,
const BaseIndex& mem, Register value,
Register temp, AnyRegister output) {
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
@@ -1591,9 +1587,9 @@ void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Register value, const T& mem, Register temp1,
- Register temp2, AnyRegister output) {
+ Synchronization sync, AtomicOp op, Register value,
+ const T& mem, Register temp1, Register temp2,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
@@ -1603,7 +1599,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const Address& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -1611,39 +1607,36 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Register value, const BaseIndex& mem,
Register temp1, Register temp2,
AnyRegister output) {
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}
-void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Register value, const BaseIndex& mem,
- Register temp) {
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register temp) {
MOZ_ASSERT(temp == InvalidReg);
AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
}
-void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Register value, const Address& mem,
- Register temp) {
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Register value,
+ const Address& mem, Register temp) {
MOZ_ASSERT(temp == InvalidReg);
AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
}
-void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization&, AtomicOp op,
- Imm32 value, const Address& mem,
- Register temp) {
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, Synchronization,
+ AtomicOp op, Imm32 value,
+ const Address& mem, Register temp) {
MOZ_ASSERT(temp == InvalidReg);
AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
}
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Imm32 value, const BaseIndex& mem,
Register temp) {
MOZ_ASSERT(temp == InvalidReg);
@@ -1652,9 +1645,9 @@ void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
template <typename T>
static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
- Imm32 value, const T& mem, Register temp1,
- Register temp2, AnyRegister output) {
+ Synchronization sync, AtomicOp op, Imm32 value,
+ const T& mem, Register temp1, Register temp2,
+ AnyRegister output) {
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
@@ -1664,7 +1657,7 @@ static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Imm32 value, const Address& mem,
Register temp1, Register temp2,
AnyRegister output) {
@@ -1672,7 +1665,7 @@ void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
}
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
- const Synchronization& sync, AtomicOp op,
+ Synchronization sync, AtomicOp op,
Imm32 value, const BaseIndex& mem,
Register temp1, Register temp2,
AnyRegister output) {
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
index dd1ae53537..21af90e90d 100644
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
@@ -161,15 +161,6 @@ class MacroAssemblerX86Shared : public Assembler {
void atomic_inc32(const Operand& addr) { lock_incl(addr); }
void atomic_dec32(const Operand& addr) { lock_decl(addr); }
- void storeLoadFence() {
- // This implementation follows Linux.
- if (HasSSE2()) {
- masm.mfence();
- } else {
- lock_addl(Imm32(0), Operand(Address(esp, 0)));
- }
- }
-
void branch16(Condition cond, Register lhs, Register rhs, Label* label) {
cmpw(rhs, lhs);
j(cond, label);
diff --git a/js/src/jit/x86/Lowering-x86.cpp b/js/src/jit/x86/Lowering-x86.cpp
index 0577a0976e..e958e998c2 100644
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -635,8 +635,8 @@ void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
// - better 8-bit register allocation and instruction selection, Bug
// #1077036.
- bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
- ins->operation() == AtomicFetchSubOp);
+ bool bitOp =
+ !(ins->operation() == AtomicOp::Add || ins->operation() == AtomicOp::Sub);
LDefinition tempDef = LDefinition::BogusTemp();
LAllocation value;
diff --git a/js/src/jit/x86/MacroAssembler-x86.cpp b/js/src/jit/x86/MacroAssembler-x86.cpp
index a68d7b03b7..232303b429 100644
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -1423,19 +1423,19 @@ static void AtomicFetchOp64(MacroAssembler& masm,
} while (0)
switch (op) {
- case AtomicFetchAddOp:
+ case AtomicOp::Add:
ATOMIC_OP_BODY(add64FromMemory);
break;
- case AtomicFetchSubOp:
+ case AtomicOp::Sub:
ATOMIC_OP_BODY(sub64FromMemory);
break;
- case AtomicFetchAndOp:
+ case AtomicOp::And:
ATOMIC_OP_BODY(and64FromMemory);
break;
- case AtomicFetchOrOp:
+ case AtomicOp::Or:
ATOMIC_OP_BODY(or64FromMemory);
break;
- case AtomicFetchXorOp:
+ case AtomicOp::Xor:
ATOMIC_OP_BODY(xor64FromMemory);
break;
default:
@@ -1626,60 +1626,57 @@ void MacroAssembler::wasmTruncateFloat32ToUInt64(
// ========================================================================
// Primitive atomic operations.
-void MacroAssembler::atomicLoad64(const Synchronization&, const Address& mem,
+void MacroAssembler::atomicLoad64(Synchronization, const Address& mem,
Register64 temp, Register64 output) {
AtomicLoad64(*this, nullptr, mem, temp, output);
}
-void MacroAssembler::atomicLoad64(const Synchronization&, const BaseIndex& mem,
+void MacroAssembler::atomicLoad64(Synchronization, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicLoad64(*this, nullptr, mem, temp, output);
}
-void MacroAssembler::atomicStore64(const Synchronization&, const Address& mem,
+void MacroAssembler::atomicStore64(Synchronization, const Address& mem,
Register64 value, Register64 temp) {
AtomicExchange64(*this, nullptr, mem, value, temp);
}
-void MacroAssembler::atomicStore64(const Synchronization&, const BaseIndex& mem,
+void MacroAssembler::atomicStore64(Synchronization, const BaseIndex& mem,
Register64 value, Register64 temp) {
AtomicExchange64(*this, nullptr, mem, value, temp);
}
-void MacroAssembler::compareExchange64(const Synchronization&,
- const Address& mem, Register64 expected,
+void MacroAssembler::compareExchange64(Synchronization, const Address& mem,
+ Register64 expected,
Register64 replacement,
Register64 output) {
CompareExchange64(*this, nullptr, mem, expected, replacement, output);
}
-void MacroAssembler::compareExchange64(const Synchronization&,
- const BaseIndex& mem,
+void MacroAssembler::compareExchange64(Synchronization, const BaseIndex& mem,
Register64 expected,
Register64 replacement,
Register64 output) {
CompareExchange64(*this, nullptr, mem, expected, replacement, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization&,
- const Address& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization, const Address& mem,
+ Register64 value, Register64 output) {
AtomicExchange64(*this, nullptr, mem, value, output);
}
-void MacroAssembler::atomicExchange64(const Synchronization&,
- const BaseIndex& mem, Register64 value,
- Register64 output) {
+void MacroAssembler::atomicExchange64(Synchronization, const BaseIndex& mem,
+ Register64 value, Register64 output) {
AtomicExchange64(*this, nullptr, mem, value, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization, AtomicOp op,
const Address& value, const Address& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, op, value, mem, temp, output);
}
-void MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op,
+void MacroAssembler::atomicFetchOp64(Synchronization, AtomicOp op,
const Address& value, const BaseIndex& mem,
Register64 temp, Register64 output) {
AtomicFetchOp64(*this, nullptr, op, value, mem, temp, output);
diff --git a/js/src/jsapi-tests/testCompileScript.cpp b/js/src/jsapi-tests/testCompileScript.cpp
index 45f5e8301a..ff7432b14b 100644
--- a/js/src/jsapi-tests/testCompileScript.cpp
+++ b/js/src/jsapi-tests/testCompileScript.cpp
@@ -53,9 +53,8 @@ bool testCompile() {
CHECK(buf16.init(cx, src_16.data(), src_16.length(),
JS::SourceOwnership::Borrowed));
- JS::CompilationStorage compileStorage;
RefPtr<JS::Stencil> stencil =
- CompileGlobalScriptToStencil(fc, options, buf16, compileStorage);
+ CompileGlobalScriptToStencil(fc, options, buf16);
CHECK(stencil);
CHECK(stencil->scriptExtra.size() == 1);
CHECK(stencil->scriptExtra[0].extent.sourceStart == 0);
@@ -63,7 +62,6 @@ bool testCompile() {
CHECK(stencil->scriptData.size() == 1);
CHECK(stencil->scriptData[0].hasSharedData()); // has generated bytecode
CHECK(stencil->scriptData[0].gcThingsLength == 1);
- CHECK(compileStorage.hasInput());
}
{ // 8-bit characters
@@ -71,9 +69,8 @@ bool testCompile() {
CHECK(
buf8.init(cx, src.data(), src.length(), JS::SourceOwnership::Borrowed));
- JS::CompilationStorage compileStorage;
RefPtr<JS::Stencil> stencil =
- CompileGlobalScriptToStencil(fc, options, buf8, compileStorage);
+ CompileGlobalScriptToStencil(fc, options, buf8);
CHECK(stencil);
CHECK(stencil->scriptExtra.size() == 1);
CHECK(stencil->scriptExtra[0].extent.sourceStart == 0);
@@ -81,7 +78,6 @@ bool testCompile() {
CHECK(stencil->scriptData.size() == 1);
CHECK(stencil->scriptData[0].hasSharedData()); // has generated bytecode
CHECK(stencil->scriptData[0].gcThingsLength == 1);
- CHECK(compileStorage.hasInput());
}
{ // propagates failures
@@ -90,9 +86,8 @@ bool testCompile() {
CHECK(srcBuf.init(cx, badSrc.data(), badSrc.length(),
JS::SourceOwnership::Borrowed));
- JS::CompilationStorage compileStorage;
RefPtr<JS::Stencil> stencil =
- CompileGlobalScriptToStencil(fc, options, srcBuf, compileStorage);
+ CompileGlobalScriptToStencil(fc, options, srcBuf);
CHECK(!stencil);
CHECK(fc->maybeError().isSome());
const js::CompileError& error = fc->maybeError().ref();
@@ -120,9 +115,8 @@ bool testNonsyntacticCompile() {
auto destroyFc =
mozilla::MakeScopeExit([fc] { JS::DestroyFrontendContext(fc); });
- JS::CompilationStorage compileStorage;
RefPtr<JS::Stencil> stencil =
- CompileGlobalScriptToStencil(fc, options, srcBuf, compileStorage);
+ CompileGlobalScriptToStencil(fc, options, srcBuf);
CHECK(stencil);
JS::InstantiateOptions instantiateOptions(options);
@@ -153,9 +147,8 @@ bool testCompileModule() {
CHECK(buf16.init(cx, src_16.data(), src_16.length(),
JS::SourceOwnership::Borrowed));
- JS::CompilationStorage compileStorage;
RefPtr<JS::Stencil> stencil =
- CompileModuleScriptToStencil(fc, options, buf16, compileStorage);
+ CompileModuleScriptToStencil(fc, options, buf16);
CHECK(stencil);
CHECK(stencil->isModule());
CHECK(stencil->scriptExtra.size() == 1);
@@ -164,7 +157,6 @@ bool testCompileModule() {
CHECK(stencil->scriptData.size() == 1);
CHECK(stencil->scriptData[0].hasSharedData()); // has generated bytecode
CHECK(stencil->scriptData[0].gcThingsLength == 1);
- CHECK(compileStorage.hasInput());
}
{ // 8-bit characters
@@ -172,9 +164,8 @@ bool testCompileModule() {
CHECK(
buf8.init(cx, src.data(), src.length(), JS::SourceOwnership::Borrowed));
- JS::CompilationStorage compileStorage;
RefPtr<JS::Stencil> stencil =
- CompileModuleScriptToStencil(fc, options, buf8, compileStorage);
+ CompileModuleScriptToStencil(fc, options, buf8);
CHECK(stencil);
CHECK(stencil->scriptExtra.size() == 1);
CHECK(stencil->scriptExtra[0].extent.sourceStart == 0);
@@ -182,7 +173,6 @@ bool testCompileModule() {
CHECK(stencil->scriptData.size() == 1);
CHECK(stencil->scriptData[0].hasSharedData()); // has generated bytecode
CHECK(stencil->scriptData[0].gcThingsLength == 1);
- CHECK(compileStorage.hasInput());
}
{ // propagates failures
@@ -191,9 +181,8 @@ bool testCompileModule() {
CHECK(srcBuf.init(cx, badSrc.data(), badSrc.length(),
JS::SourceOwnership::Borrowed));
- JS::CompilationStorage compileStorage;
RefPtr<JS::Stencil> stencil =
- CompileModuleScriptToStencil(fc, options, srcBuf, compileStorage);
+ CompileModuleScriptToStencil(fc, options, srcBuf);
CHECK(!stencil);
CHECK(fc->maybeError().isSome());
const js::CompileError& error = fc->maybeError().ref();
@@ -220,15 +209,12 @@ bool testPrepareForInstantiate() {
auto destroyFc =
mozilla::MakeScopeExit([fc] { JS::DestroyFrontendContext(fc); });
- JS::CompilationStorage compileStorage;
RefPtr<JS::Stencil> stencil =
- CompileGlobalScriptToStencil(fc, options, buf16, compileStorage);
+ CompileGlobalScriptToStencil(fc, options, buf16);
CHECK(stencil);
CHECK(stencil->scriptData.size() == 2);
CHECK(stencil->scopeData.size() == 1); // function f
CHECK(stencil->parserAtomData.size() == 1); // 'field'
- CHECK(compileStorage.hasInput());
- CHECK(compileStorage.getInput().atomCache.empty());
JS::InstantiationStorage storage;
CHECK(JS::PrepareForInstantiate(fc, *stencil, storage));
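Note: across these jsapi-tests hunks the JS::CompilationStorage out-parameter is gone; CompileGlobalScriptToStencil and CompileModuleScriptToStencil now take only the frontend context, options, and source buffer. A condensed form of the updated call sequence, lifted from the tests above (setup, headers, and error handling as in those tests):

// fc: JS::FrontendContext*, options: JS::CompileOptions, source: const char*,
// all prepared as in the tests above.
JS::SourceText<mozilla::Utf8Unit> srcBuf;
if (!srcBuf.init(fc, source, strlen(source), JS::SourceOwnership::Borrowed)) {
  return false;
}
RefPtr<JS::Stencil> stencil =
    JS::CompileGlobalScriptToStencil(fc, options, srcBuf);
if (!stencil) {
  // On failure the error is recorded on the FrontendContext
  // (fc->maybeError()), as the "propagates failures" test checks.
  return false;
}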
diff --git a/js/src/jsapi-tests/testFrontendCompileStencil.cpp b/js/src/jsapi-tests/testFrontendCompileStencil.cpp
index ba29b5aea2..39e46ba0b1 100644
--- a/js/src/jsapi-tests/testFrontendCompileStencil.cpp
+++ b/js/src/jsapi-tests/testFrontendCompileStencil.cpp
@@ -41,11 +41,9 @@ BEGIN_FRONTEND_TEST(testFrontendContextCompileGlobalScriptToStencil) {
JS::SourceText<mozilla::Utf8Unit> srcBuf;
CHECK(
srcBuf.init(fc, source, strlen(source), JS::SourceOwnership::Borrowed));
- JS::CompilationStorage compileStorage;
RefPtr<JS::Stencil> stencil =
- JS::CompileGlobalScriptToStencil(fc, options, srcBuf, compileStorage);
+ JS::CompileGlobalScriptToStencil(fc, options, srcBuf);
CHECK(stencil);
- CHECK(compileStorage.hasInput());
}
{
@@ -54,11 +52,9 @@ BEGIN_FRONTEND_TEST(testFrontendContextCompileGlobalScriptToStencil) {
JS::SourceText<char16_t> srcBuf;
CHECK(srcBuf.init(fc, source, std::char_traits<char16_t>::length(source),
JS::SourceOwnership::Borrowed));
- JS::CompilationStorage compileStorage;
RefPtr<JS::Stencil> stencil =
- JS::CompileGlobalScriptToStencil(fc, options, srcBuf, compileStorage);
+ JS::CompileGlobalScriptToStencil(fc, options, srcBuf);
CHECK(stencil);
- CHECK(compileStorage.hasInput());
}
JS::DestroyFrontendContext(fc);
diff --git a/js/src/jsapi-tests/testFrontendErrors.cpp b/js/src/jsapi-tests/testFrontendErrors.cpp
index c9d3ee1eb5..47acf559d7 100644
--- a/js/src/jsapi-tests/testFrontendErrors.cpp
+++ b/js/src/jsapi-tests/testFrontendErrors.cpp
@@ -51,9 +51,8 @@ BEGIN_TEST(testFrontendErrors_error) {
JS::SourceText<mozilla::Utf8Unit> srcBuf;
CHECK(
srcBuf.init(fc, source, strlen(source), JS::SourceOwnership::Borrowed));
- JS::CompilationStorage compileStorage;
RefPtr<JS::Stencil> stencil =
- JS::CompileGlobalScriptToStencil(fc, options, srcBuf, compileStorage);
+ JS::CompileGlobalScriptToStencil(fc, options, srcBuf);
CHECK(!stencil);
}
@@ -133,9 +132,8 @@ BEGIN_TEST(testFrontendErrors_warning) {
JS::SourceText<mozilla::Utf8Unit> srcBuf;
CHECK(
srcBuf.init(fc, source, strlen(source), JS::SourceOwnership::Borrowed));
- JS::CompilationStorage compileStorage;
RefPtr<JS::Stencil> stencil =
- JS::CompileGlobalScriptToStencil(fc, options, srcBuf, compileStorage);
+ JS::CompileGlobalScriptToStencil(fc, options, srcBuf);
CHECK(stencil);
}
diff --git a/js/src/jsapi-tests/testStencil.cpp b/js/src/jsapi-tests/testStencil.cpp
index ab89222ebd..7d6b78d7d8 100644
--- a/js/src/jsapi-tests/testStencil.cpp
+++ b/js/src/jsapi-tests/testStencil.cpp
@@ -85,6 +85,8 @@ bool basic_test(const CharT* chars) {
CHECK(srcBuf.init(cx, chars, length, JS::SourceOwnership::Borrowed));
JS::CompileOptions options(cx);
+ options.setFile("testStencil_Module");
+
RefPtr<JS::Stencil> stencil =
JS::CompileModuleScriptToStencil(cx, options, srcBuf);
CHECK(stencil);
diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp
index 356a5687a2..77c3ae5f09 100644
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -1289,7 +1289,7 @@ JS_PUBLIC_API void JS_RemoveExtraGCRootsTracer(JSContext* cx,
}
JS_PUBLIC_API JS::GCReason JS::WantEagerMinorGC(JSRuntime* rt) {
- if (rt->gc.nursery().shouldCollect()) {
+ if (rt->gc.nursery().wantEagerCollection()) {
return JS::GCReason::EAGER_NURSERY_COLLECTION;
}
return JS::GCReason::NO_REASON;
@@ -1302,7 +1302,7 @@ JS_PUBLIC_API JS::GCReason JS::WantEagerMajorGC(JSRuntime* rt) {
JS_PUBLIC_API void JS::MaybeRunNurseryCollection(JSRuntime* rt,
JS::GCReason reason) {
gc::GCRuntime& gc = rt->gc;
- if (gc.nursery().shouldCollect()) {
+ if (gc.nursery().wantEagerCollection()) {
gc.minorGC(reason);
}
}
@@ -4966,7 +4966,25 @@ JS_PUBLIC_API bool JS::CopyAsyncStack(JSContext* cx,
return true;
}
-JS_PUBLIC_API Zone* JS::GetObjectZone(JSObject* obj) { return obj->zone(); }
+JS_PUBLIC_API Zone* JS::GetObjectZone(JSObject* obj) {
+ Zone* zone = obj->zone();
+
+ // Check zone pointer is valid and not a poison value. See bug 1878421.
+ MOZ_RELEASE_ASSERT(zone->runtimeFromMainThread());
+
+ return zone;
+}
+
+JS_PUBLIC_API Zone* JS::GetTenuredGCThingZone(GCCellPtr thing) {
+ js::gc::Cell* cell = thing.asCell();
+ MOZ_ASSERT(!js::gc::IsInsideNursery(cell));
+ Zone* zone = js::gc::detail::GetTenuredGCThingZone(cell);
+
+ // Check zone pointer is valid and not a poison value. See bug 1878421.
+ MOZ_RELEASE_ASSERT(zone->runtimeFromMainThread());
+
+ return zone;
+}
JS_PUBLIC_API Zone* JS::GetNurseryCellZone(gc::Cell* cell) {
return cell->nurseryZone();
diff --git a/js/src/jsdate.cpp b/js/src/jsdate.cpp
index 84921c4d20..7040213f56 100644
--- a/js/src/jsdate.cpp
+++ b/js/src/jsdate.cpp
@@ -56,7 +56,6 @@
#include "vm/JSObject.h"
#include "vm/StringType.h"
#include "vm/Time.h"
-#include "vm/Warnings.h"
#include "vm/Compartment-inl.h" // For js::UnwrapAndTypeCheckThis
#include "vm/GeckoProfiler-inl.h"
@@ -1071,23 +1070,6 @@ int FixupNonFullYear(int year) {
}
template <typename CharT>
-bool IsPrefixOfKeyword(const CharT* s, size_t len, const char* keyword) {
- while (len > 0 && *keyword) {
- MOZ_ASSERT(IsAsciiAlpha(*s));
- MOZ_ASSERT(IsAsciiLowercaseAlpha(*keyword));
-
- if (unicode::ToLowerCase(static_cast<Latin1Char>(*s)) != *keyword) {
- break;
- }
-
- s++, keyword++;
- len--;
- }
-
- return len == 0;
-}
-
-template <typename CharT>
bool MatchesKeyword(const CharT* s, size_t len, const char* keyword) {
while (len > 0) {
MOZ_ASSERT(IsAsciiAlpha(*s));
@@ -1329,10 +1311,6 @@ struct CharsAndAction {
int action;
};
-static constexpr const char* const days_of_week[] = {
- "monday", "tuesday", "wednesday", "thursday",
- "friday", "saturday", "sunday"};
-
static constexpr CharsAndAction keywords[] = {
// clang-format off
// AM/PM
@@ -1365,8 +1343,7 @@ constexpr size_t MinKeywordLength(const CharsAndAction (&keywords)[N]) {
template <typename CharT>
static bool ParseDate(DateTimeInfo::ForceUTC forceUTC, const CharT* s,
- size_t length, ClippedTime* result,
- bool* countLateWeekday) {
+ size_t length, ClippedTime* result) {
if (length == 0) {
return false;
}
@@ -1434,8 +1411,6 @@ static bool ParseDate(DateTimeInfo::ForceUTC forceUTC, const CharT* s,
bool negativeYear = false;
// Includes "GMT", "UTC", "UT", and "Z" timezone keywords
bool seenGmtAbbr = false;
- // For telemetry purposes
- bool seenLateWeekday = false;
// Try parsing the leading dashed-date.
//
@@ -1667,21 +1642,6 @@ static bool ParseDate(DateTimeInfo::ForceUTC forceUTC, const CharT* s,
return false;
}
- // Completely ignore days of the week, and don't derive any semantics
- // from them.
- bool isLateWeekday = false;
- for (const char* weekday : days_of_week) {
- if (IsPrefixOfKeyword(s + start, index - start, weekday)) {
- isLateWeekday = true;
- seenLateWeekday = true;
- break;
- }
- }
- if (isLateWeekday) {
- prevc = 0;
- continue;
- }
-
// Record a month if it is a month name. Note that some numbers are
// initially treated as months; if a numeric field has already been
// interpreted as a month, store that value to the actually appropriate
@@ -1882,48 +1842,16 @@ static bool ParseDate(DateTimeInfo::ForceUTC forceUTC, const CharT* s,
date += tzOffset * msPerMinute;
}
- // Setting this down here so that it only counts the telemetry in
- // the case of a successful parse.
- if (seenLateWeekday) {
- *countLateWeekday = true;
- }
-
*result = TimeClip(date);
return true;
}
static bool ParseDate(DateTimeInfo::ForceUTC forceUTC, JSLinearString* s,
- ClippedTime* result, JSContext* cx) {
- bool countLateWeekday = false;
- bool success;
-
- {
- AutoCheckCannotGC nogc;
- success = s->hasLatin1Chars()
- ? ParseDate(forceUTC, s->latin1Chars(nogc), s->length(),
- result, &countLateWeekday)
- : ParseDate(forceUTC, s->twoByteChars(nogc), s->length(),
- result, &countLateWeekday);
- }
-
- // We are running telemetry to see if support for day of week after
- // mday can be dropped. It is being done here to keep
- // JSRuntime::setUseCounter out of AutoCheckCannotGC's scope.
- if (countLateWeekday) {
- cx->runtime()->setUseCounter(cx->global(), JSUseCounter::LATE_WEEKDAY);
-
- if (!cx->realm()->warnedAboutDateLateWeekday) {
- if (!WarnNumberASCII(cx, JSMSG_DEPRECATED_LATE_WEEKDAY)) {
- // Proceed as if nothing happened if warning fails
- if (cx->isExceptionPending()) {
- cx->clearPendingException();
- }
- }
- cx->realm()->warnedAboutDateLateWeekday = true;
- }
- }
-
- return success;
+ ClippedTime* result) {
+ AutoCheckCannotGC nogc;
+ return s->hasLatin1Chars()
+ ? ParseDate(forceUTC, s->latin1Chars(nogc), s->length(), result)
+ : ParseDate(forceUTC, s->twoByteChars(nogc), s->length(), result);
}
static bool date_parse(JSContext* cx, unsigned argc, Value* vp) {
@@ -1945,7 +1873,7 @@ static bool date_parse(JSContext* cx, unsigned argc, Value* vp) {
}
ClippedTime result;
- if (!ParseDate(ForceUTC(cx->realm()), linearStr, &result, cx)) {
+ if (!ParseDate(ForceUTC(cx->realm()), linearStr, &result)) {
args.rval().setNaN();
return true;
}
@@ -3789,7 +3717,7 @@ static bool DateOneArgument(JSContext* cx, const CallArgs& args) {
return false;
}
- if (!ParseDate(ForceUTC(cx->realm()), linearStr, &t, cx)) {
+ if (!ParseDate(ForceUTC(cx->realm()), linearStr, &t)) {
t = ClippedTime::invalid();
}
} else {
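
The date-parsing changes above drop the special case that skipped a weekday name appearing after the day-of-month (a "late weekday"), along with its telemetry counter and deprecation warning. A minimal behavioural sketch, based on the updated tests later in this patch rather than on running this exact build:

    // Illustrative only; mirrors js/src/tests/non262/Date/dashed-date.js below.
    Date.parse("24-Apr-2023 12:34:56");        // still parses
    Date.parse("24-Apr-2023 (Mon) 12:34:56");  // parenthesised weekday still ignored
    Date.parse("24-Apr-2023 Mon 12:34:56");    // now NaN: late weekday is rejected
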
diff --git a/js/src/moz.build b/js/src/moz.build
index 422bb9d40f..3ec9c6f489 100644
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -641,9 +641,10 @@ if CONFIG["JS_HAS_CTYPES"]:
CXXFLAGS += CONFIG["MOZ_FFI_CFLAGS"]
if CONFIG["JS_HAS_INTL_API"]:
- DIRS += [
- "../../intl/bidi",
- "../../intl/components",
- ]
+ if CONFIG["JS_STANDALONE"]:
+ DIRS += [
+ "../../intl/bidi",
+ "../../intl/components",
+ ]
USE_LIBS += ["intlcomponents"]
diff --git a/js/src/old-configure.in b/js/src/old-configure.in
index c7aa44b3f8..6861d2bbac 100644
--- a/js/src/old-configure.in
+++ b/js/src/old-configure.in
@@ -30,10 +30,6 @@ dnl Set the minimum version of toolkit libs used by mozilla
dnl ========================================================
W32API_VERSION=3.14
-dnl Set various checks
-dnl ========================================================
-MISSING_X=
-
dnl Initialize the Pthread test variables early so they can be
dnl overridden by each platform.
dnl ========================================================
@@ -111,7 +107,7 @@ case "$target" in
AC_DEFINE(_CRT_NONSTDC_NO_WARNINGS)
MSVC_C_RUNTIME_DLL=vcruntime140.dll
- if test -n "$IS_VS2019_OR_MORE" -a "$TARGET_CPU" != "x86"; then
+ if test "$TARGET_CPU" != "x86"; then
MSVC_C_RUNTIME_1_DLL=vcruntime140_1.dll
fi
MSVC_CXX_RUNTIME_DLL=msvcp140.dll
@@ -250,30 +246,12 @@ dnl ========================================================
dnl System overrides of the defaults for target
dnl ========================================================
+MOZ_OPTIMIZE_LDFLAGS="${_COMPILATION_OPTIMIZE_LDFLAGS} ${MOZ_OPTIMIZE_FLAGS}"
+
case "$target" in
*-darwin*)
MOZ_OPTIMIZE_FLAGS="-O3"
DSO_LDOPTS=''
-
- dnl DTrace and -dead_strip don't interact well. See bug 403132.
- dnl ===================================================================
- if test "x$enable_dtrace" = "xyes"; then
- echo "Skipping -dead_strip because DTrace is enabled. See bug 403132."
- else
- dnl check for the presence of the -dead_strip linker flag
- AC_MSG_CHECKING([for -dead_strip option to ld])
- _SAVE_LDFLAGS=$LDFLAGS
- LDFLAGS="$LDFLAGS -Wl,-dead_strip"
- AC_TRY_LINK(,[return 0;],_HAVE_DEAD_STRIP=1,_HAVE_DEAD_STRIP=)
- if test -n "$_HAVE_DEAD_STRIP" ; then
- AC_MSG_RESULT([yes])
- MOZ_OPTIMIZE_LDFLAGS="-Wl,-dead_strip"
- else
- AC_MSG_RESULT([no])
- fi
-
- LDFLAGS=$_SAVE_LDFLAGS
- fi
MOZ_FIX_LINK_PATHS=
;;
@@ -458,12 +436,10 @@ fi
if test -z "$SKIP_COMPILER_CHECKS"; then
dnl Checks for typedefs, structures, and compiler characteristics.
dnl ========================================================
-AC_C_CONST
AC_TYPE_MODE_T
AC_TYPE_OFF_T
AC_TYPE_PID_T
-AC_TYPE_SIZE_T
-AC_LANG_CPLUSPLUS
+
AC_LANG_C
AC_MSG_CHECKING(for ssize_t)
AC_CACHE_VAL(ac_cv_type_ssize_t,
@@ -619,75 +595,6 @@ then
fi
-dnl Checks for library functions.
-dnl ========================================================
-
-dnl check for clock_gettime(), the CLOCK_MONOTONIC clock
-AC_CACHE_CHECK(for clock_gettime(CLOCK_MONOTONIC),
- ac_cv_clock_monotonic,
- [for libs in "" -lrt; do
- _SAVE_LIBS="$LIBS"
- LIBS="$LIBS $libs"
-dnl clock_gettime is available on OSX since 10.12, so depending on MACOSX_DEPLOYMENT_TARGET,
-dnl we should or not be able to use it. To detect if we can, we need to make the
-dnl availability attribute strict, so that compilation fails when the target is < 10.12.
- AC_TRY_LINK([#define availability(os, ...) availability(os, strict, __VA_ARGS__)
- #include <time.h>],
- [ struct timespec ts;
- clock_gettime(CLOCK_MONOTONIC, &ts); ],
- ac_cv_clock_monotonic=$libs
- LIBS="$_SAVE_LIBS"
- break,
- ac_cv_clock_monotonic=no)
- LIBS="$_SAVE_LIBS"
- done])
-if test "$ac_cv_clock_monotonic" != "no"; then
- HAVE_CLOCK_MONOTONIC=1
- REALTIME_LIBS=$ac_cv_clock_monotonic
- AC_DEFINE(HAVE_CLOCK_MONOTONIC)
- AC_SUBST(HAVE_CLOCK_MONOTONIC)
- AC_SUBST_LIST(REALTIME_LIBS)
-fi
-
-dnl Checks for math functions.
-dnl ========================================================
-AC_CHECK_LIB(m, sin)
-
-AC_CACHE_CHECK(
- [for res_ninit()],
- ac_cv_func_res_ninit,
- [if test "$OS_TARGET" = NetBSD -o "$OS_TARGET" = OpenBSD; then
- dnl no need for res_ninit() on NetBSD and OpenBSD
- ac_cv_func_res_ninit=no
- else
- AC_TRY_LINK([
- #ifdef linux
- #define _BSD_SOURCE 1
- #endif
- #include <sys/types.h>
- #include <netinet/in.h>
- #include <arpa/nameser.h>
- #include <resolv.h>
- ],
- [int foo = res_ninit(&_res);],
- [ac_cv_func_res_ninit=yes],
- [ac_cv_func_res_ninit=no])
- fi
- ])
-
-if test "$ac_cv_func_res_ninit" = "yes"; then
- AC_DEFINE(HAVE_RES_NINIT)
-dnl must add the link line we do something as foolish as this... dougt
-dnl else
-dnl AC_CHECK_LIB(bind, res_ninit, AC_DEFINE(HAVE_RES_NINIT),
-dnl AC_CHECK_LIB(resolv, res_ninit, AC_DEFINE(HAVE_RES_NINIT)))
-fi
-
-AM_LANGINFO_CODESET
-
-AC_LANG_C
-
-dnl ===================================================================
dnl ========================================================
dnl Put your C++ language/feature checks below
dnl ========================================================
@@ -719,35 +626,6 @@ if test "$GNU_CC"; then
TARGET_COMPILER_ABI="${TARGET_COMPILER_ABI-${ARM_ABI_PREFIX}gcc3}"
fi
-# try harder, when checking for __thread support, see bug 521750 comment #33 and below
-# We pass MOZ_OPTIMIZE_LDFLAGS to the linker because if dead_strip is
-# enabled, the linker in xcode 4.1 will crash. Without this it would crash when
-# linking XUL.
-_SAVE_LDFLAGS=$LDFLAGS
-LDFLAGS="$LDFLAGS $DSO_LDOPTS $MOZ_OPTIMIZE_LDFLAGS"
-AC_CACHE_CHECK(for __thread keyword for TLS variables,
- ac_cv_thread_keyword,
- [AC_TRY_LINK([__thread bool tlsIsMainThread = false;],
- [return tlsIsMainThread;],
- ac_cv_thread_keyword=yes,
- ac_cv_thread_keyword=no)])
-LDFLAGS=$_SAVE_LDFLAGS
-if test "$ac_cv_thread_keyword" = yes; then
- # mips builds fail with TLS variables because of a binutils bug.
- # See bug 528687
- case "${target}" in
- mips*-*)
- :
- ;;
- *-android*|*-linuxandroid*)
- :
- ;;
- *)
- AC_DEFINE(HAVE_THREAD_TLS_KEYWORD)
- ;;
- esac
-fi
-
dnl End of C++ language/feature checks
AC_LANG_C
@@ -834,9 +712,6 @@ else
fi
if test "$MOZ_MEMORY"; then
- dnl The generic feature tests that determine how to compute ncpus are long and
- dnl complicated. Therefore, simply define special cpp variables for the
- dnl platforms we have special knowledge of.
case "${target}" in
*-mingw*)
export MOZ_NO_DEBUG_RTL=1
diff --git a/js/src/shell/ModuleLoader.cpp b/js/src/shell/ModuleLoader.cpp
index aca109cbcd..9bf1c015ac 100644
--- a/js/src/shell/ModuleLoader.cpp
+++ b/js/src/shell/ModuleLoader.cpp
@@ -122,7 +122,7 @@ bool ModuleLoader::ImportModuleDynamically(JSContext* cx,
bool ModuleLoader::loadRootModule(JSContext* cx, HandleString path) {
RootedValue rval(cx);
- if (!loadAndExecute(cx, path, &rval)) {
+ if (!loadAndExecute(cx, path, nullptr, &rval)) {
return false;
}
@@ -156,8 +156,9 @@ void ModuleLoader::clearModules(JSContext* cx) {
}
bool ModuleLoader::loadAndExecute(JSContext* cx, HandleString path,
+ HandleObject moduleRequestArg,
MutableHandleValue rval) {
- RootedObject module(cx, loadAndParse(cx, path));
+ RootedObject module(cx, loadAndParse(cx, path, moduleRequestArg));
if (!module) {
return false;
}
@@ -178,7 +179,7 @@ JSObject* ModuleLoader::resolveImportedModule(
return nullptr;
}
- return loadAndParse(cx, path);
+ return loadAndParse(cx, path, moduleRequest);
}
bool ModuleLoader::populateImportMeta(JSContext* cx,
@@ -328,7 +329,7 @@ bool ModuleLoader::tryDynamicImport(JSContext* cx,
return false;
}
- return loadAndExecute(cx, path, rval);
+ return loadAndExecute(cx, path, moduleRequest, rval);
}
JSLinearString* ModuleLoader::resolve(JSContext* cx,
@@ -418,7 +419,8 @@ JSLinearString* ModuleLoader::resolve(JSContext* cx, HandleString specifier,
return normalizePath(cx, linear);
}
-JSObject* ModuleLoader::loadAndParse(JSContext* cx, HandleString pathArg) {
+JSObject* ModuleLoader::loadAndParse(JSContext* cx, HandleString pathArg,
+ JS::HandleObject moduleRequestArg) {
Rooted<JSLinearString*> path(cx, JS_EnsureLinearString(cx, pathArg));
if (!path) {
return nullptr;
@@ -461,17 +463,40 @@ JSObject* ModuleLoader::loadAndParse(JSContext* cx, HandleString pathArg) {
return nullptr;
}
- module = JS::CompileModule(cx, options, srcBuf);
- if (!module) {
- return nullptr;
+ JS::ModuleType moduleType = JS::ModuleType::JavaScript;
+ if (moduleRequestArg) {
+ Rooted<ModuleRequestObject*> moduleRequest(
+ cx, &moduleRequestArg->as<ModuleRequestObject>());
+ if (!ModuleRequestObject::getModuleType(cx, moduleRequest, moduleType)) {
+ return nullptr;
+ }
}
- RootedObject info(cx, js::CreateScriptPrivate(cx, path));
- if (!info) {
- return nullptr;
- }
+ switch (moduleType) {
+ case JS::ModuleType::Unknown:
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BAD_MODULE_TYPE);
+ return nullptr;
+ case JS::ModuleType::JavaScript: {
+ module = JS::CompileModule(cx, options, srcBuf);
+ if (!module) {
+ return nullptr;
+ }
+
+ RootedObject info(cx, js::CreateScriptPrivate(cx, path));
+ if (!info) {
+ return nullptr;
+ }
- JS::SetModulePrivate(module, ObjectValue(*info));
+ JS::SetModulePrivate(module, ObjectValue(*info));
+ } break;
+ case JS::ModuleType::JSON:
+ module = JS::CompileJsonModule(cx, options, srcBuf);
+ if (!module) {
+ return nullptr;
+ }
+ break;
+ }
if (!addModuleToRegistry(cx, path, module)) {
return nullptr;
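
The ModuleLoader changes above thread the module request into loadAndParse so the shell can honour the requested module type: JS::CompileModule is used for JavaScript modules, JS::CompileJsonModule when ModuleRequestObject::getModuleType reports JSON, and an unknown type is reported as JSMSG_BAD_MODULE_TYPE. A hedged sketch of how this surfaces in shell code (the file name is hypothetical, and the relevant --enable-import-attributes / --enable-import-assertions shell options must be active):

    // settings.json is a made-up module; illustrative only.
    import settings from "./settings.json" with { type: "json" };

    // Dynamic import goes through tryDynamicImport -> loadAndExecute with the
    // same module request, so it takes the same JSON path:
    const mod = await import("./settings.json", { with: { type: "json" } });
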
diff --git a/js/src/shell/ModuleLoader.h b/js/src/shell/ModuleLoader.h
index 276e199661..c61ca755bc 100644
--- a/js/src/shell/ModuleLoader.h
+++ b/js/src/shell/ModuleLoader.h
@@ -42,7 +42,8 @@ class ModuleLoader {
static bool DynamicImportDelayRejected(JSContext* cx, unsigned argc,
Value* vp);
- bool loadAndExecute(JSContext* cx, HandleString path, MutableHandleValue);
+ bool loadAndExecute(JSContext* cx, HandleString path,
+ HandleObject moduleRequestArg, MutableHandleValue);
JSObject* resolveImportedModule(JSContext* cx, HandleValue referencingPrivate,
HandleObject moduleRequest);
bool populateImportMeta(JSContext* cx, HandleValue privateValue,
@@ -58,7 +59,8 @@ class ModuleLoader {
bool tryDynamicImport(JSContext* cx, HandleValue referencingPrivate,
HandleObject moduleRequest, HandleObject promise,
MutableHandleValue rval);
- JSObject* loadAndParse(JSContext* cx, HandleString path);
+ JSObject* loadAndParse(JSContext* cx, HandleString path,
+ HandleObject moduleRequestArg);
bool lookupModuleInRegistry(JSContext* cx, HandleString path,
MutableHandleObject moduleOut);
bool addModuleToRegistry(JSContext* cx, HandleString path,
diff --git a/js/src/shell/ShellModuleObjectWrapper.cpp b/js/src/shell/ShellModuleObjectWrapper.cpp
index 9f1b9fd59c..4e8c234e61 100644
--- a/js/src/shell/ShellModuleObjectWrapper.cpp
+++ b/js/src/shell/ShellModuleObjectWrapper.cpp
@@ -315,12 +315,12 @@ bool ShellModuleNativeWrapperGetter(JSContext* cx, const JS::CallArgs& args,
DEFINE_GETTER_FUNCTIONS(ModuleRequestObject, specifier, StringOrNullValue,
IdentFilter)
-DEFINE_GETTER_FUNCTIONS(ModuleRequestObject, assertions, ObjectOrNullValue,
+DEFINE_GETTER_FUNCTIONS(ModuleRequestObject, attributes, ObjectOrNullValue,
IdentFilter)
static const JSPropertySpec ShellModuleRequestObjectWrapper_accessors[] = {
JS_PSG("specifier", ShellModuleRequestObjectWrapper_specifierGetter, 0),
- JS_PSG("assertions", ShellModuleRequestObjectWrapper_assertionsGetter, 0),
+ JS_PSG("assertions", ShellModuleRequestObjectWrapper_attributesGetter, 0),
JS_PS_END};
DEFINE_GETTER_FUNCTIONS(ImportEntry, moduleRequest, ObjectOrNullValue,
diff --git a/js/src/shell/js.cpp b/js/src/shell/js.cpp
index 03e9e0c109..624b8217c3 100644
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -11,6 +11,7 @@
#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_ASSERT_IF, MOZ_RELEASE_ASSERT, MOZ_CRASH
#include "mozilla/Atomics.h"
#include "mozilla/Attributes.h"
+#include "mozilla/Compression.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/EnumSet.h"
#include "mozilla/IntegerPrintfMacros.h"
@@ -127,7 +128,7 @@
#include "js/ErrorReport.h" // JS::PrintError
#include "js/Exception.h" // JS::StealPendingExceptionStack
#include "js/experimental/CodeCoverage.h" // js::EnableCodeCoverage
-#include "js/experimental/CompileScript.h" // JS::NewFrontendContext, JS::DestroyFrontendContext, JS::HadFrontendErrors, JS::ConvertFrontendErrorsToRuntimeErrors, JS::CompileGlobalScriptToStencil, JS::CompileModuleScriptToStencil, JS::CompilationStorage
+#include "js/experimental/CompileScript.h" // JS::NewFrontendContext, JS::DestroyFrontendContext, JS::HadFrontendErrors, JS::ConvertFrontendErrorsToRuntimeErrors, JS::CompileGlobalScriptToStencil, JS::CompileModuleScriptToStencil
#include "js/experimental/CTypes.h" // JS::InitCTypesClass
#include "js/experimental/Intl.h" // JS::AddMoz{DateTimeFormat,DisplayNames}Constructor
#include "js/experimental/JitInfo.h" // JSJit{Getter,Setter,Method}CallArgs, JSJitGetterInfo, JSJit{Getter,Setter}Op, JSJitInfo
@@ -213,6 +214,8 @@
#include "vm/Realm-inl.h"
#include "vm/Stack-inl.h"
+#undef compress
+
using namespace js;
using namespace js::cli;
using namespace js::shell;
@@ -637,15 +640,11 @@ void OffThreadJob::run() {
switch (kind_) {
case Kind::CompileScript: {
- JS::CompilationStorage compileStorage;
- stencil_ = JS::CompileGlobalScriptToStencil(fc_, options_, srcBuf_,
- compileStorage);
+ stencil_ = JS::CompileGlobalScriptToStencil(fc_, options_, srcBuf_);
break;
}
case Kind::CompileModule: {
- JS::CompilationStorage compileStorage;
- stencil_ = JS::CompileModuleScriptToStencil(fc_, options_, srcBuf_,
- compileStorage);
+ stencil_ = JS::CompileModuleScriptToStencil(fc_, options_, srcBuf_);
break;
}
case Kind::Decode: {
@@ -724,12 +723,6 @@ bool shell::enableWasm = false;
bool shell::enableSharedMemory = SHARED_MEMORY_DEFAULT;
bool shell::enableWasmBaseline = false;
bool shell::enableWasmOptimizing = false;
-
-#define WASM_FEATURE(NAME, _, STAGE, ...) \
- bool shell::enableWasm##NAME = STAGE != WasmFeatureStage::Experimental;
-JS_FOR_WASM_FEATURES(WASM_FEATURE);
-#undef WASM_FEATURE
-
bool shell::enableWasmVerbose = false;
bool shell::enableTestWasmAwaitTier2 = false;
bool shell::enableSourcePragmas = true;
@@ -1538,7 +1531,6 @@ static bool BoundToAsyncStack(JSContext* cx, unsigned argc, Value* vp) {
}
RootedString causeString(cx, ToString(cx, v));
if (!causeString) {
- MOZ_ASSERT(cx->isExceptionPending());
return false;
}
@@ -9004,6 +8996,121 @@ static bool IsValidJSON(JSContext* cx, unsigned argc, Value* vp) {
return true;
}
+// Quick file format for a LZ4 compressed file
+static constexpr uint32_t LZ4MagicHeader = -1;
+// A magic word and a length field
+static constexpr size_t LZ4HeaderSize = sizeof(uint32_t) * 2;
+static constexpr size_t LZ4MaxSize = UINT32_MAX;
+
+static bool CompressLZ4(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ RootedObject callee(cx, &args.callee());
+
+ if (!args.get(0).isObject() ||
+ !args.get(0).toObject().is<ArrayBufferObject>()) {
+ ReportUsageErrorASCII(cx, callee, "First argument must be an ArrayBuffer");
+ return false;
+ }
+
+ JS::Rooted<ArrayBufferObject*> bytes(
+ cx, &args.get(0).toObject().as<ArrayBufferObject>());
+ size_t byteLength = bytes->byteLength();
+ if (byteLength > LZ4MaxSize) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Create a buffer big enough for the header and the max amount of compressed
+ // bytes.
+ size_t outputCapacity =
+ LZ4HeaderSize + mozilla::Compression::LZ4::maxCompressedSize(byteLength);
+
+ mozilla::UniquePtr<void, JS::FreePolicy> output(js_malloc(outputCapacity));
+ if (!output) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Write the magic header word and decompressed size in bytes.
+ ((uint32_t*)(output.get()))[0] = LZ4MagicHeader;
+ ((uint32_t*)(output.get()))[1] = byteLength;
+
+ // Compress the bytes into the output
+ char* compressedBytesStart = ((char*)output.get()) + LZ4HeaderSize;
+ size_t compressedBytesLength = mozilla::Compression::LZ4::compress(
+ (const char*)bytes->dataPointer(), byteLength, compressedBytesStart);
+ size_t outputLength = compressedBytesLength + LZ4HeaderSize;
+
+ // Create an ArrayBuffer wrapping the compressed bytes
+ JSObject* outputArrayBuffer =
+ NewArrayBufferWithContents(cx, outputLength, std::move(output));
+ if (!outputArrayBuffer) {
+ return false;
+ }
+
+ args.rval().setObject(*outputArrayBuffer);
+ return true;
+}
+
+static bool DecompressLZ4(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ RootedObject callee(cx, &args.callee());
+
+ if (!args.get(0).isObject() ||
+ !args.get(0).toObject().is<ArrayBufferObject>()) {
+ ReportUsageErrorASCII(cx, callee, "First argument must be an ArrayBuffer");
+ return false;
+ }
+
+ JS::Rooted<ArrayBufferObject*> bytes(
+ cx, &args.get(0).toObject().as<ArrayBufferObject>());
+ size_t byteLength = bytes->byteLength();
+ if (byteLength < LZ4HeaderSize) {
+ JS_ReportErrorASCII(cx, "Invalid LZ4 buffer");
+ return false;
+ }
+
+ // Check the magic header and get the decompressed byte length.
+ uint32_t magicHeader = ((uint32_t*)(bytes->dataPointer()))[0];
+ uint32_t decompressedBytesLength = ((uint32_t*)(bytes->dataPointer()))[1];
+ if (magicHeader != LZ4MagicHeader) {
+ JS_ReportErrorASCII(cx, "Invalid magic header");
+ return false;
+ }
+
+ // Allocate a buffer to store the decompressed bytes.
+ mozilla::UniquePtr<void, JS::FreePolicy> decompressedBytes(
+ js_malloc(decompressedBytesLength));
+ if (!decompressedBytes) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Decompress the bytes into the output
+ const char* compressedBytesStart =
+ ((const char*)bytes->dataPointer()) + LZ4HeaderSize;
+ size_t compressedBytesLength = byteLength - LZ4HeaderSize;
+ size_t actualDecompressedBytesLength = 0;
+ if (!mozilla::Compression::LZ4::decompress(
+ compressedBytesStart, compressedBytesLength,
+ (char*)decompressedBytes.get(), decompressedBytesLength,
+ &actualDecompressedBytesLength) ||
+ actualDecompressedBytesLength != decompressedBytesLength) {
+ JS_ReportErrorASCII(cx, "Invalid LZ4 buffer");
+ return false;
+ }
+
+ // Create an ArrayBuffer wrapping the decompressed bytes
+ JSObject* outputArrayBuffer = NewArrayBufferWithContents(
+ cx, decompressedBytesLength, std::move(decompressedBytes));
+ if (!outputArrayBuffer) {
+ return false;
+ }
+
+ args.rval().setObject(*outputArrayBuffer);
+ return true;
+}
+
// clang-format off
static const JSFunctionSpecWithHelp shell_functions[] = {
JS_FN_HELP("options", Options, 0, 0,
@@ -9675,6 +9782,14 @@ JS_FN_HELP("createUserArrayBuffer", CreateUserArrayBuffer, 1, 0,
"isValidJSON(source)",
" Returns true if the given source is valid JSON."),
+ JS_FN_HELP("compressLZ4", CompressLZ4, 1, 0,
+"compressLZ4(bytes)",
+" Return a compressed copy of bytes using LZ4."),
+
+ JS_FN_HELP("decompressLZ4", DecompressLZ4, 1, 0,
+"decompressLZ4(bytes)",
+" Return a decompressed copy of bytes using LZ4."),
+
JS_FS_HELP_END
};
// clang-format on
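
The new compressLZ4/decompressLZ4 shell builtins added above take an ArrayBuffer and return a new ArrayBuffer; the compressed form is an 8-byte header (magic word plus decompressed length) followed by the LZ4 payload. A small usage sketch in the spirit of js/src/tests/shell/compression.js, added later in this patch (shell-only API):

    // Round-trip a small buffer through the new shell helpers.
    let original = new Uint8Array([1, 2, 3, 4]).buffer;
    let packed = compressLZ4(original);      // ArrayBuffer: header + LZ4 data
    let unpacked = decompressLZ4(packed);    // ArrayBuffer with the original bytes
    assertEq(new Uint8Array(unpacked).length, 4);
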
@@ -11015,9 +11130,6 @@ static void SetWorkerContextOptions(JSContext* cx) {
.setWasm(enableWasm)
.setWasmBaseline(enableWasmBaseline)
.setWasmIon(enableWasmOptimizing)
-#define WASM_FEATURE(NAME, ...) .setWasm##NAME(enableWasm##NAME)
- JS_FOR_WASM_FEATURES(WASM_FEATURE)
-#undef WASM_FEATURE
.setWasmVerbose(enableWasmVerbose)
.setTestWasmAwaitTier2(enableTestWasmAwaitTier2)
@@ -11453,21 +11565,34 @@ static bool ParsePrefValue(const char* name, const char* val, T* result) {
}
}
-static bool SetJSPref(const char* pref) {
- const char* assign = strchr(pref, '=');
- if (!assign) {
- fprintf(stderr, "Missing '=' for --setpref\n");
- return false;
+static bool SetJSPrefToTrueForBool(const char* name) {
+ // Search for a matching pref and try to set it to a default value for the
+ // type.
+#define CHECK_PREF(NAME, CPP_NAME, TYPE, SETTER, IS_STARTUP_PREF) \
+ if (strcmp(name, NAME) == 0) { \
+ if constexpr (std::is_same_v<TYPE, bool>) { \
+ JS::Prefs::SETTER(true); \
+ return true; \
+ } else { \
+ fprintf(stderr, "Pref %s must have a value specified.\n", name); \
+ return false; \
+ } \
}
+ FOR_EACH_JS_PREF(CHECK_PREF)
+#undef CHECK_PREF
- size_t nameLen = assign - pref;
- const char* valStart = assign + 1; // Skip '='.
+ // Nothing matched, return false
+ fprintf(stderr, "Invalid pref name: %s\n", name);
+ return false;
+}
- // Search for a matching pref and try to set it.
+static bool SetJSPrefToValue(const char* name, size_t nameLen,
+ const char* value) {
+ // Search for a matching pref and try to set it to the provided value.
#define CHECK_PREF(NAME, CPP_NAME, TYPE, SETTER, IS_STARTUP_PREF) \
- if (nameLen == strlen(NAME) && memcmp(pref, NAME, strlen(NAME)) == 0) { \
+ if (nameLen == strlen(NAME) && memcmp(name, NAME, strlen(NAME)) == 0) { \
TYPE v; \
- if (!ParsePrefValue<TYPE>(NAME, valStart, &v)) { \
+ if (!ParsePrefValue<TYPE>(NAME, value, &v)) { \
return false; \
} \
JS::Prefs::SETTER(v); \
@@ -11476,10 +11601,29 @@ static bool SetJSPref(const char* pref) {
FOR_EACH_JS_PREF(CHECK_PREF)
#undef CHECK_PREF
- fprintf(stderr, "Invalid pref name: %s\n", pref);
+ // Nothing matched, return false
+ fprintf(stderr, "Invalid pref name: %s\n", name);
return false;
}
+static bool SetJSPref(const char* pref) {
+ const char* assign = strchr(pref, '=');
+ if (!assign) {
+ if (!SetJSPrefToTrueForBool(pref)) {
+ return false;
+ }
+ return true;
+ }
+
+ size_t nameLen = assign - pref;
+ const char* valStart = assign + 1; // Skip '='.
+
+ if (!SetJSPrefToValue(pref, nameLen, valStart)) {
+ return false;
+ }
+ return true;
+}
+
static void ListJSPrefs() {
auto printPref = [](const char* name, auto defaultVal) {
using T = decltype(defaultVal);
@@ -11832,20 +11976,8 @@ bool InitOptionParser(OptionParser& op) {
!op.addBoolOption('\0', "test-wasm-await-tier2",
"Forcibly activate tiering and block "
"instantiation on completion of tier2") ||
-#define WASM_FEATURE(NAME, LOWER_NAME, STAGE, COMPILE_PRED, COMPILER_PRED, \
- FLAG_PRED, FLAG_FORCE_ON, FLAG_FUZZ_ON, SHELL, ...) \
- !op.addBoolOption('\0', "no-wasm-" SHELL, \
- STAGE == WasmFeatureStage::Experimental \
- ? "No-op." \
- : "Disable wasm " SHELL " feature.") || \
- !op.addBoolOption('\0', "wasm-" SHELL, \
- STAGE == WasmFeatureStage::Experimental \
- ? "Enable wasm " SHELL " feature." \
- : "No-op.") ||
- JS_FOR_WASM_FEATURES(WASM_FEATURE)
-#undef WASM_FEATURE
- !op.addBoolOption('\0', "no-native-regexp",
- "Disable native regexp compilation") ||
+ !op.addBoolOption('\0', "no-native-regexp",
+ "Disable native regexp compilation") ||
!op.addIntOption(
'\0', "regexp-warmup-threshold", "COUNT",
"Wait for COUNT invocations before compiling regexps to native code "
@@ -12206,14 +12338,29 @@ bool InitOptionParser(OptionParser& op) {
#endif
!op.addStringOption('\0', "telemetry-dir", "[directory]",
"Output telemetry results in a directory") ||
- !op.addMultiStringOption('\0', "setpref", "name=val",
- "Set the value of a JS pref. Use --list-prefs "
+ !op.addMultiStringOption('P', "setpref", "name[=val]",
+ "Set the value of a JS pref. The value may "
+ "be omitted for boolean prefs, in which case "
+ "they default to true. Use --list-prefs "
"to print all pref names.") ||
!op.addBoolOption(
'\0', "list-prefs",
"Print list of prefs that can be set with --setpref.") ||
!op.addBoolOption('\0', "use-fdlibm-for-sin-cos-tan",
- "Use fdlibm for Math.sin, Math.cos, and Math.tan")) {
+ "Use fdlibm for Math.sin, Math.cos, and Math.tan") ||
+ !op.addBoolOption('\0', "wasm-gc", "Enable WebAssembly gc proposal.") ||
+ !op.addBoolOption('\0', "wasm-relaxed-simd",
+ "Enable WebAssembly relaxed-simd proposal.") ||
+ !op.addBoolOption('\0', "wasm-multi-memory",
+ "Enable WebAssembly multi-memory proposal.") ||
+ !op.addBoolOption('\0', "wasm-memory-control",
+ "Enable WebAssembly memory-control proposal.") ||
+ !op.addBoolOption('\0', "wasm-memory64",
+ "Enable WebAssembly memory64 proposal.") ||
+ !op.addBoolOption('\0', "wasm-tail-calls",
+ "Enable WebAssembly tail-calls proposal.") ||
+ !op.addBoolOption('\0', "wasm-js-string-builtins",
+ "Enable WebAssembly js-string-builtins proposal.")) {
return false;
}
@@ -12234,36 +12381,60 @@ bool SetGlobalOptionsPreJSInit(const OptionParser& op) {
// Override pref values for prefs that have a custom shell flag.
// If you're adding a new feature, consider using --setpref instead.
- JS::Prefs::setAtStartup_array_grouping(
- !op.getBoolOption("disable-array-grouping"));
- JS::Prefs::setAtStartup_arraybuffer_transfer(
- !op.getBoolOption("disable-arraybuffer-transfer"));
- JS::Prefs::set_experimental_shadow_realms(
- op.getBoolOption("enable-shadow-realms"));
- JS::Prefs::setAtStartup_well_formed_unicode_strings(
- !op.getBoolOption("disable-well-formed-unicode-strings"));
+ if (op.getBoolOption("disable-array-grouping")) {
+ JS::Prefs::setAtStartup_array_grouping(false);
+ }
+ if (op.getBoolOption("disable-arraybuffer-transfer")) {
+ JS::Prefs::setAtStartup_arraybuffer_transfer(false);
+ }
+ if (op.getBoolOption("enable-shadow-realms")) {
+ JS::Prefs::set_experimental_shadow_realms(true);
+ }
+ if (op.getBoolOption("disable-well-formed-unicode-strings")) {
+ JS::Prefs::setAtStartup_well_formed_unicode_strings(false);
+ }
#ifdef NIGHTLY_BUILD
- JS::Prefs::setAtStartup_experimental_arraybuffer_resizable(
- op.getBoolOption("enable-arraybuffer-resizable"));
- JS::Prefs::setAtStartup_experimental_sharedarraybuffer_growable(
- op.getBoolOption("enable-arraybuffer-resizable"));
- JS::Prefs::setAtStartup_experimental_iterator_helpers(
- op.getBoolOption("enable-iterator-helpers"));
- JS::Prefs::setAtStartup_experimental_new_set_methods(
- op.getBoolOption("enable-new-set-methods"));
- JS::Prefs::setAtStartup_experimental_symbols_as_weakmap_keys(
- op.getBoolOption("enable-symbols-as-weakmap-keys"));
+ if (op.getBoolOption("enable-arraybuffer-resizable")) {
+ JS::Prefs::setAtStartup_experimental_arraybuffer_resizable(true);
+ JS::Prefs::setAtStartup_experimental_sharedarraybuffer_growable(true);
+ }
+ if (op.getBoolOption("enable-iterator-helpers")) {
+ JS::Prefs::setAtStartup_experimental_iterator_helpers(true);
+ }
+ if (op.getBoolOption("enable-new-set-methods")) {
+ JS::Prefs::setAtStartup_experimental_new_set_methods(true);
+ }
+ if (op.getBoolOption("enable-symbols-as-weakmap-keys")) {
+ JS::Prefs::setAtStartup_experimental_symbols_as_weakmap_keys(true);
+ }
#endif
- JS::Prefs::setAtStartup_weakrefs(!op.getBoolOption("disable-weak-refs"));
+ if (op.getBoolOption("disable-weak-refs")) {
+ JS::Prefs::setAtStartup_weakrefs(false);
+ }
JS::Prefs::setAtStartup_experimental_weakrefs_expose_cleanupSome(true);
- JS::Prefs::setAtStartup_destructuring_fuse(
- !op.getBoolOption("disable-destructuring-fuse"));
+ if (op.getBoolOption("disable-destructuring-fuse")) {
+ JS::Prefs::setAtStartup_destructuring_fuse(false);
+ }
+ if (op.getBoolOption("disable-property-error-message-fix")) {
+ JS::Prefs::setAtStartup_property_error_message_fix(false);
+ }
+
JS::Prefs::set_use_fdlibm_for_sin_cos_tan(
op.getBoolOption("use-fdlibm-for-sin-cos-tan"));
- JS::Prefs::setAtStartup_property_error_message_fix(
- !op.getBoolOption("disable-property-error-message-fix"));
+
+ if (op.getBoolOption("wasm-gc") || op.getBoolOption("wasm-relaxed-simd") ||
+ op.getBoolOption("wasm-multi-memory") ||
+ op.getBoolOption("wasm-memory-control") ||
+ op.getBoolOption("wasm-memory64") ||
+ op.getBoolOption("wasm-tail-calls") ||
+ op.getBoolOption("wasm-js-string-builtins")) {
+ fprintf(
+ stderr,
+ "Wasm shell flags are now using prefs, use -P wasm_feature instead.\n");
+ return false;
+ }
if (op.getBoolOption("list-prefs")) {
ListJSPrefs();
@@ -12556,17 +12727,6 @@ bool SetContextWasmOptions(JSContext* cx, const OptionParser& op) {
}
}
-#define WASM_FEATURE(NAME, LOWER_NAME, STAGE, COMPILE_PRED, COMPILER_PRED, \
- FLAG_PRED, FLAG_FORCE_ON, FLAG_FUZZ_ON, SHELL, ...) \
- if (STAGE == WasmFeatureStage::Experimental) { \
- enableWasm##NAME = op.getBoolOption("wasm-" SHELL); \
- } else { \
- enableWasm##NAME = !op.getBoolOption("no-wasm-" SHELL); \
- }
-
- JS_FOR_WASM_FEATURES(WASM_FEATURE);
-#undef WASM_FEATURE
-
enableWasmVerbose = op.getBoolOption("wasm-verbose");
enableTestWasmAwaitTier2 = op.getBoolOption("test-wasm-await-tier2");
@@ -12575,11 +12735,7 @@ bool SetContextWasmOptions(JSContext* cx, const OptionParser& op) {
.setWasm(enableWasm)
.setWasmForTrustedPrinciples(enableWasm)
.setWasmBaseline(enableWasmBaseline)
- .setWasmIon(enableWasmOptimizing)
-#define WASM_FEATURE(NAME, ...) .setWasm##NAME(enableWasm##NAME)
- JS_FOR_WASM_FEATURES(WASM_FEATURE)
-#undef WASM_FEATURE
- ;
+ .setWasmIon(enableWasmOptimizing);
#ifndef __wasi__
// This must be set before self-hosted code is initialized, as self-hosted
@@ -12598,18 +12754,12 @@ bool SetContextWasmOptions(JSContext* cx, const OptionParser& op) {
// Also the following are to be propagated.
const char* to_propagate[] = {
-# define WASM_FEATURE(NAME, LOWER_NAME, STAGE, COMPILE_PRED, COMPILER_PRED, \
- FLAG_PRED, FLAG_FORCE_ON, FLAG_FUZZ_ON, SHELL, ...) \
- STAGE == WasmFeatureStage::Experimental ? "--wasm-" SHELL \
- : "--no-wasm-" SHELL,
- JS_FOR_WASM_FEATURES(WASM_FEATURE)
-# undef WASM_FEATURE
// Compiler selection options
"--test-wasm-await-tier2",
- NULL};
- for (const char** p = &to_propagate[0]; *p; p++) {
- if (op.getBoolOption(&(*p)[2] /* 2 => skip the leading '--' */)) {
- if (!sCompilerProcessFlags.append(*p)) {
+ };
+ for (const char* p : to_propagate) {
+ if (op.getBoolOption(p + 2 /* 2 => skip the leading '--' */)) {
+ if (!sCompilerProcessFlags.append(p)) {
return false;
}
}
diff --git a/js/src/shell/jsshell.h b/js/src/shell/jsshell.h
index 6ffe8ff236..9cbf4505f9 100644
--- a/js/src/shell/jsshell.h
+++ b/js/src/shell/jsshell.h
@@ -112,11 +112,6 @@ extern bool enableWasm;
extern bool enableSharedMemory;
extern bool enableWasmBaseline;
extern bool enableWasmOptimizing;
-
-#define WASM_FEATURE(NAME, ...) extern bool enableWasm##NAME;
-JS_FOR_WASM_FEATURES(WASM_FEATURE);
-#undef WASM_FEATURE
-
extern bool enableWasmVerbose;
extern bool enableTestWasmAwaitTier2;
extern bool enableSourcePragmas;
diff --git a/js/src/tests/jstests.list b/js/src/tests/jstests.list
index 72d078dc40..55ffc65f6f 100644
--- a/js/src/tests/jstests.list
+++ b/js/src/tests/jstests.list
@@ -10,12 +10,6 @@ skip script non262/String/normalize-generateddata-input.js # input data for othe
slow script test262/built-ins/decodeURI/S15.1.3.1_A2.5_T1.js
slow script test262/built-ins/decodeURIComponent/S15.1.3.2_A2.5_T1.js
-# Windows10-aarch64 fails certain tests.
-# https://bugzilla.mozilla.org/show_bug.cgi?id=1526003
-# https://bugzilla.mozilla.org/show_bug.cgi?id=1526012
-skip-if((xulRuntime.XPCOMABI.match(/aarch64/))&&(xulRuntime.OS=="WINNT")) script non262/Math/fround.js
-skip-if((xulRuntime.XPCOMABI.match(/aarch64/))&&(xulRuntime.OS=="WINNT")) script non262/Math/log2-approx.js
-
###########################################################################
# Generated jstests.list for test262 when inline |reftest| isn't possible #
@@ -626,13 +620,6 @@ skip script test262/built-ins/RegExp/unicodeSets/generated/rgi-emoji-15.1.js
skip script test262/intl402/DateTimeFormat/timezone-not-canonicalized.js
skip script test262/intl402/DateTimeFormat/timezone-case-insensitive.js
-# Resolved options reordered.
-# https://github.com/tc39/ecma402/pull/811
-skip script test262/intl402/NumberFormat/constructor-option-read-order.js
-skip script test262/intl402/NumberFormat/prototype/resolvedOptions/return-keys-order-default.js
-skip script test262/intl402/PluralRules/constructor-option-read-order.js
-skip script test262/intl402/PluralRules/prototype/resolvedOptions/return-keys-order-default.js
-
# Requires Unicode 15.1
# https://github.com/tc39/test262/pull/3947
# https://bugzilla.mozilla.org/show_bug.cgi?id=1859752
diff --git a/js/src/tests/lib/tasks_adb_remote.py b/js/src/tests/lib/tasks_adb_remote.py
index 2d2739a281..1ddc6baa08 100644
--- a/js/src/tests/lib/tasks_adb_remote.py
+++ b/js/src/tests/lib/tasks_adb_remote.py
@@ -160,7 +160,7 @@ do_test()
#
# The timeout command send a SIGTERM signal, which should return 143
# (=128+15). However, due to a bug in tinybox, it returns 142.
- if test \( $rc -eq 143 -o $rc -eq 142 \) -a $attempt -lt {retry}; then
+ if test \\( $rc -eq 143 -o $rc -eq 142 \\) -a $attempt -lt {retry}; then
echo '\\n{tag}RETRY='$rc,$time
attempt=$((attempt + 1))
do_test $idx $attempt "$@"
diff --git a/js/src/tests/non262/Date/dashed-date.js b/js/src/tests/non262/Date/dashed-date.js
index e479904098..d7b4692e7f 100644
--- a/js/src/tests/non262/Date/dashed-date.js
+++ b/js/src/tests/non262/Date/dashed-date.js
@@ -46,12 +46,10 @@ const tests = [
// ==== Date followed by hour and TZ ====
["24-Apr-2023 12:34:56", "2023-04-24T12:34:56"],
- ["24-Apr-2023 Mon 12:34:56", "2023-04-24T12:34:56"],
["24-Apr-2023 (Mon) 12:34:56", "2023-04-24T12:34:56"],
["24-Apr-2023(Mon)12:34:56", "2023-04-24T12:34:56"],
["24-Apr-2023,12:34:56", "2023-04-24T12:34:56"],
- ["24-Apr-2023,Mon 12:34:56", "2023-04-24T12:34:56"],
["24-Apr-2023 12:34:56 GMT", "2023-04-24T12:34:56Z"],
["24-Apr-2023 12:34:56 +04", "2023-04-24T12:34:56+04:00"],
@@ -145,6 +143,10 @@ const invalidTests = [
"24-Apr-2312+10:13:14",
"24-Apr-2312=10:13:14",
"24-Apr-2312?10:13:14",
+
+ // Late weekday
+ "24-Apr-2023 Mon 12:34:56",
+ "24-Apr-2023,Mon 12:34:56",
];
for (const testString of invalidTests) {
diff --git a/js/src/tests/non262/Date/parse-keywords.js b/js/src/tests/non262/Date/parse-keywords.js
index c834bbfe0b..ba3b50b71b 100644
--- a/js/src/tests/non262/Date/parse-keywords.js
+++ b/js/src/tests/non262/Date/parse-keywords.js
@@ -12,7 +12,6 @@ const accepted = {
"Sep 26 1995 10:00 am": "1995-09-26T10:00:00",
"Sep 26 1995 10:00 AM": "1995-09-26T10:00:00",
"Sep 26 1995 10:00 pm": "1995-09-26T22:00:00",
- "Sep 26 Thurs 1995 Mon 10:thursday:00": "1995-09-26T10:00:00",
};
const rejected = [
"Sep 26 1995 G",
@@ -22,6 +21,12 @@ const rejected = [
"Sep 26 1995 10:00 a",
"Sep 26 1995 10:00 p",
"0/zx",
+
+ // Late weekday
+ "Sep 26 Thurs 1995 10:00",
+ "Sep 26 1995 Thurs 10:00",
+ "Sep 26 1995 10:Thurs:00",
+ "Sep 26 1995 10:00 Thurs",
];
for (const [test, expected] of Object.entries(accepted)) {
diff --git a/js/src/tests/non262/Intl/ListFormat/unit-type.js b/js/src/tests/non262/Intl/ListFormat/unit-type.js
index 8c76677865..4f1c7321cf 100644
--- a/js/src/tests/non262/Intl/ListFormat/unit-type.js
+++ b/js/src/tests/non262/Intl/ListFormat/unit-type.js
@@ -1,4 +1,4 @@
-// |reftest| skip -- "unit" type currently not supported
+// |reftest| skip-if(!this.hasOwnProperty('Intl'))
const {Element, Literal} = ListFormatParts;
const styles = ["long", "short", "narrow"];
@@ -38,7 +38,6 @@ const styles = ["long", "short", "narrow"];
const testData = {
"ar": {
      long: [Element("A"), Literal(" و"), Element("B")],
- narrow: [Element("A"), Literal("، "), Element("B")],
},
"de": {
long: [Element("A"), Literal(", "), Element("B")],
@@ -90,7 +89,7 @@ const styles = ["long", "short", "narrow"];
// non-ASCII case
"ar": {
long: [Element("A"), Literal("، و"), Element("B"), Literal("، و"), Element("C"), Literal("، و"), Element("D")],
- narrow: [Element("A"), Literal("، "), Element("B"), Literal("، "), Element("C"), Literal("، "), Element("D")],
+      narrow: [Element("A"), Literal(" و"), Element("B"), Literal(" و"), Element("C"), Literal(" و"), Element("D")],
},
// all values are equal
diff --git a/js/src/tests/non262/String/make-normalize-generateddata-input.py b/js/src/tests/non262/String/make-normalize-generateddata-input.py
index 5c3d2d3e44..086275e67d 100644
--- a/js/src/tests/non262/String/make-normalize-generateddata-input.py
+++ b/js/src/tests/non262/String/make-normalize-generateddata-input.py
@@ -18,7 +18,7 @@ def to_code_list(codes):
def convert(dir):
- ver_pat = re.compile("NormalizationTest-([0-9\.]+)\.txt")
+ ver_pat = re.compile(r"NormalizationTest-([0-9\.]+)\.txt")
part_pat = re.compile("^@(Part([0-9]+) .+)$")
test_pat = re.compile(
"^([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);$"
diff --git a/js/src/tests/non262/argumentsLengthOpt.js b/js/src/tests/non262/argumentsLengthOpt.js
new file mode 100644
index 0000000000..16385fa5a4
--- /dev/null
+++ b/js/src/tests/non262/argumentsLengthOpt.js
@@ -0,0 +1,87 @@
+// Test cases for arguments.length optimization.
+
+function f1() {
+ return arguments.length;
+}
+
+function f2(a, b, c) {
+ return arguments.length;
+}
+
+// arrow functions don't have their own arguments, and so capture the enclosing
+// scope.
+function f3(a, b, c, d) {
+ return (() => arguments.length)();
+}
+
+// Test a function which mutates arguments.length
+function f4(a, b, c, d) {
+ arguments.length = 42;
+ return arguments.length;
+}
+
+// Manually read out arguments; should disable the length opt
+function f5() {
+ for (var i = 0; i < arguments.length; i++) {
+ if (arguments[i] == 10) { return true }
+ }
+ return false;
+}
+
+function f6() {
+ function inner() {
+ return arguments.length;
+ }
+ return inner(1, 2, 3);
+}
+
+// edge cases of the arguments bindings:
+function f7() {
+ var arguments = 42;
+ return arguments;
+}
+
+function f8() {
+ var arguments = [1, 2];
+ return arguments.length;
+}
+
+function f9() {
+ eval("arguments.length = 42");
+ return arguments.length;
+}
+
+function test() {
+ assertEq(f1(), 0);
+ assertEq(f1(1), 1);
+ assertEq(f1(1, 2), 2);
+ assertEq(f1(1, 2, 3), 3);
+
+ assertEq(f2(), 0);
+ assertEq(f2(1, 2, 3), 3);
+
+ assertEq(f3(), 0);
+ assertEq(f3(1, 2, 3), 3);
+
+ assertEq(f4(), 42);
+ assertEq(f4(1, 2, 3), 42);
+
+ assertEq(f5(), false);
+ assertEq(f5(1, 2, 3, 10), true);
+ assertEq(f5(1, 2, 3, 10, 20), true);
+ assertEq(f5(1, 2, 3, 9, 20, 30), false);
+
+ assertEq(f6(), 3)
+ assertEq(f6(1, 2, 3, 4), 3)
+
+ assertEq(f7(), 42);
+
+ assertEq(f8(), 2);
+
+ assertEq(f9(), 42);
+}
+
+test();
+
+if (typeof reportCompare === "function")
+ reportCompare(0, 0, "ok");
diff --git a/js/src/tests/non262/extensions/typedarray-set-neutering.js b/js/src/tests/non262/extensions/typedarray-set-detach.js
index 23df8158c5..23df8158c5 100644
--- a/js/src/tests/non262/extensions/typedarray-set-neutering.js
+++ b/js/src/tests/non262/extensions/typedarray-set-detach.js
diff --git a/js/src/tests/non262/reflect-parse/argumentsReflect.js b/js/src/tests/non262/reflect-parse/argumentsReflect.js
new file mode 100644
index 0000000000..69a8624acc
--- /dev/null
+++ b/js/src/tests/non262/reflect-parse/argumentsReflect.js
@@ -0,0 +1,14 @@
+// |reftest| skip-if(!xulRuntime.shell)
+
+// Test reflect.parse on a function with arguments.length
+let ast = Reflect.parse(`function f10() {
+ return arguments.length;
+}`);
+
+assertEq(ast.body[0].body.body[0].argument.object.type, "Identifier");
+assertEq(ast.body[0].body.body[0].argument.object.name, "arguments");
+assertEq(ast.body[0].body.body[0].argument.property.type, "Identifier");
+assertEq(ast.body[0].body.body[0].argument.property.name, "length");
+
+if (typeof reportCompare === "function")
+ reportCompare(0, 0, "ok");
diff --git a/js/src/tests/shell/compression.js b/js/src/tests/shell/compression.js
new file mode 100644
index 0000000000..4836d832fb
--- /dev/null
+++ b/js/src/tests/shell/compression.js
@@ -0,0 +1,30 @@
+// |reftest| skip-if(!xulRuntime.shell)
+
+// Compressed buffers must have magic header and length
+assertThrows(() => decompressLZ4(new ArrayBuffer()));
+
+// Compress and decompress take an array buffer, not arrays
+assertThrows(() => compressLZ4([]));
+assertThrows(() => decompressLZ4([]));
+
+// Round trip several buffers
+let tests = [
+ new Uint8Array([]),
+ new Uint8Array([0]),
+ new Uint8Array([0, 1, 2, 3]),
+ new Uint8Array(1000),
+];
+
+for (let test of tests) {
+ let original = test.buffer;
+
+ let compressed = compressLZ4(original);
+ assertEq(compressed instanceof ArrayBuffer, true);
+
+ let decompressed = decompressLZ4(compressed);
+ assertEq(decompressed instanceof ArrayBuffer, true);
+
+ assertEqArray(new Uint8Array(original), new Uint8Array(decompressed));
+}
+
+reportCompare(true,true);
diff --git a/js/src/tests/test262-export.py b/js/src/tests/test262-export.py
index 477db883ea..dc30776973 100755
--- a/js/src/tests/test262-export.py
+++ b/js/src/tests/test262-export.py
@@ -237,7 +237,7 @@ def mergeMeta(reftest, frontmatter, includes):
if info:
# Open some space in an existing info text
if "info" in frontmatter:
- frontmatter["info"] += "\n\n \%s" % info
+ frontmatter["info"] += "\n\n \\%s" % info
else:
frontmatter["info"] = info
diff --git a/js/src/tests/test262-update.py b/js/src/tests/test262-update.py
index 8578f57d68..fc6fa62c45 100755
--- a/js/src/tests/test262-update.py
+++ b/js/src/tests/test262-update.py
@@ -23,7 +23,6 @@ UNSUPPORTED_FEATURES = set(
"Intl.DurationFormat", # Bug 1648139
"Atomics.waitAsync", # Bug 1467846
"legacy-regexp", # Bug 1306461
- "json-modules", # Bug 1670176
"regexp-duplicate-named-groups", # Bug 1773135
"json-parse-with-source", # Bug 1658310
"set-methods", # Bug 1805038
diff --git a/js/src/tests/test262/language/expressions/dynamic-import/import-assertions/2nd-param-assert-enumeration-enumerable.js b/js/src/tests/test262/language/expressions/dynamic-import/import-assertions/2nd-param-assert-enumeration-enumerable.js
index a4bc49e2de..1c80a751e4 100644
--- a/js/src/tests/test262/language/expressions/dynamic-import/import-assertions/2nd-param-assert-enumeration-enumerable.js
+++ b/js/src/tests/test262/language/expressions/dynamic-import/import-assertions/2nd-param-assert-enumeration-enumerable.js
@@ -1,4 +1,4 @@
-// |reftest| skip async -- json-modules is not supported
+// |reftest| shell-option(--enable-import-assertions) skip-if(!xulRuntime.shell) async -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/expressions/dynamic-import/import-attributes/2nd-param-with-enumeration-enumerable.js b/js/src/tests/test262/language/expressions/dynamic-import/import-attributes/2nd-param-with-enumeration-enumerable.js
index 68f5d91899..61967b8b7f 100644
--- a/js/src/tests/test262/language/expressions/dynamic-import/import-attributes/2nd-param-with-enumeration-enumerable.js
+++ b/js/src/tests/test262/language/expressions/dynamic-import/import-attributes/2nd-param-with-enumeration-enumerable.js
@@ -1,4 +1,4 @@
-// |reftest| skip async -- json-modules is not supported
+// |reftest| shell-option(--enable-import-attributes) skip-if(!xulRuntime.shell) async -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-assertions/json-extensibility-array.js b/js/src/tests/test262/language/import/import-assertions/json-extensibility-array.js
index bcd7f27f0f..8a20170d9b 100644
--- a/js/src/tests/test262/language/import/import-assertions/json-extensibility-array.js
+++ b/js/src/tests/test262/language/import/import-assertions/json-extensibility-array.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-assertions) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-assertions/json-extensibility-object.js b/js/src/tests/test262/language/import/import-assertions/json-extensibility-object.js
index bf68fb37f2..99ea5cca86 100644
--- a/js/src/tests/test262/language/import/import-assertions/json-extensibility-object.js
+++ b/js/src/tests/test262/language/import/import-assertions/json-extensibility-object.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-assertions) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-assertions/json-idempotency.js b/js/src/tests/test262/language/import/import-assertions/json-idempotency.js
index 8b7e7fc85d..9509e199ef 100644
--- a/js/src/tests/test262/language/import/import-assertions/json-idempotency.js
+++ b/js/src/tests/test262/language/import/import-assertions/json-idempotency.js
@@ -1,4 +1,4 @@
-// |reftest| skip module async -- json-modules is not supported
+// |reftest| shell-option(--enable-import-assertions) skip-if(!xulRuntime.shell) module async -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-assertions/json-invalid.js b/js/src/tests/test262/language/import/import-assertions/json-invalid.js
index a4aab457d3..bfe57e645a 100644
--- a/js/src/tests/test262/language/import/import-assertions/json-invalid.js
+++ b/js/src/tests/test262/language/import/import-assertions/json-invalid.js
@@ -1,4 +1,4 @@
-// |reftest| skip error:SyntaxError module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-assertions) skip-if(!xulRuntime.shell) error:SyntaxError module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-assertions/json-named-bindings.js b/js/src/tests/test262/language/import/import-assertions/json-named-bindings.js
index 5d2f5faabb..bf506c576d 100644
--- a/js/src/tests/test262/language/import/import-assertions/json-named-bindings.js
+++ b/js/src/tests/test262/language/import/import-assertions/json-named-bindings.js
@@ -1,4 +1,4 @@
-// |reftest| skip error:SyntaxError module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-assertions) skip-if(!xulRuntime.shell) error:SyntaxError module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-assertions/json-value-array.js b/js/src/tests/test262/language/import/import-assertions/json-value-array.js
index ffc2061793..5a67cb51ef 100644
--- a/js/src/tests/test262/language/import/import-assertions/json-value-array.js
+++ b/js/src/tests/test262/language/import/import-assertions/json-value-array.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-assertions) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-assertions/json-value-boolean.js b/js/src/tests/test262/language/import/import-assertions/json-value-boolean.js
index cd3425681d..55c334e89b 100644
--- a/js/src/tests/test262/language/import/import-assertions/json-value-boolean.js
+++ b/js/src/tests/test262/language/import/import-assertions/json-value-boolean.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-assertions) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-assertions/json-value-null.js b/js/src/tests/test262/language/import/import-assertions/json-value-null.js
index 14a7b89ef8..e8fb11bbb2 100644
--- a/js/src/tests/test262/language/import/import-assertions/json-value-null.js
+++ b/js/src/tests/test262/language/import/import-assertions/json-value-null.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-assertions) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-assertions/json-value-number.js b/js/src/tests/test262/language/import/import-assertions/json-value-number.js
index 9fc19d060a..8e66e646c4 100644
--- a/js/src/tests/test262/language/import/import-assertions/json-value-number.js
+++ b/js/src/tests/test262/language/import/import-assertions/json-value-number.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-assertions) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-assertions/json-value-object.js b/js/src/tests/test262/language/import/import-assertions/json-value-object.js
index 0a042d45c3..7436ae242f 100644
--- a/js/src/tests/test262/language/import/import-assertions/json-value-object.js
+++ b/js/src/tests/test262/language/import/import-assertions/json-value-object.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-assertions) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-assertions/json-value-string.js b/js/src/tests/test262/language/import/import-assertions/json-value-string.js
index a02e3381a4..6fc2e1a64f 100644
--- a/js/src/tests/test262/language/import/import-assertions/json-value-string.js
+++ b/js/src/tests/test262/language/import/import-assertions/json-value-string.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-assertions) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-assertions/json-via-namespace.js b/js/src/tests/test262/language/import/import-assertions/json-via-namespace.js
index c4142df74b..bbf4acf71e 100644
--- a/js/src/tests/test262/language/import/import-assertions/json-via-namespace.js
+++ b/js/src/tests/test262/language/import/import-assertions/json-via-namespace.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-assertions) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-attributes/json-extensibility-array.js b/js/src/tests/test262/language/import/import-attributes/json-extensibility-array.js
index c654861c19..1f59154309 100644
--- a/js/src/tests/test262/language/import/import-attributes/json-extensibility-array.js
+++ b/js/src/tests/test262/language/import/import-attributes/json-extensibility-array.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-attributes) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-attributes/json-extensibility-object.js b/js/src/tests/test262/language/import/import-attributes/json-extensibility-object.js
index 12883345a9..cf1ca9a810 100644
--- a/js/src/tests/test262/language/import/import-attributes/json-extensibility-object.js
+++ b/js/src/tests/test262/language/import/import-attributes/json-extensibility-object.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-attributes) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-attributes/json-idempotency.js b/js/src/tests/test262/language/import/import-attributes/json-idempotency.js
index 1cadd19aae..fab2e9d1eb 100644
--- a/js/src/tests/test262/language/import/import-attributes/json-idempotency.js
+++ b/js/src/tests/test262/language/import/import-attributes/json-idempotency.js
@@ -1,4 +1,4 @@
-// |reftest| skip module async -- json-modules is not supported
+// |reftest| shell-option(--enable-import-attributes) skip-if(!xulRuntime.shell) module async -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-attributes/json-invalid.js b/js/src/tests/test262/language/import/import-attributes/json-invalid.js
index 4e121f368a..fe2f2cd4bd 100644
--- a/js/src/tests/test262/language/import/import-attributes/json-invalid.js
+++ b/js/src/tests/test262/language/import/import-attributes/json-invalid.js
@@ -1,4 +1,4 @@
-// |reftest| skip error:SyntaxError module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-attributes) skip-if(!xulRuntime.shell) error:SyntaxError module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-attributes/json-named-bindings.js b/js/src/tests/test262/language/import/import-attributes/json-named-bindings.js
index a45e97ce54..2654cd6d7a 100644
--- a/js/src/tests/test262/language/import/import-attributes/json-named-bindings.js
+++ b/js/src/tests/test262/language/import/import-attributes/json-named-bindings.js
@@ -1,4 +1,4 @@
-// |reftest| skip error:SyntaxError module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-attributes) skip-if(!xulRuntime.shell) error:SyntaxError module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-attributes/json-value-array.js b/js/src/tests/test262/language/import/import-attributes/json-value-array.js
index 41fa87ca79..c7a7e28ff0 100644
--- a/js/src/tests/test262/language/import/import-attributes/json-value-array.js
+++ b/js/src/tests/test262/language/import/import-attributes/json-value-array.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-attributes) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-attributes/json-value-boolean.js b/js/src/tests/test262/language/import/import-attributes/json-value-boolean.js
index 91d62565f2..5a9b7a686e 100644
--- a/js/src/tests/test262/language/import/import-attributes/json-value-boolean.js
+++ b/js/src/tests/test262/language/import/import-attributes/json-value-boolean.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-attributes) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-attributes/json-value-null.js b/js/src/tests/test262/language/import/import-attributes/json-value-null.js
index a1c1ff35f5..ea1e95c10a 100644
--- a/js/src/tests/test262/language/import/import-attributes/json-value-null.js
+++ b/js/src/tests/test262/language/import/import-attributes/json-value-null.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-attributes) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-attributes/json-value-number.js b/js/src/tests/test262/language/import/import-attributes/json-value-number.js
index 2bd1c60270..bb14011524 100644
--- a/js/src/tests/test262/language/import/import-attributes/json-value-number.js
+++ b/js/src/tests/test262/language/import/import-attributes/json-value-number.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-attributes) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-attributes/json-value-object.js b/js/src/tests/test262/language/import/import-attributes/json-value-object.js
index 6c1b6d36f1..b9105f0818 100644
--- a/js/src/tests/test262/language/import/import-attributes/json-value-object.js
+++ b/js/src/tests/test262/language/import/import-attributes/json-value-object.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-attributes) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-attributes/json-value-string.js b/js/src/tests/test262/language/import/import-attributes/json-value-string.js
index 4b938405b2..fff0ed3e3a 100644
--- a/js/src/tests/test262/language/import/import-attributes/json-value-string.js
+++ b/js/src/tests/test262/language/import/import-attributes/json-value-string.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-attributes) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/tests/test262/language/import/import-attributes/json-via-namespace.js b/js/src/tests/test262/language/import/import-attributes/json-via-namespace.js
index a115b30412..d7c5ce332c 100644
--- a/js/src/tests/test262/language/import/import-attributes/json-via-namespace.js
+++ b/js/src/tests/test262/language/import/import-attributes/json-via-namespace.js
@@ -1,4 +1,4 @@
-// |reftest| skip module -- json-modules is not supported
+// |reftest| shell-option(--enable-import-attributes) skip-if(!xulRuntime.shell) module -- requires shell-options
// Copyright (C) 2021 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
diff --git a/js/src/util/StructuredSpewer.cpp b/js/src/util/StructuredSpewer.cpp
index f74570ab0c..72f483c6b5 100644
--- a/js/src/util/StructuredSpewer.cpp
+++ b/js/src/util/StructuredSpewer.cpp
@@ -183,54 +183,46 @@ void StructuredSpewer::parseSpewFlags(const char* flags) {
}
if (ContainsFlag(flags, "help")) {
+ // clang-format off
printf(
- "\n"
- "usage: SPEW=option,option,... where options can be:\n"
- "\n"
- " help Dump this help message\n"
- " channel Enable the selected channel from below, "
- "if\n"
- " more than one channel is specified, then "
- "the\n"
- " channel will be set whichever specified "
- "filter\n"
- " comes first in STRUCTURED_CHANNEL_LIST."
- " AtStartup Enable spewing at browser startup instead\n"
- " of when gecko profiling starts."
- "\n"
- " Channels: \n"
- "\n"
- // List Channels
- " BaselineICStats Dump the IC Entry counters during Ion "
- "analysis\n"
- " ScriptStats Dump statistics collected by tracelogger "
- "that\n"
- " is aggregated by script. Requires\n"
- " JS_TRACE_LOGGING=1\n"
- " CacheIRHealthReport Dump the CacheIR information and "
- "associated "
- "rating\n"
- // End Channel list
- "\n\n"
- "By default output goes to a file called spew_output.$PID.$THREAD\n"
- "\n"
- "Further control of the spewer can be accomplished with the below\n"
- "environment variables:\n"
- "\n"
- " SPEW_FILE: Selects the file to write to. An absolute path.\n"
- "\n"
- " SPEW_FILTER: A string which is matched against 'signature'\n"
- " constructed from a JSScript, currently connsisting of \n"
- " filename:line:col.\n"
- "\n"
- " A JSScript matches the filter string is found in the\n"
- " signature\n"
- "\n"
- " SPEW_UPLOAD: If this variable is set as well as MOZ_UPLOAD_DIR,\n"
- " output goes to $MOZ_UPLOAD_DIR/spew_output* to ease usage\n"
- " with Treeherder.\n"
+ "\n"
+ "usage: SPEW=option,option,... where options can be:\n"
+ "\n"
+ " help Dump this help message\n"
+      "  channel              Enable the selected channel from below. If\n"
+      "                       more than one channel is specified, then the\n"
+      "                       channel will be set to whichever specified\n"
+      "                       filter comes first in STRUCTURED_CHANNEL_LIST.\n"
+ " AtStartup Enable spewing at browser startup instead\n"
+ " of when gecko profiling starts."
+ "\n"
+ " Channels: \n"
+ "\n"
+ // List Channels
+ " BaselineICStats Dump the IC Entry counters during Ion analysis\n"
+ " CacheIRHealthReport Dump the CacheIR information and associated rating\n"
+ // End Channel list
+ "\n\n"
+ "By default output goes to a file called spew_output.$PID.$THREAD\n"
+ "\n"
+ "Further control of the spewer can be accomplished with the below\n"
+ "environment variables:\n"
+ "\n"
+ " SPEW_FILE: Selects the file to write to. An absolute path.\n"
+ "\n"
+ " SPEW_FILTER: A string which is matched against 'signature'\n"
+      "               constructed from a JSScript, currently consisting of\n"
+ " filename:line:col.\n"
+ "\n"
+      "               A JSScript matches if the filter string is found in\n"
+      "               the signature.\n"
+ "\n"
+ " SPEW_UPLOAD: If this variable is set as well as MOZ_UPLOAD_DIR,\n"
+ " output goes to $MOZ_UPLOAD_DIR/spew_output* to ease usage\n"
+ " with Treeherder.\n"
);
+ // clang-format on
exit(0);
}
}
diff --git a/js/src/util/StructuredSpewer.h b/js/src/util/StructuredSpewer.h
index 407dc34a25..613ef65b59 100644
--- a/js/src/util/StructuredSpewer.h
+++ b/js/src/util/StructuredSpewer.h
@@ -73,7 +73,6 @@ namespace js {
# define STRUCTURED_CHANNEL_LIST(_) \
_(BaselineICStats) \
- _(ScriptStats) \
_(CacheIRHealthReport)
// Structured spew channels
@@ -187,8 +186,8 @@ class StructuredSpewer {
// Globally selected channel.
StructuredSpewFilter selectedChannel_;
- using NameArray =
- mozilla::EnumeratedArray<SpewChannel, SpewChannel::Count, const char*>;
+ using NameArray = mozilla::EnumeratedArray<SpewChannel, const char*,
+ size_t(SpewChannel::Count)>;
// Channel Names
static NameArray const names_;
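
The NameArray change above follows the same mozilla::EnumeratedArray migration that recurs later in this patch (GlobalObject.h, HelperThreadState.h): the value type moves to the second template parameter and the bound becomes a plain size_t. A minimal sketch of the new shape, using a hypothetical Channel enum purely for illustration and assuming only mozilla/EnumeratedArray.h from the tree:

    #include <cstddef>

    #include "mozilla/EnumeratedArray.h"

    // Hypothetical enum, not part of the patch; Count doubles as the bound.
    enum class Channel : size_t { Alpha, Beta, Count };

    // New parameter order: EnumeratedArray<Enum, ValueType, Size>.
    using ChannelNames =
        mozilla::EnumeratedArray<Channel, const char*, size_t(Channel::Count)>;

    const char* NameOf(const ChannelNames& names, Channel c) {
      return names[c];  // indexed directly by the enumerator, not an integer
    }
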
diff --git a/js/src/util/make_unicode.py b/js/src/util/make_unicode.py
index 6ddecd6cbb..21ad718f3e 100755
--- a/js/src/util/make_unicode.py
+++ b/js/src/util/make_unicode.py
@@ -1111,7 +1111,7 @@ def make_regexp_space_test(version, test_space_table, codepoint_table):
test_space.write(",\n".join(map(hex_and_name, test_space_table)))
test_space.write("\n);\n")
test_space.write(
- """
+ r"""
assertEq(/^\s+$/.exec(onlySpace) !== null, true);
assertEq(/^[\s]+$/.exec(onlySpace) !== null, true);
assertEq(/^[^\s]+$/.exec(onlySpace) === null, true);
diff --git a/js/src/vm/ArgumentsObject.h b/js/src/vm/ArgumentsObject.h
index eeaca41a97..9ac3989885 100644
--- a/js/src/vm/ArgumentsObject.h
+++ b/js/src/vm/ArgumentsObject.h
@@ -275,12 +275,14 @@ class ArgumentsObject : public NativeObject {
return argc;
}
- // True iff arguments.length has been assigned or deleted.
- bool hasOverriddenLength() const {
+ bool hasFlags(uint32_t flags) const {
const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT);
- return v.toInt32() & LENGTH_OVERRIDDEN_BIT;
+ return v.toInt32() & flags;
}
+ // True iff arguments.length has been assigned or deleted.
+ bool hasOverriddenLength() const { return hasFlags(LENGTH_OVERRIDDEN_BIT); }
+
void markLengthOverridden() {
uint32_t v =
getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | LENGTH_OVERRIDDEN_BIT;
@@ -292,8 +294,7 @@ class ArgumentsObject : public NativeObject {
// True iff arguments[@@iterator] has been assigned or deleted.
bool hasOverriddenIterator() const {
- const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT);
- return v.toInt32() & ITERATOR_OVERRIDDEN_BIT;
+ return hasFlags(ITERATOR_OVERRIDDEN_BIT);
}
void markIteratorOverridden() {
@@ -311,10 +312,7 @@ class ArgumentsObject : public NativeObject {
static bool getArgumentsIterator(JSContext* cx, MutableHandleValue val);
// True iff any element has been assigned or deleted.
- bool hasOverriddenElement() const {
- const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT);
- return v.toInt32() & ELEMENT_OVERRIDDEN_BIT;
- }
+ bool hasOverriddenElement() const { return hasFlags(ELEMENT_OVERRIDDEN_BIT); }
void markElementOverridden() {
uint32_t v =
@@ -409,10 +407,7 @@ class ArgumentsObject : public NativeObject {
return IsMagicScopeSlotValue(v);
}
- bool anyArgIsForwarded() const {
- const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT);
- return v.toInt32() & FORWARDED_ARGUMENTS_BIT;
- }
+ bool anyArgIsForwarded() const { return hasFlags(FORWARDED_ARGUMENTS_BIT); }
void markArgumentForwarded() {
uint32_t v =
@@ -504,10 +499,7 @@ class MappedArgumentsObject : public ArgumentsObject {
return getFixedSlot(CALLEE_SLOT).toObject().as<JSFunction>();
}
- bool hasOverriddenCallee() const {
- const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT);
- return v.toInt32() & CALLEE_OVERRIDDEN_BIT;
- }
+ bool hasOverriddenCallee() const { return hasFlags(CALLEE_OVERRIDDEN_BIT); }
void markCalleeOverridden() {
uint32_t v =
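
The ArgumentsObject changes above fold several near-identical slot reads into a single hasFlags() helper. A self-contained sketch of the same pattern, assuming nothing from SpiderMonkey and using a plain int32 field in place of the value stored in INITIAL_LENGTH_SLOT:

    #include <cstdint>

    class FlagsSlotSketch {
      int32_t slot_ = 0;  // stands in for the int32 in INITIAL_LENGTH_SLOT

      static constexpr uint32_t LENGTH_OVERRIDDEN_BIT = 1 << 0;
      static constexpr uint32_t ITERATOR_OVERRIDDEN_BIT = 1 << 1;

      // Shared test used by all the per-flag getters.
      bool hasFlags(uint32_t flags) const { return (slot_ & flags) != 0; }

     public:
      void markLengthOverridden() { slot_ |= LENGTH_OVERRIDDEN_BIT; }
      void markIteratorOverridden() { slot_ |= ITERATOR_OVERRIDDEN_BIT; }

      bool hasOverriddenLength() const { return hasFlags(LENGTH_OVERRIDDEN_BIT); }
      bool hasOverriddenIterator() const { return hasFlags(ITERATOR_OVERRIDDEN_BIT); }
    };
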
diff --git a/js/src/vm/ArrayBufferObject.cpp b/js/src/vm/ArrayBufferObject.cpp
index 72c9ebeb18..2fe4f01f8d 100644
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -536,12 +536,7 @@ bool ArrayBufferObject::maxByteLengthGetterImpl(JSContext* cx,
auto* buffer = &args.thisv().toObject().as<ArrayBufferObject>();
// Steps 4-6.
- size_t maxByteLength;
- if (buffer->isResizable()) {
- maxByteLength = buffer->as<ResizableArrayBufferObject>().maxByteLength();
- } else {
- maxByteLength = buffer->byteLength();
- }
+ size_t maxByteLength = buffer->maxByteLength();
MOZ_ASSERT_IF(buffer->isDetached(), maxByteLength == 0);
// Step 7.
@@ -914,8 +909,6 @@ void ArrayBufferObject::detach(JSContext* cx,
// Update all views of the buffer to account for the buffer having been
// detached, and clear the buffer's data and list of views.
- //
- // Typed object buffers are not exposed and cannot be detached.
auto& innerViews = ObjectRealm::get(buffer).innerViews.get();
if (InnerViewTable::ViewVector* views =
@@ -962,6 +955,20 @@ void ResizableArrayBufferObject::resize(size_t newByteLength) {
}
setByteLength(newByteLength);
+
+ // Update all views of the buffer to account for the buffer having been
+ // resized.
+
+ auto& innerViews = ObjectRealm::get(this).innerViews.get();
+ if (InnerViewTable::ViewVector* views =
+ innerViews.maybeViewsUnbarriered(this)) {
+ for (auto& view : *views) {
+ view->notifyBufferResized();
+ }
+ }
+ if (auto* view = firstView()) {
+ view->as<ArrayBufferViewObject>().notifyBufferResized();
+ }
}
/* clang-format off */
@@ -1490,10 +1497,7 @@ size_t ArrayBufferObject::byteLength() const {
inline size_t ArrayBufferObject::associatedBytes() const {
if (isMalloced()) {
- if (isResizable()) {
- return as<ResizableArrayBufferObject>().maxByteLength();
- }
- return byteLength();
+ return maxByteLength();
}
if (isMapped()) {
return RoundUp(byteLength(), js::gc::SystemPageSize());
@@ -2472,7 +2476,7 @@ bool ArrayBufferObject::ensureNonInline(JSContext* cx,
return true;
}
- size_t nbytes = buffer->byteLength();
+ size_t nbytes = buffer->maxByteLength();
ArrayBufferContents copy = NewCopiedBufferContents(cx, buffer);
if (!copy) {
return false;
diff --git a/js/src/vm/ArrayBufferObject.h b/js/src/vm/ArrayBufferObject.h
index 17faf7682e..5aa96bf887 100644
--- a/js/src/vm/ArrayBufferObject.h
+++ b/js/src/vm/ArrayBufferObject.h
@@ -566,6 +566,12 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared {
void setDataPointer(BufferContents contents);
void setByteLength(size_t length);
+ /**
+ * Return the byte length for fixed-length buffers or the maximum byte length
+ * for resizable buffers.
+ */
+ inline size_t maxByteLength() const;
+
size_t associatedBytes() const;
uint32_t flags() const;
@@ -703,6 +709,13 @@ class ResizableArrayBufferObject : public ArrayBufferObject {
JS::Handle<ResizableArrayBufferObject*> source);
};
+size_t ArrayBufferObject::maxByteLength() const {
+ if (isResizable()) {
+ return as<ResizableArrayBufferObject>().maxByteLength();
+ }
+ return byteLength();
+}
+
// Create a buffer for a wasm memory, whose type is determined by
// memory.indexType().
ArrayBufferObjectMaybeShared* CreateWasmBuffer(JSContext* cx,
@@ -775,6 +788,7 @@ class InnerViewTable {
private:
friend class ArrayBufferObject;
+ friend class ResizableArrayBufferObject;
bool addView(JSContext* cx, ArrayBufferObject* buffer,
ArrayBufferViewObject* view);
ViewVector* maybeViewsUnbarriered(ArrayBufferObject* buffer);
diff --git a/js/src/vm/ArrayBufferViewObject.cpp b/js/src/vm/ArrayBufferViewObject.cpp
index 4bcd7890c1..27004f3e2a 100644
--- a/js/src/vm/ArrayBufferViewObject.cpp
+++ b/js/src/vm/ArrayBufferViewObject.cpp
@@ -41,7 +41,7 @@ void ArrayBufferViewObject::trace(JSTracer* trc, JSObject* obj) {
&gc::MaybeForwardedObjectAs<ResizableArrayBufferObject>(bufferObj);
}
if (buffer) {
- size_t offset = view->byteOffset();
+ size_t offset = view->dataPointerOffset();
MOZ_ASSERT_IF(!buffer->dataPointer(), offset == 0);
// The data may or may not be inline with the buffer. The buffer can only
@@ -69,13 +69,22 @@ void ArrayBufferViewObject::notifyBufferDetached() {
setFixedSlot(DATA_SLOT, UndefinedValue());
}
+void ArrayBufferViewObject::notifyBufferResized() {
+ MOZ_ASSERT(!isSharedMemory());
+ MOZ_ASSERT(hasBuffer());
+ MOZ_ASSERT(!bufferUnshared()->isLengthPinned());
+ MOZ_ASSERT(bufferUnshared()->isResizable());
+
+ computeResizableLengthAndByteOffset(bytesPerElement());
+}
+
void ArrayBufferViewObject::notifyBufferMoved(uint8_t* srcBufStart,
uint8_t* dstBufStart) {
MOZ_ASSERT(!isSharedMemory());
MOZ_ASSERT(hasBuffer());
if (srcBufStart != dstBufStart) {
- void* data = dstBufStart + byteOffset();
+ void* data = dstBufStart + dataPointerOffset();
getFixedSlotRef(DATA_SLOT).unbarrieredSet(PrivateValue(data));
}
}
@@ -183,6 +192,76 @@ bool ArrayBufferViewObject::init(JSContext* cx,
return true;
}
+bool ArrayBufferViewObject::initResizable(JSContext* cx,
+ ArrayBufferObjectMaybeShared* buffer,
+ size_t byteOffset, size_t length,
+ uint32_t bytesPerElement,
+ AutoLength autoLength) {
+ MOZ_ASSERT(buffer->isResizable());
+
+ if (!init(cx, buffer, byteOffset, length, bytesPerElement)) {
+ return false;
+ }
+
+ initFixedSlot(AUTO_LENGTH_SLOT, BooleanValue(static_cast<bool>(autoLength)));
+ initFixedSlot(INITIAL_LENGTH_SLOT, PrivateValue(length));
+ initFixedSlot(INITIAL_BYTE_OFFSET_SLOT, PrivateValue(byteOffset));
+
+ // Compute the actual byteLength and byteOffset for non-shared buffers.
+ if (!isSharedMemory()) {
+ computeResizableLengthAndByteOffset(bytesPerElement);
+ }
+
+ MOZ_ASSERT(!isOutOfBounds(), "can't create out-of-bounds views");
+
+ return true;
+}
+
+void ArrayBufferViewObject::computeResizableLengthAndByteOffset(
+ size_t bytesPerElement) {
+ MOZ_ASSERT(!isSharedMemory());
+ MOZ_ASSERT(hasBuffer());
+ MOZ_ASSERT(!bufferUnshared()->isLengthPinned());
+ MOZ_ASSERT(bufferUnshared()->isResizable());
+
+ size_t byteOffsetStart = initialByteOffset();
+ size_t bufferByteLength = bufferUnshared()->byteLength();
+
+ // Out-of-bounds if the byteOffset exceeds the buffer length.
+ if (byteOffsetStart > bufferByteLength) {
+ setFixedSlot(LENGTH_SLOT, PrivateValue(size_t(0)));
+ setFixedSlot(BYTEOFFSET_SLOT, PrivateValue(size_t(0)));
+ return;
+ }
+
+ size_t length;
+ if (isAutoLength()) {
+ length = (bufferByteLength - byteOffsetStart) / bytesPerElement;
+ } else {
+ length = initialLength();
+
+ // Out-of-bounds if the byteOffset end index exceeds the buffer length.
+ size_t byteOffsetEnd = byteOffsetStart + length * bytesPerElement;
+ if (byteOffsetEnd > bufferByteLength) {
+ setFixedSlot(LENGTH_SLOT, PrivateValue(size_t(0)));
+ setFixedSlot(BYTEOFFSET_SLOT, PrivateValue(size_t(0)));
+ return;
+ }
+ }
+
+ setFixedSlot(LENGTH_SLOT, PrivateValue(length));
+ setFixedSlot(BYTEOFFSET_SLOT, PrivateValue(byteOffsetStart));
+}
+
+size_t ArrayBufferViewObject::bytesPerElement() const {
+ if (is<TypedArrayObject>()) {
+ return as<TypedArrayObject>().bytesPerElement();
+ }
+
+ MOZ_ASSERT(is<DataViewObject>());
+ return 1;
+}
+
bool ArrayBufferViewObject::hasResizableBuffer() const {
if (auto* buffer = bufferEither()) {
return buffer->isResizable();
@@ -190,6 +269,98 @@ bool ArrayBufferViewObject::hasResizableBuffer() const {
return false;
}
+size_t ArrayBufferViewObject::dataPointerOffset() const {
+ // Views without a buffer have a zero offset.
+ if (!hasBuffer()) {
+ MOZ_ASSERT(byteOffsetSlotValue() == 0);
+ return 0;
+ }
+
+ // Views on shared buffers store the offset in |byteOffset|.
+ if (isSharedMemory()) {
+ return byteOffsetSlotValue();
+ }
+
+ // Can be called during tracing, so the buffer is possibly forwarded.
+ const auto* bufferObj = gc::MaybeForwarded(&bufferValue().toObject());
+
+ // Two distinct classes are used for non-shared buffers.
+ MOZ_ASSERT(
+ gc::MaybeForwardedObjectIs<FixedLengthArrayBufferObject>(bufferObj) ||
+ gc::MaybeForwardedObjectIs<ResizableArrayBufferObject>(bufferObj));
+
+ // Ensure these two classes can be casted to ArrayBufferObject.
+ static_assert(
+ std::is_base_of_v<ArrayBufferObject, FixedLengthArrayBufferObject>);
+ static_assert(
+ std::is_base_of_v<ArrayBufferObject, ResizableArrayBufferObject>);
+
+ // Manual cast necessary because the buffer is possibly forwarded.
+ const auto* buffer = static_cast<const ArrayBufferObject*>(bufferObj);
+
+ // Views on resizable buffers store the offset in |initialByteOffset|.
+ if (buffer->isResizable() && !buffer->isDetached()) {
+ return initialByteOffsetValue();
+ }
+
+ // Callers expect that this method returns zero for detached buffers.
+ MOZ_ASSERT_IF(buffer->isDetached(), byteOffsetSlotValue() == 0);
+
+ // Views on fixed-length buffers store the offset in |byteOffset|.
+ return byteOffsetSlotValue();
+}
+
+mozilla::Maybe<size_t> ArrayBufferViewObject::byteOffset() const {
+ // |byteOffset| is set to zero for detached or out-of-bounds views, so a
+ // non-zero value indicates the view is in-bounds.
+ size_t byteOffset = byteOffsetSlotValue();
+ if (byteOffset > 0) {
+ MOZ_ASSERT(!hasDetachedBuffer());
+ MOZ_ASSERT_IF(hasResizableBuffer(), !isOutOfBounds());
+ return mozilla::Some(byteOffset);
+ }
+ if (hasDetachedBufferOrIsOutOfBounds()) {
+ return mozilla::Nothing{};
+ }
+ return mozilla::Some(0);
+}
+
+mozilla::Maybe<size_t> ArrayBufferViewObject::length() const {
+ // |length| is set to zero for detached or out-of-bounds views, so a non-zero
+ // value indicates the view is in-bounds.
+ size_t length = lengthSlotValue();
+ if (MOZ_LIKELY(length > 0)) {
+ MOZ_ASSERT(!hasDetachedBuffer());
+ MOZ_ASSERT_IF(hasResizableBuffer(), !isOutOfBounds());
+ MOZ_ASSERT(!isSharedMemory() || !hasResizableBuffer() || !isAutoLength(),
+ "length is zero for auto-length growable shared buffers");
+ return mozilla::Some(length);
+ }
+
+ if (hasDetachedBufferOrIsOutOfBounds()) {
+ return mozilla::Nothing{};
+ }
+
+ if (isSharedMemory()) {
+ auto* buffer = bufferShared();
+ MOZ_ASSERT(buffer, "shared memory doesn't use inline data");
+
+ // Views backed by a growable SharedArrayBuffer can never get out-of-bounds,
+ // but we have to dynamically compute the length when the auto-length flag
+ // is set.
+ if (buffer->isGrowable() && isAutoLength()) {
+ size_t bufferByteLength = buffer->byteLength();
+ size_t byteOffset = byteOffsetSlotValue();
+ MOZ_ASSERT(byteOffset <= bufferByteLength);
+ MOZ_ASSERT(byteOffset == initialByteOffset(),
+ "views on growable shared buffers can't get out-of-bounds");
+
+ return mozilla::Some((bufferByteLength - byteOffset) / bytesPerElement());
+ }
+ }
+ return mozilla::Some(0);
+}
+
#if defined(DEBUG) || defined(JS_JITSPEW)
void ArrayBufferViewObject::dumpOwnFields(js::JSONPrinter& json) const {
json.formatProperty("length", "%zu",
diff --git a/js/src/vm/ArrayBufferViewObject.h b/js/src/vm/ArrayBufferViewObject.h
index c1c1ccac88..babba93f9e 100644
--- a/js/src/vm/ArrayBufferViewObject.h
+++ b/js/src/vm/ArrayBufferViewObject.h
@@ -7,6 +7,8 @@
#ifndef vm_ArrayBufferViewObject_h
#define vm_ArrayBufferViewObject_h
+#include "mozilla/Maybe.h"
+
#include "builtin/TypedArrayConstants.h"
#include "vm/ArrayBufferObject.h"
#include "vm/NativeObject.h"
@@ -47,6 +49,14 @@ class ArrayBufferViewObject : public NativeObject {
static constexpr size_t RESERVED_SLOTS = 4;
+ // Additional slots for views on resizable/growable (Shared)ArrayBufferObject.
+
+ static const uint8_t AUTO_LENGTH_SLOT = 4;
+ static const uint8_t INITIAL_LENGTH_SLOT = 5;
+ static const uint8_t INITIAL_BYTE_OFFSET_SLOT = 6;
+
+ static constexpr size_t RESIZABLE_RESERVED_SLOTS = 7;
+
#ifdef DEBUG
static const uint8_t ZeroLengthArrayData = 0x4A;
#endif
@@ -63,6 +73,15 @@ class ArrayBufferViewObject : public NativeObject {
static constexpr int dataOffset() {
return NativeObject::getFixedSlotOffset(DATA_SLOT);
}
+ static constexpr int autoLengthOffset() {
+ return NativeObject::getFixedSlotOffset(AUTO_LENGTH_SLOT);
+ }
+ static constexpr int initialLengthOffset() {
+ return NativeObject::getFixedSlotOffset(INITIAL_LENGTH_SLOT);
+ }
+ static constexpr int initialByteOffsetOffset() {
+ return NativeObject::getFixedSlotOffset(INITIAL_BYTE_OFFSET_SLOT);
+ }
private:
void* dataPointerEither_() const {
@@ -76,10 +95,19 @@ class ArrayBufferViewObject : public NativeObject {
size_t byteOffset, size_t length,
uint32_t bytesPerElement);
+ enum class AutoLength : bool { No, Yes };
+
+ [[nodiscard]] bool initResizable(JSContext* cx,
+ ArrayBufferObjectMaybeShared* buffer,
+ size_t byteOffset, size_t length,
+ uint32_t bytesPerElement,
+ AutoLength autoLength);
+
static ArrayBufferObjectMaybeShared* ensureBufferObject(
JSContext* cx, Handle<ArrayBufferViewObject*> obj);
void notifyBufferDetached();
+ void notifyBufferResized();
void notifyBufferMoved(uint8_t* srcBufStart, uint8_t* dstBufStart);
void initDataPointer(SharedMem<uint8_t*> viewData) {
@@ -156,6 +184,24 @@ class ArrayBufferViewObject : public NativeObject {
bool hasResizableBuffer() const;
+ private:
+ bool hasDetachedBufferOrIsOutOfBounds() const {
+ // Shared buffers can't be detached or get out-of-bounds.
+ if (isSharedMemory()) {
+ return false;
+ }
+
+ // A view with a null buffer has never had its buffer exposed to become
+ // detached or get out-of-bounds.
+ auto* buffer = bufferUnshared();
+ if (!buffer) {
+ return false;
+ }
+
+ return buffer->isDetached() || (buffer->isResizable() && isOutOfBounds());
+ }
+
+ public:
bool isLengthPinned() const {
Value buffer = bufferValue();
if (buffer.isBoolean()) {
@@ -193,11 +239,75 @@ class ArrayBufferViewObject : public NativeObject {
static bool ensureNonInline(JSContext* cx,
JS::Handle<ArrayBufferViewObject*> view);
+ private:
+ void computeResizableLengthAndByteOffset(size_t bytesPerElement);
+
+ size_t bytesPerElement() const;
+
protected:
- size_t byteOffset() const {
+ size_t lengthSlotValue() const {
+ return size_t(getFixedSlot(LENGTH_SLOT).toPrivate());
+ }
+
+ size_t byteOffsetSlotValue() const {
return size_t(getFixedSlot(BYTEOFFSET_SLOT).toPrivate());
}
+ /**
+ * Offset into the buffer's data-pointer. Different from |byteOffset| for
+ * views on non-detached resizable buffers which are currently out-of-bounds.
+ */
+ size_t dataPointerOffset() const;
+
+ /**
+ * Return the current length, or |Nothing| if the view is detached or
+ * out-of-bounds.
+ */
+ mozilla::Maybe<size_t> length() const;
+
+ public:
+ /**
+ * Return the current byteOffset, or |Nothing| if the view is detached or
+ * out-of-bounds.
+ */
+ mozilla::Maybe<size_t> byteOffset() const;
+
+ private:
+ size_t initialByteOffsetValue() const {
+ // No assertion for resizable buffers here, because this method is called
+ // from dataPointerOffset(), which can be called during tracing.
+ return size_t(getFixedSlot(INITIAL_BYTE_OFFSET_SLOT).toPrivate());
+ }
+
+ public:
+ // The following methods can only be called on views for resizable buffers.
+
+ bool isAutoLength() const {
+ MOZ_ASSERT(hasResizableBuffer());
+ return getFixedSlot(AUTO_LENGTH_SLOT).toBoolean();
+ }
+
+ size_t initialLength() const {
+ MOZ_ASSERT(hasResizableBuffer());
+ return size_t(getFixedSlot(INITIAL_LENGTH_SLOT).toPrivate());
+ }
+
+ size_t initialByteOffset() const {
+ MOZ_ASSERT(hasResizableBuffer());
+ return initialByteOffsetValue();
+ }
+
+ bool isOutOfBounds() const {
+ MOZ_ASSERT(hasResizableBuffer());
+
+ // The view is out-of-bounds if the length and byteOffset slots are both set
+ // to zero and the initial length or initial byteOffset are non-zero. If the
+ // initial length and initial byteOffset are both zero, the view can never
+ // get out-of-bounds.
+ return lengthSlotValue() == 0 && byteOffsetSlotValue() == 0 &&
+ (initialLength() > 0 || initialByteOffset() > 0);
+ }
+
public:
static void trace(JSTracer* trc, JSObject* obj);
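
The resizable-buffer bookkeeping added above encodes an out-of-bounds view by zeroing both the LENGTH_SLOT and BYTEOFFSET_SLOT while keeping the non-zero initial values, and recomputes both slots whenever the underlying buffer is resized. A toy model of that logic (plain C++, not the SpiderMonkey classes; the field names are made up for illustration):

    #include <cstddef>

    struct ResizableViewSketch {
      // Fixed at construction (mirrors the INITIAL_* and AUTO_LENGTH slots).
      size_t initialByteOffset = 0;
      size_t initialLength = 0;  // element count; ignored when autoLength
      bool autoLength = false;
      size_t bytesPerElement = 1;

      // Recomputed after every resize (mirrors LENGTH/BYTEOFFSET slots).
      size_t lengthSlot = 0;
      size_t byteOffsetSlot = 0;

      void recompute(size_t bufferByteLength) {
        if (initialByteOffset > bufferByteLength) {
          lengthSlot = byteOffsetSlot = 0;  // view is now out-of-bounds
          return;
        }
        size_t length;
        if (autoLength) {
          length = (bufferByteLength - initialByteOffset) / bytesPerElement;
        } else {
          length = initialLength;
          if (initialByteOffset + length * bytesPerElement > bufferByteLength) {
            lengthSlot = byteOffsetSlot = 0;  // view is now out-of-bounds
            return;
          }
        }
        lengthSlot = length;
        byteOffsetSlot = initialByteOffset;
      }

      bool isOutOfBounds() const {
        // Both slots zero, yet the view did not start as a zero-length,
        // zero-offset view.
        return lengthSlot == 0 && byteOffsetSlot == 0 &&
               (initialLength > 0 || initialByteOffset > 0);
      }
    };

Shrinking the buffer below the view's original extent zeroes both slots; growing it back restores them on the next recompute, which is why the initial offset and length have to be retained in their own slots.
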
diff --git a/js/src/vm/BigIntType.h b/js/src/vm/BigIntType.h
index 24f544cd81..fb9f4085e6 100644
--- a/js/src/vm/BigIntType.h
+++ b/js/src/vm/BigIntType.h
@@ -419,6 +419,7 @@ class BigInt final : public js::gc::CellWithLengthAndFlags {
static JSLinearString* toStringGeneric(JSContext* cx, Handle<BigInt*>,
unsigned radix);
+ friend struct ::JSStructuredCloneReader; // So it can call the following:
static BigInt* destructivelyTrimHighZeroDigits(JSContext* cx, BigInt* x);
bool absFitsInUint64() const { return digitLength() <= 64 / DigitBits; }
diff --git a/js/src/vm/CharacterEncoding.cpp b/js/src/vm/CharacterEncoding.cpp
index 79d28ab719..3d05275e2d 100644
--- a/js/src/vm/CharacterEncoding.cpp
+++ b/js/src/vm/CharacterEncoding.cpp
@@ -286,11 +286,6 @@ static bool InflateUTF8ToUTF16(JSContext* cx, const UTF8Chars& src,
break;
}
} else {
- // Non-ASCII code unit. Determine its length in bytes (n).
- uint32_t n = 1;
- while (v & (0x80 >> n)) {
- n++;
- }
#define INVALID(report, arg, n2) \
do { \
@@ -315,6 +310,14 @@ static bool InflateUTF8ToUTF16(JSContext* cx, const UTF8Chars& src,
} \
} while (0)
+ // Non-ASCII code unit. Determine its length in bytes (n).
+ //
+ // Avoid undefined behavior from passing in 0
+ // (https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html#index-_005f_005fbuiltin_005fclz)
+ // by turning on the low bit so that 0xff will set n=31-24=7, which will
+ // be detected as an invalid character.
+ uint32_t n = mozilla::CountLeadingZeroes32(~int8_t(src[i]) | 0x1) - 24;
+
// Check the leading byte.
if (n < 2 || n > 4) {
       INVALID(ReportInvalidCharacter, i, 1);
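
The replacement above derives the sequence length from the count of leading one bits in the lead byte, with the low bit forced on so the intrinsic never sees zero. A standalone sketch of the same computation, substituting C++20's std::countl_zero for mozilla::CountLeadingZeroes32 (an assumption; the tree uses the mfbt helper):

    #include <bit>
    #include <cassert>
    #include <cstdint>

    // Only meaningful for non-ASCII lead bytes (>= 0x80), as in the patched loop.
    static uint32_t Utf8SequenceLength(uint8_t byte) {
      // ~int8_t(byte) turns the run of leading ones into leading zeroes; the
      // "| 0x1" keeps the argument non-zero, mapping 0xff to 31 - 24 = 7,
      // which the caller then rejects via the n < 2 || n > 4 check.
      uint32_t bits = uint32_t(~int8_t(byte)) | 0x1u;
      return uint32_t(std::countl_zero(bits)) - 24;
    }

    int main() {
      assert(Utf8SequenceLength(0xC3) == 2);  // two-byte sequence lead
      assert(Utf8SequenceLength(0xE2) == 3);  // three-byte sequence lead
      assert(Utf8SequenceLength(0xF0) == 4);  // four-byte sequence lead
      assert(Utf8SequenceLength(0xFF) == 7);  // invalid, filtered out later
      return 0;
    }
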
diff --git a/js/src/vm/CommonPropertyNames.h b/js/src/vm/CommonPropertyNames.h
index 7e81f4cb8a..d4376ec6a4 100644
--- a/js/src/vm/CommonPropertyNames.h
+++ b/js/src/vm/CommonPropertyNames.h
@@ -332,6 +332,7 @@
MACRO_(join, "join") \
MACRO2(js, "js") \
MACRO_(jsStringModule, "js-string") \
+ MACRO_(json, "json") \
MACRO_(keys, "keys") \
IF_DECORATORS(MACRO_(kind, "kind")) \
MACRO_(label, "label") \
diff --git a/js/src/vm/EnvironmentObject.cpp b/js/src/vm/EnvironmentObject.cpp
index cbb14f93f2..008cfca260 100644
--- a/js/src/vm/EnvironmentObject.cpp
+++ b/js/src/vm/EnvironmentObject.cpp
@@ -416,6 +416,40 @@ ModuleEnvironmentObject* ModuleEnvironmentObject::create(
return env;
}
+/* static */
+ModuleEnvironmentObject* ModuleEnvironmentObject::createSynthetic(
+ JSContext* cx, Handle<ModuleObject*> module) {
+ Rooted<SharedShape*> shape(cx,
+ CreateEnvironmentShapeForSyntheticModule(
+ cx, &class_, JSSLOT_FREE(&class_), module));
+ MOZ_ASSERT(shape->getObjectClass() == &class_);
+
+ Rooted<ModuleEnvironmentObject*> env(
+ cx, CreateEnvironmentObject<ModuleEnvironmentObject>(cx, shape,
+ TenuredObject));
+ if (!env) {
+ return nullptr;
+ }
+
+ env->initReservedSlot(MODULE_SLOT, ObjectValue(*module));
+
+ // Initialize this early so that we can manipulate the env object without
+ // causing assertions.
+ env->initEnclosingEnvironment(&cx->global()->lexicalEnvironment());
+
+  // It is not possible to add or remove bindings from a module environment
+  // after this point, as module code is always strict.
+#ifdef DEBUG
+ for (ShapePropertyIter<NoGC> iter(env->shape()); !iter.done(); iter++) {
+ MOZ_ASSERT(!iter->configurable());
+ }
+ MOZ_ASSERT(env->hasFlag(ObjectFlag::NotExtensible));
+ MOZ_ASSERT(!env->inDictionaryMode());
+#endif
+
+ return env;
+}
+
ModuleObject& ModuleEnvironmentObject::module() const {
return getReservedSlot(MODULE_SLOT).toObject().as<ModuleObject>();
}
diff --git a/js/src/vm/EnvironmentObject.h b/js/src/vm/EnvironmentObject.h
index fd60128e1f..192d8d2ce7 100644
--- a/js/src/vm/EnvironmentObject.h
+++ b/js/src/vm/EnvironmentObject.h
@@ -622,6 +622,8 @@ class ModuleEnvironmentObject : public EnvironmentObject {
static const JSClassOps classOps_;
public:
+ using EnvironmentObject::setAliasedBinding;
+
static const JSClass class_;
static constexpr uint32_t RESERVED_SLOTS = 2;
@@ -630,6 +632,9 @@ class ModuleEnvironmentObject : public EnvironmentObject {
static ModuleEnvironmentObject* create(JSContext* cx,
Handle<ModuleObject*> module);
+ static ModuleEnvironmentObject* createSynthetic(JSContext* cx,
+ Handle<ModuleObject*> module);
+
ModuleObject& module() const;
IndirectBindingMap& importBindings() const;
@@ -648,6 +653,8 @@ class ModuleEnvironmentObject : public EnvironmentObject {
// `env` may be a DebugEnvironmentProxy, but not a hollow environment.
static ModuleEnvironmentObject* find(JSObject* env);
+ uint32_t firstSyntheticValueSlot() { return RESERVED_SLOTS; }
+
private:
static bool lookupProperty(JSContext* cx, HandleObject obj, HandleId id,
MutableHandleObject objp, PropertyResult* propp);
diff --git a/js/src/vm/GlobalObject.cpp b/js/src/vm/GlobalObject.cpp
index 9c9003a2ba..6782433fd3 100644
--- a/js/src/vm/GlobalObject.cpp
+++ b/js/src/vm/GlobalObject.cpp
@@ -202,7 +202,7 @@ bool GlobalObject::skipDeselectedConstructor(JSContext* cx, JSProtoKey key) {
return false;
case JSProto_Segmenter:
-# if defined(MOZ_ICU4X) && defined(NIGHTLY_BUILD)
+# if defined(MOZ_ICU4X)
return false;
# else
return true;
diff --git a/js/src/vm/GlobalObject.h b/js/src/vm/GlobalObject.h
index 92dec6698f..a296336385 100644
--- a/js/src/vm/GlobalObject.h
+++ b/js/src/vm/GlobalObject.h
@@ -129,8 +129,8 @@ class GlobalObjectData {
HeapPtr<JSObject*> constructor;
HeapPtr<JSObject*> prototype;
};
- using CtorArray =
- mozilla::EnumeratedArray<JSProtoKey, JSProto_LIMIT, ConstructorWithProto>;
+ using CtorArray = mozilla::EnumeratedArray<JSProtoKey, ConstructorWithProto,
+ size_t(JSProto_LIMIT)>;
CtorArray builtinConstructors;
// Built-in prototypes for this global. Note that this is different from the
@@ -154,8 +154,8 @@ class GlobalObjectData {
Limit
};
- using ProtoArray =
- mozilla::EnumeratedArray<ProtoKind, ProtoKind::Limit, HeapPtr<JSObject*>>;
+ using ProtoArray = mozilla::EnumeratedArray<ProtoKind, HeapPtr<JSObject*>,
+ size_t(ProtoKind::Limit)>;
ProtoArray builtinProtos;
HeapPtr<GlobalScope*> emptyGlobalScope;
@@ -195,8 +195,9 @@ class GlobalObjectData {
// Shape for PlainObject with %Object.prototype% as proto, for each object
// AllocKind.
- using PlainObjectShapeArray = mozilla::EnumeratedArray<
- PlainObjectSlotsKind, PlainObjectSlotsKind::Limit, HeapPtr<SharedShape*>>;
+ using PlainObjectShapeArray =
+ mozilla::EnumeratedArray<PlainObjectSlotsKind, HeapPtr<SharedShape*>,
+ size_t(PlainObjectSlotsKind::Limit)>;
PlainObjectShapeArray plainObjectShapesWithDefaultProto;
// Shape for JSFunction with %Function.prototype% as proto, for both
diff --git a/js/src/vm/HelperThreadState.h b/js/src/vm/HelperThreadState.h
index 8e601a385c..a43efef7af 100644
--- a/js/src/vm/HelperThreadState.h
+++ b/js/src/vm/HelperThreadState.h
@@ -26,13 +26,12 @@
#include <stdint.h> // uint32_t, uint64_t
#include <utility> // std::move
-#include "ds/Fifo.h" // Fifo
-#include "frontend/CompilationStencil.h" // frontend::CompilationStencil
-#include "gc/GCRuntime.h" // gc::GCRuntime
-#include "js/AllocPolicy.h" // SystemAllocPolicy
-#include "js/CompileOptions.h" // JS::ReadOnlyCompileOptions
-#include "js/experimental/CompileScript.h" // JS::CompilationStorage
-#include "js/experimental/JSStencil.h" // JS::InstantiationStorage
+#include "ds/Fifo.h" // Fifo
+#include "frontend/CompilationStencil.h" // frontend::CompilationStencil
+#include "gc/GCRuntime.h" // gc::GCRuntime
+#include "js/AllocPolicy.h" // SystemAllocPolicy
+#include "js/CompileOptions.h" // JS::ReadOnlyCompileOptions
+#include "js/experimental/JSStencil.h" // JS::InstantiationStorage
#include "js/HelperThreadAPI.h" // JS::HelperThreadTaskCallback, JS::DispatchReason
#include "js/MemoryMetrics.h" // JS::GlobalStats
#include "js/ProfilingStack.h" // JS::RegisterThreadCallback, JS::UnregisterThreadCallback
@@ -115,13 +114,19 @@ class GlobalHelperThreadState {
PromiseHelperTaskVector;
// Count of running task by each threadType.
- mozilla::EnumeratedArray<ThreadType, ThreadType::THREAD_TYPE_MAX, size_t>
+ mozilla::EnumeratedArray<ThreadType, size_t,
+ size_t(ThreadType::THREAD_TYPE_MAX)>
runningTaskCount;
size_t totalCountRunningTasks;
WriteOnceData<JS::RegisterThreadCallback> registerThread;
WriteOnceData<JS::UnregisterThreadCallback> unregisterThread;
+ // Count of helper threads 'reserved' for parallel marking. This is used to
+ // prevent too many runtimes trying to mark in parallel at once. Does not stop
+ // threads from being used for other kinds of task, including GC tasks.
+ HelperThreadLockData<size_t> gcParallelMarkingThreads;
+
private:
// The lists below are all protected by |lock|.
diff --git a/js/src/vm/HelperThreads.cpp b/js/src/vm/HelperThreads.cpp
index 36792b1d09..da8231c1dc 100644
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -859,6 +859,8 @@ void GlobalHelperThreadState::finish(AutoLockHelperThreadState& lock) {
return;
}
+ MOZ_ASSERT_IF(!JSRuntime::hasLiveRuntimes(), gcParallelMarkingThreads == 0);
+
finishThreads(lock);
// Make sure there are no Ion free tasks left. We check this here because,
diff --git a/js/src/vm/Interpreter.cpp b/js/src/vm/Interpreter.cpp
index 255ab9aa94..9eec4b81dd 100644
--- a/js/src/vm/Interpreter.cpp
+++ b/js/src/vm/Interpreter.cpp
@@ -31,7 +31,6 @@
#include "builtin/Object.h"
#include "builtin/Promise.h"
#include "gc/GC.h"
-#include "jit/AtomicOperations.h"
#include "jit/BaselineJIT.h"
#include "jit/Jit.h"
#include "jit/JitRuntime.h"
@@ -4738,15 +4737,6 @@ bool js::GreaterThanOrEqual(JSContext* cx, MutableHandleValue lhs,
return GreaterThanOrEqualOperation(cx, lhs, rhs, res);
}
-bool js::AtomicIsLockFree(JSContext* cx, HandleValue in, int* out) {
- int i;
- if (!ToInt32(cx, in, &i)) {
- return false;
- }
- *out = js::jit::AtomicOperations::isLockfreeJS(i);
- return true;
-}
-
bool js::DeleteNameOperation(JSContext* cx, Handle<PropertyName*> name,
HandleObject scopeObj, MutableHandleValue res) {
RootedObject scope(cx), pobj(cx);
diff --git a/js/src/vm/Interpreter.h b/js/src/vm/Interpreter.h
index d962e7f344..8ac7db7004 100644
--- a/js/src/vm/Interpreter.h
+++ b/js/src/vm/Interpreter.h
@@ -589,8 +589,6 @@ bool GreaterThan(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
bool GreaterThanOrEqual(JSContext* cx, MutableHandleValue lhs,
MutableHandleValue rhs, bool* res);
-bool AtomicIsLockFree(JSContext* cx, HandleValue in, int* out);
-
template <bool strict>
bool DelPropOperation(JSContext* cx, HandleValue val,
Handle<PropertyName*> name, bool* res);
diff --git a/js/src/vm/Iteration.cpp b/js/src/vm/Iteration.cpp
index 304054e0ec..d02f9de8cf 100644
--- a/js/src/vm/Iteration.cpp
+++ b/js/src/vm/Iteration.cpp
@@ -829,7 +829,7 @@ static PropertyIteratorObject* CreatePropertyIterator(
bool supportsIndices, PropertyIndexVector* indices,
uint32_t cacheableProtoChainLength) {
MOZ_ASSERT_IF(indices, supportsIndices);
- if (props.length() > NativeIterator::PropCountLimit) {
+ if (props.length() >= NativeIterator::PropCountLimit) {
ReportAllocationOverflow(cx);
return nullptr;
}
diff --git a/js/src/vm/JSContext-inl.h b/js/src/vm/JSContext-inl.h
index f20c4b3c7d..252fefd88d 100644
--- a/js/src/vm/JSContext-inl.h
+++ b/js/src/vm/JSContext-inl.h
@@ -352,52 +352,6 @@ inline void JSContext::setRealmForJitExceptionHandler(JS::Realm* realm) {
realm_ = realm;
}
-inline JSScript* JSContext::currentScript(
- jsbytecode** ppc, AllowCrossRealm allowCrossRealm) const {
- if (ppc) {
- *ppc = nullptr;
- }
-
- js::Activation* act = activation();
- if (!act) {
- return nullptr;
- }
-
- MOZ_ASSERT(act->cx() == this);
-
- // Cross-compartment implies cross-realm.
- if (allowCrossRealm == AllowCrossRealm::DontAllow &&
- act->compartment() != compartment()) {
- return nullptr;
- }
-
- JSScript* script = nullptr;
- jsbytecode* pc = nullptr;
- if (act->isJit()) {
- if (act->hasWasmExitFP()) {
- return nullptr;
- }
- js::jit::GetPcScript(const_cast<JSContext*>(this), &script, &pc);
- } else {
- js::InterpreterFrame* fp = act->asInterpreter()->current();
- MOZ_ASSERT(!fp->runningInJit());
- script = fp->script();
- pc = act->asInterpreter()->regs().pc;
- }
-
- MOZ_ASSERT(script->containsPC(pc));
-
- if (allowCrossRealm == AllowCrossRealm::DontAllow &&
- script->realm() != realm()) {
- return nullptr;
- }
-
- if (ppc) {
- *ppc = pc;
- }
- return script;
-}
-
inline js::RuntimeCaches& JSContext::caches() { return runtime()->caches(); }
template <typename T, js::AllowGC allowGC, typename... Args>
diff --git a/js/src/vm/JSContext.cpp b/js/src/vm/JSContext.cpp
index 4d355dc828..5a4bfa86cd 100644
--- a/js/src/vm/JSContext.cpp
+++ b/js/src/vm/JSContext.cpp
@@ -55,6 +55,7 @@
#include "vm/BytecodeUtil.h" // JSDVG_IGNORE_STACK
#include "vm/ErrorObject.h"
#include "vm/ErrorReporting.h"
+#include "vm/FrameIter.h"
#include "vm/JSFunction.h"
#include "vm/JSObject.h"
#include "vm/PlainObject.h" // js::PlainObject
@@ -997,7 +998,6 @@ JSContext::JSContext(JSRuntime* runtime, const JS::ContextOptions& options)
suppressProfilerSampling(false),
tempLifoAlloc_(this, (size_t)TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
debuggerMutations(this, 0),
- ionPcScriptCache(this, nullptr),
status(this, JS::ExceptionStatus::None),
unwrappedException_(this),
unwrappedExceptionStack_(this),
@@ -1263,6 +1263,34 @@ void JSContext::resetJitStackLimit() {
void JSContext::initJitStackLimit() { resetJitStackLimit(); }
+JSScript* JSContext::currentScript(jsbytecode** ppc,
+ AllowCrossRealm allowCrossRealm) {
+ if (ppc) {
+ *ppc = nullptr;
+ }
+
+ // Fast path: there are no JS frames on the stack if there's no activation.
+ if (!activation()) {
+ return nullptr;
+ }
+
+ FrameIter iter(this);
+ if (iter.done() || !iter.hasScript()) {
+ return nullptr;
+ }
+
+ JSScript* script = iter.script();
+ if (allowCrossRealm == AllowCrossRealm::DontAllow &&
+ script->realm() != realm()) {
+ return nullptr;
+ }
+
+ if (ppc) {
+ *ppc = iter.pc();
+ }
+ return script;
+}
+
#ifdef JS_CRASH_DIAGNOSTICS
void ContextChecks::check(AbstractFramePtr frame, int argIndex) {
if (frame) {
diff --git a/js/src/vm/JSContext.h b/js/src/vm/JSContext.h
index 1801816fa0..57aa236801 100644
--- a/js/src/vm/JSContext.h
+++ b/js/src/vm/JSContext.h
@@ -20,7 +20,6 @@
#include "gc/GCEnum.h"
#include "gc/Memory.h"
#include "irregexp/RegExpTypes.h"
-#include "jit/PcScriptCache.h"
#include "js/ContextOptions.h" // JS::ContextOptions
#include "js/Exception.h"
#include "js/GCVector.h"
@@ -563,9 +562,6 @@ struct JS_PUBLIC_API JSContext : public JS::RootingContext,
js::ContextData<uint32_t> debuggerMutations;
- // Cache for jit::GetPcScript().
- js::ContextData<js::UniquePtr<js::jit::PcScriptCache>> ionPcScriptCache;
-
private:
// Indicates if an exception is pending and the reason for it.
js::ContextData<JS::ExceptionStatus> status;
@@ -693,9 +689,9 @@ struct JS_PUBLIC_API JSContext : public JS::RootingContext,
* overridden by passing AllowCrossRealm::Allow.
*/
enum class AllowCrossRealm { DontAllow = false, Allow = true };
- inline JSScript* currentScript(
- jsbytecode** pc = nullptr,
- AllowCrossRealm allowCrossRealm = AllowCrossRealm::DontAllow) const;
+ JSScript* currentScript(
+ jsbytecode** ppc = nullptr,
+ AllowCrossRealm allowCrossRealm = AllowCrossRealm::DontAllow);
inline void minorGC(JS::GCReason reason);
diff --git a/js/src/vm/JSONParser.cpp b/js/src/vm/JSONParser.cpp
index addd9aa263..f151f261b5 100644
--- a/js/src/vm/JSONParser.cpp
+++ b/js/src/vm/JSONParser.cpp
@@ -692,8 +692,9 @@ inline bool JSONFullParseHandlerAnyChar::finishObject(
if (gcHeap == gc::Heap::Tenured) {
newKind = TenuredObject;
}
+ // properties is traced in the parser; see JSONParser<CharT>::trace()
JSObject* obj = NewPlainObjectWithMaybeDuplicateKeys(
- cx, properties->begin(), properties->length(), newKind);
+ cx, Handle<IdValueVector>::fromMarkedLocation(properties), newKind);
if (!obj) {
return false;
}
diff --git a/js/src/vm/JSONParser.h b/js/src/vm/JSONParser.h
index 3a3da721f6..91e33c02b3 100644
--- a/js/src/vm/JSONParser.h
+++ b/js/src/vm/JSONParser.h
@@ -167,7 +167,7 @@ class MOZ_STACK_CLASS JSONFullParseHandlerAnyChar {
// State for an object that is currently being parsed. This includes all
// the key/value pairs that have been seen so far.
- using PropertyVector = JS::GCVector<IdValuePair, 10>;
+ using PropertyVector = IdValueVector;
enum class ParseType {
// Parsing a string as if by JSON.parse.
diff --git a/js/src/vm/JSObject.cpp b/js/src/vm/JSObject.cpp
index bafc7a4437..ea4dfeb6f7 100644
--- a/js/src/vm/JSObject.cpp
+++ b/js/src/vm/JSObject.cpp
@@ -3147,38 +3147,44 @@ js::gc::AllocKind JSObject::allocKindForTenure(
MOZ_ASSERT(IsInsideNursery(this));
- if (canHaveFixedElements()) {
- const NativeObject& nobj = as<NativeObject>();
- MOZ_ASSERT(nobj.numFixedSlots() == 0);
+ if (is<NativeObject>()) {
+ if (canHaveFixedElements()) {
+ const NativeObject& nobj = as<NativeObject>();
+ MOZ_ASSERT(nobj.numFixedSlots() == 0);
- /* Use minimal size object if we are just going to copy the pointer. */
- if (!nursery.isInside(nobj.getUnshiftedElementsHeader())) {
- return gc::AllocKind::OBJECT0_BACKGROUND;
- }
+ /* Use minimal size object if we are just going to copy the pointer. */
+ if (!nursery.isInside(nobj.getUnshiftedElementsHeader())) {
+ return gc::AllocKind::OBJECT0_BACKGROUND;
+ }
- size_t nelements = nobj.getDenseCapacity();
- return ForegroundToBackgroundAllocKind(GetGCArrayKind(nelements));
- }
+ size_t nelements = nobj.getDenseCapacity();
+ return ForegroundToBackgroundAllocKind(GetGCArrayKind(nelements));
+ }
- if (is<JSFunction>()) {
- return as<JSFunction>().getAllocKind();
- }
+ if (is<JSFunction>()) {
+ return as<JSFunction>().getAllocKind();
+ }
- // Fixed length typed arrays in the nursery may have a lazily allocated
- // buffer, make sure there is room for the array's fixed data when moving the
- // array.
- if (is<FixedLengthTypedArrayObject>() &&
- !as<FixedLengthTypedArrayObject>().hasBuffer()) {
- gc::AllocKind allocKind;
- if (as<FixedLengthTypedArrayObject>().hasInlineElements()) {
- size_t nbytes = as<FixedLengthTypedArrayObject>().byteLength();
- allocKind = FixedLengthTypedArrayObject::AllocKindForLazyBuffer(nbytes);
- } else {
- allocKind = GetGCObjectKind(getClass());
+ // Fixed length typed arrays in the nursery may have a lazily allocated
+ // buffer, make sure there is room for the array's fixed data when moving
+ // the array.
+ if (is<FixedLengthTypedArrayObject>() &&
+ !as<FixedLengthTypedArrayObject>().hasBuffer()) {
+ gc::AllocKind allocKind;
+ if (as<FixedLengthTypedArrayObject>().hasInlineElements()) {
+ size_t nbytes = as<FixedLengthTypedArrayObject>().byteLength();
+ allocKind = FixedLengthTypedArrayObject::AllocKindForLazyBuffer(nbytes);
+ } else {
+ allocKind = GetGCObjectKind(getClass());
+ }
+ return ForegroundToBackgroundAllocKind(allocKind);
}
- return ForegroundToBackgroundAllocKind(allocKind);
+
+ return as<NativeObject>().allocKindForTenure();
}
+ // Handle all non-native objects.
+
// Proxies that are CrossCompartmentWrappers may be nursery allocated.
if (is<ProxyObject>()) {
return as<ProxyObject>().allocKindForTenure();
@@ -3194,13 +3200,9 @@ js::gc::AllocKind JSObject::allocKindForTenure(
// WasmArrayObjects sometimes have a variable-length tail which contains the
// data for small arrays. Make sure we copy it all over to the new object.
- if (is<WasmArrayObject>()) {
- gc::AllocKind allocKind = as<WasmArrayObject>().allocKind();
- return allocKind;
- }
-
- // All nursery allocatable non-native objects are handled above.
- return as<NativeObject>().allocKindForTenure();
+ MOZ_ASSERT(is<WasmArrayObject>());
+ gc::AllocKind allocKind = as<WasmArrayObject>().allocKind();
+ return allocKind;
}
void JSObject::addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
diff --git a/js/src/vm/JSScript.cpp b/js/src/vm/JSScript.cpp
index 6ea871ad42..7c3ed975a0 100644
--- a/js/src/vm/JSScript.cpp
+++ b/js/src/vm/JSScript.cpp
@@ -3245,8 +3245,10 @@ void JSScript::resetWarmUpCounterToDelayIonCompilation() {
#if defined(DEBUG) || defined(JS_JITSPEW)
void BaseScript::dumpStringContent(js::GenericPrinter& out) const {
- out.printf("%s:%u:%u @ 0x%p", filename() ? filename() : "<null>", lineno(),
- column().oneOriginValue(), this);
+ StringEscape esc('"');
+ EscapePrinter ep(out, esc);
+ ep.printf("%s:%u:%u @ 0x%p", filename() ? filename() : "<null>", lineno(),
+ column().oneOriginValue(), this);
}
void JSScript::dump(JSContext* cx) {
diff --git a/js/src/vm/MemoryMetrics.cpp b/js/src/vm/MemoryMetrics.cpp
index 3a852cf9ea..c9c43fceed 100644
--- a/js/src/vm/MemoryMetrics.cpp
+++ b/js/src/vm/MemoryMetrics.cpp
@@ -211,11 +211,12 @@ static void StatsZoneCallback(JSRuntime* rt, void* data, Zone* zone,
rtStats->currZoneStats = &zStats;
zone->addSizeOfIncludingThis(
- rtStats->mallocSizeOf_, &zStats.code, &zStats.regexpZone, &zStats.jitZone,
- &zStats.cacheIRStubs, &zStats.uniqueIdMap, &zStats.initialPropMapTable,
- &zStats.shapeTables, &rtStats->runtime.atomsMarkBitmaps,
- &zStats.compartmentObjects, &zStats.crossCompartmentWrappersTables,
- &zStats.compartmentsPrivateData, &zStats.scriptCountsMap);
+ rtStats->mallocSizeOf_, &zStats.zoneObject, &zStats.code,
+ &zStats.regexpZone, &zStats.jitZone, &zStats.cacheIRStubs,
+ &zStats.uniqueIdMap, &zStats.initialPropMapTable, &zStats.shapeTables,
+ &rtStats->runtime.atomsMarkBitmaps, &zStats.compartmentObjects,
+ &zStats.crossCompartmentWrappersTables, &zStats.compartmentsPrivateData,
+ &zStats.scriptCountsMap);
}
static void StatsRealmCallback(JSContext* cx, void* data, Realm* realm,
diff --git a/js/src/vm/Modules.cpp b/js/src/vm/Modules.cpp
index 87546cf280..f461e7bec1 100644
--- a/js/src/vm/Modules.cpp
+++ b/js/src/vm/Modules.cpp
@@ -15,7 +15,9 @@
#include "jstypes.h" // JS_PUBLIC_API
+#include "builtin/JSON.h" // js::ParseJSONWithReviver
#include "builtin/ModuleObject.h" // js::FinishDynamicModuleImport, js::{,Requested}ModuleObject
+#include "builtin/Promise.h" // js::CreatePromiseObjectForAsync, js::AsyncFunctionReturned
#include "ds/Sort.h"
#include "frontend/BytecodeCompiler.h" // js::frontend::CompileModule
#include "frontend/FrontendContext.h" // js::AutoReportFrontendContext
@@ -33,6 +35,8 @@
#include "vm/JSAtomUtils-inl.h" // AtomToId
#include "vm/JSContext-inl.h" // JSContext::{c,releaseC}heck
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
using namespace js;
@@ -120,6 +124,46 @@ JS_PUBLIC_API JSObject* JS::CompileModule(JSContext* cx,
return CompileModuleHelper(cx, options, srcBuf);
}
+JS_PUBLIC_API JSObject* JS::CompileJsonModule(
+ JSContext* cx, const ReadOnlyCompileOptions& options,
+ SourceText<char16_t>& srcBuf) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ JS::RootedValue jsonValue(cx);
+ auto charRange =
+ mozilla::Range<const char16_t>(srcBuf.get(), srcBuf.length());
+ if (!js::ParseJSONWithReviver(cx, charRange, NullHandleValue, &jsonValue)) {
+ return nullptr;
+ }
+
+ Rooted<ExportNameVector> exportNames(cx);
+ if (!exportNames.reserve(1)) {
+ return nullptr;
+ }
+ exportNames.infallibleAppend(cx->names().default_);
+
+ Rooted<ModuleObject*> moduleObject(
+ cx, ModuleObject::createSynthetic(cx, &exportNames));
+ if (!moduleObject) {
+ return nullptr;
+ }
+
+ Rooted<GCVector<Value>> exportValues(cx, GCVector<Value>(cx));
+ if (!exportValues.reserve(1)) {
+ return nullptr;
+ }
+ exportValues.infallibleAppend(jsonValue);
+
+ if (!ModuleObject::createSyntheticEnvironment(cx, moduleObject,
+ exportValues)) {
+ return nullptr;
+ }
+
+ return moduleObject;
+}
+
JS_PUBLIC_API void JS::SetModulePrivate(JSObject* module, const Value& value) {
JSRuntime* rt = module->zone()->runtimeFromMainThread();
module->as<ModuleObject>().scriptSourceObject()->setPrivate(rt, value);
@@ -150,6 +194,10 @@ JS_PUBLIC_API bool JS::ModuleEvaluate(JSContext* cx,
CHECK_THREAD(cx);
cx->releaseCheck(moduleRecord);
+ if (moduleRecord.as<ModuleObject>()->hasSyntheticModuleFields()) {
+ return SyntheticModuleEvaluate(cx, moduleRecord.as<ModuleObject>(), rval);
+ }
+
return js::ModuleEvaluate(cx, moduleRecord.as<ModuleObject>(), rval);
}
@@ -312,6 +360,10 @@ static bool ModuleResolveExport(JSContext* cx, Handle<ModuleObject*> module,
Handle<JSAtom*> exportName,
MutableHandle<ResolveSet> resolveSet,
MutableHandle<Value> result);
+static bool SyntheticModuleResolveExport(JSContext* cx,
+ Handle<ModuleObject*> module,
+ Handle<JSAtom*> exportName,
+ MutableHandle<Value> result);
static ModuleNamespaceObject* ModuleNamespaceCreate(
JSContext* cx, Handle<ModuleObject*> module,
MutableHandle<UniquePtr<ExportNameVector>> exports);
@@ -345,7 +397,7 @@ static const char* ModuleStatusName(ModuleStatus status) {
}
}
-static bool ContainsElement(Handle<ExportNameVector> list, JSAtom* atom) {
+static bool ContainsElement(const ExportNameVector& list, JSAtom* atom) {
for (JSAtom* a : list) {
if (a == atom) {
return true;
@@ -378,6 +430,20 @@ static size_t CountElements(Handle<ModuleVector> stack, ModuleObject* module) {
}
#endif
+// https://tc39.es/proposal-json-modules/#sec-smr-getexportednames
+static bool SyntheticModuleGetExportedNames(
+ JSContext* cx, Handle<ModuleObject*> module,
+ MutableHandle<ExportNameVector> exportedNames) {
+ MOZ_ASSERT(exportedNames.empty());
+
+ if (!exportedNames.appendAll(module->syntheticExportNames())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
// https://tc39.es/ecma262/#sec-getexportednames
// ES2023 16.2.1.6.2 GetExportedNames
static bool ModuleGetExportedNames(
@@ -387,6 +453,10 @@ static bool ModuleGetExportedNames(
// Step 4. Let exportedNames be a new empty List.
MOZ_ASSERT(exportedNames.empty());
+ if (module->hasSyntheticModuleFields()) {
+ return SyntheticModuleGetExportedNames(cx, module, exportedNames);
+ }
+
// Step 2. If exportStarSet contains module, then:
if (exportStarSet.has(module)) {
// Step 2.a. We've reached the starting point of an export * circularity.
@@ -482,12 +552,10 @@ static ModuleObject* HostResolveImportedModule(
if (!requestedModule) {
return nullptr;
}
-
if (requestedModule->status() < expectedMinimumStatus) {
ThrowUnexpectedModuleStatus(cx, requestedModule->status());
return nullptr;
}
-
return requestedModule;
}
@@ -510,6 +578,10 @@ static ModuleObject* HostResolveImportedModule(
bool js::ModuleResolveExport(JSContext* cx, Handle<ModuleObject*> module,
Handle<JSAtom*> exportName,
MutableHandle<Value> result) {
+ if (module->hasSyntheticModuleFields()) {
+ return ::SyntheticModuleResolveExport(cx, module, exportName, result);
+ }
+
// Step 1. If resolveSet is not present, set resolveSet to a new empty List.
Rooted<ResolveSet> resolveSet(cx);
@@ -683,6 +755,22 @@ static bool ModuleResolveExport(JSContext* cx, Handle<ModuleObject*> module,
return true;
}
+// https://tc39.es/proposal-json-modules/#sec-smr-resolveexport
+static bool SyntheticModuleResolveExport(JSContext* cx,
+ Handle<ModuleObject*> module,
+ Handle<JSAtom*> exportName,
+ MutableHandle<Value> result) {
+ // Step 2. If module.[[ExportNames]] does not contain exportName, return null.
+ if (!ContainsElement(module->syntheticExportNames(), exportName)) {
+ result.setNull();
+ return true;
+ }
+
+ // Step 3. Return ResolvedBinding Record { [[Module]]: module,
+ // [[BindingName]]: exportName }.
+ return CreateResolvedBindingObject(cx, module, exportName, result);
+}
+
// https://tc39.es/ecma262/#sec-getmodulenamespace
// ES2023 16.2.1.10 GetModuleNamespace
ModuleNamespaceObject* js::GetOrCreateModuleNamespace(
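
The synthetic-module paths added above (CompileJsonModule, SyntheticModuleGetExportedNames, SyntheticModuleResolveExport) all operate on the same shape of record: a fixed export-name list plus pre-computed export values, with resolution returning null for unknown names. A toy model of that record, with std::string and double standing in for JSAtom* and JS::Value purely for illustration:

    #include <optional>
    #include <string>
    #include <vector>

    struct SyntheticModuleSketch {
      // For a JSON module the list is exactly {"default"} and the single
      // value is the parsed JSON document.
      std::vector<std::string> exportNames;
      std::vector<double> exportValues;

      // Mirrors SyntheticModuleResolveExport: unknown names resolve to null.
      std::optional<size_t> resolveExport(const std::string& name) const {
        for (size_t i = 0; i < exportNames.size(); i++) {
          if (exportNames[i] == name) {
            return i;  // resolved binding: (this module, exportNames[i])
          }
        }
        return std::nullopt;
      }
    };
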
@@ -1097,6 +1185,14 @@ bool js::ModuleLink(JSContext* cx, Handle<ModuleObject*> module) {
static bool InnerModuleLinking(JSContext* cx, Handle<ModuleObject*> module,
MutableHandle<ModuleVector> stack, size_t index,
size_t* indexOut) {
+ // Step 1. If module is not a Cyclic Module Record, then
+ if (!module->hasCyclicModuleFields()) {
+ // Step 1.a. Perform ? module.Link(). (Skipped)
+ // Step 2.b. Return index.
+ *indexOut = index;
+ return true;
+ }
+
// Step 2. If module.[[Status]] is linking, linked, evaluating-async, or
// evaluated, then:
if (module->status() == ModuleStatus::Linking ||
@@ -1155,25 +1251,29 @@ static bool InnerModuleLinking(JSContext* cx, Handle<ModuleObject*> module,
}
// Step 9.c. If requiredModule is a Cyclic Module Record, then:
- // Step 9.c.i. Assert: requiredModule.[[Status]] is either linking, linked,
- // evaluating-async, or evaluated.
- MOZ_ASSERT(requiredModule->status() == ModuleStatus::Linking ||
- requiredModule->status() == ModuleStatus::Linked ||
- requiredModule->status() == ModuleStatus::EvaluatingAsync ||
- requiredModule->status() == ModuleStatus::Evaluated);
-
- // Step 9.c.ii. Assert: requiredModule.[[Status]] is linking if and only if
- // requiredModule is in stack.
- MOZ_ASSERT((requiredModule->status() == ModuleStatus::Linking) ==
- ContainsElement(stack, requiredModule));
-
- // Step 9.c.iii. If requiredModule.[[Status]] is linking, then:
- if (requiredModule->status() == ModuleStatus::Linking) {
- // Step 9.c.iii.1. Set module.[[DFSAncestorIndex]] to
- // min(module.[[DFSAncestorIndex]],
- // requiredModule.[[DFSAncestorIndex]]).
- module->setDfsAncestorIndex(std::min(module->dfsAncestorIndex(),
- requiredModule->dfsAncestorIndex()));
+ if (requiredModule->hasCyclicModuleFields()) {
+    // Step 9.c.i. Assert: requiredModule.[[Status]] is either
+    //             linking, linked, evaluating-async, or
+    //             evaluated.
+ MOZ_ASSERT(requiredModule->status() == ModuleStatus::Linking ||
+ requiredModule->status() == ModuleStatus::Linked ||
+ requiredModule->status() == ModuleStatus::EvaluatingAsync ||
+ requiredModule->status() == ModuleStatus::Evaluated);
+
+    // Step 9.c.ii. Assert: requiredModule.[[Status]] is linking
+    //              if and only if requiredModule is in
+    //              stack.
+ MOZ_ASSERT((requiredModule->status() == ModuleStatus::Linking) ==
+ ContainsElement(stack, requiredModule));
+
+ // Step 9.c.iii. If requiredModule.[[Status]] is linking, then:
+ if (requiredModule->status() == ModuleStatus::Linking) {
+ // Step 9.c.iii.1. Set module.[[DFSAncestorIndex]] to
+ // min(module.[[DFSAncestorIndex]],
+ // requiredModule.[[DFSAncestorIndex]]).
+ module->setDfsAncestorIndex(std::min(
+ module->dfsAncestorIndex(), requiredModule->dfsAncestorIndex()));
+ }
}
}
@@ -1213,6 +1313,28 @@ static bool InnerModuleLinking(JSContext* cx, Handle<ModuleObject*> module,
return true;
}
+bool js::SyntheticModuleEvaluate(JSContext* cx, Handle<ModuleObject*> moduleArg,
+ MutableHandle<Value> result) {
+  // Steps 1-12 happen elsewhere in the engine.
+
+ // Step 13. Let pc be ! NewPromiseCapability(%Promise%).
+ Rooted<PromiseObject*> resultPromise(cx, CreatePromiseObjectForAsync(cx));
+ if (!resultPromise) {
+ return false;
+ }
+
+ // Step 14. IfAbruptRejectPromise(result, pc) (Skipped)
+
+ // 15. Perform ! pc.[[Resolve]](result).
+ if (!AsyncFunctionReturned(cx, resultPromise, result)) {
+ return false;
+ }
+
+ // 16. Return pc.[[Promise]].
+ result.set(ObjectValue(*resultPromise));
+ return true;
+}
+
// https://tc39.es/ecma262/#sec-moduleevaluation
// ES2023 16.2.1.5.2 Evaluate
bool js::ModuleEvaluate(JSContext* cx, Handle<ModuleObject*> moduleArg,
@@ -1349,6 +1471,17 @@ bool js::ModuleEvaluate(JSContext* cx, Handle<ModuleObject*> moduleArg,
static bool InnerModuleEvaluation(JSContext* cx, Handle<ModuleObject*> module,
MutableHandle<ModuleVector> stack,
size_t index, size_t* indexOut) {
+ // Step 1: If module is not a Cyclic Module Record, then
+ if (!module->hasCyclicModuleFields()) {
+ // Step 1.a. Let promise be ! module.Evaluate(). (Skipped)
+ // Step 1.b. Assert: promise.[[PromiseState]] is not pending. (Skipped)
+ // Step 1.c. If promise.[[PromiseState]] is rejected, then (Skipped)
+ // Step 1.c.i Return ThrowCompletion(promise.[[PromiseResult]]). (Skipped)
+ // Step 1.d. Return index.
+ *indexOut = index;
+ return true;
+ }
+
// Step 2. If module.[[Status]] is evaluating-async or evaluated, then:
if (module->status() == ModuleStatus::EvaluatingAsync ||
module->status() == ModuleStatus::Evaluated) {
@@ -1419,55 +1552,59 @@ static bool InnerModuleEvaluation(JSContext* cx, Handle<ModuleObject*> module,
}
// Step 11.d. If requiredModule is a Cyclic Module Record, then:
- // Step 11.d.i. Assert: requiredModule.[[Status]] is either evaluating,
- // evaluating-async, or evaluated.
- MOZ_ASSERT(requiredModule->status() == ModuleStatus::Evaluating ||
- requiredModule->status() == ModuleStatus::EvaluatingAsync ||
- requiredModule->status() == ModuleStatus::Evaluated);
-
- // Step 11.d.ii. Assert: requiredModule.[[Status]] is evaluating if and only
- // if requiredModule is in stack.
- MOZ_ASSERT((requiredModule->status() == ModuleStatus::Evaluating) ==
- ContainsElement(stack, requiredModule));
-
- // Step 11.d.iii. If requiredModule.[[Status]] is evaluating, then:
- if (requiredModule->status() == ModuleStatus::Evaluating) {
- // Step 11.d.iii.1. Set module.[[DFSAncestorIndex]] to
- // min(module.[[DFSAncestorIndex]],
- // requiredModule.[[DFSAncestorIndex]]).
- module->setDfsAncestorIndex(std::min(module->dfsAncestorIndex(),
- requiredModule->dfsAncestorIndex()));
- } else {
- // Step 11.d.iv. Else:
- // Step 11.d.iv.1. Set requiredModule to requiredModule.[[CycleRoot]].
- requiredModule = requiredModule->getCycleRoot();
-
- // Step 11.d.iv.2. Assert: requiredModule.[[Status]] is evaluating-async
- // or evaluated.
- MOZ_ASSERT(requiredModule->status() >= ModuleStatus::EvaluatingAsync ||
+ if (requiredModule->hasCyclicModuleFields()) {
+ // Step 11.d.i. Assert: requiredModule.[[Status]] is either evaluating,
+ // evaluating-async, or evaluated.
+ MOZ_ASSERT(requiredModule->status() == ModuleStatus::Evaluating ||
+ requiredModule->status() == ModuleStatus::EvaluatingAsync ||
requiredModule->status() == ModuleStatus::Evaluated);
- // Step 11.d.iv.3. If requiredModule.[[EvaluationError]] is not empty,
- // return ? requiredModule.[[EvaluationError]].
- if (requiredModule->hadEvaluationError()) {
- Rooted<Value> error(cx, requiredModule->evaluationError());
- cx->setPendingException(error, ShouldCaptureStack::Maybe);
- return false;
+ // Step 11.d.ii. Assert: requiredModule.[[Status]] is evaluating if and
+ // only if requiredModule is in stack.
+ MOZ_ASSERT((requiredModule->status() == ModuleStatus::Evaluating) ==
+ ContainsElement(stack, requiredModule));
+
+ // Step 11.d.iii. If requiredModule.[[Status]] is evaluating, then:
+ if (requiredModule->status() == ModuleStatus::Evaluating) {
+ // Step 11.d.iii.1. Set module.[[DFSAncestorIndex]] to
+ // min(module.[[DFSAncestorIndex]],
+ // requiredModule.[[DFSAncestorIndex]]).
+ module->setDfsAncestorIndex(std::min(
+ module->dfsAncestorIndex(), requiredModule->dfsAncestorIndex()));
+ } else {
+ // Step 11.d.iv. Else:
+ // Step 11.d.iv.1. Set requiredModule to requiredModule.[[CycleRoot]].
+ requiredModule = requiredModule->getCycleRoot();
+
+ // Step 11.d.iv.2. Assert: requiredModule.[[Status]] is evaluating-async
+ // or evaluated.
+ MOZ_ASSERT(requiredModule->status() >= ModuleStatus::EvaluatingAsync ||
+ requiredModule->status() == ModuleStatus::Evaluated);
+
+ // Step 11.d.iv.3. If requiredModule.[[EvaluationError]] is not empty,
+ // return ? requiredModule.[[EvaluationError]].
+ if (requiredModule->hadEvaluationError()) {
+ Rooted<Value> error(cx, requiredModule->evaluationError());
+ cx->setPendingException(error, ShouldCaptureStack::Maybe);
+ return false;
+ }
}
- }
- // Step 11.d.v. If requiredModule.[[AsyncEvaluation]] is true, then:
- if (requiredModule->isAsyncEvaluating() &&
- requiredModule->status() != ModuleStatus::Evaluated) {
- // Step 11.d.v.2. Append module to requiredModule.[[AsyncParentModules]].
- if (!ModuleObject::appendAsyncParentModule(cx, requiredModule, module)) {
- return false;
- }
+ // Step 11.d.v. If requiredModule.[[AsyncEvaluation]] is true, then:
+ if (requiredModule->isAsyncEvaluating() &&
+ requiredModule->status() != ModuleStatus::Evaluated) {
+ // Step 11.d.v.2. Append module to
+ // requiredModule.[[AsyncParentModules]].
+ if (!ModuleObject::appendAsyncParentModule(cx, requiredModule,
+ module)) {
+ return false;
+ }
- // Step 11.d.v.1. Set module.[[PendingAsyncDependencies]] to
- // module.[[PendingAsyncDependencies]] + 1.
- module->setPendingAsyncDependencies(module->pendingAsyncDependencies() +
- 1);
+ // Step 11.d.v.1. Set module.[[PendingAsyncDependencies]] to
+ // module.[[PendingAsyncDependencies]] + 1.
+ module->setPendingAsyncDependencies(module->pendingAsyncDependencies() +
+ 1);
+ }
}
}
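
The dfsIndex/dfsAncestorIndex updates in InnerModuleLinking and InnerModuleEvaluation are the standard Tarjan strongly-connected-components bookkeeping. A self-contained toy version with hypothetical types (Node, Visit), kept separate from the engine code, makes the invariant explicit: a node whose ancestor index comes back equal to its own index is the root of a cycle.

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct Node {
      bool visited = false;
      bool onStack = false;
      size_t dfsIndex = 0;
      size_t dfsAncestorIndex = 0;
      std::vector<Node*> deps;
    };

    void Visit(Node* n, size_t* counter, std::vector<Node*>* stack) {
      n->visited = true;
      n->dfsIndex = n->dfsAncestorIndex = (*counter)++;
      n->onStack = true;
      stack->push_back(n);
      for (Node* dep : n->deps) {
        if (!dep->visited) {
          Visit(dep, counter, stack);
          n->dfsAncestorIndex =
              std::min(n->dfsAncestorIndex, dep->dfsAncestorIndex);
        } else if (dep->onStack) {
          // Back-edge into the current DFS path: dep is in the same cycle.
          n->dfsAncestorIndex =
              std::min(n->dfsAncestorIndex, dep->dfsAncestorIndex);
        }
      }
      if (n->dfsAncestorIndex == n->dfsIndex) {
        // n is an SCC root: pop the whole cycle off the stack.
        while (stack->back() != n) {
          stack->back()->onStack = false;
          stack->pop_back();
        }
        n->onStack = false;
        stack->pop_back();
      }
    }
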
diff --git a/js/src/vm/Modules.h b/js/src/vm/Modules.h
index 18b97ac3d7..21aff46b90 100644
--- a/js/src/vm/Modules.h
+++ b/js/src/vm/Modules.h
@@ -34,6 +34,8 @@ bool ModuleLink(JSContext* cx, Handle<ModuleObject*> module);
// Start evaluating the module. If TLA is enabled, result will be a promise.
bool ModuleEvaluate(JSContext* cx, Handle<ModuleObject*> module,
MutableHandle<Value> result);
+bool SyntheticModuleEvaluate(JSContext* cx, Handle<ModuleObject*> module,
+ MutableHandle<Value> result);
void AsyncModuleExecutionFulfilled(JSContext* cx, Handle<ModuleObject*> module);
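
A hypothetical call-site sketch of how a caller might choose between the two evaluation entry points declared here; the real dispatch lives in the module loader and may differ:

    static bool EvaluateModuleRecord(JSContext* cx,
                                     JS::Handle<js::ModuleObject*> module,
                                     JS::MutableHandle<JS::Value> rval) {
      // Synthetic records (e.g. JSON modules) have no cyclic module fields
      // and no dependency graph to walk, so they take the short path.
      if (!module->hasCyclicModuleFields()) {
        return js::SyntheticModuleEvaluate(cx, module, rval);
      }
      return js::ModuleEvaluate(cx, module, rval);
    }
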
diff --git a/js/src/vm/NativeObject.cpp b/js/src/vm/NativeObject.cpp
index c952e1b40a..640a185981 100644
--- a/js/src/vm/NativeObject.cpp
+++ b/js/src/vm/NativeObject.cpp
@@ -943,8 +943,8 @@ bool NativeObject::growElements(JSContext* cx, uint32_t reqCapacity) {
// For arrays with writable length, and all non-Array objects, call
// `NativeObject::goodElementsAllocationAmount()` to determine the
// amount to allocate from the requested capacity and existing length.
- if (!goodElementsAllocationAmount(cx, reqCapacity + numShifted,
- getElementsHeader()->length,
+ uint32_t length = is<ArrayObject>() ? as<ArrayObject>().length() : 0;
+ if (!goodElementsAllocationAmount(cx, reqCapacity + numShifted, length,
&newAllocated)) {
return false;
}
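
The change above stops feeding ObjectElements::length into the allocation heuristic for non-array objects, where that field is not a meaningful JS-visible length. Restated as a standalone helper (hypothetical name, same logic):

    static uint32_t LengthHintForGrowElements(js::NativeObject* obj) {
      // Only ArrayObjects contribute a real length; everything else sizes
      // purely from the requested capacity.
      return obj->is<js::ArrayObject>() ? obj->as<js::ArrayObject>().length()
                                        : 0;
    }
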
diff --git a/js/src/vm/Opcodes.h b/js/src/vm/Opcodes.h
index d71ca98fd0..438f361bcf 100644
--- a/js/src/vm/Opcodes.h
+++ b/js/src/vm/Opcodes.h
@@ -2937,7 +2937,8 @@
/*
* Push the number of actual arguments as Int32Value.
*
- * This is emitted for the ArgumentsLength() intrinsic in self-hosted code.
+ * This is emitted for the ArgumentsLength() intrinsic in self-hosted code,
+ * and when the script uses only arguments.length.
*
* Category: Variables and scopes
* Type: Getting binding values
diff --git a/js/src/vm/PlainObject.cpp b/js/src/vm/PlainObject.cpp
index 8341636796..0eef3acbce 100644
--- a/js/src/vm/PlainObject.cpp
+++ b/js/src/vm/PlainObject.cpp
@@ -207,16 +207,15 @@ void js::NewPlainObjectWithPropsCache::add(SharedShape* shape) {
entries_[0] = shape;
}
-static bool ShapeMatches(IdValuePair* properties, size_t nproperties,
- SharedShape* shape) {
- if (shape->slotSpan() != nproperties) {
+static bool ShapeMatches(Handle<IdValueVector> properties, SharedShape* shape) {
+ if (shape->slotSpan() != properties.length()) {
return false;
}
SharedShapePropertyIter<NoGC> iter(shape);
- for (size_t i = nproperties; i > 0; i--) {
+ for (size_t i = properties.length(); i > 0; i--) {
MOZ_ASSERT(iter->isDataProperty());
MOZ_ASSERT(iter->flags() == PropertyFlags::defaultDataPropFlags);
- if (properties[i - 1].id != iter->key()) {
+ if (properties[i - 1].get().id != iter->key()) {
return false;
}
iter++;
@@ -226,10 +225,10 @@ static bool ShapeMatches(IdValuePair* properties, size_t nproperties,
}
SharedShape* js::NewPlainObjectWithPropsCache::lookup(
- IdValuePair* properties, size_t nproperties) const {
+ Handle<IdValueVector> properties) const {
for (size_t i = 0; i < NumEntries; i++) {
SharedShape* shape = entries_[i];
- if (shape && ShapeMatches(properties, nproperties, shape)) {
+ if (shape && ShapeMatches(properties, shape)) {
return shape;
}
}
@@ -239,35 +238,33 @@ SharedShape* js::NewPlainObjectWithPropsCache::lookup(
enum class KeysKind { UniqueNames, Unknown };
template <KeysKind Kind>
-static PlainObject* NewPlainObjectWithProperties(JSContext* cx,
- IdValuePair* properties,
- size_t nproperties,
- NewObjectKind newKind) {
+static PlainObject* NewPlainObjectWithProperties(
+ JSContext* cx, Handle<IdValueVector> properties, NewObjectKind newKind) {
auto& cache = cx->realm()->newPlainObjectWithPropsCache;
// If we recently created an object with these properties, we can use that
// Shape directly.
- if (SharedShape* shape = cache.lookup(properties, nproperties)) {
+ if (SharedShape* shape = cache.lookup(properties)) {
Rooted<SharedShape*> shapeRoot(cx, shape);
PlainObject* obj = PlainObject::createWithShape(cx, shapeRoot, newKind);
if (!obj) {
return nullptr;
}
- MOZ_ASSERT(obj->slotSpan() == nproperties);
- for (size_t i = 0; i < nproperties; i++) {
- obj->initSlot(i, properties[i].value);
+ MOZ_ASSERT(obj->slotSpan() == properties.length());
+ for (size_t i = 0; i < properties.length(); i++) {
+ obj->initSlot(i, properties[i].get().value);
}
return obj;
}
- gc::AllocKind allocKind = gc::GetGCObjectKind(nproperties);
+ gc::AllocKind allocKind = gc::GetGCObjectKind(properties.length());
Rooted<PlainObject*> obj(cx,
NewPlainObjectWithAllocKind(cx, allocKind, newKind));
if (!obj) {
return nullptr;
}
- if (nproperties == 0) {
+ if (properties.empty()) {
return obj;
}
@@ -275,9 +272,9 @@ static PlainObject* NewPlainObjectWithProperties(JSContext* cx,
Rooted<Value> value(cx);
bool canCache = true;
- for (size_t i = 0; i < nproperties; i++) {
- key = properties[i].id;
- value = properties[i].value;
+ for (const auto& prop : properties) {
+ key = prop.id;
+ value = prop.value;
// Integer keys may need to be stored in dense elements. This is uncommon so
// just fall back to NativeDefineDataProperty.
@@ -314,7 +311,7 @@ static PlainObject* NewPlainObjectWithProperties(JSContext* cx,
if (canCache && !obj->inDictionaryMode()) {
MOZ_ASSERT(obj->getDenseInitializedLength() == 0);
- MOZ_ASSERT(obj->slotSpan() == nproperties);
+ MOZ_ASSERT(obj->slotSpan() == properties.length());
cache.add(obj->sharedShape());
}
@@ -322,17 +319,14 @@ static PlainObject* NewPlainObjectWithProperties(JSContext* cx,
}
PlainObject* js::NewPlainObjectWithUniqueNames(JSContext* cx,
- IdValuePair* properties,
- size_t nproperties,
+ Handle<IdValueVector> properties,
NewObjectKind newKind) {
- return NewPlainObjectWithProperties<KeysKind::UniqueNames>(
- cx, properties, nproperties, newKind);
+ return NewPlainObjectWithProperties<KeysKind::UniqueNames>(cx, properties,
+ newKind);
}
-PlainObject* js::NewPlainObjectWithMaybeDuplicateKeys(JSContext* cx,
- IdValuePair* properties,
- size_t nproperties,
- NewObjectKind newKind) {
+PlainObject* js::NewPlainObjectWithMaybeDuplicateKeys(
+ JSContext* cx, Handle<IdValueVector> properties, NewObjectKind newKind) {
return NewPlainObjectWithProperties<KeysKind::Unknown>(cx, properties,
- nproperties, newKind);
+ newKind);
}
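
Callers of NewPlainObjectWithUniqueNames and NewPlainObjectWithMaybeDuplicateKeys now pass a rooted IdValueVector instead of a raw pointer/length pair. A minimal usage sketch, assuming cx is the active JSContext and idA/idB are previously obtained property keys:

    JS::Rooted<js::IdValueVector> props(cx, js::IdValueVector(cx));
    if (!props.append(js::IdValuePair(idA, JS::Int32Value(1))) ||
        !props.append(js::IdValuePair(idB, JS::Int32Value(2)))) {
      return nullptr;  // OOM
    }
    js::PlainObject* obj = js::NewPlainObjectWithUniqueNames(cx, props);
    if (!obj) {
      return nullptr;
    }
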
diff --git a/js/src/vm/PlainObject.h b/js/src/vm/PlainObject.h
index 5dcbe29a46..aa2a202fa0 100644
--- a/js/src/vm/PlainObject.h
+++ b/js/src/vm/PlainObject.h
@@ -7,6 +7,7 @@
#ifndef vm_PlainObject_h
#define vm_PlainObject_h
+#include "ds/IdValuePair.h"
#include "gc/AllocKind.h" // js::gc::AllocKind
#include "js/Class.h" // JSClass
#include "js/RootingAPI.h" // JS::Handle
@@ -98,13 +99,13 @@ extern PlainObject* NewPlainObjectWithProtoAndAllocKind(
// Create a plain object with the given properties. The list must not contain
// duplicate keys or integer keys.
extern PlainObject* NewPlainObjectWithUniqueNames(
- JSContext* cx, IdValuePair* properties, size_t nproperties,
+ JSContext* cx, Handle<IdValueVector> properties,
NewObjectKind newKind = GenericObject);
// Create a plain object with the given properties. The list may contain integer
// keys or duplicate keys.
extern PlainObject* NewPlainObjectWithMaybeDuplicateKeys(
- JSContext* cx, IdValuePair* properties, size_t nproperties,
+ JSContext* cx, Handle<IdValueVector> properties,
NewObjectKind newKind = GenericObject);
} // namespace js
diff --git a/js/src/vm/PortableBaselineInterpret.cpp b/js/src/vm/PortableBaselineInterpret.cpp
index e2acaf2d7b..2990942dc6 100644
--- a/js/src/vm/PortableBaselineInterpret.cpp
+++ b/js/src/vm/PortableBaselineInterpret.cpp
@@ -33,6 +33,8 @@
#include "jit/JitScript.h"
#include "jit/JSJitFrameIter.h"
#include "jit/VMFunctions.h"
+#include "proxy/DeadObjectProxy.h"
+#include "proxy/DOMProxy.h"
#include "vm/AsyncFunction.h"
#include "vm/AsyncIteration.h"
#include "vm/EnvironmentObject.h"
@@ -448,6 +450,12 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
goto cacheop_##name; \
}
+#define PREDICT_RETURN() \
+ if (icregs.cacheIRReader.peekOp() == CacheOp::ReturnFromIC) { \
+ TRACE_PRINTF("stub successful, predicted return\n"); \
+ return ICInterpretOpResult::Return; \
+ }
+
CacheOp cacheop;
DISPATCH_CACHEOP();
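
PREDICT_RETURN() is a one-op lookahead: if the next CacheIR op is ReturnFromIC, the interpreter returns to its caller immediately instead of dispatching one more op. The typical shape of an op body that uses it (schematic; the result value here is a placeholder):

    // ... compute the stub's result ...
    icregs.icResult = BooleanValue(true).asRawBits();  // placeholder result
    PREDICT_RETURN();    // next op is ReturnFromIC: return to the caller now
    DISPATCH_CACHEOP();  // otherwise dispatch the next op normally
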
@@ -498,6 +506,15 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
DISPATCH_CACHEOP();
}
+ CACHEOP_CASE(GuardIsNotUninitializedLexical) {
+ ValOperandId valId = icregs.cacheIRReader.valOperandId();
+ Value val = Value::fromRawBits(icregs.icVals[valId.id()]);
+ if (val == MagicValue(JS_UNINITIALIZED_LEXICAL)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
CACHEOP_CASE(GuardToBoolean) {
ValOperandId inputId = icregs.cacheIRReader.valOperandId();
Value v = Value::fromRawBits(icregs.icVals[inputId.id()]);
@@ -565,6 +582,15 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
DISPATCH_CACHEOP();
}
+ CACHEOP_CASE(GuardToNonGCThing) {
+ ValOperandId inputId = icregs.cacheIRReader.valOperandId();
+ Value input = Value::fromRawBits(icregs.icVals[inputId.id()]);
+ if (input.isGCThing()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
CACHEOP_CASE(GuardBooleanToInt32) {
ValOperandId inputId = icregs.cacheIRReader.valOperandId();
Int32OperandId resultId = icregs.cacheIRReader.int32OperandId();
@@ -595,6 +621,36 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
return ICInterpretOpResult::NextIC;
}
+ CACHEOP_CASE(Int32ToIntPtr) {
+ Int32OperandId inputId = icregs.cacheIRReader.int32OperandId();
+ IntPtrOperandId resultId = icregs.cacheIRReader.intPtrOperandId();
+ BOUNDSCHECK(resultId);
+ int32_t input = int32_t(icregs.icVals[inputId.id()]);
+ // Note that this must sign-extend to pointer width:
+ icregs.icVals[resultId.id()] = intptr_t(input);
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardToInt32ModUint32) {
+ ValOperandId inputId = icregs.cacheIRReader.valOperandId();
+ Int32OperandId resultId = icregs.cacheIRReader.int32OperandId();
+ BOUNDSCHECK(resultId);
+ Value input = Value::fromRawBits(icregs.icVals[inputId.id()]);
+ if (input.isInt32()) {
+ icregs.icVals[resultId.id()] = Int32Value(input.toInt32()).asRawBits();
+ DISPATCH_CACHEOP();
+ } else if (input.isDouble()) {
+ double doubleVal = input.toDouble();
+ // Accept any double that fits in an int64_t but truncate the top 32 bits.
+ if (doubleVal >= double(INT64_MIN) && doubleVal <= double(INT64_MAX)) {
+ icregs.icVals[resultId.id()] =
+ Int32Value(int64_t(doubleVal)).asRawBits();
+ DISPATCH_CACHEOP();
+ }
+ }
+ return ICInterpretOpResult::NextIC;
+ }
+
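
GuardToInt32ModUint32 keeps int32 inputs as-is and truncates doubles to their low 32 bits, provided they fit in an int64. A standalone restatement of the double path (hypothetical helper, not engine code):

    #include <cstdint>

    static bool TruncateToInt32ModUint32(double d, int32_t* out) {
      // NaN and doubles outside the int64 range make the stub bail out.
      if (!(d >= double(INT64_MIN) && d <= double(INT64_MAX))) {
        return false;
      }
      *out = int32_t(int64_t(d));  // keep only the low 32 bits, as the stub does
      return true;
    }
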
CACHEOP_CASE(GuardNonDoubleType) {
ValOperandId inputId = icregs.cacheIRReader.valOperandId();
ValueType type = icregs.cacheIRReader.valueType();
@@ -668,15 +724,24 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
ObjOperandId objId = icregs.cacheIRReader.objOperandId();
uint32_t protoOffset = icregs.cacheIRReader.stubOffset();
JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
- uintptr_t expectedProto =
- cstub->stubInfo()->getStubRawWord(cstub, protoOffset);
- if (reinterpret_cast<uintptr_t>(obj->staticPrototype()) != expectedProto) {
+ JSObject* proto = reinterpret_cast<JSObject*>(
+ cstub->stubInfo()->getStubRawWord(cstub, protoOffset));
+ if (obj->staticPrototype() != proto) {
return ICInterpretOpResult::NextIC;
}
PREDICT_NEXT(LoadProto);
DISPATCH_CACHEOP();
}
+ CACHEOP_CASE(GuardNullProto) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ if (obj->taggedProto().raw()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
CACHEOP_CASE(GuardClass) {
ObjOperandId objId = icregs.cacheIRReader.objOperandId();
GuardClassKind kind = icregs.cacheIRReader.guardClassKind();
@@ -697,16 +762,31 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
return ICInterpretOpResult::NextIC;
}
break;
+ case GuardClassKind::ResizableArrayBuffer:
+ if (object->getClass() != &ResizableArrayBufferObject::class_) {
+ return ICInterpretOpResult::NextIC;
+ }
+ break;
case GuardClassKind::FixedLengthSharedArrayBuffer:
if (object->getClass() != &FixedLengthSharedArrayBufferObject::class_) {
return ICInterpretOpResult::NextIC;
}
break;
+ case GuardClassKind::GrowableSharedArrayBuffer:
+ if (object->getClass() != &GrowableSharedArrayBufferObject::class_) {
+ return ICInterpretOpResult::NextIC;
+ }
+ break;
case GuardClassKind::FixedLengthDataView:
if (object->getClass() != &FixedLengthDataViewObject::class_) {
return ICInterpretOpResult::NextIC;
}
break;
+ case GuardClassKind::ResizableDataView:
+ if (object->getClass() != &ResizableDataViewObject::class_) {
+ return ICInterpretOpResult::NextIC;
+ }
+ break;
case GuardClassKind::MappedArguments:
if (object->getClass() != &MappedArgumentsObject::class_) {
return ICInterpretOpResult::NextIC;
@@ -747,6 +827,18 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
DISPATCH_CACHEOP();
}
+ CACHEOP_CASE(GuardAnyClass) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t claspOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ JSClass* clasp = reinterpret_cast<JSClass*>(
+ cstub->stubInfo()->getStubRawWord(cstub, claspOffset));
+ if (obj->getClass() != clasp) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
CACHEOP_CASE(GuardGlobalGeneration) {
uint32_t expectedOffset = icregs.cacheIRReader.stubOffset();
uint32_t generationAddrOffset = icregs.cacheIRReader.stubOffset();
@@ -760,12 +852,131 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
DISPATCH_CACHEOP();
}
+ CACHEOP_CASE(HasClassResult) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t claspOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ JSClass* clasp = reinterpret_cast<JSClass*>(
+ cstub->stubInfo()->getStubRawWord(cstub, claspOffset));
+ icregs.icResult = BooleanValue(obj->getClass() == clasp).asRawBits();
+ PREDICT_RETURN();
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardCompartment) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t globalOffset = icregs.cacheIRReader.stubOffset();
+ uint32_t compartmentOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ JSObject* global = reinterpret_cast<JSObject*>(
+ cstub->stubInfo()->getStubRawWord(cstub, globalOffset));
+ JS::Compartment* compartment = reinterpret_cast<JS::Compartment*>(
+ cstub->stubInfo()->getStubRawWord(cstub, compartmentOffset));
+ if (IsDeadProxyObject(global)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ if (obj->compartment() != compartment) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardIsExtensible) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+    if (!obj->nonProxyIsExtensible()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardIsNativeObject) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ if (!obj->is<NativeObject>()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardIsProxy) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ if (!obj->is<ProxyObject>()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardIsNotProxy) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ if (obj->is<ProxyObject>()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardIsNotArrayBufferMaybeShared) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ const JSClass* clasp = obj->getClass();
+ if (clasp == &ArrayBufferObject::protoClass_ ||
+ clasp == &SharedArrayBufferObject::protoClass_) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardIsTypedArray) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ if (!IsTypedArrayClass(obj->getClass())) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardHasProxyHandler) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t handlerOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ BaseProxyHandler* handler = reinterpret_cast<BaseProxyHandler*>(
+ cstub->stubInfo()->getStubRawWord(cstub, handlerOffset));
+ if (obj->as<ProxyObject>().handler() != handler) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardIsNotDOMProxy) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ if (obj->as<ProxyObject>().handler()->family() ==
+ GetDOMProxyHandlerFamily()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
CACHEOP_CASE(GuardSpecificObject) {
ObjOperandId objId = icregs.cacheIRReader.objOperandId();
uint32_t expectedOffset = icregs.cacheIRReader.stubOffset();
- uintptr_t expected =
- cstub->stubInfo()->getStubRawWord(cstub, expectedOffset);
- if (expected != icregs.icVals[objId.id()]) {
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ JSObject* expected = reinterpret_cast<JSObject*>(
+ cstub->stubInfo()->getStubRawWord(cstub, expectedOffset));
+ if (obj != expected) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardObjectIdentity) {
+ ObjOperandId obj1Id = icregs.cacheIRReader.objOperandId();
+ ObjOperandId obj2Id = icregs.cacheIRReader.objOperandId();
+ JSObject* obj1 = reinterpret_cast<JSObject*>(icregs.icVals[obj1Id.id()]);
+ JSObject* obj2 = reinterpret_cast<JSObject*>(icregs.icVals[obj2Id.id()]);
+ if (obj1 != obj2) {
return ICInterpretOpResult::NextIC;
}
DISPATCH_CACHEOP();
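
All of the guard ops added above follow the same shape: decode operands from the CacheIR stream, check a cheap predicate, then either fall through to the next op or bail to the next stub in the chain. Schematically (GuardSomething and SomePredicate are placeholders, not real ops):

    CACHEOP_CASE(GuardSomething) {
      ObjOperandId objId = icregs.cacheIRReader.objOperandId();
      JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
      if (!SomePredicate(obj)) {
        return ICInterpretOpResult::NextIC;  // guard failed: try the next stub
      }
      DISPATCH_CACHEOP();  // guard passed: keep interpreting this stub
    }
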
@@ -808,6 +1019,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
uintptr_t expected =
cstub->stubInfo()->getStubRawWord(cstub, expectedOffset);
if (expected != icregs.icVals[strId.id()]) {
+ // TODO: BaselineCacheIRCompiler also checks for equal strings
return ICInterpretOpResult::NextIC;
}
DISPATCH_CACHEOP();
@@ -833,23 +1045,211 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
DISPATCH_CACHEOP();
}
+ CACHEOP_CASE(GuardNoDenseElements) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ if (obj->as<NativeObject>().getDenseInitializedLength() != 0) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardStringToIndex) {
+ StringOperandId strId = icregs.cacheIRReader.stringOperandId();
+ Int32OperandId resultId = icregs.cacheIRReader.int32OperandId();
+ BOUNDSCHECK(resultId);
+ JSString* str = reinterpret_cast<JSString*>(icregs.icVals[strId.id()]);
+ int32_t result;
+ if (str->hasIndexValue()) {
+ uint32_t index = str->getIndexValue();
+ MOZ_ASSERT(index <= INT32_MAX);
+ result = index;
+ } else {
+ result = GetIndexFromString(str);
+ if (result < 0) {
+ return ICInterpretOpResult::NextIC;
+ }
+ }
+ icregs.icVals[resultId.id()] = result;
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardStringToInt32) {
+ StringOperandId strId = icregs.cacheIRReader.stringOperandId();
+ Int32OperandId resultId = icregs.cacheIRReader.int32OperandId();
+ BOUNDSCHECK(resultId);
+ JSString* str = reinterpret_cast<JSString*>(icregs.icVals[strId.id()]);
+ int32_t result;
+ // Use indexed value as fast path if possible.
+ if (str->hasIndexValue()) {
+ uint32_t index = str->getIndexValue();
+ MOZ_ASSERT(index <= INT32_MAX);
+ result = index;
+ } else {
+ if (!GetInt32FromStringPure(frameMgr.cxForLocalUseOnly(), str, &result)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ }
+ icregs.icVals[resultId.id()] = result;
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardStringToNumber) {
+ StringOperandId strId = icregs.cacheIRReader.stringOperandId();
+ NumberOperandId resultId = icregs.cacheIRReader.numberOperandId();
+ BOUNDSCHECK(resultId);
+ JSString* str = reinterpret_cast<JSString*>(icregs.icVals[strId.id()]);
+ Value result;
+ // Use indexed value as fast path if possible.
+ if (str->hasIndexValue()) {
+ uint32_t index = str->getIndexValue();
+ MOZ_ASSERT(index <= INT32_MAX);
+ result = Int32Value(index);
+ } else {
+ double value;
+ if (!StringToNumberPure(frameMgr.cxForLocalUseOnly(), str, &value)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ result = DoubleValue(value);
+ }
+ icregs.icVals[resultId.id()] = result.asRawBits();
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(BooleanToNumber) {
+ BooleanOperandId booleanId = icregs.cacheIRReader.booleanOperandId();
+ NumberOperandId resultId = icregs.cacheIRReader.numberOperandId();
+ BOUNDSCHECK(resultId);
+ uint64_t boolean = icregs.icVals[booleanId.id()];
+ MOZ_ASSERT((boolean & ~1) == 0);
+ icregs.icVals[resultId.id()] = Int32Value(boolean).asRawBits();
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardHasGetterSetter) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t idOffset = icregs.cacheIRReader.stubOffset();
+ uint32_t getterSetterOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ jsid id =
+ jsid::fromRawBits(cstub->stubInfo()->getStubRawWord(cstub, idOffset));
+ GetterSetter* getterSetter = reinterpret_cast<GetterSetter*>(
+ cstub->stubInfo()->getStubRawWord(cstub, getterSetterOffset));
+ if (!ObjectHasGetterSetterPure(frameMgr.cxForLocalUseOnly(), obj, id,
+ getterSetter)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardInt32IsNonNegative) {
+ Int32OperandId indexId = icregs.cacheIRReader.int32OperandId();
+ int32_t index = int32_t(icregs.icVals[indexId.id()]);
+ if (index < 0) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
CACHEOP_CASE(GuardDynamicSlotIsSpecificObject) {
ObjOperandId objId = icregs.cacheIRReader.objOperandId();
ObjOperandId expectedId = icregs.cacheIRReader.objOperandId();
uint32_t slotOffset = icregs.cacheIRReader.stubOffset();
JSObject* expected =
reinterpret_cast<JSObject*>(icregs.icVals[expectedId.id()]);
- uintptr_t offset = cstub->stubInfo()->getStubRawInt32(cstub, slotOffset);
+ uintptr_t slot = cstub->stubInfo()->getStubRawInt32(cstub, slotOffset);
NativeObject* nobj =
reinterpret_cast<NativeObject*>(icregs.icVals[objId.id()]);
HeapSlot* slots = nobj->getSlotsUnchecked();
- Value actual = slots[offset / sizeof(Value)];
+ // Note that unlike similar opcodes, GuardDynamicSlotIsSpecificObject takes
+ // a slot index rather than a byte offset.
+ Value actual = slots[slot];
if (actual != ObjectValue(*expected)) {
return ICInterpretOpResult::NextIC;
}
DISPATCH_CACHEOP();
}
+ CACHEOP_CASE(GuardDynamicSlotIsNotObject) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t slotOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ uint32_t slot = cstub->stubInfo()->getStubRawInt32(cstub, slotOffset);
+ NativeObject* nobj = &obj->as<NativeObject>();
+ HeapSlot* slots = nobj->getSlotsUnchecked();
+ // Note that unlike similar opcodes, GuardDynamicSlotIsNotObject takes a
+ // slot index rather than a byte offset.
+ Value actual = slots[slot];
+ if (actual.isObject()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardFixedSlotValue) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t offsetOffset = icregs.cacheIRReader.stubOffset();
+ uint32_t valOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ uint32_t offset = cstub->stubInfo()->getStubRawInt32(cstub, offsetOffset);
+ Value val = Value::fromRawBits(
+ cstub->stubInfo()->getStubRawInt64(cstub, valOffset));
+ GCPtr<Value>* slot = reinterpret_cast<GCPtr<Value>*>(
+ reinterpret_cast<uintptr_t>(obj) + offset);
+ Value actual = slot->get();
+ if (actual != val) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardDynamicSlotValue) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t offsetOffset = icregs.cacheIRReader.stubOffset();
+ uint32_t valOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ uint32_t offset = cstub->stubInfo()->getStubRawInt32(cstub, offsetOffset);
+ Value val = Value::fromRawBits(
+ cstub->stubInfo()->getStubRawInt64(cstub, valOffset));
+ NativeObject* nobj = &obj->as<NativeObject>();
+ HeapSlot* slots = nobj->getSlotsUnchecked();
+ Value actual = slots[offset / sizeof(Value)];
+ if (actual != val) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(LoadFixedSlot) {
+ ValOperandId resultId = icregs.cacheIRReader.valOperandId();
+ BOUNDSCHECK(resultId);
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t offsetOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ uint32_t offset = cstub->stubInfo()->getStubRawInt32(cstub, offsetOffset);
+ GCPtr<Value>* slot = reinterpret_cast<GCPtr<Value>*>(
+ reinterpret_cast<uintptr_t>(obj) + offset);
+ Value actual = slot->get();
+ icregs.icVals[resultId.id()] = actual.asRawBits();
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(LoadDynamicSlot) {
+ ValOperandId resultId = icregs.cacheIRReader.valOperandId();
+ BOUNDSCHECK(resultId);
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t slotOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ uint32_t slot = cstub->stubInfo()->getStubRawInt32(cstub, slotOffset);
+ NativeObject* nobj = &obj->as<NativeObject>();
+ HeapSlot* slots = nobj->getSlotsUnchecked();
+ // Note that unlike similar opcodes, LoadDynamicSlot takes a slot index
+ // rather than a byte offset.
+ Value actual = slots[slot];
+ icregs.icVals[resultId.id()] = actual.asRawBits();
+ DISPATCH_CACHEOP();
+ }
+
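
Two addressing conventions are in play in the slot ops above, which is why the comments call it out: the fixed-slot and *SlotValue ops carry a byte offset from the object pointer, while LoadDynamicSlot and the GuardDynamicSlotIs* ops carry an index into the out-of-line slots array. Side by side (sketch):

    // Byte offset from the JSObject*, as used by LoadFixedSlot and
    // GuardFixedSlotValue (GuardDynamicSlotValue divides by sizeof(Value)).
    static Value LoadByByteOffset(JSObject* obj, uint32_t offset) {
      return reinterpret_cast<GCPtr<Value>*>(
                 reinterpret_cast<uintptr_t>(obj) + offset)->get();
    }

    // Plain index into the dynamic slots array, as used by LoadDynamicSlot,
    // GuardDynamicSlotIsSpecificObject and GuardDynamicSlotIsNotObject.
    static Value LoadBySlotIndex(NativeObject* nobj, uint32_t slot) {
      return nobj->getSlotsUnchecked()[slot].get();
    }
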
CACHEOP_CASE(GuardNoAllocationMetadataBuilder) {
uint32_t builderAddrOffset = icregs.cacheIRReader.stubOffset();
uintptr_t builderAddr =
@@ -860,6 +1260,73 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
DISPATCH_CACHEOP();
}
+ CACHEOP_CASE(GuardFunctionHasJitEntry) {
+ ObjOperandId funId = icregs.cacheIRReader.objOperandId();
+ bool constructing = icregs.cacheIRReader.readBool();
+ JSObject* fun = reinterpret_cast<JSObject*>(icregs.icVals[funId.id()]);
+ uint16_t flags = FunctionFlags::HasJitEntryFlags(constructing);
+ if (!fun->as<JSFunction>().flags().hasFlags(flags)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardFunctionHasNoJitEntry) {
+ ObjOperandId funId = icregs.cacheIRReader.objOperandId();
+ JSObject* fun = reinterpret_cast<JSObject*>(icregs.icVals[funId.id()]);
+ uint16_t flags = FunctionFlags::HasJitEntryFlags(/*constructing =*/false);
+ if (fun->as<JSFunction>().flags().hasFlags(flags)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardFunctionIsNonBuiltinCtor) {
+ ObjOperandId funId = icregs.cacheIRReader.objOperandId();
+ JSObject* fun = reinterpret_cast<JSObject*>(icregs.icVals[funId.id()]);
+ if (!fun->as<JSFunction>().isNonBuiltinConstructor()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardFunctionIsConstructor) {
+ ObjOperandId funId = icregs.cacheIRReader.objOperandId();
+ JSObject* fun = reinterpret_cast<JSObject*>(icregs.icVals[funId.id()]);
+ if (!fun->as<JSFunction>().isConstructor()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardNotClassConstructor) {
+ ObjOperandId funId = icregs.cacheIRReader.objOperandId();
+ JSObject* fun = reinterpret_cast<JSObject*>(icregs.icVals[funId.id()]);
+ if (fun->as<JSFunction>().isClassConstructor()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardArrayIsPacked) {
+ ObjOperandId arrayId = icregs.cacheIRReader.objOperandId();
+ JSObject* array = reinterpret_cast<JSObject*>(icregs.icVals[arrayId.id()]);
+ if (!IsPackedArray(array)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(GuardArgumentsObjectFlags) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint8_t flags = icregs.cacheIRReader.readByte();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ if (obj->as<ArgumentsObject>().hasFlags(flags)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ DISPATCH_CACHEOP();
+ }
+
CACHEOP_CASE(LoadObject) {
ObjOperandId resultId = icregs.cacheIRReader.objOperandId();
BOUNDSCHECK(resultId);
@@ -893,6 +1360,35 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
DISPATCH_CACHEOP();
}
+ CACHEOP_CASE(LoadEnclosingEnvironment) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ ObjOperandId resultId = icregs.cacheIRReader.objOperandId();
+ BOUNDSCHECK(resultId);
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ JSObject* env = &obj->as<EnvironmentObject>().enclosingEnvironment();
+ icregs.icVals[resultId.id()] = reinterpret_cast<uintptr_t>(env);
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(LoadWrapperTarget) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ ObjOperandId resultId = icregs.cacheIRReader.objOperandId();
+ BOUNDSCHECK(resultId);
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ JSObject* target = &obj->as<ProxyObject>().private_().toObject();
+ icregs.icVals[resultId.id()] = reinterpret_cast<uintptr_t>(target);
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(LoadValueTag) {
+ ValOperandId valId = icregs.cacheIRReader.valOperandId();
+ ValueTagOperandId resultId = icregs.cacheIRReader.valueTagOperandId();
+ BOUNDSCHECK(resultId);
+ Value val = Value::fromRawBits(icregs.icVals[valId.id()]);
+ icregs.icVals[resultId.id()] = val.extractNonDoubleType();
+ DISPATCH_CACHEOP();
+ }
+
CACHEOP_CASE(LoadArgumentFixedSlot) {
ValOperandId resultId = icregs.cacheIRReader.valOperandId();
BOUNDSCHECK(resultId);
@@ -915,6 +1411,70 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
DISPATCH_CACHEOP();
}
+ CACHEOP_CASE(TruncateDoubleToUInt32) {
+ NumberOperandId inputId = icregs.cacheIRReader.numberOperandId();
+ Int32OperandId resultId = icregs.cacheIRReader.int32OperandId();
+ BOUNDSCHECK(resultId);
+ Value input = Value::fromRawBits(icregs.icVals[inputId.id()]);
+ icregs.icVals[resultId.id()] = JS::ToInt32(input.toNumber());
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(MegamorphicLoadSlotResult) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t nameOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ jsid name =
+ jsid::fromRawBits(cstub->stubInfo()->getStubRawWord(cstub, nameOffset));
+ if (!obj->shape()->isNative()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ Value result;
+ if (!GetNativeDataPropertyPureWithCacheLookup(
+ frameMgr.cxForLocalUseOnly(), obj, name, nullptr, &result)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ icregs.icResult = result.asRawBits();
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(MegamorphicLoadSlotByValueResult) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ ValOperandId idId = icregs.cacheIRReader.valOperandId();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ Value id = Value::fromRawBits(icregs.icVals[idId.id()]);
+ if (!obj->shape()->isNative()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ Value values[2] = {id};
+ if (!GetNativeDataPropertyByValuePure(frameMgr.cxForLocalUseOnly(), obj,
+ nullptr, values)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ icregs.icResult = values[1].asRawBits();
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(MegamorphicSetElement) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ ValOperandId idId = icregs.cacheIRReader.valOperandId();
+ ValOperandId rhsId = icregs.cacheIRReader.valOperandId();
+ bool strict = icregs.cacheIRReader.readBool();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ Value id = Value::fromRawBits(icregs.icVals[idId.id()]);
+ Value rhs = Value::fromRawBits(icregs.icVals[rhsId.id()]);
+ {
+ PUSH_IC_FRAME();
+ ReservedRooted<JSObject*> obj0(&state.obj0, obj);
+ ReservedRooted<Value> value0(&state.value0, id);
+ ReservedRooted<Value> value1(&state.value1, rhs);
+ if (!SetElementMegamorphic<false>(cx, obj0, value0, value1, strict)) {
+ return ICInterpretOpResult::Error;
+ }
+ }
+ DISPATCH_CACHEOP();
+ }
+
CACHEOP_CASE(StoreFixedSlot) {
ObjOperandId objId = icregs.cacheIRReader.objOperandId();
uint32_t offsetOffset = icregs.cacheIRReader.stubOffset();
@@ -926,7 +1486,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
reinterpret_cast<uintptr_t>(nobj) + offset);
Value val = Value::fromRawBits(icregs.icVals[rhsId.id()]);
slot->set(val);
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -942,7 +1502,75 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
size_t dynSlot = offset / sizeof(Value);
size_t slot = dynSlot + nobj->numFixedSlots();
slots[dynSlot].set(nobj, HeapSlot::Slot, slot, val);
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(AddAndStoreFixedSlot) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t offsetOffset = icregs.cacheIRReader.stubOffset();
+ ValOperandId rhsId = icregs.cacheIRReader.valOperandId();
+ uint32_t newShapeOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ int32_t offset = cstub->stubInfo()->getStubRawInt32(cstub, offsetOffset);
+ Value rhs = Value::fromRawBits(icregs.icVals[rhsId.id()]);
+ Shape* newShape = reinterpret_cast<Shape*>(
+ cstub->stubInfo()->getStubRawWord(cstub, newShapeOffset));
+ obj->setShape(newShape);
+ GCPtr<Value>* slot = reinterpret_cast<GCPtr<Value>*>(
+ reinterpret_cast<uintptr_t>(obj) + offset);
+ slot->init(rhs);
+ PREDICT_RETURN();
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(AddAndStoreDynamicSlot) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t offsetOffset = icregs.cacheIRReader.stubOffset();
+ ValOperandId rhsId = icregs.cacheIRReader.valOperandId();
+ uint32_t newShapeOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ int32_t offset = cstub->stubInfo()->getStubRawInt32(cstub, offsetOffset);
+ Value rhs = Value::fromRawBits(icregs.icVals[rhsId.id()]);
+ Shape* newShape = reinterpret_cast<Shape*>(
+ cstub->stubInfo()->getStubRawWord(cstub, newShapeOffset));
+ NativeObject* nobj = &obj->as<NativeObject>();
+ obj->setShape(newShape);
+ HeapSlot* slots = nobj->getSlotsUnchecked();
+ size_t dynSlot = offset / sizeof(Value);
+ size_t slot = dynSlot + nobj->numFixedSlots();
+ slots[dynSlot].init(nobj, HeapSlot::Slot, slot, rhs);
+ PREDICT_RETURN();
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(AllocateAndStoreDynamicSlot) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ uint32_t offsetOffset = icregs.cacheIRReader.stubOffset();
+ ValOperandId rhsId = icregs.cacheIRReader.valOperandId();
+ uint32_t newShapeOffset = icregs.cacheIRReader.stubOffset();
+ uint32_t numNewSlotsOffset = icregs.cacheIRReader.stubOffset();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ int32_t offset = cstub->stubInfo()->getStubRawInt32(cstub, offsetOffset);
+ Value rhs = Value::fromRawBits(icregs.icVals[rhsId.id()]);
+ Shape* newShape = reinterpret_cast<Shape*>(
+ cstub->stubInfo()->getStubRawWord(cstub, newShapeOffset));
+ int32_t numNewSlots =
+ cstub->stubInfo()->getStubRawInt32(cstub, numNewSlotsOffset);
+ NativeObject* nobj = &obj->as<NativeObject>();
+ // We have to (re)allocate dynamic slots. Do this first, as it's the
+ // only fallible operation here. Note that growSlotsPure is fallible but
+ // does not GC. Otherwise this is the same as AddAndStoreDynamicSlot above.
+ if (!NativeObject::growSlotsPure(frameMgr.cxForLocalUseOnly(), nobj,
+ numNewSlots)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ obj->setShape(newShape);
+ HeapSlot* slots = nobj->getSlotsUnchecked();
+ size_t dynSlot = offset / sizeof(Value);
+ size_t slot = dynSlot + nobj->numFixedSlots();
+ slots[dynSlot].init(nobj, HeapSlot::Slot, slot, rhs);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
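
The dynSlot/slot arithmetic in the two stores above deserves spelling out: the stub data carries a byte offset into the dynamic-slot array, the write barrier wants the logical slot number, and the two differ by the object's fixed-slot count. Worked example, assuming a 64-bit Value, four fixed slots, and a stub offset of 16 bytes:

    size_t dynSlot = 16 / sizeof(Value);            // == 2, index into slots_[]
    size_t slot = dynSlot + 4 /* numFixedSlots */;  // == 6, logical slot number
    // slots[dynSlot].init(nobj, HeapSlot::Slot, slot, rhs) then writes the
    // value at dynamic index 2 while barriering it as logical slot 6.
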
@@ -964,7 +1592,71 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
Value val = Value::fromRawBits(icregs.icVals[rhsId.id()]);
slot->set(nobj, HeapSlot::Element, index + elems->numShiftedElements(),
val);
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(StoreDenseElementHole) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ Int32OperandId indexId = icregs.cacheIRReader.int32OperandId();
+ ValOperandId rhsId = icregs.cacheIRReader.valOperandId();
+ bool handleAdd = icregs.cacheIRReader.readBool();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ uint32_t index = uint32_t(icregs.icVals[indexId.id()]);
+ Value rhs = Value::fromRawBits(icregs.icVals[rhsId.id()]);
+ NativeObject* nobj = &obj->as<NativeObject>();
+ uint32_t initLength = nobj->getDenseInitializedLength();
+ if (index < initLength) {
+ nobj->setDenseElement(index, rhs);
+ } else if (!handleAdd || index > initLength) {
+ return ICInterpretOpResult::NextIC;
+ } else {
+ if (index >= nobj->getDenseCapacity()) {
+ if (!NativeObject::addDenseElementPure(frameMgr.cxForLocalUseOnly(),
+ nobj)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ }
+ nobj->setDenseInitializedLength(initLength + 1);
+
+ // Baseline always updates the length field by directly accessing its
+ // offset in ObjectElements. If the object is not an ArrayObject then this
+ // field is never read, so it's okay to skip the update here in that case.
+ if (nobj->is<ArrayObject>()) {
+ ArrayObject* aobj = &nobj->as<ArrayObject>();
+ uint32_t len = aobj->length();
+ if (len <= index) {
+ aobj->setLength(len + 1);
+ }
+ }
+
+ nobj->initDenseElement(index, rhs);
+ }
+ PREDICT_RETURN();
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(ArrayPush) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ ValOperandId rhsId = icregs.cacheIRReader.valOperandId();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ Value rhs = Value::fromRawBits(icregs.icVals[rhsId.id()]);
+ ArrayObject* aobj = &obj->as<ArrayObject>();
+ uint32_t initLength = aobj->getDenseInitializedLength();
+ if (aobj->length() != initLength) {
+ return ICInterpretOpResult::NextIC;
+ }
+ if (initLength >= aobj->getDenseCapacity()) {
+ if (!NativeObject::addDenseElementPure(frameMgr.cxForLocalUseOnly(),
+ aobj)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ }
+ aobj->setDenseInitializedLength(initLength + 1);
+ aobj->setLength(initLength + 1);
+ aobj->initDenseElement(initLength, rhs);
+ icregs.icResult = Int32Value(initLength + 1).asRawBits();
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
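
StoreDenseElementHole distinguishes exactly three cases on the incoming index; restated as a tiny classifier (hypothetical helper, not engine code):

    enum class DenseStoreKind { Overwrite, Append, Bail };

    static DenseStoreKind ClassifyDenseStore(uint32_t index,
                                             uint32_t initLength,
                                             bool handleAdd) {
      if (index < initLength) {
        return DenseStoreKind::Overwrite;  // plain in-bounds store
      }
      if (handleAdd && index == initLength) {
        return DenseStoreKind::Append;  // may first grow the dense capacity
      }
      return DenseStoreKind::Bail;  // a hole past the end: go to the next stub
    }
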
@@ -972,7 +1664,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
ValOperandId inputId = icregs.cacheIRReader.valOperandId();
Value val = Value::fromRawBits(icregs.icVals[inputId.id()]);
icregs.icResult = BooleanValue(val.isObject()).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -989,6 +1681,66 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
DISPATCH_CACHEOP();
}
+ CACHEOP_CASE(StoreTypedArrayElement) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ Scalar::Type elementType = icregs.cacheIRReader.scalarType();
+ IntPtrOperandId indexId = icregs.cacheIRReader.intPtrOperandId();
+ uint32_t rhsId = icregs.cacheIRReader.rawOperandId();
+ bool handleOOB = icregs.cacheIRReader.readBool();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ uintptr_t index = uintptr_t(icregs.icVals[indexId.id()]);
+ uint64_t rhs = icregs.icVals[rhsId];
+ if (obj->as<TypedArrayObject>().length().isNothing()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ if (index >= obj->as<TypedArrayObject>().length().value()) {
+ if (!handleOOB) {
+ return ICInterpretOpResult::NextIC;
+ }
+ } else {
+ Value v;
+ switch (elementType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Uint8Clamped:
+ v = Int32Value(rhs);
+ break;
+
+ case Scalar::Float32:
+ case Scalar::Float64:
+ v = Value::fromRawBits(rhs);
+ MOZ_ASSERT(v.isNumber());
+ break;
+
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ v = BigIntValue(reinterpret_cast<JS::BigInt*>(rhs));
+ break;
+
+ case Scalar::MaxTypedArrayViewType:
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ MOZ_CRASH("Unsupported TypedArray type");
+ }
+
+ // SetTypedArrayElement doesn't do anything that can actually GC or need a
+ // new context when the value can only be Int32, Double, or BigInt, as the
+ // above switch statement enforces.
+ FakeRooted<TypedArrayObject*> obj0(nullptr, &obj->as<TypedArrayObject>());
+ FakeRooted<Value> value0(nullptr, v);
+ ObjectOpResult result;
+ MOZ_ASSERT(elementType == obj0->type());
+ MOZ_ALWAYS_TRUE(SetTypedArrayElement(frameMgr.cxForLocalUseOnly(), obj0,
+ index, value0, result));
+ MOZ_ALWAYS_TRUE(result.ok());
+ }
+ DISPATCH_CACHEOP();
+ }
+
CACHEOP_CASE(CallInt32ToString) {
Int32OperandId inputId = icregs.cacheIRReader.int32OperandId();
StringOperandId resultId = icregs.cacheIRReader.stringOperandId();
@@ -1031,8 +1783,12 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
}
// For now, fail any constructing or different-realm cases.
- if (flags.isConstructing() || !flags.isSameRealm()) {
- TRACE_PRINTF("failing: constructing or not same realm\n");
+ if (flags.isConstructing()) {
+ TRACE_PRINTF("failing: constructing\n");
+ return ICInterpretOpResult::NextIC;
+ }
+ if (!flags.isSameRealm()) {
+ TRACE_PRINTF("failing: not same realm\n");
return ICInterpretOpResult::NextIC;
}
// And support only "standard" arg formats.
@@ -1123,6 +1879,15 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
}
}
+ PREDICT_RETURN();
+ DISPATCH_CACHEOP();
+ }
+
+ CACHEOP_CASE(MetaScriptedThisShape) {
+ uint32_t thisShapeOffset = icregs.cacheIRReader.stubOffset();
+ // This op is only metadata for the Warp Transpiler and should be ignored.
+ (void)thisShapeOffset;
+ PREDICT_NEXT(CallScriptedFunction);
DISPATCH_CACHEOP();
}
@@ -1139,7 +1904,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
"slot %" PRIx64 "\n",
nobj, int(offsetOffset), int(offset), slot, slot->asRawBits());
icregs.icResult = slot->asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1151,7 +1916,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
reinterpret_cast<NativeObject*>(icregs.icVals[objId.id()]);
HeapSlot* slots = nobj->getSlotsUnchecked();
icregs.icResult = slots[offset / sizeof(Value)].get().asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1171,7 +1936,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
return ICInterpretOpResult::NextIC;
}
icregs.icResult = val.asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1184,7 +1949,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
return ICInterpretOpResult::NextIC;
}
icregs.icResult = Int32Value(length).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1202,6 +1967,22 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
DISPATCH_CACHEOP();
}
+ CACHEOP_CASE(LoadArgumentsObjectArgResult) {
+ ObjOperandId objId = icregs.cacheIRReader.objOperandId();
+ Int32OperandId indexId = icregs.cacheIRReader.int32OperandId();
+ JSObject* obj = reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]);
+ uint32_t index = uint32_t(icregs.icVals[indexId.id()]);
+ ArgumentsObject* args = &obj->as<ArgumentsObject>();
+ if (index >= args->initialLength() || args->hasOverriddenElement()) {
+ return ICInterpretOpResult::NextIC;
+ }
+ if (args->argIsForwarded(index)) {
+ return ICInterpretOpResult::NextIC;
+ }
+ icregs.icResult = args->arg(index).asRawBits();
+ DISPATCH_CACHEOP();
+ }
+
CACHEOP_CASE(LinearizeForCharAccess) {
StringOperandId strId = icregs.cacheIRReader.stringOperandId();
Int32OperandId indexId = icregs.cacheIRReader.int32OperandId();
@@ -1258,7 +2039,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
}
}
icregs.icResult = StringValue(result).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1286,7 +2067,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
result = Int32Value(c);
}
icregs.icResult = result.asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1298,7 +2079,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
return ICInterpretOpResult::NextIC;
}
icregs.icResult = Int32Value(length).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1307,7 +2088,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
icregs.icResult =
ObjectValue(*reinterpret_cast<JSObject*>(icregs.icVals[objId.id()]))
.asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1316,6 +2097,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
icregs.icResult =
StringValue(reinterpret_cast<JSString*>(icregs.icVals[strId.id()]))
.asRawBits();
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1324,14 +2106,14 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
icregs.icResult =
SymbolValue(reinterpret_cast<JS::Symbol*>(icregs.icVals[symId.id()]))
.asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
CACHEOP_CASE(LoadInt32Result) {
Int32OperandId valId = icregs.cacheIRReader.int32OperandId();
icregs.icResult = Int32Value(icregs.icVals[valId.id()]).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1342,7 +2124,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
val = DoubleValue(val.toInt32());
}
icregs.icResult = val.asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1351,14 +2133,14 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
icregs.icResult =
BigIntValue(reinterpret_cast<JS::BigInt*>(icregs.icVals[valId.id()]))
.asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
CACHEOP_CASE(LoadBooleanResult) {
bool val = icregs.cacheIRReader.readBool();
icregs.icResult = BooleanValue(val).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1376,7 +2158,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
JSString* str = reinterpret_cast<JSString*>(
cstub->stubInfo()->getStubRawWord(cstub, strOffset));
icregs.icResult = StringValue(str).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1392,12 +2174,14 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
return ICInterpretOpResult::NextIC; \
} \
icregs.icResult = Int32Value(int32_t(result)).asRawBits(); \
- PREDICT_NEXT(ReturnFromIC); \
+ PREDICT_RETURN(); \
DISPATCH_CACHEOP(); \
}
+ // clang-format off
INT32_OP(Add, +, {});
INT32_OP(Sub, -, {});
+ // clang-format on
INT32_OP(Mul, *, {
if (rhs * lhs == 0 && ((rhs < 0) ^ (lhs < 0))) {
return ICInterpretOpResult::NextIC;
@@ -1422,8 +2206,11 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
return ICInterpretOpResult::NextIC;
}
});
+ // clang-format off
INT32_OP(BitOr, |, {});
+ INT32_OP(BitXor, ^, {});
INT32_OP(BitAnd, &, {});
+ // clang-format on
CACHEOP_CASE(Int32PowResult) {
Int32OperandId lhsId = icregs.cacheIRReader.int32OperandId();
@@ -1458,7 +2245,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
}
icregs.icResult = Int32Value(int32_t(result)).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1470,7 +2257,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
return ICInterpretOpResult::NextIC;
}
icregs.icResult = Int32Value(int32_t(value)).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1478,7 +2265,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
ValOperandId inputId = icregs.cacheIRReader.valOperandId();
int32_t val = int32_t(icregs.icVals[inputId.id()]);
icregs.icResult = BooleanValue(val != 0).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1487,7 +2274,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
JSString* str =
reinterpret_cast<JSLinearString*>(icregs.icVals[strId.id()]);
icregs.icResult = BooleanValue(str->length() > 0).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1499,21 +2286,21 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
return ICInterpretOpResult::NextIC;
}
icregs.icResult = BooleanValue(!cls->emulatesUndefined()).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
CACHEOP_CASE(LoadValueResult) {
uint32_t valOffset = icregs.cacheIRReader.stubOffset();
icregs.icResult = cstub->stubInfo()->getStubRawInt64(cstub, valOffset);
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
CACHEOP_CASE(LoadOperandResult) {
ValOperandId inputId = icregs.cacheIRReader.valOperandId();
icregs.icResult = icregs.icVals[inputId.id()];
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1533,7 +2320,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
} else {
return ICInterpretOpResult::NextIC;
}
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1548,57 +2335,72 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
ReservedRooted<JSString*> rhs(
&state.str1, reinterpret_cast<JSString*>(icregs.icVals[rhsId.id()]));
bool result;
- switch (op) {
- case JSOp::Eq:
- case JSOp::StrictEq:
- if (lhs->length() != rhs->length()) {
- result = false;
+ if (lhs == rhs) {
+ // If operands point to the same instance, the strings are trivially
+ // equal.
+ result = op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
+ op == JSOp::Ge;
+ } else {
+ switch (op) {
+ case JSOp::Eq:
+ case JSOp::StrictEq:
+ if (lhs->isAtom() && rhs->isAtom()) {
+ result = false;
+ break;
+ }
+ if (lhs->length() != rhs->length()) {
+ result = false;
+ break;
+ }
+ if (!StringsEqual<EqualityKind::Equal>(cx, lhs, rhs, &result)) {
+ return ICInterpretOpResult::Error;
+ }
break;
- }
- if (!StringsEqual<EqualityKind::Equal>(cx, lhs, rhs, &result)) {
- return ICInterpretOpResult::Error;
- }
- break;
- case JSOp::Ne:
- case JSOp::StrictNe:
- if (lhs->length() != rhs->length()) {
- result = true;
+ case JSOp::Ne:
+ case JSOp::StrictNe:
+ if (lhs->isAtom() && rhs->isAtom()) {
+ result = true;
+ break;
+ }
+ if (lhs->length() != rhs->length()) {
+ result = true;
+ break;
+ }
+ if (!StringsEqual<EqualityKind::NotEqual>(cx, lhs, rhs, &result)) {
+ return ICInterpretOpResult::Error;
+ }
break;
- }
- if (!StringsEqual<EqualityKind::NotEqual>(cx, lhs, rhs, &result)) {
- return ICInterpretOpResult::Error;
- }
- break;
- case JSOp::Lt:
- if (!StringsCompare<ComparisonKind::LessThan>(cx, lhs, rhs,
- &result)) {
- return ICInterpretOpResult::Error;
- }
- break;
- case JSOp::Ge:
- if (!StringsCompare<ComparisonKind::GreaterThanOrEqual>(cx, lhs, rhs,
- &result)) {
- return ICInterpretOpResult::Error;
- }
- break;
- case JSOp::Le:
- if (!StringsCompare<ComparisonKind::GreaterThanOrEqual>(
- cx, /* N.B. swapped order */ rhs, lhs, &result)) {
- return ICInterpretOpResult::Error;
- }
- break;
- case JSOp::Gt:
- if (!StringsCompare<ComparisonKind::LessThan>(
- cx, /* N.B. swapped order */ rhs, lhs, &result)) {
- return ICInterpretOpResult::Error;
- }
- break;
- default:
- MOZ_CRASH("bad opcode");
+ case JSOp::Lt:
+ if (!StringsCompare<ComparisonKind::LessThan>(cx, lhs, rhs,
+ &result)) {
+ return ICInterpretOpResult::Error;
+ }
+ break;
+ case JSOp::Ge:
+ if (!StringsCompare<ComparisonKind::GreaterThanOrEqual>(
+ cx, lhs, rhs, &result)) {
+ return ICInterpretOpResult::Error;
+ }
+ break;
+ case JSOp::Le:
+ if (!StringsCompare<ComparisonKind::GreaterThanOrEqual>(
+ cx, /* N.B. swapped order */ rhs, lhs, &result)) {
+ return ICInterpretOpResult::Error;
+ }
+ break;
+ case JSOp::Gt:
+ if (!StringsCompare<ComparisonKind::LessThan>(
+ cx, /* N.B. swapped order */ rhs, lhs, &result)) {
+ return ICInterpretOpResult::Error;
+ }
+ break;
+ default:
+ MOZ_CRASH("bad opcode");
+ }
}
icregs.icResult = BooleanValue(result).asRawBits();
}
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
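Two details of the rewritten string-comparison case above are worth spelling out: when both operands are the same instance (or are two distinct atoms), equality is decided without touching the characters, and Le/Gt reuse the Ge/Lt comparators with the operands swapped. A standalone sketch of the swap identity (illustrative only, using std::string in place of JSString):

    #include <cassert>
    #include <string>

    // With only "less than" and "greater than or equal" helpers available,
    // a <= b is computed as b >= a, and a > b as b < a.
    static bool LessThan(const std::string& a, const std::string& b) { return a < b; }
    static bool GreaterThanOrEqual(const std::string& a, const std::string& b) {
      return a >= b;
    }

    int main() {
      std::string a = "apple";
      std::string b = "banana";
      assert((a <= b) == GreaterThanOrEqual(b, a));  // Le via swapped Ge
      assert((a > b) == LessThan(b, a));             // Gt via swapped Lt
      return 0;
    }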
@@ -1636,7 +2438,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
MOZ_CRASH("Unexpected opcode");
}
icregs.icResult = BooleanValue(result).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1671,7 +2473,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
MOZ_CRASH("bad opcode");
}
icregs.icResult = BooleanValue(result).asRawBits();
- PREDICT_NEXT(ReturnFromIC);
+ PREDICT_RETURN();
DISPATCH_CACHEOP();
}
@@ -1686,15 +2488,9 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
DISPATCH_CACHEOP();
}
- CACHEOP_CASE_UNIMPL(GuardToNonGCThing)
- CACHEOP_CASE_UNIMPL(Int32ToIntPtr)
CACHEOP_CASE_UNIMPL(GuardNumberToIntPtrIndex)
- CACHEOP_CASE_UNIMPL(GuardToInt32ModUint32)
CACHEOP_CASE_UNIMPL(GuardToUint8Clamped)
CACHEOP_CASE_UNIMPL(GuardMultipleShapes)
- CACHEOP_CASE_UNIMPL(GuardNullProto)
- CACHEOP_CASE_UNIMPL(GuardAnyClass)
- CACHEOP_CASE_UNIMPL(HasClassResult)
CACHEOP_CASE_UNIMPL(CallRegExpMatcherResult)
CACHEOP_CASE_UNIMPL(CallRegExpSearcherResult)
CACHEOP_CASE_UNIMPL(RegExpSearcherLastLimitResult)
@@ -1708,53 +2504,19 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
CACHEOP_CASE_UNIMPL(RegExpPrototypeOptimizableResult)
CACHEOP_CASE_UNIMPL(RegExpInstanceOptimizableResult)
CACHEOP_CASE_UNIMPL(GetFirstDollarIndexResult)
- CACHEOP_CASE_UNIMPL(GuardCompartment)
- CACHEOP_CASE_UNIMPL(GuardIsExtensible)
- CACHEOP_CASE_UNIMPL(GuardIsNativeObject)
- CACHEOP_CASE_UNIMPL(GuardIsProxy)
- CACHEOP_CASE_UNIMPL(GuardIsNotProxy)
- CACHEOP_CASE_UNIMPL(GuardIsNotArrayBufferMaybeShared)
- CACHEOP_CASE_UNIMPL(GuardIsTypedArray)
CACHEOP_CASE_UNIMPL(GuardIsFixedLengthTypedArray)
- CACHEOP_CASE_UNIMPL(GuardHasProxyHandler)
- CACHEOP_CASE_UNIMPL(GuardIsNotDOMProxy)
- CACHEOP_CASE_UNIMPL(GuardObjectIdentity)
- CACHEOP_CASE_UNIMPL(GuardNoDenseElements)
- CACHEOP_CASE_UNIMPL(GuardStringToIndex)
- CACHEOP_CASE_UNIMPL(GuardStringToInt32)
- CACHEOP_CASE_UNIMPL(GuardStringToNumber)
+ CACHEOP_CASE_UNIMPL(GuardIsResizableTypedArray)
CACHEOP_CASE_UNIMPL(StringToAtom)
- CACHEOP_CASE_UNIMPL(BooleanToNumber)
- CACHEOP_CASE_UNIMPL(GuardHasGetterSetter)
- CACHEOP_CASE_UNIMPL(GuardInt32IsNonNegative)
CACHEOP_CASE_UNIMPL(GuardIndexIsValidUpdateOrAdd)
CACHEOP_CASE_UNIMPL(GuardIndexIsNotDenseElement)
CACHEOP_CASE_UNIMPL(GuardTagNotEqual)
CACHEOP_CASE_UNIMPL(GuardXrayExpandoShapeAndDefaultProto)
CACHEOP_CASE_UNIMPL(GuardXrayNoExpando)
- CACHEOP_CASE_UNIMPL(GuardDynamicSlotIsNotObject)
- CACHEOP_CASE_UNIMPL(GuardFixedSlotValue)
- CACHEOP_CASE_UNIMPL(GuardDynamicSlotValue)
+ CACHEOP_CASE_UNIMPL(GuardEitherClass)
CACHEOP_CASE_UNIMPL(LoadScriptedProxyHandler)
CACHEOP_CASE_UNIMPL(IdToStringOrSymbol)
- CACHEOP_CASE_UNIMPL(LoadFixedSlot)
- CACHEOP_CASE_UNIMPL(LoadDynamicSlot)
- CACHEOP_CASE_UNIMPL(GuardFunctionHasJitEntry)
- CACHEOP_CASE_UNIMPL(GuardFunctionHasNoJitEntry)
- CACHEOP_CASE_UNIMPL(GuardFunctionIsNonBuiltinCtor)
- CACHEOP_CASE_UNIMPL(GuardFunctionIsConstructor)
- CACHEOP_CASE_UNIMPL(GuardNotClassConstructor)
- CACHEOP_CASE_UNIMPL(GuardArrayIsPacked)
- CACHEOP_CASE_UNIMPL(GuardArgumentsObjectFlags)
- CACHEOP_CASE_UNIMPL(LoadEnclosingEnvironment)
- CACHEOP_CASE_UNIMPL(LoadWrapperTarget)
- CACHEOP_CASE_UNIMPL(LoadValueTag)
- CACHEOP_CASE_UNIMPL(TruncateDoubleToUInt32)
CACHEOP_CASE_UNIMPL(DoubleToUint8Clamped)
- CACHEOP_CASE_UNIMPL(MegamorphicLoadSlotResult)
- CACHEOP_CASE_UNIMPL(MegamorphicLoadSlotByValueResult)
CACHEOP_CASE_UNIMPL(MegamorphicStoreSlot)
- CACHEOP_CASE_UNIMPL(MegamorphicSetElement)
CACHEOP_CASE_UNIMPL(MegamorphicHasPropResult)
CACHEOP_CASE_UNIMPL(SmallObjectVariableKeyHasOwnResult)
CACHEOP_CASE_UNIMPL(ObjectToIteratorResult)
@@ -1763,12 +2525,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
CACHEOP_CASE_UNIMPL(LoadDOMExpandoValueGuardGeneration)
CACHEOP_CASE_UNIMPL(LoadDOMExpandoValueIgnoreGeneration)
CACHEOP_CASE_UNIMPL(GuardDOMExpandoMissingOrGuardShape)
- CACHEOP_CASE_UNIMPL(AddAndStoreFixedSlot)
- CACHEOP_CASE_UNIMPL(AddAndStoreDynamicSlot)
- CACHEOP_CASE_UNIMPL(AllocateAndStoreDynamicSlot)
CACHEOP_CASE_UNIMPL(AddSlotAndCallAddPropHook)
- CACHEOP_CASE_UNIMPL(StoreDenseElementHole)
- CACHEOP_CASE_UNIMPL(ArrayPush)
CACHEOP_CASE_UNIMPL(ArrayJoinResult)
CACHEOP_CASE_UNIMPL(ObjectKeysResult)
CACHEOP_CASE_UNIMPL(PackedArrayPopResult)
@@ -1785,10 +2542,22 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
CACHEOP_CASE_UNIMPL(IsTypedArrayConstructorResult)
CACHEOP_CASE_UNIMPL(ArrayBufferViewByteOffsetInt32Result)
CACHEOP_CASE_UNIMPL(ArrayBufferViewByteOffsetDoubleResult)
+ CACHEOP_CASE_UNIMPL(ResizableTypedArrayByteOffsetMaybeOutOfBoundsInt32Result)
+ CACHEOP_CASE_UNIMPL(ResizableTypedArrayByteOffsetMaybeOutOfBoundsDoubleResult)
CACHEOP_CASE_UNIMPL(TypedArrayByteLengthInt32Result)
CACHEOP_CASE_UNIMPL(TypedArrayByteLengthDoubleResult)
+ CACHEOP_CASE_UNIMPL(ResizableTypedArrayByteLengthInt32Result)
+ CACHEOP_CASE_UNIMPL(ResizableTypedArrayByteLengthDoubleResult)
+ CACHEOP_CASE_UNIMPL(ResizableTypedArrayLengthInt32Result)
+ CACHEOP_CASE_UNIMPL(ResizableTypedArrayLengthDoubleResult)
CACHEOP_CASE_UNIMPL(TypedArrayElementSizeResult)
+ CACHEOP_CASE_UNIMPL(ResizableDataViewByteLengthInt32Result)
+ CACHEOP_CASE_UNIMPL(ResizableDataViewByteLengthDoubleResult)
+ CACHEOP_CASE_UNIMPL(GrowableSharedArrayBufferByteLengthInt32Result)
+ CACHEOP_CASE_UNIMPL(GrowableSharedArrayBufferByteLengthDoubleResult)
CACHEOP_CASE_UNIMPL(GuardHasAttachedArrayBuffer)
+ CACHEOP_CASE_UNIMPL(GuardResizableArrayBufferViewInBounds)
+ CACHEOP_CASE_UNIMPL(GuardResizableArrayBufferViewInBoundsOrDetached)
CACHEOP_CASE_UNIMPL(NewArrayIteratorResult)
CACHEOP_CASE_UNIMPL(NewStringIteratorResult)
CACHEOP_CASE_UNIMPL(NewRegExpStringIteratorResult)
@@ -1843,7 +2612,6 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
CACHEOP_CASE_UNIMPL(DoubleParseIntResult)
CACHEOP_CASE_UNIMPL(ObjectToStringResult)
CACHEOP_CASE_UNIMPL(ReflectGetPrototypeOfResult)
- CACHEOP_CASE_UNIMPL(StoreTypedArrayElement)
CACHEOP_CASE_UNIMPL(AtomicsCompareExchangeResult)
CACHEOP_CASE_UNIMPL(AtomicsExchangeResult)
CACHEOP_CASE_UNIMPL(AtomicsAddResult)
@@ -1875,7 +2643,6 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
CACHEOP_CASE_UNIMPL(CallScriptedProxyGetResult)
CACHEOP_CASE_UNIMPL(CallScriptedProxyGetByValueResult)
#endif
- CACHEOP_CASE_UNIMPL(MetaScriptedThisShape)
CACHEOP_CASE_UNIMPL(BindFunctionResult)
CACHEOP_CASE_UNIMPL(SpecializedBindFunctionResult)
CACHEOP_CASE_UNIMPL(LoadFixedSlotTypedResult)
@@ -1887,7 +2654,6 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
CACHEOP_CASE_UNIMPL(LoadTypedArrayElementResult)
CACHEOP_CASE_UNIMPL(LoadDataViewValueResult)
CACHEOP_CASE_UNIMPL(StoreDataViewValueResult)
- CACHEOP_CASE_UNIMPL(LoadArgumentsObjectArgResult)
CACHEOP_CASE_UNIMPL(LoadArgumentsObjectArgHoleResult)
CACHEOP_CASE_UNIMPL(LoadArgumentsObjectArgExistsResult)
CACHEOP_CASE_UNIMPL(LoadArgumentsObjectLengthResult)
@@ -1932,7 +2698,6 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
CACHEOP_CASE_UNIMPL(BigIntDivResult)
CACHEOP_CASE_UNIMPL(BigIntModResult)
CACHEOP_CASE_UNIMPL(BigIntPowResult)
- CACHEOP_CASE_UNIMPL(Int32BitXorResult)
CACHEOP_CASE_UNIMPL(Int32LeftShiftResult)
CACHEOP_CASE_UNIMPL(Int32RightShiftResult)
CACHEOP_CASE_UNIMPL(Int32URightShiftResult)
@@ -1996,8 +2761,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
CACHEOP_CASE_UNIMPL(Breakpoint)
CACHEOP_CASE_UNIMPL(WrapResult)
CACHEOP_CASE_UNIMPL(Bailout)
- CACHEOP_CASE_UNIMPL(AssertRecoveredOnBailoutResult)
- CACHEOP_CASE_UNIMPL(GuardIsNotUninitializedLexical) {
+ CACHEOP_CASE_UNIMPL(AssertRecoveredOnBailoutResult) {
TRACE_PRINTF("unknown CacheOp: %s\n", CacheIROpNames[int(cacheop)]);
return ICInterpretOpResult::NextIC;
}
@@ -2498,6 +3262,13 @@ PBIResult PortableBaselineInterpret(JSContext* cx_, State& state, Stack& stack,
}
ret->setUndefined();
+ // Check if we are being debugged, and set a flag in the frame if so. This
+ // flag must be set before calling InitFunctionEnvironmentObjects.
+ if (script->isDebuggee()) {
+ TRACE_PRINTF("Script is debuggee\n");
+ frame->setIsDebuggee();
+ }
+
if (CalleeTokenIsFunction(frame->calleeToken())) {
JSFunction* func = CalleeTokenToFunction(frame->calleeToken());
frame->setEnvironmentChain(func->environment());
@@ -2511,12 +3282,8 @@ PBIResult PortableBaselineInterpret(JSContext* cx_, State& state, Stack& stack,
}
}
- // Check if we are being debugged, and set a flag in the frame if
- // so.
+ // The debug prologue can't run until the function environment is set up.
if (script->isDebuggee()) {
- TRACE_PRINTF("Script is debuggee\n");
- frame->setIsDebuggee();
-
PUSH_EXIT_FRAME();
if (!DebugPrologue(cx, frame)) {
goto error;
diff --git a/js/src/vm/Realm.cpp b/js/src/vm/Realm.cpp
index d2ad39f3db..4e3eba5677 100644
--- a/js/src/vm/Realm.cpp
+++ b/js/src/vm/Realm.cpp
@@ -237,11 +237,17 @@ void Realm::traceRoots(JSTracer* trc,
// The global is never nursery allocated, so we don't need to
// trace it when doing a minor collection.
//
- // If a realm is on-stack, we mark its global so that
- // JSContext::global() remains valid.
+ // If a realm is on-stack, we mark its global so that JSContext::global()
+ // remains valid.
if (shouldTraceGlobal() && global_) {
TraceRoot(trc, global_.unbarrieredAddress(), "on-stack realm global");
}
+
+ // If the realm is still being initialized we set a flag so that it doesn't
+ // get deleted, since there may be GC things that contain pointers to it.
+ if (shouldTraceGlobal() && initializingGlobal_) {
+ allocatedDuringIncrementalGC_ = true;
+ }
}
// Nothing below here needs to be treated as a root if we aren't marking
diff --git a/js/src/vm/Realm.h b/js/src/vm/Realm.h
index 2e6d56aa5e..4518b4ced4 100644
--- a/js/src/vm/Realm.h
+++ b/js/src/vm/Realm.h
@@ -17,6 +17,7 @@
#include <stddef.h>
#include "builtin/Array.h"
+#include "ds/IdValuePair.h"
#include "gc/Barrier.h"
#include "js/GCVariant.h"
#include "js/RealmOptions.h"
@@ -129,7 +130,7 @@ class NewPlainObjectWithPropsCache {
public:
NewPlainObjectWithPropsCache() { purge(); }
- SharedShape* lookup(IdValuePair* properties, size_t nproperties) const;
+ SharedShape* lookup(Handle<IdValueVector> properties) const;
void add(SharedShape* shape);
void purge() {
@@ -436,9 +437,6 @@ class JS::Realm : public JS::shadow::Realm {
// features are required.
bool isUnlimitedStacksCapturingEnabled = false;
- // Whether or not the deprecation warning for bug 1873186 has been shown.
- bool warnedAboutDateLateWeekday = false;
-
private:
void updateDebuggerObservesFlag(unsigned flag);
diff --git a/js/src/vm/RegExpObject.cpp b/js/src/vm/RegExpObject.cpp
index 256aade5f8..29806c21d4 100644
--- a/js/src/vm/RegExpObject.cpp
+++ b/js/src/vm/RegExpObject.cpp
@@ -298,7 +298,6 @@ void RegExpObject::initAndZeroLastIndex(JSAtom* source, RegExpFlags flags,
zeroLastIndex(cx);
}
-#if defined(DEBUG) || defined(JS_JITSPEW)
template <typename KnownF, typename UnknownF>
void ForEachRegExpFlag(JS::RegExpFlags flags, KnownF known, UnknownF unknown) {
uint8_t raw = flags.value();
@@ -336,6 +335,14 @@ void ForEachRegExpFlag(JS::RegExpFlags flags, KnownF known, UnknownF unknown) {
}
}
+std::ostream& JS::operator<<(std::ostream& os, RegExpFlags flags) {
+ ForEachRegExpFlag(
+ flags, [&](const char* name, const char* c) { os << c; },
+ [&](uint8_t value) { os << '?'; });
+ return os;
+}
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
void RegExpObject::dumpOwnFields(js::JSONPrinter& json) const {
{
js::GenericPrinter& out = json.beginStringProperty("source");
@@ -1120,36 +1127,7 @@ static bool ParseRegExpFlags(const CharT* chars, size_t length,
for (size_t i = 0; i < length; i++) {
uint8_t flag;
- switch (chars[i]) {
- case 'd':
- flag = RegExpFlag::HasIndices;
- break;
- case 'g':
- flag = RegExpFlag::Global;
- break;
- case 'i':
- flag = RegExpFlag::IgnoreCase;
- break;
- case 'm':
- flag = RegExpFlag::Multiline;
- break;
- case 's':
- flag = RegExpFlag::DotAll;
- break;
- case 'u':
- flag = RegExpFlag::Unicode;
- break;
- case 'v':
- flag = RegExpFlag::UnicodeSets;
- break;
- case 'y':
- flag = RegExpFlag::Sticky;
- break;
- default:
- *invalidFlag = chars[i];
- return false;
- }
- if (*flagsOut & flag) {
+ if (!JS::MaybeParseRegExpFlag(chars[i], &flag) || *flagsOut & flag) {
*invalidFlag = chars[i];
return false;
}
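The deleted switch above moves the character-to-flag mapping behind JS::MaybeParseRegExpFlag, while the caller keeps rejecting both unknown and duplicate flags in a single branch. A hedged sketch of that shape (illustrative only; the bit values below are placeholders, not the real JS::RegExpFlag constants):

    #include <cstddef>
    #include <cstdint>
    #include <optional>

    // Placeholder bit assignments for the sketch; the real values live in
    // JS::RegExpFlag.
    static std::optional<uint8_t> FlagBitFor(char c) {
      switch (c) {
        case 'd': return 0x01;  // has-indices
        case 'g': return 0x02;  // global
        case 'i': return 0x04;  // ignore-case
        case 'm': return 0x08;  // multiline
        case 's': return 0x10;  // dot-all
        case 'u': return 0x20;  // unicode
        case 'v': return 0x40;  // unicode-sets
        case 'y': return 0x80;  // sticky
        default:  return std::nullopt;
      }
    }

    static bool ParseFlags(const char* chars, size_t length, uint8_t* flagsOut) {
      *flagsOut = 0;
      for (size_t i = 0; i < length; i++) {
        std::optional<uint8_t> bit = FlagBitFor(chars[i]);
        if (!bit || (*flagsOut & *bit)) {
          return false;  // unknown flag or duplicate flag
        }
        *flagsOut |= *bit;
      }
      return true;
    }

    int main() {
      uint8_t flags = 0;
      return ParseFlags("gis", 3, &flags) && !ParseFlags("gg", 2, &flags) ? 0 : 1;
    }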
diff --git a/js/src/vm/RegExpShared.h b/js/src/vm/RegExpShared.h
index 4ff68e9ee1..07f57d1e7a 100644
--- a/js/src/vm/RegExpShared.h
+++ b/js/src/vm/RegExpShared.h
@@ -103,7 +103,7 @@ class RegExpShared
size_t byteCodeLength() const {
MOZ_ASSERT(byteCode);
- return byteCode->length;
+ return byteCode->length();
}
};
diff --git a/js/src/vm/Runtime.h b/js/src/vm/Runtime.h
index 934a534185..57d4fb1411 100644
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -526,9 +526,9 @@ struct JSRuntime {
js::GeckoProfilerRuntime& geckoProfiler() { return geckoProfiler_.ref(); }
// Heap GC roots for PersistentRooted pointers.
- js::MainThreadData<
- mozilla::EnumeratedArray<JS::RootKind, JS::RootKind::Limit,
- mozilla::LinkedList<js::PersistentRootedBase>>>
+ js::MainThreadData<mozilla::EnumeratedArray<
+ JS::RootKind, mozilla::LinkedList<js::PersistentRootedBase>,
+ size_t(JS::RootKind::Limit)>>
heapRoots;
void tracePersistentRoots(JSTracer* trc);
diff --git a/js/src/vm/Scope.cpp b/js/src/vm/Scope.cpp
index c48d0976d8..45cbeb6419 100644
--- a/js/src/vm/Scope.cpp
+++ b/js/src/vm/Scope.cpp
@@ -148,6 +148,33 @@ SharedShape* js::CreateEnvironmentShape(JSContext* cx, BindingIter& bi,
map, mapLength, objectFlags);
}
+SharedShape* js::CreateEnvironmentShapeForSyntheticModule(
+ JSContext* cx, const JSClass* cls, uint32_t numSlots,
+ Handle<ModuleObject*> module) {
+ Rooted<SharedPropMap*> map(cx);
+ uint32_t mapLength = 0;
+
+ PropertyFlags propFlags = {PropertyFlag::Enumerable};
+ ObjectFlags objectFlags = ModuleEnvironmentObject::OBJECT_FLAGS;
+
+ RootedId id(cx);
+ uint32_t slotIndex = numSlots;
+ for (JSAtom* exportName : module->syntheticExportNames()) {
+ id = NameToId(exportName->asPropertyName());
+ if (!SharedPropMap::addPropertyWithKnownSlot(cx, cls, &map, &mapLength, id,
+ propFlags, slotIndex,
+ &objectFlags)) {
+ return nullptr;
+ }
+ slotIndex++;
+ }
+
+ uint32_t numFixed = gc::GetGCKindSlots(gc::GetGCObjectKind(numSlots));
+ return SharedShape::getInitialOrPropMapShape(cx, cls, cx->realm(),
+ TaggedProto(nullptr), numFixed,
+ map, mapLength, objectFlags);
+}
+
template <class DataT>
inline size_t SizeOfAllocatedData(DataT* data) {
return SizeOfScopeData<DataT>(data->length);
diff --git a/js/src/vm/Scope.h b/js/src/vm/Scope.h
index a914a14f28..22777f100a 100644
--- a/js/src/vm/Scope.h
+++ b/js/src/vm/Scope.h
@@ -1765,6 +1765,10 @@ SharedShape* CreateEnvironmentShape(JSContext* cx, BindingIter& bi,
const JSClass* cls, uint32_t numSlots,
ObjectFlags objectFlags);
+SharedShape* CreateEnvironmentShapeForSyntheticModule(
+ JSContext* cx, const JSClass* cls, uint32_t numSlots,
+ Handle<ModuleObject*> module);
+
SharedShape* EmptyEnvironmentShape(JSContext* cx, const JSClass* cls,
uint32_t numSlots, ObjectFlags objectFlags);
diff --git a/js/src/vm/SharedArrayObject.h b/js/src/vm/SharedArrayObject.h
index 572fe2e6fb..525ee78451 100644
--- a/js/src/vm/SharedArrayObject.h
+++ b/js/src/vm/SharedArrayObject.h
@@ -116,7 +116,9 @@ class SharedArrayRawBuffer {
// this method merely sets the number of user accessible bytes of this buffer.
bool grow(size_t newByteLength);
- static int32_t liveBuffers();
+ static size_t offsetOfByteLength() {
+ return offsetof(SharedArrayRawBuffer, length_);
+ }
};
class WasmSharedArrayRawBuffer : public SharedArrayRawBuffer {
@@ -364,6 +366,10 @@ class SharedArrayBufferObject : public ArrayBufferObjectMaybeShared {
return rawBufferObject()->dataPointerShared();
}
+ static constexpr int rawBufferOffset() {
+ return NativeObject::getFixedSlotOffset(RAWBUF_SLOT);
+ }
+
// WebAssembly support:
// Create a SharedArrayBufferObject using the provided buffer and size.
diff --git a/js/src/vm/SharedStencil.h b/js/src/vm/SharedStencil.h
index 58666919dc..a402895bd6 100644
--- a/js/src/vm/SharedStencil.h
+++ b/js/src/vm/SharedStencil.h
@@ -789,7 +789,11 @@ using SharedImmutableScriptDataTable =
SharedImmutableScriptData::Hasher, SystemAllocPolicy>;
struct MemberInitializers {
+#ifdef ENABLE_DECORATORS
+ static constexpr size_t NumBits = 30;
+#else
static constexpr size_t NumBits = 31;
+#endif
static constexpr uint32_t MaxInitializers = BitMask(NumBits);
#ifdef DEBUG
@@ -798,20 +802,37 @@ struct MemberInitializers {
bool hasPrivateBrand : 1;
+#ifdef ENABLE_DECORATORS
+ bool hasDecorators : 1;
+#endif
+
// This struct will eventually have a vector of constant values for optimizing
// field initializers.
uint32_t numMemberInitializers : NumBits;
- MemberInitializers(bool hasPrivateBrand, uint32_t numMemberInitializers)
+ MemberInitializers(bool hasPrivateBrand,
+#ifdef ENABLE_DECORATORS
+ bool hasDecorators,
+#endif
+ uint32_t numMemberInitializers)
:
#ifdef DEBUG
valid(true),
#endif
hasPrivateBrand(hasPrivateBrand),
+#ifdef ENABLE_DECORATORS
+ hasDecorators(hasDecorators),
+#endif
numMemberInitializers(numMemberInitializers) {
+#ifdef ENABLE_DECORATORS
+ MOZ_ASSERT(
+ this->numMemberInitializers == numMemberInitializers,
+ "numMemberInitializers should easily fit in the 30-bit bitfield");
+#else
MOZ_ASSERT(
this->numMemberInitializers == numMemberInitializers,
"numMemberInitializers should easily fit in the 31-bit bitfield");
+#endif
}
static MemberInitializers Invalid() { return MemberInitializers(); }
@@ -820,17 +841,33 @@ struct MemberInitializers {
// fields. This is used when we elide the trivial data but still need a valid
// set to stop scope walking.
static const MemberInitializers& Empty() {
- static const MemberInitializers zeroInitializers(false, 0);
+ static const MemberInitializers zeroInitializers(false,
+#ifdef ENABLE_DECORATORS
+ false,
+#endif
+ 0);
return zeroInitializers;
}
uint32_t serialize() const {
+#ifdef ENABLE_DECORATORS
+ auto serialised = (hasPrivateBrand << (NumBits + 1)) |
+ hasDecorators << NumBits | numMemberInitializers;
+ return serialised;
+#else
return (hasPrivateBrand << NumBits) | numMemberInitializers;
+#endif
}
static MemberInitializers deserialize(uint32_t bits) {
+#ifdef ENABLE_DECORATORS
+ return MemberInitializers((bits & Bit(NumBits + 1)) != 0,
+ (bits & Bit(NumBits)) != 0,
+ bits & BitMask(NumBits));
+#else
return MemberInitializers((bits & Bit(NumBits)) != 0,
bits & BitMask(NumBits));
+#endif
}
private:
@@ -840,6 +877,9 @@ struct MemberInitializers {
valid(false),
#endif
hasPrivateBrand(false),
+#ifdef ENABLE_DECORATORS
+ hasDecorators(false),
+#endif
numMemberInitializers(0) {
}
};
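A worked standalone example of the decorator-aware packing that serialize() and deserialize() implement above, assuming ENABLE_DECORATORS is defined (so NumBits is 30): bit 31 carries hasPrivateBrand, bit 30 carries hasDecorators, and the low 30 bits carry numMemberInitializers.

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kNumBits = 30;  // NumBits when ENABLE_DECORATORS is defined

    constexpr uint32_t Pack(bool hasPrivateBrand, bool hasDecorators,
                            uint32_t numMemberInitializers) {
      return (uint32_t(hasPrivateBrand) << (kNumBits + 1)) |
             (uint32_t(hasDecorators) << kNumBits) | numMemberInitializers;
    }

    int main() {
      uint32_t bits = Pack(/*hasPrivateBrand=*/true, /*hasDecorators=*/false, 5);
      assert(bits == 0x80000005u);
      assert((bits >> 31) == 1);                     // hasPrivateBrand
      assert(((bits >> kNumBits) & 1) == 0);         // hasDecorators
      assert((bits & ((1u << kNumBits) - 1)) == 5);  // numMemberInitializers
      return 0;
    }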
diff --git a/js/src/vm/Stack.cpp b/js/src/vm/Stack.cpp
index ffbe88147d..d222ddc51c 100644
--- a/js/src/vm/Stack.cpp
+++ b/js/src/vm/Stack.cpp
@@ -642,7 +642,20 @@ JS::ProfilingFrameIterator::getPhysicalFrameAndEntry(
if (isWasm()) {
Frame frame;
- frame.kind = Frame_Wasm;
+ switch (wasmIter().category()) {
+ case wasm::ProfilingFrameIterator::Baseline: {
+ frame.kind = FrameKind::Frame_WasmBaseline;
+ break;
+ }
+ case wasm::ProfilingFrameIterator::Ion: {
+ frame.kind = FrameKind::Frame_WasmIon;
+ break;
+ }
+ default: {
+ frame.kind = FrameKind::Frame_WasmOther;
+ break;
+ }
+ }
frame.stackAddress = stackAddr;
frame.returnAddress_ = nullptr;
frame.activation = activation_;
diff --git a/js/src/vm/StringType-inl.h b/js/src/vm/StringType-inl.h
index b0424b868c..8954a46aac 100644
--- a/js/src/vm/StringType-inl.h
+++ b/js/src/vm/StringType-inl.h
@@ -321,6 +321,10 @@ inline JSRope::JSRope(JSString* left, JSString* right, size_t length) {
// |length| must be the sum of the length of both child nodes.
MOZ_ASSERT(left->length() + right->length() == length);
+ // |isLatin1| is set when both children are guaranteed to contain only Latin-1
+ // characters. Note that flattening either rope child can clear the Latin-1
+ // flag of that child, so it's possible that a Latin-1 rope can end up with
+ // both children being two-byte (dependent) strings.
bool isLatin1 = left->hasLatin1Chars() && right->hasLatin1Chars();
// Do not try to make a rope that could fit inline.
diff --git a/js/src/vm/StringType.cpp b/js/src/vm/StringType.cpp
index 03f6a7e1ac..63afd8864b 100644
--- a/js/src/vm/StringType.cpp
+++ b/js/src/vm/StringType.cpp
@@ -2224,10 +2224,12 @@ void JSInlineString::dumpOwnRepresentationFields(js::JSONPrinter& json) const {}
void JSLinearString::dumpOwnRepresentationFields(js::JSONPrinter& json) const {
if (!isInline()) {
- js::gc::StoreBuffer* sb = storeBuffer();
- bool inNursery = sb && sb->nursery().isInside(nonInlineCharsRaw());
-
- json.boolProperty("inNursery", inNursery);
+ // Report whether the chars are in the nursery even for tenured strings,
+ // where this should always be false. When investigating bugs, it's better
+ // not to assume that.
+ js::Nursery& nursery = runtimeFromMainThread()->gc.nursery();
+ bool inNursery = nursery.isInside(nonInlineCharsRaw());
+ json.boolProperty("charsInNursery", inNursery);
}
}
#endif
diff --git a/js/src/vm/StringType.h b/js/src/vm/StringType.h
index ea2174be42..f2850c33a4 100644
--- a/js/src/vm/StringType.h
+++ b/js/src/vm/StringType.h
@@ -297,7 +297,10 @@ class JSString : public js::gc::CellWithLengthAndFlags {
* If LATIN1_CHARS_BIT is set, the string's characters are stored as Latin1
* instead of TwoByte. This flag can also be set for ropes, if both the
* left and right nodes are Latin1. Flattening will result in a Latin1
- * string in this case.
+ * string in this case. When we flatten a TwoByte rope, we turn child ropes
+ * (including Latin1 ropes) into TwoByte dependent strings. If one of these
+ * strings is also part of another Latin1 rope tree, we can have a Latin1 rope
+ * with a TwoByte descendent.
*
* The other flags store the string's type. Instead of using a dense index
* to represent the most-derived type, string types are encoded to allow
@@ -385,6 +388,15 @@ class JSString : public js::gc::CellWithLengthAndFlags {
static_assert((TYPE_FLAGS_MASK & js::gc::HeaderWord::RESERVED_MASK) == 0,
"GC reserved bits must not be used for Strings");
+ // Linear strings:
+ // - Content and representation are Latin-1 characters.
+ // - Unmodifiable after construction.
+ //
+ // Ropes:
+ //   - The content consists of Latin-1 characters.
+ //   - The flag may be cleared when the rope is changed into a dependent string.
+ //
+ // Also see LATIN1_CHARS_BIT description under "Flag Encoding".
static const uint32_t LATIN1_CHARS_BIT = js::Bit(9);
// Whether this atom's characters store an uint32 index value less than or
diff --git a/js/src/vm/StructuredClone.cpp b/js/src/vm/StructuredClone.cpp
index 8f1e131021..e2e67a2ee3 100644
--- a/js/src/vm/StructuredClone.cpp
+++ b/js/src/vm/StructuredClone.cpp
@@ -2568,7 +2568,7 @@ BigInt* JSStructuredCloneReader::readBigInt(uint32_t data) {
if (!in.readArray(result->digits().data(), length)) {
return nullptr;
}
- return result;
+ return JS::BigInt::destructivelyTrimHighZeroDigits(context(), result);
}
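The readBigInt change above trims the digits it just deserialized, presumably to keep the digit vector in canonical form. A hedged illustration of what trimming high zero digits means (a standalone sketch, not the BigInt implementation):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // In this sketch, digits are little-endian (least significant limb first);
    // a canonical value has no trailing zero limbs, but serialized input may.
    static void TrimHighZeroDigits(std::vector<uint64_t>& digits) {
      while (!digits.empty() && digits.back() == 0) {
        digits.pop_back();
      }
    }

    int main() {
      std::vector<uint64_t> digits = {42, 0, 0};  // 42 with two redundant high limbs
      TrimHighZeroDigits(digits);
      std::printf("%zu digit(s) remain\n", digits.size());  // 1
      return 0;
    }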
static uint32_t TagToV1ArrayType(uint32_t tag) {
diff --git a/js/src/vm/TypedArrayObject.cpp b/js/src/vm/TypedArrayObject.cpp
index 0264b481b3..35a2237cd5 100644
--- a/js/src/vm/TypedArrayObject.cpp
+++ b/js/src/vm/TypedArrayObject.cpp
@@ -382,6 +382,7 @@ class TypedArrayObjectTemplate {
using FixedLengthTypedArray = FixedLengthTypedArrayObjectTemplate<NativeType>;
using ResizableTypedArray = ResizableTypedArrayObjectTemplate<NativeType>;
+ using AutoLength = ArrayBufferViewObject::AutoLength;
static constexpr auto ByteLengthLimit = TypedArrayObject::ByteLengthLimit;
static constexpr auto INLINE_BUFFER_LIMIT =
@@ -574,7 +575,7 @@ class TypedArrayObjectTemplate {
static bool computeAndCheckLength(
JSContext* cx, Handle<ArrayBufferObjectMaybeShared*> bufferMaybeUnwrapped,
uint64_t byteOffset, uint64_t lengthIndex, size_t* length,
- bool* autoLength) {
+ AutoLength* autoLength) {
MOZ_ASSERT(byteOffset % BYTES_PER_ELEMENT == 0);
MOZ_ASSERT(byteOffset < uint64_t(DOUBLE_INTEGRAL_PRECISION_LIMIT));
MOZ_ASSERT_IF(lengthIndex != UINT64_MAX,
@@ -605,7 +606,7 @@ class TypedArrayObjectTemplate {
// Resizable buffers without an explicit length are auto-length.
if (bufferMaybeUnwrapped->isResizable()) {
*length = 0;
- *autoLength = true;
+ *autoLength = AutoLength::Yes;
return true;
}
@@ -642,7 +643,7 @@ class TypedArrayObjectTemplate {
MOZ_ASSERT(len <= ByteLengthLimit / BYTES_PER_ELEMENT);
*length = len;
- *autoLength = false;
+ *autoLength = AutoLength::No;
return true;
}
@@ -654,7 +655,7 @@ class TypedArrayObjectTemplate {
uint64_t byteOffset, uint64_t lengthIndex, HandleObject proto) {
// Steps 5-8.
size_t length = 0;
- bool autoLength = false;
+ auto autoLength = AutoLength::No;
if (!computeAndCheckLength(cx, buffer, byteOffset, lengthIndex, &length,
&autoLength)) {
return nullptr;
@@ -703,7 +704,7 @@ class TypedArrayObjectTemplate {
unwrappedBuffer = &unwrapped->as<ArrayBufferObjectMaybeShared>();
size_t length = 0;
- bool autoLength = false;
+ auto autoLength = AutoLength::No;
if (!computeAndCheckLength(cx, unwrappedBuffer, byteOffset, lengthIndex,
&length, &autoLength)) {
return nullptr;
@@ -1025,13 +1026,13 @@ class ResizableTypedArrayObjectTemplate
}
static ResizableTypedArrayObject* newBuiltinClassInstance(
- JSContext* cx, gc::AllocKind allocKind) {
+ JSContext* cx, gc::AllocKind allocKind, gc::Heap heap) {
RootedObject proto(cx, GlobalObject::getOrCreatePrototype(cx, protoKey()));
if (!proto) {
return nullptr;
}
return NewTypedArrayObject<ResizableTypedArrayObject>(
- cx, instanceClass(), proto, allocKind, gc::Heap::Default);
+ cx, instanceClass(), proto, allocKind, heap);
}
static ResizableTypedArrayObject* makeProtoInstance(JSContext* cx,
@@ -1044,11 +1045,12 @@ class ResizableTypedArrayObjectTemplate
static ResizableTypedArrayObject* makeInstance(
JSContext* cx, Handle<ArrayBufferObjectMaybeShared*> buffer,
- size_t byteOffset, size_t len, bool autoLength, HandleObject proto) {
+ size_t byteOffset, size_t len, AutoLength autoLength,
+ HandleObject proto) {
MOZ_ASSERT(buffer);
MOZ_ASSERT(buffer->isResizable());
MOZ_ASSERT(!buffer->isDetached());
- MOZ_ASSERT(!autoLength || len == 0,
+ MOZ_ASSERT(autoLength == AutoLength::No || len == 0,
"length is zero for 'auto' length views");
MOZ_ASSERT(len <= ByteLengthLimit / BYTES_PER_ELEMENT);
@@ -1059,16 +1061,43 @@ class ResizableTypedArrayObjectTemplate
if (proto) {
obj = makeProtoInstance(cx, proto, allocKind);
} else {
- obj = newBuiltinClassInstance(cx, allocKind);
+ obj = newBuiltinClassInstance(cx, allocKind, gc::Heap::Default);
}
- if (!obj || !obj->init(cx, buffer, byteOffset, len, BYTES_PER_ELEMENT)) {
+ if (!obj || !obj->initResizable(cx, buffer, byteOffset, len,
+ BYTES_PER_ELEMENT, autoLength)) {
return nullptr;
}
- obj->setFixedSlot(AUTO_LENGTH_SLOT, BooleanValue(autoLength));
-
return obj;
}
+
+ static ResizableTypedArrayObject* makeTemplateObject(JSContext* cx) {
+ gc::AllocKind allocKind = gc::GetGCObjectKind(instanceClass());
+
+ AutoSetNewObjectMetadata metadata(cx);
+
+ auto* tarray = newBuiltinClassInstance(cx, allocKind, gc::Heap::Tenured);
+ if (!tarray) {
+ return nullptr;
+ }
+
+ tarray->initFixedSlot(TypedArrayObject::BUFFER_SLOT, JS::FalseValue());
+ tarray->initFixedSlot(TypedArrayObject::LENGTH_SLOT,
+ PrivateValue(size_t(0)));
+ tarray->initFixedSlot(TypedArrayObject::BYTEOFFSET_SLOT,
+ PrivateValue(size_t(0)));
+ tarray->initFixedSlot(AUTO_LENGTH_SLOT, BooleanValue(false));
+ tarray->initFixedSlot(ResizableTypedArrayObject::INITIAL_LENGTH_SLOT,
+ PrivateValue(size_t(0)));
+ tarray->initFixedSlot(ResizableTypedArrayObject::INITIAL_BYTE_OFFSET_SLOT,
+ PrivateValue(size_t(0)));
+
+ // Template objects don't need memory for their elements, since there
+ // won't be any elements to store.
+ MOZ_ASSERT(tarray->getReservedSlot(DATA_SLOT).isUndefined());
+
+ return tarray;
+ }
};
template <typename NativeType>
@@ -1499,18 +1528,29 @@ static bool GetTemplateObjectForNative(JSContext* cx,
return !!res;
}
+ if (!arg.isObject()) {
+ return true;
+ }
+ auto* obj = &arg.toObject();
+
// We don't support wrappers, because of the complicated interaction between
// wrapped ArrayBuffers and TypedArrays, see |fromBufferWrapped()|.
- if (arg.isObject() && !IsWrapper(&arg.toObject())) {
- // We don't use the template's length in the object case, so we can create
- // the template typed array with an initial length of zero.
- uint32_t len = 0;
+ if (IsWrapper(obj)) {
+ return true;
+ }
+
+ // We don't use the template's length in the object case, so we can create
+ // the template typed array with an initial length of zero.
+ uint32_t len = 0;
+
+ if (!obj->is<ArrayBufferObjectMaybeShared>() ||
+ !obj->as<ArrayBufferObjectMaybeShared>().isResizable()) {
res.set(
FixedLengthTypedArrayObjectTemplate<T>::makeTemplateObject(cx, len));
- return !!res;
+ } else {
+ res.set(ResizableTypedArrayObjectTemplate<T>::makeTemplateObject(cx));
}
-
- return true;
+ return !!res;
}
/* static */ bool TypedArrayObject::GetTemplateObjectForNative(
@@ -2199,131 +2239,6 @@ bool TypedArrayObjectTemplate<uint64_t>::getElement(JSContext* cx,
}
} /* anonymous namespace */
-/**
- * IsIntegerIndexedObjectOutOfBounds ( iieoRecord )
- *
- * IsIntegerIndexedObjectOutOfBounds can be rewritten into the following spec
- * steps when inlining the call to
- * MakeIntegerIndexedObjectWithBufferWitnessRecord.
- *
- * 1. Let buffer be O.[[ViewedArrayBuffer]].
- * 2. If IsDetachedBuffer(buffer) is true, then
- * a. Return true.
- * 3. If IsFixedLengthArrayBuffer(buffer) is true, then
- * a. Return false.
- * 4. Let bufferByteLength be ArrayBufferByteLength(buffer, order).
- * 5. Let byteOffsetStart be O.[[ByteOffset]].
- * 6. If byteOffsetStart > bufferByteLength, then
- * a. Return true.
- * 7. If O.[[ArrayLength]] is auto, then
- * a. Return false.
- * 8. Let elementSize be TypedArrayElementSize(O).
- * 9. Let byteOffsetEnd be byteOffsetStart + O.[[ArrayLength]] × elementSize.
- * 10. If byteOffsetEnd > bufferByteLength, then
- * a. Return true.
- * 11. Return false.
- *
- * The additional call to IsFixedLengthArrayBuffer is an optimization to skip
- * unnecessary validation which don't apply for fixed length typed arrays.
- *
- * https://tc39.es/ecma262/#sec-isintegerindexedobjectoutofbounds
- * https://tc39.es/ecma262/#sec-makeintegerindexedobjectwithbufferwitnessrecord
- */
-mozilla::Maybe<size_t> TypedArrayObject::byteOffset() const {
- if (MOZ_UNLIKELY(hasDetachedBuffer())) {
- return mozilla::Nothing{};
- }
-
- size_t byteOffsetStart = ArrayBufferViewObject::byteOffset();
-
- if (MOZ_LIKELY(is<FixedLengthTypedArrayObject>())) {
- return mozilla::Some(byteOffsetStart);
- }
-
- auto* buffer = bufferEither();
- MOZ_ASSERT(buffer->isResizable());
-
- size_t bufferByteLength = buffer->byteLength();
- if (byteOffsetStart > bufferByteLength) {
- return mozilla::Nothing{};
- }
-
- if (as<ResizableTypedArrayObject>().isAutoLength()) {
- return mozilla::Some(byteOffsetStart);
- }
-
- size_t viewByteLength = rawByteLength();
- size_t byteOffsetEnd = byteOffsetStart + viewByteLength;
- if (byteOffsetEnd > bufferByteLength) {
- return mozilla::Nothing{};
- }
- return mozilla::Some(byteOffsetStart);
-}
-
-/**
- * IntegerIndexedObjectLength ( iieoRecord )
- *
- * IntegerIndexedObjectLength can be rewritten into the following spec
- * steps when inlining the calls to IsIntegerIndexedObjectOutOfBounds and
- * MakeIntegerIndexedObjectWithBufferWitnessRecord.
- *
- * 1. Let buffer be O.[[ViewedArrayBuffer]].
- * 2. If IsDetachedBuffer(buffer) is true, then
- * a. Return out-of-bounds.
- * 3. If IsFixedLengthArrayBuffer(buffer) is true, then
- * a. Return O.[[ArrayLength]].
- * 4. Let bufferByteLength be ArrayBufferByteLength(buffer, order).
- * 5. Let byteOffsetStart be O.[[ByteOffset]].
- * 6. If byteOffsetStart > bufferByteLength, then
- * a. Return out-of-bounds.
- * 7. If O.[[ArrayLength]] is auto, then
- * a. Let elementSize be TypedArrayElementSize(O).
- * b. Return floor((bufferByteLength - byteOffsetStart) / elementSize).
- * 8. Let elementSize be TypedArrayElementSize(O).
- * 9. Let byteOffsetEnd be byteOffsetStart + O.[[ArrayLength]] × elementSize.
- * 10. If byteOffsetEnd > bufferByteLength, then
- * a. Return out-of-bounds.
- * 11. Return O.[[ArrayLength]].
- *
- * The additional call to IsFixedLengthArrayBuffer is an optimization to skip
- * unnecessary validation which don't apply for fixed length typed arrays.
- *
- * https://tc39.es/ecma262/#sec-integerindexedobjectlength
- * https://tc39.es/ecma262/#sec-isintegerindexedobjectoutofbounds
- * https://tc39.es/ecma262/#sec-makeintegerindexedobjectwithbufferwitnessrecord
- */
-mozilla::Maybe<size_t> TypedArrayObject::length() const {
- if (MOZ_UNLIKELY(hasDetachedBuffer())) {
- return mozilla::Nothing{};
- }
-
- if (MOZ_LIKELY(is<FixedLengthTypedArrayObject>())) {
- size_t arrayLength = rawLength();
- return mozilla::Some(arrayLength);
- }
-
- auto* buffer = bufferEither();
- MOZ_ASSERT(buffer->isResizable());
-
- size_t bufferByteLength = buffer->byteLength();
- size_t byteOffsetStart = ArrayBufferViewObject::byteOffset();
- if (byteOffsetStart > bufferByteLength) {
- return mozilla::Nothing{};
- }
-
- if (as<ResizableTypedArrayObject>().isAutoLength()) {
- size_t bytes = bufferByteLength - byteOffsetStart;
- return mozilla::Some(bytes / bytesPerElement());
- }
-
- size_t arrayLength = rawLength();
- size_t byteOffsetEnd = byteOffsetStart + arrayLength * bytesPerElement();
- if (byteOffsetEnd > bufferByteLength) {
- return mozilla::Nothing{};
- }
- return mozilla::Some(arrayLength);
-}
-
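The spec-step comments in the two functions removed above describe the bounds computation that now lives behind ArrayBufferViewObject::length(). A standalone sketch of those steps for a view over a resizable buffer (illustrative only; the detached-buffer check is omitted):

    #include <cstddef>
    #include <cstdio>
    #include <optional>

    // Mirrors the quoted spec steps: return the view's current element count,
    // or nothing when the view no longer fits inside the (possibly shrunk) buffer.
    std::optional<size_t> ViewLength(size_t bufferByteLength, size_t byteOffset,
                                     size_t arrayLength, size_t elementSize,
                                     bool autoLength) {
      if (byteOffset > bufferByteLength) {
        return std::nullopt;  // the view starts past the end of the buffer
      }
      if (autoLength) {
        return (bufferByteLength - byteOffset) / elementSize;  // track the buffer
      }
      size_t byteOffsetEnd = byteOffset + arrayLength * elementSize;
      if (byteOffsetEnd > bufferByteLength) {
        return std::nullopt;  // a fixed-length view that no longer fits
      }
      return arrayLength;
    }

    int main() {
      // An auto-length Int32 view at byte offset 8 over a 64-byte buffer
      // currently sees (64 - 8) / 4 == 14 elements.
      std::printf("%zu\n", *ViewLength(64, 8, 0, 4, /*autoLength=*/true));
      return 0;
    }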
namespace js {
template <>
diff --git a/js/src/vm/TypedArrayObject.h b/js/src/vm/TypedArrayObject.h
index b6b1f00e72..46531ec4ee 100644
--- a/js/src/vm/TypedArrayObject.h
+++ b/js/src/vm/TypedArrayObject.h
@@ -69,27 +69,30 @@ class TypedArrayObject : public ArrayBufferViewObject {
static bool ensureHasBuffer(JSContext* cx,
Handle<TypedArrayObject*> typedArray);
- protected:
- size_t rawByteLength() const { return rawLength() * bytesPerElement(); }
-
- size_t rawLength() const {
- return size_t(getFixedSlot(LENGTH_SLOT).toPrivate());
- }
-
public:
- mozilla::Maybe<size_t> byteOffset() const;
+ /**
+ * Return the current length, or |Nothing| if the TypedArray is detached or
+ * out-of-bounds.
+ */
+ mozilla::Maybe<size_t> length() const {
+ return ArrayBufferViewObject::length();
+ }
+ /**
+ * Return the current byteLength, or |Nothing| if the TypedArray is detached
+ * or out-of-bounds.
+ */
mozilla::Maybe<size_t> byteLength() const {
return length().map(
[this](size_t value) { return value * bytesPerElement(); });
}
- mozilla::Maybe<size_t> length() const;
-
// Self-hosted TypedArraySubarray function needs to read [[ByteOffset]], even
// when it's currently out-of-bounds.
size_t byteOffsetMaybeOutOfBounds() const {
- return ArrayBufferViewObject::byteOffset();
+ // dataPointerOffset() returns the [[ByteOffset]] spec value, except when
+ // the buffer is detached. (bug 1840991)
+ return ArrayBufferViewObject::dataPointerOffset();
}
template <AllowGC allowGC>
@@ -148,11 +151,13 @@ class FixedLengthTypedArrayObject : public TypedArrayObject {
static inline gc::AllocKind AllocKindForLazyBuffer(size_t nbytes);
- size_t byteOffset() const { return ArrayBufferViewObject::byteOffset(); }
+ size_t byteOffset() const {
+ return ArrayBufferViewObject::byteOffsetSlotValue();
+ }
- size_t byteLength() const { return rawByteLength(); }
+ size_t byteLength() const { return length() * bytesPerElement(); }
- size_t length() const { return rawLength(); }
+ size_t length() const { return ArrayBufferViewObject::lengthSlotValue(); }
bool hasInlineElements() const;
void setInlineElements();
@@ -176,13 +181,7 @@ class FixedLengthTypedArrayObject : public TypedArrayObject {
class ResizableTypedArrayObject : public TypedArrayObject {
public:
- static const uint8_t AUTO_LENGTH_SLOT = TypedArrayObject::RESERVED_SLOTS;
-
- static const uint8_t RESERVED_SLOTS = TypedArrayObject::RESERVED_SLOTS + 1;
-
- bool isAutoLength() const {
- return getFixedSlot(AUTO_LENGTH_SLOT).toBoolean();
- }
+ static const uint8_t RESERVED_SLOTS = RESIZABLE_RESERVED_SLOTS;
};
extern TypedArrayObject* NewTypedArrayWithTemplateAndLength(
diff --git a/js/src/vm/UbiNodeCensus.cpp b/js/src/vm/UbiNodeCensus.cpp
index ba3ccd0898..7e33341d30 100644
--- a/js/src/vm/UbiNodeCensus.cpp
+++ b/js/src/vm/UbiNodeCensus.cpp
@@ -6,6 +6,8 @@
#include "js/UbiNodeCensus.h"
+#include "mozilla/ScopeExit.h"
+
#include "builtin/MapObject.h"
#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
#include "js/Printer.h"
@@ -1062,17 +1064,19 @@ JS_PUBLIC_API bool CensusHandler::operator()(
/*** Parsing Breakdowns *****************************************************/
-static CountTypePtr ParseChildBreakdown(JSContext* cx, HandleObject breakdown,
- PropertyName* prop) {
+static CountTypePtr ParseChildBreakdown(
+ JSContext* cx, HandleObject breakdown, PropertyName* prop,
+ MutableHandle<GCVector<JSLinearString*>> seen) {
RootedValue v(cx);
if (!GetProperty(cx, breakdown, breakdown, prop, &v)) {
return nullptr;
}
- return ParseBreakdown(cx, v);
+ return ParseBreakdown(cx, v, seen);
}
-JS_PUBLIC_API CountTypePtr ParseBreakdown(JSContext* cx,
- HandleValue breakdownValue) {
+JS_PUBLIC_API CountTypePtr
+ParseBreakdown(JSContext* cx, HandleValue breakdownValue,
+ MutableHandle<GCVector<JSLinearString*>> seen) {
if (breakdownValue.isUndefined()) {
// Construct the default type, { by: 'count' }
CountTypePtr simple(cx->new_<SimpleCount>());
@@ -1097,6 +1101,24 @@ JS_PUBLIC_API CountTypePtr ParseBreakdown(JSContext* cx,
return nullptr;
}
+ for (auto candidate : seen.get()) {
+ if (EqualStrings(by, candidate)) {
+ UniqueChars byBytes = QuoteString(cx, by, '"');
+ if (!byBytes) {
+ return nullptr;
+ }
+
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_DEBUG_CENSUS_BREAKDOWN_NESTED,
+ byBytes.get());
+ return nullptr;
+ }
+ }
+ if (!seen.append(by)) {
+ return nullptr;
+ }
+ auto popper = mozilla::MakeScopeExit([&]() { seen.popBack(); });
+
if (StringEqualsLiteral(by, "count")) {
RootedValue countValue(cx), bytesValue(cx);
if (!GetProperty(cx, breakdown, breakdown, cx->names().count,
@@ -1140,13 +1162,14 @@ JS_PUBLIC_API CountTypePtr ParseBreakdown(JSContext* cx,
}
if (StringEqualsLiteral(by, "objectClass")) {
- CountTypePtr thenType(ParseChildBreakdown(cx, breakdown, cx->names().then));
+ CountTypePtr thenType(
+ ParseChildBreakdown(cx, breakdown, cx->names().then, seen));
if (!thenType) {
return nullptr;
}
CountTypePtr otherType(
- ParseChildBreakdown(cx, breakdown, cx->names().other));
+ ParseChildBreakdown(cx, breakdown, cx->names().other, seen));
if (!otherType) {
return nullptr;
}
@@ -1156,27 +1179,27 @@ JS_PUBLIC_API CountTypePtr ParseBreakdown(JSContext* cx,
if (StringEqualsLiteral(by, "coarseType")) {
CountTypePtr objectsType(
- ParseChildBreakdown(cx, breakdown, cx->names().objects));
+ ParseChildBreakdown(cx, breakdown, cx->names().objects, seen));
if (!objectsType) {
return nullptr;
}
CountTypePtr scriptsType(
- ParseChildBreakdown(cx, breakdown, cx->names().scripts));
+ ParseChildBreakdown(cx, breakdown, cx->names().scripts, seen));
if (!scriptsType) {
return nullptr;
}
CountTypePtr stringsType(
- ParseChildBreakdown(cx, breakdown, cx->names().strings));
+ ParseChildBreakdown(cx, breakdown, cx->names().strings, seen));
if (!stringsType) {
return nullptr;
}
CountTypePtr otherType(
- ParseChildBreakdown(cx, breakdown, cx->names().other));
+ ParseChildBreakdown(cx, breakdown, cx->names().other, seen));
if (!otherType) {
return nullptr;
}
CountTypePtr domNodeType(
- ParseChildBreakdown(cx, breakdown, cx->names().domNode));
+ ParseChildBreakdown(cx, breakdown, cx->names().domNode, seen));
if (!domNodeType) {
return nullptr;
}
@@ -1186,7 +1209,8 @@ JS_PUBLIC_API CountTypePtr ParseBreakdown(JSContext* cx,
}
if (StringEqualsLiteral(by, "internalType")) {
- CountTypePtr thenType(ParseChildBreakdown(cx, breakdown, cx->names().then));
+ CountTypePtr thenType(
+ ParseChildBreakdown(cx, breakdown, cx->names().then, seen));
if (!thenType) {
return nullptr;
}
@@ -1195,7 +1219,8 @@ JS_PUBLIC_API CountTypePtr ParseBreakdown(JSContext* cx,
}
if (StringEqualsLiteral(by, "descriptiveType")) {
- CountTypePtr thenType(ParseChildBreakdown(cx, breakdown, cx->names().then));
+ CountTypePtr thenType(
+ ParseChildBreakdown(cx, breakdown, cx->names().then, seen));
if (!thenType) {
return nullptr;
}
@@ -1203,12 +1228,13 @@ JS_PUBLIC_API CountTypePtr ParseBreakdown(JSContext* cx,
}
if (StringEqualsLiteral(by, "allocationStack")) {
- CountTypePtr thenType(ParseChildBreakdown(cx, breakdown, cx->names().then));
+ CountTypePtr thenType(
+ ParseChildBreakdown(cx, breakdown, cx->names().then, seen));
if (!thenType) {
return nullptr;
}
CountTypePtr noStackType(
- ParseChildBreakdown(cx, breakdown, cx->names().noStack));
+ ParseChildBreakdown(cx, breakdown, cx->names().noStack, seen));
if (!noStackType) {
return nullptr;
}
@@ -1217,13 +1243,14 @@ JS_PUBLIC_API CountTypePtr ParseBreakdown(JSContext* cx,
}
if (StringEqualsLiteral(by, "filename")) {
- CountTypePtr thenType(ParseChildBreakdown(cx, breakdown, cx->names().then));
+ CountTypePtr thenType(
+ ParseChildBreakdown(cx, breakdown, cx->names().then, seen));
if (!thenType) {
return nullptr;
}
CountTypePtr noFilenameType(
- ParseChildBreakdown(cx, breakdown, cx->names().noFilename));
+ ParseChildBreakdown(cx, breakdown, cx->names().noFilename, seen));
if (!noFilenameType) {
return nullptr;
}
@@ -1307,8 +1334,9 @@ JS_PUBLIC_API bool ParseCensusOptions(JSContext* cx, Census& census,
return false;
}
+ Rooted<GCVector<JSLinearString*>> seen(cx, cx);
outResult = breakdown.isUndefined() ? GetDefaultBreakdown(cx)
- : ParseBreakdown(cx, breakdown);
+ : ParseBreakdown(cx, breakdown, &seen);
return !!outResult;
}
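The new `seen` vector threaded through ParseBreakdown above is a path check: each `by` kind is recorded before its child breakdowns are parsed and popped again on the way out, so a breakdown nested inside a breakdown of the same kind is rejected. A standalone sketch of that pattern (illustrative only, with std::string standing in for the rooted linear strings):

    #include <algorithm>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Returns false if `by` already appears on the current breakdown path.
    static bool EnterKind(const std::string& by, std::vector<std::string>& seen) {
      if (std::find(seen.begin(), seen.end(), by) != seen.end()) {
        std::printf("rejected: '%s' is nested inside itself\n", by.c_str());
        return false;
      }
      seen.push_back(by);  // popped by the caller once the children are parsed
      return true;
    }

    int main() {
      std::vector<std::string> seen;
      EnterKind("objectClass", seen);  // ok: empty path
      EnterKind("coarseType", seen);   // ok: different kind
      EnterKind("objectClass", seen);  // rejected: already on the path
      seen.pop_back();                 // leaving "coarseType"
      seen.pop_back();                 // leaving "objectClass"
      return 0;
    }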
diff --git a/js/src/vm/Value.cpp b/js/src/vm/Value.cpp
index 8fcad7ee83..0da89a41c2 100644
--- a/js/src/vm/Value.cpp
+++ b/js/src/vm/Value.cpp
@@ -10,6 +10,7 @@
#include <inttypes.h>
+#include "gc/Cell.h" // js::gc::Cell
#include "js/Conversions.h" // JS::NumberToString, JS::MaximumNumberToStringLength
#include "js/Printer.h" // js::GenericPrinter, js::Fprinter
#include "vm/BigIntType.h" // JS::BigInt
@@ -41,6 +42,12 @@ const HandleValue FalseHandleValue =
const Handle<mozilla::Maybe<Value>> NothingHandleValue =
Handle<mozilla::Maybe<Value>>::fromMarkedLocation(&JSVAL_NOTHING);
+#ifdef DEBUG
+void JS::Value::assertTraceKindMatches(js::gc::Cell* cell) const {
+ MOZ_ASSERT(traceKind() == cell->getTraceKind());
+}
+#endif
+
} // namespace JS
void js::ReportBadValueTypeAndCrash(const JS::Value& value) {
diff --git a/js/src/vm/Watchtower.cpp b/js/src/vm/Watchtower.cpp
index 80023d7e81..86c748285d 100644
--- a/js/src/vm/Watchtower.cpp
+++ b/js/src/vm/Watchtower.cpp
@@ -102,23 +102,24 @@ static void InvalidateMegamorphicCache(JSContext* cx,
}
void MaybePopReturnFuses(JSContext* cx, Handle<NativeObject*> nobj) {
- JSObject* objectProto = &cx->global()->getObjectPrototype();
+ GlobalObject* global = &nobj->global();
+ JSObject* objectProto = &global->getObjectPrototype();
if (nobj == objectProto) {
nobj->realm()->realmFuses.objectPrototypeHasNoReturnProperty.popFuse(
cx, nobj->realm()->realmFuses);
return;
}
- JSObject* iteratorProto = cx->global()->maybeGetIteratorPrototype();
+ JSObject* iteratorProto = global->maybeGetIteratorPrototype();
if (nobj == iteratorProto) {
nobj->realm()->realmFuses.iteratorPrototypeHasNoReturnProperty.popFuse(
cx, nobj->realm()->realmFuses);
return;
}
- JSObject* arrayIterProto = cx->global()->maybeGetArrayIteratorPrototype();
+ JSObject* arrayIterProto = global->maybeGetArrayIteratorPrototype();
if (nobj == arrayIterProto) {
- cx->realm()->realmFuses.arrayIteratorPrototypeHasNoReturnProperty.popFuse(
+ nobj->realm()->realmFuses.arrayIteratorPrototypeHasNoReturnProperty.popFuse(
cx, nobj->realm()->realmFuses);
return;
}
@@ -208,12 +209,12 @@ static bool WatchProtoChangeImpl(JSContext* cx, HandleObject obj) {
InvalidateMegamorphicCache(cx, obj.as<NativeObject>());
NativeObject* nobj = &obj->as<NativeObject>();
- if (nobj == cx->global()->maybeGetArrayIteratorPrototype()) {
+ if (nobj == nobj->global().maybeGetArrayIteratorPrototype()) {
nobj->realm()->realmFuses.arrayIteratorPrototypeHasIteratorProto.popFuse(
cx, nobj->realm()->realmFuses);
}
- if (nobj == cx->global()->maybeGetIteratorPrototype()) {
+ if (nobj == nobj->global().maybeGetIteratorPrototype()) {
nobj->realm()->realmFuses.iteratorPrototypeHasObjectProto.popFuse(
cx, nobj->realm()->realmFuses);
}
diff --git a/js/src/wasm/GenerateBuiltinModules.py b/js/src/wasm/GenerateBuiltinModules.py
index 17270bc46e..0bd17d8821 100644
--- a/js/src/wasm/GenerateBuiltinModules.py
+++ b/js/src/wasm/GenerateBuiltinModules.py
@@ -47,6 +47,58 @@ def cppBool(v):
return "false"
+def specTypeToMIRType(specType):
+ if specType == "i32" or specType == "i64" or specType == "f32" or specType == "f64":
+ return f"ValType::{specType}().toMIRType()"
+ if (
+ specType == "externref"
+ or specType == "anyref"
+ or specType == "funcref"
+ or isinstance(specType, dict)
+ ):
+ return "MIRType::WasmAnyRef"
+ raise ValueError()
+
+
+def specHeapTypeToTypeCode(specHeapType):
+ if specHeapType == "func":
+ return "Func"
+ if specHeapType == "any":
+ return "Any"
+ if specHeapType == "extern":
+ return "Extern"
+ if specHeapType == "array":
+ return "Array"
+ if specHeapType == "struct":
+ return "Struct"
+ raise ValueError()
+
+
+def specTypeToValType(specType):
+ if specType == "i32" or specType == "i64" or specType == "f32" or specType == "f64":
+ return f"ValType::{specType}()"
+
+ if specType == "externref":
+ return "ValType(RefType::extern_())"
+
+ if specType == "anyref":
+ return "ValType(RefType::any())"
+
+ if specType == "funcref":
+ return "ValType(RefType::func())"
+
+ if isinstance(specType, dict):
+ nullable = cppBool(specType["nullable"])
+ if "type" in specType:
+ ref = specType["type"]
+ return f"ValType(RefType::fromTypeDef({ref}, {nullable}))"
+ else:
+ code = specType["code"]
+ return f"ValType(RefType::fromTypeCode(TypeCode(RefType::{specHeapTypeToTypeCode(code)}), {nullable}))"
+
+ raise ValueError()
+
+
def main(c_out, yaml_path):
data = load_yaml(yaml_path)
@@ -64,34 +116,45 @@ def main(c_out, yaml_path):
for op in data:
# Define DECLARE_BUILTIN_MODULE_FUNC_PARAM_VALTYPES_<op> as:
# `{ValType::I32, ValType::I32, ...}`.
+ valTypes = ", ".join(specTypeToValType(p) for p in op["params"])
contents += (
f"#define DECLARE_BUILTIN_MODULE_FUNC_PARAM_VALTYPES_{op['op']} "
- f"{{{', '.join(op['params'])}}}\n"
+ f"{{{valTypes}}}\n"
)
- # Define DECLARE_BUILTIN_MODULE_FUNC_PARAM_SASTYPES_<op> as:
- # `<num_types>, {_PTR, _I32, ..., _PTR, _END}`.
+ # Define DECLARE_BUILTIN_MODULE_FUNC_PARAM_MIRTYPES_<op> as:
+ # `<num_types>, {MIRType::Pointer, MIRType::Int32, ..., MIRType::Pointer, MIRType::None}`.
num_types = len(op["params"]) + 1
- sas_types = (
- f"{{_PTR{''.join(', ' + (p + '.toMIRType()') for p in op['params'])}"
- )
+ mir_types = "{MIRType::Pointer"
+ mir_types += "".join(", " + specTypeToMIRType(p) for p in op["params"])
if op["uses_memory"]:
- sas_types += ", _PTR"
+ mir_types += ", MIRType::Pointer"
num_types += 1
- sas_types += ", _END}"
+ # Add the end marker
+ mir_types += ", MIRType::None}"
- contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_PARAM_SASTYPES_{op['op']} {num_types}, {sas_types}\n"
+ contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_PARAM_MIRTYPES_{op['op']} {num_types}, {mir_types}\n"
+ # Define DECLARE_BUILTIN_MODULE_FUNC_RESULT_VALTYPE_<op> as:
+ # `Some(X)` if present, or else `Nothing()`.
result_valtype = ""
- result_sastype = ""
if "result" in op:
- result_valtype = f"Some({op['result']})\n"
- result_sastype = f"{op['result']}.toMIRType()\n"
+ result_valtype = f"Some({specTypeToValType(op['result'])})\n"
else:
result_valtype = "Nothing()"
- result_sastype = "_VOID"
contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_RESULT_VALTYPE_{op['op']} {result_valtype}\n"
- contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_RESULT_SASTYPE_{op['op']} {result_sastype}\n"
- contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_FAILMODE_{op['op']} _{op['fail_mode']}\n"
+
+ # Define DECLARE_BUILTIN_MODULE_FUNC_RESULT_MIRTYPE_<op> as:
+ # `X` if present, or else `MIRType::None`.
+ result_mirtype = ""
+ if "result" in op:
+ result_mirtype = specTypeToMIRType(op["result"]) + "\n"
+ else:
+ result_mirtype = "MIRType::None"
+ contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_RESULT_MIRTYPE_{op['op']} {result_mirtype}\n"
+
+ # Define DECLARE_BUILTIN_MODULE_FUNC_FAILMODE_<op> as:
+ # `FailureMode::X`.
+ contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_FAILMODE_{op['op']} FailureMode::{op['fail_mode']}\n"
generate_header(c_out, "wasm_WasmBuiltinModuleGenerated_h", contents)
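For orientation, the rewritten generator would now emit per-op macro definitions along the following lines. This is a hand-written sketch of the output, not copied from the generated header; the op name EXAMPLE, its signature (one externref parameter, an i32 result, uses_memory set) and the FailOnNegI32 failure mode are all hypothetical.

    // Hypothetical generated output for an op named EXAMPLE:
    #define DECLARE_BUILTIN_MODULE_FUNC_PARAM_VALTYPES_EXAMPLE \
      {ValType(RefType::extern_())}
    #define DECLARE_BUILTIN_MODULE_FUNC_PARAM_MIRTYPES_EXAMPLE \
      3, {MIRType::Pointer, MIRType::WasmAnyRef, MIRType::Pointer, MIRType::None}
    #define DECLARE_BUILTIN_MODULE_FUNC_RESULT_VALTYPE_EXAMPLE Some(ValType::i32())
    #define DECLARE_BUILTIN_MODULE_FUNC_RESULT_MIRTYPE_EXAMPLE ValType::i32().toMIRType()
    #define DECLARE_BUILTIN_MODULE_FUNC_FAILMODE_EXAMPLE FailureMode::FailOnNegI32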
diff --git a/js/src/wasm/WasmBCClass.h b/js/src/wasm/WasmBCClass.h
index c216d0ffd5..844ae3381a 100644
--- a/js/src/wasm/WasmBCClass.h
+++ b/js/src/wasm/WasmBCClass.h
@@ -297,6 +297,10 @@ struct BaseCompiler final {
// Flag indicating that the compiler is currently in a dead code region.
bool deadCode_;
+ // Index of the most recently finished try note, used by finishTryNote to
+ // decide whether it needs to insert a nop.
+ size_t mostRecentFinishedTryNoteIndex_;
+
///////////////////////////////////////////////////////////////////////////
//
// State for bounds check elimination.
@@ -973,7 +977,7 @@ struct BaseCompiler final {
bool tailCall, CodeOffset* fastCallOffset,
CodeOffset* slowCallOffset);
CodeOffset callImport(unsigned instanceDataOffset, const FunctionCall& call);
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
void callRef(const Stk& calleeRef, const FunctionCall& call,
CodeOffset* fastCallOffset, CodeOffset* slowCallOffset);
# ifdef ENABLE_WASM_TAIL_CALLS
@@ -1641,7 +1645,7 @@ struct BaseCompiler final {
[[nodiscard]] bool emitRefFunc();
[[nodiscard]] bool emitRefNull();
[[nodiscard]] bool emitRefIsNull();
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
[[nodiscard]] bool emitRefAsNonNull();
[[nodiscard]] bool emitBrOnNull();
[[nodiscard]] bool emitBrOnNonNull();
diff --git a/js/src/wasm/WasmBCMemory.cpp b/js/src/wasm/WasmBCMemory.cpp
index f4e19d95e8..b6ef67f24d 100644
--- a/js/src/wasm/WasmBCMemory.cpp
+++ b/js/src/wasm/WasmBCMemory.cpp
@@ -1212,7 +1212,7 @@ static void PopAndAllocate(BaseCompiler* bc, ValType type,
Scalar::Type viewType, AtomicOp op, RegI32* rd,
RegI32* rv, Temps* temps) {
bc->needI32(bc->specific_.eax);
- if (op == AtomicFetchAddOp || op == AtomicFetchSubOp) {
+ if (op == AtomicOp::Add || op == AtomicOp::Sub) {
// We use xadd, so source and destination are the same. Using
// eax here is overconstraining, but for byte operations on x86
// we do need something with a byte register.
@@ -1246,7 +1246,7 @@ static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access, T srcAddr,
# else
RegI32 temp;
ScratchI32 scratch(*bc);
- if (op != AtomicFetchAddOp && op != AtomicFetchSubOp) {
+ if (op != AtomicOp::Add && op != AtomicOp::Sub) {
temp = scratch;
}
# endif
@@ -1401,7 +1401,7 @@ namespace atomic_rmw64 {
static void PopAndAllocate(BaseCompiler* bc, AtomicOp op, RegI64* rd,
RegI64* rv, RegI64* temp) {
- if (op == AtomicFetchAddOp || op == AtomicFetchSubOp) {
+ if (op == AtomicOp::Add || op == AtomicOp::Sub) {
// We use xaddq, so input and output must be the same register.
*rv = bc->popI64();
*rd = *rv;
@@ -1422,7 +1422,7 @@ static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
static void Deallocate(BaseCompiler* bc, AtomicOp op, RegI64 rv, RegI64 temp) {
bc->maybeFree(temp);
- if (op != AtomicFetchAddOp && op != AtomicFetchSubOp) {
+ if (op != AtomicOp::Add && op != AtomicOp::Sub) {
bc->freeI64(rv);
}
}
diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
index 196e49f76e..cb0fbde6ec 100644
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -1658,7 +1658,7 @@ bool BaseCompiler::callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
return true;
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
void BaseCompiler::callRef(const Stk& calleeRef, const FunctionCall& call,
CodeOffset* fastCallOffset,
CodeOffset* slowCallOffset) {
@@ -1788,15 +1788,25 @@ void BaseCompiler::finishTryNote(size_t tryNoteIndex) {
masm.nop();
}
- // Check the previous try note to ensure that we don't share an edge with
- // it that could lead to ambiguity. Insert a nop, if required.
- if (tryNotes.length() > 0) {
- const TryNote& previous = tryNotes.back();
+ // Check the most recently finished try note to ensure that we don't share
+ // an edge with it that could lead to ambiguity. Insert a nop, if required.
+ //
+ // Note that finishTryNote is called in LIFO order; we use depth-first
+ // search numbering to detect when we are returning from a nested try to a
+ // parent try, where we may need to ensure that the end offsets do not
+ // coincide.
+ //
+ // If tryNoteIndex >= mostRecentFinishedTryNoteIndex_, we have finished a
+ // try that began after the most recently finished try, and so startTryNote
+ // will take care of any nops.
+ if (tryNoteIndex < mostRecentFinishedTryNoteIndex_) {
+ const TryNote& previous = tryNotes[mostRecentFinishedTryNoteIndex_];
uint32_t currentOffset = masm.currentOffset();
if (previous.tryBodyEnd() == currentOffset) {
masm.nop();
}
}
+ mostRecentFinishedTryNoteIndex_ = tryNoteIndex;
// Don't set the end of the try note if we've OOM'ed, as the above nop's may
// not have been placed. This is okay as this compilation will be thrown
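A small standalone model of the edge-sharing problem the comment above describes: try-note bodies are half-open code ranges, finished innermost-first, and if an outer note would end at exactly the same offset as the most recently finished inner one, a nop is emitted first so the two end offsets stay distinct. The offsets below are made up for illustration.

    #include <cstdint>
    #include <cstdio>

    struct TryNote {
      uint32_t bodyBegin;
      uint32_t bodyEnd;  // half-open: [bodyBegin, bodyEnd)
    };

    int main() {
      const uint32_t kNopSize = 1;
      TryNote inner{16, 40};  // finished first (LIFO order)
      TryNote outer{0, 40};   // would share its end offset with `inner`
      if (outer.bodyEnd == inner.bodyEnd) {
        outer.bodyEnd += kNopSize;  // emit a nop so the ranges stay distinguishable
      }
      std::printf("inner ends at %u, outer ends at %u\n", inner.bodyEnd,
                  outer.bodyEnd);
      return 0;
    }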
@@ -3875,7 +3885,7 @@ bool BaseCompiler::emitBrIf() {
return emitBranchPerform(&b);
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
bool BaseCompiler::emitBrOnNull() {
MOZ_ASSERT(!hasLatentOp());
@@ -5286,7 +5296,7 @@ bool BaseCompiler::emitReturnCallIndirect() {
}
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
bool BaseCompiler::emitCallRef() {
const FuncType* funcType;
Nothing unused_callee;
@@ -6289,7 +6299,7 @@ bool BaseCompiler::emitRefIsNull() {
return true;
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
bool BaseCompiler::emitRefAsNonNull() {
Nothing nothing;
if (!iter_.readRefAsNonNull(&nothing)) {
@@ -9776,13 +9786,13 @@ bool BaseCompiler::emitCallBuiltinModuleFunc() {
return true;
}
- if (builtinModuleFunc->usesMemory) {
+ if (builtinModuleFunc->usesMemory()) {
// The final parameter of a builtinModuleFunc is implicitly the heap base
pushHeapBase(0);
}
// Call the builtinModuleFunc
- return emitInstanceCall(builtinModuleFunc->signature);
+ return emitInstanceCall(*builtinModuleFunc->sig());
}
//////////////////////////////////////////////////////////////////////////////
@@ -9989,36 +9999,18 @@ bool BaseCompiler::emitBody() {
case uint16_t(Op::Else):
CHECK_NEXT(emitElse());
case uint16_t(Op::Try):
- if (!moduleEnv_.exceptionsEnabled()) {
- return iter_.unrecognizedOpcode(&op);
- }
CHECK_NEXT(emitTry());
case uint16_t(Op::Catch):
- if (!moduleEnv_.exceptionsEnabled()) {
- return iter_.unrecognizedOpcode(&op);
- }
CHECK_NEXT(emitCatch());
case uint16_t(Op::CatchAll):
- if (!moduleEnv_.exceptionsEnabled()) {
- return iter_.unrecognizedOpcode(&op);
- }
CHECK_NEXT(emitCatchAll());
case uint16_t(Op::Delegate):
- if (!moduleEnv_.exceptionsEnabled()) {
- return iter_.unrecognizedOpcode(&op);
- }
CHECK(emitDelegate());
iter_.popDelegate();
NEXT();
case uint16_t(Op::Throw):
- if (!moduleEnv_.exceptionsEnabled()) {
- return iter_.unrecognizedOpcode(&op);
- }
CHECK_NEXT(emitThrow());
case uint16_t(Op::Rethrow):
- if (!moduleEnv_.exceptionsEnabled()) {
- return iter_.unrecognizedOpcode(&op);
- }
CHECK_NEXT(emitRethrow());
case uint16_t(Op::ThrowRef):
if (!moduleEnv_.exnrefEnabled()) {
@@ -10063,16 +10055,15 @@ bool BaseCompiler::emitBody() {
}
CHECK_NEXT(emitReturnCallIndirect());
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case uint16_t(Op::CallRef):
- if (!moduleEnv_.functionReferencesEnabled()) {
+ if (!moduleEnv_.gcEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(emitCallRef());
# ifdef ENABLE_WASM_TAIL_CALLS
case uint16_t(Op::ReturnCallRef):
- if (!moduleEnv_.functionReferencesEnabled() ||
- !moduleEnv_.tailCallsEnabled()) {
+ if (!moduleEnv_.gcEnabled() || !moduleEnv_.tailCallsEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(emitReturnCallRef());
@@ -10609,19 +10600,19 @@ bool BaseCompiler::emitBody() {
case uint16_t(Op::MemorySize):
CHECK_NEXT(emitMemorySize());
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case uint16_t(Op::RefAsNonNull):
- if (!moduleEnv_.functionReferencesEnabled()) {
+ if (!moduleEnv_.gcEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(emitRefAsNonNull());
case uint16_t(Op::BrOnNull):
- if (!moduleEnv_.functionReferencesEnabled()) {
+ if (!moduleEnv_.gcEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(emitBrOnNull());
case uint16_t(Op::BrOnNonNull):
- if (!moduleEnv_.functionReferencesEnabled()) {
+ if (!moduleEnv_.gcEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(emitBrOnNonNull());
@@ -11484,113 +11475,113 @@ bool BaseCompiler::emitBody() {
case uint32_t(ThreadOp::I32AtomicAdd):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::Add));
case uint32_t(ThreadOp::I32AtomicAdd8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::Add));
case uint32_t(ThreadOp::I32AtomicAdd16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd32U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::Add));
case uint32_t(ThreadOp::I32AtomicSub):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::Sub));
case uint32_t(ThreadOp::I32AtomicSub8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::Sub));
case uint32_t(ThreadOp::I32AtomicSub16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub32U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::Sub));
case uint32_t(ThreadOp::I32AtomicAnd):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::And));
case uint32_t(ThreadOp::I32AtomicAnd8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::And));
case uint32_t(ThreadOp::I32AtomicAnd16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd32U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::And));
case uint32_t(ThreadOp::I32AtomicOr):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::Or));
case uint32_t(ThreadOp::I32AtomicOr8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::Or));
case uint32_t(ThreadOp::I32AtomicOr16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr32U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::Or));
case uint32_t(ThreadOp::I32AtomicXor):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::Xor));
case uint32_t(ThreadOp::I32AtomicXor8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::Xor));
case uint32_t(ThreadOp::I32AtomicXor16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor32U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::Xor));
case uint32_t(ThreadOp::I32AtomicXchg):
CHECK_NEXT(emitAtomicXchg(ValType::I32, Scalar::Int32));
@@ -11851,6 +11842,8 @@ BaseCompiler::BaseCompiler(const ModuleEnvironment& moduleEnv,
stackMapGenerator_(stackMaps, trapExitLayout, trapExitLayoutNumWords,
*masm),
deadCode_(false),
+ // Init value is selected to ensure proper logic in finishTryNote.
+ mostRecentFinishedTryNoteIndex_(0),
bceSafe_(0),
latentOp_(LatentOp::None),
latentType_(ValType::I32),
@@ -11979,10 +11972,7 @@ bool js::wasm::BaselineCompileFunctions(const ModuleEnvironment& moduleEnv,
// Build the local types vector.
ValTypeVector locals;
- if (!locals.appendAll(moduleEnv.funcs[func.index].type->args())) {
- return false;
- }
- if (!DecodeLocalEntries(d, *moduleEnv.types, moduleEnv.features, &locals)) {
+ if (!DecodeLocalEntriesWithParams(d, moduleEnv, func.index, &locals)) {
return false;
}
diff --git a/js/src/wasm/WasmBinary.h b/js/src/wasm/WasmBinary.h
index 2d41528157..da17a0a864 100644
--- a/js/src/wasm/WasmBinary.h
+++ b/js/src/wasm/WasmBinary.h
@@ -72,12 +72,18 @@ class Opcode {
static_assert(size_t(SimdOp::Limit) <= 0xFFFFFF, "fits");
MOZ_ASSERT(size_t(op) < size_t(SimdOp::Limit));
}
+ MOZ_IMPLICIT Opcode(GcOp op)
+ : bits_((uint32_t(op) << 8) | uint32_t(Op::GcPrefix)) {
+ static_assert(size_t(GcOp::Limit) <= 0xFFFFFF, "fits");
+ MOZ_ASSERT(size_t(op) < size_t(GcOp::Limit));
+ }
bool isOp() const { return bits_ < uint32_t(Op::FirstPrefix); }
bool isMisc() const { return (bits_ & 255) == uint32_t(Op::MiscPrefix); }
bool isThread() const { return (bits_ & 255) == uint32_t(Op::ThreadPrefix); }
bool isMoz() const { return (bits_ & 255) == uint32_t(Op::MozPrefix); }
bool isSimd() const { return (bits_ & 255) == uint32_t(Op::SimdPrefix); }
+ bool isGc() const { return (bits_ & 255) == uint32_t(Op::GcPrefix); }
Op asOp() const {
MOZ_ASSERT(isOp());
@@ -99,6 +105,10 @@ class Opcode {
MOZ_ASSERT(isSimd());
return SimdOp(bits_ >> 8);
}
+ GcOp asGc() const {
+ MOZ_ASSERT(isGc());
+ return GcOp(bits_ >> 8);
+ }
uint32_t bits() const { return bits_; }
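
The Opcode wrapper above keeps the prefix byte in the low 8 bits and the sub-opcode in the upper 24. A standalone sketch of that packing follows; the prefix value is a placeholder, and only the layout is taken from the class above.

// Sketch of the prefix/sub-opcode packing used by Opcode above. The prefix
// byte here is a placeholder value; the layout (prefix in the low byte,
// sub-opcode shifted left by 8) is what the class above relies on.
#include <cassert>
#include <cstdint>

constexpr uint32_t GcPrefix = 0xfb;  // placeholder byte value

constexpr uint32_t packPrefixed(uint32_t prefix, uint32_t subOpcode) {
  return (subOpcode << 8) | prefix;
}

constexpr bool hasPrefix(uint32_t bits, uint32_t prefix) {
  return (bits & 255) == prefix;
}

constexpr uint32_t subOpcode(uint32_t bits) { return bits >> 8; }

int main() {
  uint32_t bits = packPrefixed(GcPrefix, 0x1c);
  assert(hasPrefix(bits, GcPrefix));
  assert(subOpcode(bits) == 0x1c);
  return 0;
}
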
@@ -127,6 +137,7 @@ using MaybeSectionRange = Maybe<SectionRange>;
class Encoder {
Bytes& bytes_;
+ const TypeContext* types_;
template <class T>
[[nodiscard]] bool write(const T& v) {
@@ -201,7 +212,13 @@ class Encoder {
}
public:
- explicit Encoder(Bytes& bytes) : bytes_(bytes) { MOZ_ASSERT(empty()); }
+ explicit Encoder(Bytes& bytes) : bytes_(bytes), types_(nullptr) {
+ MOZ_ASSERT(empty());
+ }
+ explicit Encoder(Bytes& bytes, const TypeContext& types)
+ : bytes_(bytes), types_(&types) {
+ MOZ_ASSERT(empty());
+ }
size_t currentOffset() const { return bytes_.length(); }
bool empty() const { return currentOffset() == 0; }
@@ -226,9 +243,17 @@ class Encoder {
[[nodiscard]] bool writeVarS64(int64_t i) { return writeVarS<int64_t>(i); }
[[nodiscard]] bool writeValType(ValType type) {
static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
- // writeValType is only used by asm.js, which doesn't use type
- // references
- MOZ_RELEASE_ASSERT(!type.isTypeRef(), "NYI");
+ if (type.isTypeRef()) {
+ MOZ_RELEASE_ASSERT(types_,
+ "writeValType is used, but types were not specified.");
+ if (!writeFixedU8(uint8_t(type.isNullable() ? TypeCode::NullableRef
+ : TypeCode::Ref))) {
+ return false;
+ }
+ uint32_t typeIndex = types_->indexOf(*type.typeDef());
+ // Encode positive LEB S33 as S64.
+ return writeVarS64(typeIndex);
+ }
TypeCode tc = type.packed().typeCode();
MOZ_ASSERT(size_t(tc) < size_t(TypeCode::Limit));
return writeFixedU8(uint8_t(tc));
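
writeValType now encodes a type reference as a (nullable) ref type code followed by the type index written via writeVarS64, i.e. a positive S33 emitted as a signed LEB. A self-contained signed-LEB128 writer, assumed to match that wire format, is sketched below.

// Standalone signed-LEB128 writer, assumed to match the writeVarS64 format
// used above to encode a type index as a positive S33.
#include <cassert>
#include <cstdint>
#include <vector>

static void writeVarS64(std::vector<uint8_t>& out, int64_t value) {
  while (true) {
    uint8_t byte = value & 0x7f;
    value >>= 7;  // arithmetic shift, keeps the sign on mainstream compilers
    bool done = (value == 0 && !(byte & 0x40)) ||
                (value == -1 && (byte & 0x40));
    if (!done) {
      byte |= 0x80;
    }
    out.push_back(byte);
    if (done) {
      return;
    }
  }
}

int main() {
  std::vector<uint8_t> out;
  writeVarS64(out, 300);  // a hypothetical type index
  assert(out.size() == 2 && out[0] == 0xac && out[1] == 0x02);
  return 0;
}
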
@@ -693,9 +718,9 @@ inline bool Decoder::readPackedType(const TypeContext& types,
}
case uint8_t(TypeCode::Ref):
case uint8_t(TypeCode::NullableRef): {
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
- if (!features.functionReferences) {
- return fail("(ref T) types not enabled");
+#ifdef ENABLE_WASM_GC
+ if (!features.gc) {
+ return fail("gc not enabled");
}
bool nullable = code == uint8_t(TypeCode::NullableRef);
RefType refType;
@@ -718,7 +743,7 @@ inline bool Decoder::readPackedType(const TypeContext& types,
case uint8_t(TypeCode::NullAnyRef): {
#ifdef ENABLE_WASM_GC
if (!features.gc) {
- return fail("gc types not enabled");
+ return fail("gc not enabled");
}
*type = RefType::fromTypeCode(TypeCode(code), true);
return true;
@@ -784,7 +809,7 @@ inline bool Decoder::readHeapType(const TypeContext& types,
case uint8_t(TypeCode::NullExternRef):
case uint8_t(TypeCode::NullAnyRef):
if (!features.gc) {
- return fail("gc types not enabled");
+ return fail("gc not enabled");
}
*type = RefType::fromTypeCode(TypeCode(code), nullable);
return true;
@@ -794,8 +819,8 @@ inline bool Decoder::readHeapType(const TypeContext& types,
}
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
- if (features.functionReferences) {
+#ifdef ENABLE_WASM_GC
+ if (features.gc) {
int32_t x;
if (!readVarS32(&x) || x < 0 || uint32_t(x) >= types.length()) {
return fail("invalid heap type index");
diff --git a/js/src/wasm/WasmBuiltinModule.cpp b/js/src/wasm/WasmBuiltinModule.cpp
index 0748977c8b..044591224e 100644
--- a/js/src/wasm/WasmBuiltinModule.cpp
+++ b/js/src/wasm/WasmBuiltinModule.cpp
@@ -27,28 +27,29 @@
#include "wasm/WasmJS.h"
#include "wasm/WasmModule.h"
#include "wasm/WasmOpIter.h"
+#include "wasm/WasmStaticTypeDefs.h"
#include "wasm/WasmValidate.h"
using namespace js;
using namespace js::wasm;
-#define VISIT_BUILTIN_FUNC(op, export, sa_name, abitype, entry, uses_memory, \
- idx) \
- static const ValType BuiltinModuleFunc##op##_Params[] = \
- DECLARE_BUILTIN_MODULE_FUNC_PARAM_VALTYPES_##op; \
- \
- const BuiltinModuleFunc BuiltinModuleFunc##op = { \
- export, \
- mozilla::Span<const ValType>(BuiltinModuleFunc##op##_Params), \
- DECLARE_BUILTIN_MODULE_FUNC_RESULT_VALTYPE_##op, \
- SASig##sa_name, \
- uses_memory, \
- };
-
-FOR_EACH_BUILTIN_MODULE_FUNC(VISIT_BUILTIN_FUNC)
-#undef VISIT_BUILTIN_FUNC
+BuiltinModuleFuncs* BuiltinModuleFuncs::singleton_ = nullptr;
+
+[[nodiscard]] bool BuiltinModuleFunc::init(const RefPtr<TypeContext>& types,
+ mozilla::Span<const ValType> params,
+ Maybe<ValType> result,
+ bool usesMemory,
+ const SymbolicAddressSignature* sig,
+ const char* exportName) {
+ // This builtin must not have been initialized yet.
+ MOZ_ASSERT(!recGroup_);
+
+ // Initialize the basic fields
+ exportName_ = exportName;
+ sig_ = sig;
+ usesMemory_ = usesMemory;
-bool BuiltinModuleFunc::funcType(FuncType* type) const {
+ // Create a function type for the given params and result
ValTypeVector paramVec;
if (!paramVec.append(params.data(), params.data() + params.size())) {
return false;
@@ -57,21 +58,48 @@ bool BuiltinModuleFunc::funcType(FuncType* type) const {
if (result.isSome() && !resultVec.append(*result)) {
return false;
}
- *type = FuncType(std::move(paramVec), std::move(resultVec));
+ const TypeDef* typeDef =
+ types->addType(FuncType(std::move(paramVec), std::move(resultVec)));
+ if (!typeDef) {
+ return false;
+ }
+ recGroup_ = &typeDef->recGroup();
return true;
}
-/* static */
-const BuiltinModuleFunc& BuiltinModuleFunc::getFromId(BuiltinModuleFuncId id) {
- switch (id) {
-#define VISIT_BUILTIN_FUNC(op, ...) \
- case BuiltinModuleFuncId::op: \
- return BuiltinModuleFunc##op;
- FOR_EACH_BUILTIN_MODULE_FUNC(VISIT_BUILTIN_FUNC)
+bool BuiltinModuleFuncs::init() {
+ singleton_ = js_new<BuiltinModuleFuncs>();
+ if (!singleton_) {
+ return false;
+ }
+
+ RefPtr<TypeContext> types = js_new<TypeContext>();
+ if (!types) {
+ return false;
+ }
+
+#define VISIT_BUILTIN_FUNC(op, export, sa_name, abitype, entry, uses_memory, \
+ ...) \
+ const ValType op##Params[] = \
+ DECLARE_BUILTIN_MODULE_FUNC_PARAM_VALTYPES_##op; \
+ Maybe<ValType> op##Result = DECLARE_BUILTIN_MODULE_FUNC_RESULT_VALTYPE_##op; \
+ if (!singleton_->funcs_[BuiltinModuleFuncId::op].init( \
+ types, mozilla::Span<const ValType>(op##Params), op##Result, \
+ uses_memory, &SASig##sa_name, export)) { \
+ return false; \
+ }
+ FOR_EACH_BUILTIN_MODULE_FUNC(VISIT_BUILTIN_FUNC)
#undef VISIT_BUILTIN_FUNC
- default:
- MOZ_CRASH("unexpected builtinModuleFunc");
+
+ return true;
+}
+
+void BuiltinModuleFuncs::destroy() {
+ if (!singleton_) {
+ return;
}
+ js_delete(singleton_);
+ singleton_ = nullptr;
}
bool EncodeFuncBody(const BuiltinModuleFunc& builtinModuleFunc,
@@ -80,7 +108,8 @@ bool EncodeFuncBody(const BuiltinModuleFunc& builtinModuleFunc,
if (!EncodeLocalEntries(encoder, ValTypeVector())) {
return false;
}
- for (uint32_t i = 0; i < builtinModuleFunc.params.size(); i++) {
+ const FuncType* funcType = builtinModuleFunc.funcType();
+ for (uint32_t i = 0; i < funcType->args().length(); i++) {
if (!encoder.writeOp(Op::LocalGet) || !encoder.writeVarU32(i)) {
return false;
}
@@ -145,11 +174,11 @@ bool CompileBuiltinModule(JSContext* cx,
for (uint32_t funcIndex = 0; funcIndex < ids.size(); funcIndex++) {
const BuiltinModuleFuncId& id = ids[funcIndex];
const BuiltinModuleFunc& builtinModuleFunc =
- BuiltinModuleFunc::getFromId(id);
+ BuiltinModuleFuncs::getFromId(id);
- FuncType type;
- if (!builtinModuleFunc.funcType(&type) ||
- !moduleEnv.types->addType(std::move(type))) {
+ SharedRecGroup recGroup = builtinModuleFunc.recGroup();
+ MOZ_ASSERT(recGroup->numTypes() == 1);
+ if (!moduleEnv.types->addRecGroup(recGroup)) {
ReportOutOfMemory(cx);
return false;
}
@@ -170,10 +199,10 @@ bool CompileBuiltinModule(JSContext* cx,
// Add (export "$name" (func $i)) declarations.
for (uint32_t funcIndex = 0; funcIndex < ids.size(); funcIndex++) {
const BuiltinModuleFunc& builtinModuleFunc =
- BuiltinModuleFunc::getFromId(ids[funcIndex]);
+ BuiltinModuleFuncs::getFromId(ids[funcIndex]);
CacheableName exportName;
- if (!CacheableName::fromUTF8Chars(builtinModuleFunc.exportName,
+ if (!CacheableName::fromUTF8Chars(builtinModuleFunc.exportName(),
&exportName) ||
!moduleEnv.exports.append(Export(std::move(exportName), funcIndex,
DefinitionKind::Function))) {
@@ -200,7 +229,7 @@ bool CompileBuiltinModule(JSContext* cx,
for (uint32_t funcIndex = 0; funcIndex < ids.size(); funcIndex++) {
BuiltinModuleFuncId id = ids[funcIndex];
const BuiltinModuleFunc& builtinModuleFunc =
- BuiltinModuleFunc::getFromId(ids[funcIndex]);
+ BuiltinModuleFuncs::getFromId(ids[funcIndex]);
// Compilation may be done using other threads, ModuleGenerator requires
// that function bodies live until after finishFuncDefs().
@@ -267,14 +296,16 @@ static BuiltinModuleFuncId IntGemmFuncs[] = {
#ifdef ENABLE_WASM_JS_STRING_BUILTINS
static BuiltinModuleFuncId JSStringFuncs[] = {
- BuiltinModuleFuncId::StringFromWTF16Array,
- BuiltinModuleFuncId::StringToWTF16Array,
+ BuiltinModuleFuncId::StringTest,
+ BuiltinModuleFuncId::StringCast,
+ BuiltinModuleFuncId::StringFromCharCodeArray,
+ BuiltinModuleFuncId::StringIntoCharCodeArray,
BuiltinModuleFuncId::StringFromCharCode,
BuiltinModuleFuncId::StringFromCodePoint,
BuiltinModuleFuncId::StringCharCodeAt,
BuiltinModuleFuncId::StringCodePointAt,
BuiltinModuleFuncId::StringLength,
- BuiltinModuleFuncId::StringConcatenate,
+ BuiltinModuleFuncId::StringConcat,
BuiltinModuleFuncId::StringSubstring,
BuiltinModuleFuncId::StringEquals,
BuiltinModuleFuncId::StringCompare};
@@ -300,8 +331,8 @@ Maybe<const BuiltinModuleFunc*> wasm::ImportMatchesBuiltinModuleFunc(
// Not supported for implicit instantiation yet
MOZ_RELEASE_ASSERT(module == BuiltinModuleId::JSString);
for (BuiltinModuleFuncId funcId : JSStringFuncs) {
- const BuiltinModuleFunc& func = BuiltinModuleFunc::getFromId(funcId);
- if (importName == mozilla::MakeStringSpan(func.exportName)) {
+ const BuiltinModuleFunc& func = BuiltinModuleFuncs::getFromId(funcId);
+ if (importName == mozilla::MakeStringSpan(func.exportName())) {
return Some(&func);
}
}
diff --git a/js/src/wasm/WasmBuiltinModule.h b/js/src/wasm/WasmBuiltinModule.h
index 42faffec73..8646e789e6 100644
--- a/js/src/wasm/WasmBuiltinModule.h
+++ b/js/src/wasm/WasmBuiltinModule.h
@@ -62,25 +62,60 @@ struct MOZ_STACK_CLASS BuiltinModuleInstances {
// A builtin module func is a natively implemented function that may be
// compiled into a 'builtin module', which may be instantiated with a provided
// memory yielding an exported WebAssembly function wrapping the builtin module.
-struct BuiltinModuleFunc {
+class BuiltinModuleFunc {
+ private:
+ SharedRecGroup recGroup_;
+ const char* exportName_;
+ const SymbolicAddressSignature* sig_;
+ bool usesMemory_;
+
+ public:
+ // Default constructor so this can be used in an EnumeratedArray.
+ BuiltinModuleFunc() = default;
+
+ // Initialize this builtin. Must only be called once.
+ [[nodiscard]] bool init(const RefPtr<TypeContext>& types,
+ mozilla::Span<const ValType> params,
+ Maybe<ValType> result, bool usesMemory,
+ const SymbolicAddressSignature* sig,
+ const char* exportName);
+
+ // The rec group for the function type for this builtin.
+ const RecGroup* recGroup() const { return recGroup_.get(); }
+ // The type definition for the function type for this builtin.
+ const TypeDef* typeDef() const { return &recGroup_->type(0); }
+ // The function type for this builtin.
+ const FuncType* funcType() const { return &typeDef()->funcType(); }
+
// The name of the func as it is exported
- const char* exportName;
- // The params taken by the func.
- mozilla::Span<const ValType> params;
- // The optional result returned by the func.
- mozilla::Maybe<const ValType> result;
- // The signature of the builtin that implements the func
- const SymbolicAddressSignature& signature;
+ const char* exportName() const { return exportName_; }
+ // The signature of the builtin that implements this function.
+ const SymbolicAddressSignature* sig() const { return sig_; }
// Whether this function takes a pointer to the memory base as a hidden final
- // parameter.
- bool usesMemory;
+ // parameter. This parameter will show up in the SymbolicAddressSignature,
+ // but not the function type. Compilers must pass the memoryBase to the
+ // function call as the last parameter.
+ bool usesMemory() const { return usesMemory_; }
+};
+
+// Static storage for all builtin module funcs in the system.
+class BuiltinModuleFuncs {
+ using Storage =
+ mozilla::EnumeratedArray<BuiltinModuleFuncId, BuiltinModuleFunc,
+ size_t(BuiltinModuleFuncId::Limit)>;
+ Storage funcs_;
- // Allocate a FuncType for this func, returning false for OOM
- bool funcType(FuncType* type) const;
+ static BuiltinModuleFuncs* singleton_;
+
+ public:
+ [[nodiscard]] static bool init();
+ static void destroy();
// Get the BuiltinModuleFunc for a BuiltinModuleFuncId. BuiltinModuleFuncId
// must be validated.
- static const BuiltinModuleFunc& getFromId(BuiltinModuleFuncId id);
+ static const BuiltinModuleFunc& getFromId(BuiltinModuleFuncId id) {
+ return singleton_->funcs_[id];
+ }
};
Maybe<BuiltinModuleId> ImportMatchesBuiltinModule(
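
BuiltinModuleFuncs is now a process-wide singleton with an explicit init()/destroy() lifecycle. The reduced sketch below shows the same pattern with illustrative names; it is not the SpiderMonkey class.

// Reduced sketch of the explicit-lifetime singleton registry pattern used by
// BuiltinModuleFuncs above (illustrative names, not SpiderMonkey API).
#include <array>
#include <cassert>
#include <cstddef>
#include <new>

enum class FuncId : size_t { First, Second, Limit };

struct FuncInfo {
  const char* exportName = nullptr;
};

class Registry {
  std::array<FuncInfo, size_t(FuncId::Limit)> funcs_;
  static Registry* singleton_;

 public:
  [[nodiscard]] static bool init() {
    singleton_ = new (std::nothrow) Registry();
    if (!singleton_) {
      return false;
    }
    singleton_->funcs_[size_t(FuncId::First)] = FuncInfo{"first"};
    singleton_->funcs_[size_t(FuncId::Second)] = FuncInfo{"second"};
    return true;
  }
  static void destroy() {
    delete singleton_;
    singleton_ = nullptr;
  }
  static const FuncInfo& getFromId(FuncId id) {
    return singleton_->funcs_[size_t(id)];
  }
};

Registry* Registry::singleton_ = nullptr;

int main() {
  if (!Registry::init()) {
    return 1;
  }
  assert(Registry::getFromId(FuncId::Second).exportName[0] == 's');
  Registry::destroy();
  return 0;
}
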
diff --git a/js/src/wasm/WasmBuiltinModule.yaml b/js/src/wasm/WasmBuiltinModule.yaml
index 88c2f5a575..755e0e5e74 100644
--- a/js/src/wasm/WasmBuiltinModule.yaml
+++ b/js/src/wasm/WasmBuiltinModule.yaml
@@ -12,10 +12,10 @@
entry: Instance::intrI8VecMul
export: i8vecmul
params:
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -40,12 +40,12 @@
entry: intgemm::IntrI8PrepareB
export: int8_prepare_b
params:
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'f32'
+ - 'f32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -65,12 +65,12 @@
entry: intgemm::IntrI8PrepareBFromTransposed
export: int8_prepare_b_from_transposed
params:
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'f32'
+ - 'f32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -90,10 +90,10 @@
entry: intgemm::IntrI8PrepareBFromQuantizedTransposed
export: int8_prepare_b_from_quantized_transposed
params:
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -116,12 +116,12 @@
entry: intgemm::IntrI8PrepareA
export: int8_prepare_a
params:
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'f32'
+ - 'f32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -142,15 +142,15 @@
entry: intgemm::IntrI8PrepareBias
export: int8_prepare_bias
params:
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'f32'
+ - 'f32'
+ - 'f32'
+ - 'f32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -177,18 +177,18 @@
entry: intgemm::IntrI8MultiplyAndAddBias
export: int8_multiply_and_add_bias
params:
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'f32'
+ - 'f32'
+ - 'i32'
+ - 'f32'
+ - 'f32'
+ - 'i32'
+ - 'f32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -206,12 +206,12 @@
entry: intgemm::IntrI8SelectColumnsOfB
export: int8_select_columns_of_b
params:
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -219,31 +219,61 @@
#if defined(ENABLE_WASM_JS_STRING_BUILTINS)
-- op: StringFromWTF16Array
+- op: StringTest
symbolic_address:
- name: StringFromWTF16Array
+ name: StringTest
+ type: Args_Int32_GeneralGeneral
+ entry: Instance::stringTest
+ export: test
+ params:
+ - 'externref'
+ result: 'i32'
+ fail_mode: Infallible
+ uses_memory: false
+
+- op: StringCast
+ symbolic_address:
+ name: StringCast
+ type: Args_General2
+ entry: Instance::stringCast
+ export: cast
+ params:
+ - 'externref'
+ result:
+ code: 'extern'
+ nullable: false
+ fail_mode: FailOnNullPtr
+ uses_memory: false
+
+- op: StringFromCharCodeArray
+ symbolic_address:
+ name: StringFromCharCodeArray
type: Args_General_GeneralGeneralInt32Int32
- entry: Instance::stringFromWTF16Array
- export: fromWTF16Array
+ entry: Instance::stringFromCharCodeArray
+ export: fromCharCodeArray
params:
- - 'ValType(RefType::any())'
- - 'ValType::i32()'
- - 'ValType::i32()'
- result: 'ValType(RefType::extern_())'
+ - type: "StaticTypeDefs::arrayMutI16"
+ nullable: true
+ - 'i32'
+ - 'i32'
+ result:
+ code: 'extern'
+ nullable: false
fail_mode: FailOnNullPtr
uses_memory: false
-- op: StringToWTF16Array
+- op: StringIntoCharCodeArray
symbolic_address:
- name: StringToWTF16Array
+ name: StringIntoCharCodeArray
type: Args_Int32_GeneralGeneralGeneralInt32
- entry: Instance::stringToWTF16Array
- export: toWTF16Array
+ entry: Instance::stringIntoCharCodeArray
+ export: intoCharCodeArray
params:
- - 'ValType(RefType::extern_())'
- - 'ValType(RefType::any())'
- - 'ValType::i32()'
- result: 'ValType::i32()'
+ - 'externref'
+ - type: "StaticTypeDefs::arrayMutI16"
+ nullable: true
+ - 'i32'
+ result: 'i32'
fail_mode: FailOnNegI32
uses_memory: false
@@ -254,8 +284,8 @@
entry: Instance::stringFromCharCode
export: fromCharCode
params:
- - 'ValType::i32()'
- result: 'ValType(RefType::extern_())'
+ - 'i32'
+ result: 'externref'
fail_mode: FailOnNullPtr
uses_memory: false
@@ -266,8 +296,8 @@
entry: Instance::stringFromCodePoint
export: fromCodePoint
params:
- - 'ValType::i32()'
- result: 'ValType(RefType::extern_())'
+ - 'i32'
+ result: 'externref'
fail_mode: FailOnNullPtr
uses_memory: false
@@ -278,9 +308,9 @@
entry: Instance::stringCharCodeAt
export: charCodeAt
params:
- - 'ValType(RefType::extern_())'
- - 'ValType::i32()'
- result: 'ValType::i32()'
+ - 'externref'
+ - 'i32'
+ result: 'i32'
fail_mode: FailOnNegI32
uses_memory: false
@@ -291,9 +321,9 @@
entry: Instance::stringCodePointAt
export: codePointAt
params:
- - 'ValType(RefType::extern_())'
- - 'ValType::i32()'
- result: 'ValType::i32()'
+ - 'externref'
+ - 'i32'
+ result: 'i32'
fail_mode: FailOnNegI32
uses_memory: false
@@ -304,21 +334,21 @@
entry: Instance::stringLength
export: length
params:
- - 'ValType(RefType::extern_())'
- result: 'ValType::i32()'
+ - 'externref'
+ result: 'i32'
fail_mode: FailOnNegI32
uses_memory: false
-- op: StringConcatenate
+- op: StringConcat
symbolic_address:
- name: StringConcatenate
+ name: StringConcat
type: Args_General3
- entry: Instance::stringConcatenate
- export: concatenate
+ entry: Instance::stringConcat
+ export: concat
params:
- - 'ValType(RefType::extern_())'
- - 'ValType(RefType::extern_())'
- result: 'ValType(RefType::extern_())'
+ - 'externref'
+ - 'externref'
+ result: 'externref'
fail_mode: FailOnNullPtr
uses_memory: false
@@ -329,10 +359,10 @@
entry: Instance::stringSubstring
export: substring
params:
- - 'ValType(RefType::extern_())'
- - 'ValType::i32()'
- - 'ValType::i32()'
- result: 'ValType(RefType::extern_())'
+ - 'externref'
+ - 'i32'
+ - 'i32'
+ result: 'externref'
fail_mode: FailOnNullPtr
uses_memory: false
@@ -343,9 +373,9 @@
entry: Instance::stringEquals
export: equals
params:
- - 'ValType(RefType::extern_())'
- - 'ValType(RefType::extern_())'
- result: 'ValType::i32()'
+ - 'externref'
+ - 'externref'
+ result: 'i32'
fail_mode: FailOnNegI32
uses_memory: false
@@ -356,9 +386,9 @@
entry: Instance::stringCompare
export: compare
params:
- - 'ValType(RefType::extern_())'
- - 'ValType(RefType::extern_())'
- result: 'ValType::i32()'
+ - 'externref'
+ - 'externref'
+ result: 'i32'
fail_mode: FailOnMaxI32
uses_memory: false
diff --git a/js/src/wasm/WasmBuiltins.cpp b/js/src/wasm/WasmBuiltins.cpp
index 08024c3dfe..7b03494bcd 100644
--- a/js/src/wasm/WasmBuiltins.cpp
+++ b/js/src/wasm/WasmBuiltins.cpp
@@ -397,9 +397,9 @@ const SymbolicAddressSignature SASigArrayCopy = {
#define VISIT_BUILTIN_FUNC(op, export, sa_name, ...) \
const SymbolicAddressSignature SASig##sa_name = { \
SymbolicAddress::sa_name, \
- DECLARE_BUILTIN_MODULE_FUNC_RESULT_SASTYPE_##op, \
+ DECLARE_BUILTIN_MODULE_FUNC_RESULT_MIRTYPE_##op, \
DECLARE_BUILTIN_MODULE_FUNC_FAILMODE_##op, \
- DECLARE_BUILTIN_MODULE_FUNC_PARAM_SASTYPES_##op};
+ DECLARE_BUILTIN_MODULE_FUNC_PARAM_MIRTYPES_##op};
FOR_EACH_BUILTIN_MODULE_FUNC(VISIT_BUILTIN_FUNC)
#undef VISIT_BUILTIN_FUNC
@@ -1839,7 +1839,7 @@ using TypedNativeToCodeRangeMap =
HashMap<TypedNative, uint32_t, TypedNative, SystemAllocPolicy>;
using SymbolicAddressToCodeRangeArray =
- EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, uint32_t>;
+ EnumeratedArray<SymbolicAddress, uint32_t, size_t(SymbolicAddress::Limit)>;
struct BuiltinThunks {
uint8_t* codeBase;
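
Several hunks in this patch move EnumeratedArray to the parameter order <Enum, ValueType, Size>. The stand-in below illustrates that usage; it is a sketch, not mozilla::EnumeratedArray.

// Minimal stand-in illustrating the new parameter order
// EnumeratedArray<Enum, ValueType, Size>; not the real mozilla type.
#include <array>
#include <cassert>
#include <cstddef>

template <typename Enum, typename ValueType, size_t Size>
class EnumeratedArraySketch {
  std::array<ValueType, Size> storage_{};

 public:
  ValueType& operator[](Enum e) { return storage_[size_t(e)]; }
  const ValueType& operator[](Enum e) const { return storage_[size_t(e)]; }
};

enum class Trap : size_t { OutOfBounds, Unreachable, Limit };

int main() {
  EnumeratedArraySketch<Trap, int, size_t(Trap::Limit)> counts;
  counts[Trap::Unreachable] = 3;
  assert(counts[Trap::OutOfBounds] == 0);
  assert(counts[Trap::Unreachable] == 3);
  return 0;
}
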
diff --git a/js/src/wasm/WasmCode.cpp b/js/src/wasm/WasmCode.cpp
index b7aaa1869c..7fe2562ab6 100644
--- a/js/src/wasm/WasmCode.cpp
+++ b/js/src/wasm/WasmCode.cpp
@@ -1085,6 +1085,23 @@ bool Code::lookupTrap(void* pc, Trap* trapOut, BytecodeOffset* bytecode) const {
return false;
}
+bool Code::lookupFunctionTier(const CodeRange* codeRange, Tier* tier) const {
+ // This logic only works if the codeRange is a function, and therefore only
+ // exists in metadata and not a lazy stub tier. Generalizing to access lazy
+ // stubs would require taking a lock, which is undesirable for the profiler.
+ MOZ_ASSERT(codeRange->isFunction());
+ for (Tier t : tiers()) {
+ const CodeTier& code = codeTier(t);
+ const MetadataTier& metadata = code.metadata();
+ if (codeRange >= metadata.codeRanges.begin() &&
+ codeRange < metadata.codeRanges.end()) {
+ *tier = t;
+ return true;
+ }
+ }
+ return false;
+}
+
struct UnwindInfoPCOffset {
const CodeRangeUnwindInfoVector& info;
explicit UnwindInfoPCOffset(const CodeRangeUnwindInfoVector& info)
diff --git a/js/src/wasm/WasmCode.h b/js/src/wasm/WasmCode.h
index a34a462127..e03a2f596e 100644
--- a/js/src/wasm/WasmCode.h
+++ b/js/src/wasm/WasmCode.h
@@ -117,8 +117,8 @@ struct LinkData : LinkDataCacheablePod {
};
using InternalLinkVector = Vector<InternalLink, 0, SystemAllocPolicy>;
- struct SymbolicLinkArray
- : EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, Uint32Vector> {
+ struct SymbolicLinkArray : EnumeratedArray<SymbolicAddress, Uint32Vector,
+ size_t(SymbolicAddress::Limit)> {
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
};
@@ -857,6 +857,7 @@ class Code : public ShareableBase<Code> {
bool containsCodePC(const void* pc) const;
bool lookupTrap(void* pc, Trap* trap, BytecodeOffset* bytecode) const;
const CodeRangeUnwindInfo* lookupUnwindInfo(void* pc) const;
+ bool lookupFunctionTier(const CodeRange* codeRange, Tier* tier) const;
// To save memory, profilingLabels_ are generated lazily when profiling mode
// is enabled.
diff --git a/js/src/wasm/WasmCodegenTypes.h b/js/src/wasm/WasmCodegenTypes.h
index 815292dd23..590572ae8a 100644
--- a/js/src/wasm/WasmCodegenTypes.h
+++ b/js/src/wasm/WasmCodegenTypes.h
@@ -273,7 +273,7 @@ WASM_DECLARE_CACHEABLE_POD(TrapSite);
WASM_DECLARE_POD_VECTOR(TrapSite, TrapSiteVector)
struct TrapSiteVectorArray
- : EnumeratedArray<Trap, Trap::Limit, TrapSiteVector> {
+ : EnumeratedArray<Trap, TrapSiteVector, size_t(Trap::Limit)> {
bool empty() const;
void clear();
void swap(TrapSiteVectorArray& rhs);
diff --git a/js/src/wasm/WasmCompile.cpp b/js/src/wasm/WasmCompile.cpp
index 2418340684..fbf4df3e71 100644
--- a/js/src/wasm/WasmCompile.cpp
+++ b/js/src/wasm/WasmCompile.cpp
@@ -726,8 +726,8 @@ void CompilerEnvironment::computeParameters(Decoder& d) {
state_ = Computed;
}
-template <class DecoderT>
-static bool DecodeFunctionBody(DecoderT& d, ModuleGenerator& mg,
+template <class DecoderT, class ModuleGeneratorT>
+static bool DecodeFunctionBody(DecoderT& d, ModuleGeneratorT& mg,
uint32_t funcIndex) {
uint32_t bodySize;
if (!d.readVarU32(&bodySize)) {
@@ -751,9 +751,9 @@ static bool DecodeFunctionBody(DecoderT& d, ModuleGenerator& mg,
bodyBegin + bodySize);
}
-template <class DecoderT>
+template <class DecoderT, class ModuleGeneratorT>
static bool DecodeCodeSection(const ModuleEnvironment& env, DecoderT& d,
- ModuleGenerator& mg) {
+ ModuleGeneratorT& mg) {
if (!env.codeSection) {
if (env.numFuncDefs() != 0) {
return d.fail("expected code section");
@@ -996,3 +996,46 @@ SharedModule wasm::CompileStreaming(
return mg.finishModule(*bytecode, streamEnd.tier2Listener);
}
+
+class DumpIonModuleGenerator {
+ private:
+ ModuleEnvironment& moduleEnv_;
+ uint32_t targetFuncIndex_;
+ IonDumpContents contents_;
+ GenericPrinter& out_;
+ UniqueChars* error_;
+
+ public:
+ DumpIonModuleGenerator(ModuleEnvironment& moduleEnv, uint32_t targetFuncIndex,
+ IonDumpContents contents, GenericPrinter& out,
+ UniqueChars* error)
+ : moduleEnv_(moduleEnv),
+ targetFuncIndex_(targetFuncIndex),
+ contents_(contents),
+ out_(out),
+ error_(error) {}
+
+ bool finishFuncDefs() { return true; }
+ bool compileFuncDef(uint32_t funcIndex, uint32_t lineOrBytecode,
+ const uint8_t* begin, const uint8_t* end) {
+ if (funcIndex != targetFuncIndex_) {
+ return true;
+ }
+
+ FuncCompileInput input(funcIndex, lineOrBytecode, begin, end,
+ Uint32Vector());
+ return IonDumpFunction(moduleEnv_, input, contents_, out_, error_);
+ }
+};
+
+bool wasm::DumpIonFunctionInModule(const ShareableBytes& bytecode,
+ uint32_t targetFuncIndex,
+ IonDumpContents contents,
+ GenericPrinter& out, UniqueChars* error) {
+ UniqueCharsVector warnings;
+ Decoder d(bytecode.bytes, 0, error, &warnings);
+ ModuleEnvironment moduleEnv(FeatureArgs::allEnabled());
+ DumpIonModuleGenerator mg(moduleEnv, targetFuncIndex, contents, out, error);
+ return moduleEnv.init() && DecodeModuleEnvironment(d, &moduleEnv) &&
+ DecodeCodeSection(moduleEnv, d, mg);
+}
diff --git a/js/src/wasm/WasmCompile.h b/js/src/wasm/WasmCompile.h
index 2b07881eea..f39dc09fb9 100644
--- a/js/src/wasm/WasmCompile.h
+++ b/js/src/wasm/WasmCompile.h
@@ -93,6 +93,19 @@ SharedModule CompileStreaming(const CompileArgs& args, const Bytes& envBytes,
const Atomic<bool>& cancelled, UniqueChars* error,
UniqueCharsVector* warnings);
+// What to print out from dumping a function from Ion.
+enum class IonDumpContents {
+ UnoptimizedMIR,
+ OptimizedMIR,
+ LIR,
+
+ Default = UnoptimizedMIR,
+};
+
+bool DumpIonFunctionInModule(const ShareableBytes& bytecode,
+ uint32_t targetFuncIndex, IonDumpContents contents,
+ GenericPrinter& out, UniqueChars* error);
+
} // namespace wasm
} // namespace js
diff --git a/js/src/wasm/WasmCompileArgs.h b/js/src/wasm/WasmCompileArgs.h
index 1bf9e60d13..af85026b93 100644
--- a/js/src/wasm/WasmCompileArgs.h
+++ b/js/src/wasm/WasmCompileArgs.h
@@ -103,6 +103,15 @@ struct FeatureArgs {
FeatureArgs(FeatureArgs&&) = default;
static FeatureArgs build(JSContext* cx, const FeatureOptions& options);
+ static FeatureArgs allEnabled() {
+ FeatureArgs args;
+#define WASM_FEATURE(NAME, LOWER_NAME, ...) args.LOWER_NAME = true;
+ JS_FOR_WASM_FEATURES(WASM_FEATURE)
+#undef WASM_FEATURE
+ args.sharedMemory = Shareable::True;
+ args.simd = true;
+ return args;
+ }
#define WASM_FEATURE(NAME, LOWER_NAME, ...) bool LOWER_NAME;
JS_FOR_WASM_FEATURES(WASM_FEATURE)
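
FeatureArgs::allEnabled() leans on the JS_FOR_WASM_FEATURES X-macro to flip every feature field. The sketch below shows the same X-macro pattern with placeholder feature names, since the real feature list is not reproduced here.

// The X-macro pattern behind FeatureArgs::allEnabled(). The feature list
// below is a placeholder; the real list lives in JS_FOR_WASM_FEATURES.
#include <cassert>

#define FOR_EACH_DEMO_FEATURE(MACRO) \
  MACRO(GcDemo, gc_demo)             \
  MACRO(TailCallsDemo, tail_calls_demo)

struct DemoFeatureArgs {
#define DEMO_FEATURE(NAME, LOWER_NAME) bool LOWER_NAME = false;
  FOR_EACH_DEMO_FEATURE(DEMO_FEATURE)
#undef DEMO_FEATURE

  static DemoFeatureArgs allEnabled() {
    DemoFeatureArgs args;
#define DEMO_FEATURE(NAME, LOWER_NAME) args.LOWER_NAME = true;
    FOR_EACH_DEMO_FEATURE(DEMO_FEATURE)
#undef DEMO_FEATURE
    return args;
  }
};

int main() {
  DemoFeatureArgs args = DemoFeatureArgs::allEnabled();
  assert(args.gc_demo && args.tail_calls_demo);
  return 0;
}
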
diff --git a/js/src/wasm/WasmFeatures.cpp b/js/src/wasm/WasmFeatures.cpp
index 05804353ae..24ab1c7d51 100644
--- a/js/src/wasm/WasmFeatures.cpp
+++ b/js/src/wasm/WasmFeatures.cpp
@@ -21,6 +21,7 @@
#include "jit/AtomicOperations.h"
#include "jit/JitContext.h"
#include "jit/JitOptions.h"
+#include "js/Prefs.h"
#include "util/StringBuffer.h"
#include "vm/JSContext.h"
#include "vm/Realm.h"
@@ -56,13 +57,13 @@ static inline bool WasmThreadsFlag(JSContext* cx) {
JS_FOR_WASM_FEATURES(WASM_FEATURE);
#undef WASM_FEATURE
-#define WASM_FEATURE(NAME, LOWER_NAME, STAGE, COMPILE_PRED, COMPILER_PRED, \
- FLAG_PRED, FLAG_FORCE_ON, ...) \
- static inline bool Wasm##NAME##Flag(JSContext* cx) { \
- if (!(COMPILE_PRED)) { \
- return false; \
- } \
- return ((FLAG_PRED) && cx->options().wasm##NAME()) || (FLAG_FORCE_ON); \
+#define WASM_FEATURE(NAME, LOWER_NAME, COMPILE_PRED, COMPILER_PRED, FLAG_PRED, \
+ FLAG_FORCE_ON, FLAG_FUZZ_ON, PREF) \
+ static inline bool Wasm##NAME##Flag(JSContext* cx) { \
+ if (!(COMPILE_PRED)) { \
+ return false; \
+ } \
+ return ((FLAG_PRED) && JS::Prefs::wasm_##PREF()) || (FLAG_FORCE_ON); \
}
JS_FOR_WASM_FEATURES(WASM_FEATURE);
#undef WASM_FEATURE
@@ -219,10 +220,9 @@ bool wasm::AnyCompilerAvailable(JSContext* cx) {
// compiler that can support the feature. Subsequent compiler selection must
// ensure that only compilers that actually support the feature are used.
-#define WASM_FEATURE(NAME, LOWER_NAME, STAGE, COMPILE_PRED, COMPILER_PRED, \
- ...) \
- bool wasm::NAME##Available(JSContext* cx) { \
- return Wasm##NAME##Flag(cx) && (COMPILER_PRED); \
+#define WASM_FEATURE(NAME, LOWER_NAME, COMPILE_PRED, COMPILER_PRED, ...) \
+ bool wasm::NAME##Available(JSContext* cx) { \
+ return Wasm##NAME##Flag(cx) && (COMPILER_PRED); \
}
JS_FOR_WASM_FEATURES(WASM_FEATURE)
#undef WASM_FEATURE
diff --git a/js/src/wasm/WasmFrameIter.cpp b/js/src/wasm/WasmFrameIter.cpp
index 171ac285be..90555720da 100644
--- a/js/src/wasm/WasmFrameIter.cpp
+++ b/js/src/wasm/WasmFrameIter.cpp
@@ -1882,3 +1882,16 @@ const char* ProfilingFrameIterator::label() const {
MOZ_CRASH("bad code range kind");
}
+
+ProfilingFrameIterator::Category ProfilingFrameIterator::category() const {
+ if (!exitReason_.isFixed() || !exitReason_.isNone() ||
+ !codeRange_->isFunction()) {
+ return Category::Other;
+ }
+
+ Tier tier;
+ if (!code_->lookupFunctionTier(codeRange_, &tier)) {
+ return Category::Other;
+ }
+ return tier == Tier::Optimized ? Category::Ion : Category::Baseline;
+}
diff --git a/js/src/wasm/WasmFrameIter.h b/js/src/wasm/WasmFrameIter.h
index 014f5de0ef..59590b1b2a 100644
--- a/js/src/wasm/WasmFrameIter.h
+++ b/js/src/wasm/WasmFrameIter.h
@@ -196,6 +196,12 @@ class ProfilingFrameIterator {
ProfilingFrameIterator(const jit::JitActivation& activation,
const RegisterState& state);
+ enum Category {
+ Baseline,
+ Ion,
+ Other,
+ };
+
void operator++();
bool done() const {
@@ -213,6 +219,8 @@ class ProfilingFrameIterator {
}
const char* label() const;
+ Category category() const;
+
void* endStackAddress() const { return endStackAddress_; }
};
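
category() reduces to a tier lookup: find which tier's code-range table contains the frame's CodeRange, then map Optimized to Ion and Baseline to Baseline, with Other as the fallback. The sketch below uses simplified stand-in types, not the real wasm::Code interface.

// Sketch of the tier lookup behind category(): find which tier's code-range
// table contains a given CodeRange pointer. Simplified stand-in types only.
#include <cassert>
#include <vector>

enum class Tier { Baseline, Optimized };
enum class Category { Baseline, Ion, Other };

struct CodeRange {
  int dummy = 0;
};

struct TierMetadata {
  Tier tier;
  std::vector<CodeRange> codeRanges;
};

static bool lookupFunctionTier(const std::vector<TierMetadata>& tiers,
                               const CodeRange* range, Tier* tier) {
  for (const TierMetadata& metadata : tiers) {
    const CodeRange* begin = metadata.codeRanges.data();
    const CodeRange* end = begin + metadata.codeRanges.size();
    if (range >= begin && range < end) {
      *tier = metadata.tier;
      return true;
    }
  }
  return false;
}

static Category categorize(const std::vector<TierMetadata>& tiers,
                           const CodeRange* range) {
  Tier tier;
  if (!lookupFunctionTier(tiers, range, &tier)) {
    return Category::Other;
  }
  return tier == Tier::Optimized ? Category::Ion : Category::Baseline;
}

int main() {
  std::vector<TierMetadata> tiers(2);
  tiers[0] = {Tier::Baseline, std::vector<CodeRange>(4)};
  tiers[1] = {Tier::Optimized, std::vector<CodeRange>(4)};
  assert(categorize(tiers, &tiers[1].codeRanges[2]) == Category::Ion);
  assert(categorize(tiers, &tiers[0].codeRanges[0]) == Category::Baseline);
  return 0;
}
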
diff --git a/js/src/wasm/WasmGcObject-inl.h b/js/src/wasm/WasmGcObject-inl.h
index 17800f41f1..4714aafc06 100644
--- a/js/src/wasm/WasmGcObject-inl.h
+++ b/js/src/wasm/WasmGcObject-inl.h
@@ -342,8 +342,7 @@ MOZ_ALWAYS_INLINE WasmArrayObject* WasmArrayObject::createArray(
calcStorageBytesChecked(typeDefData->arrayElemSize, numElements);
if (!storageBytes.isValid() ||
storageBytes.value() > uint32_t(wasm::MaxArrayPayloadBytes)) {
- JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
- JSMSG_WASM_ARRAY_IMP_LIMIT);
+ wasm::ReportTrapError(cx, JSMSG_WASM_ARRAY_IMP_LIMIT);
return nullptr;
}
diff --git a/js/src/wasm/WasmGenerator.cpp b/js/src/wasm/WasmGenerator.cpp
index a470626de4..338812e1d6 100644
--- a/js/src/wasm/WasmGenerator.cpp
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -440,7 +440,7 @@ static bool InRange(uint32_t caller, uint32_t callee) {
using OffsetMap =
HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>;
using TrapMaybeOffsetArray =
- EnumeratedArray<Trap, Trap::Limit, Maybe<uint32_t>>;
+ EnumeratedArray<Trap, Maybe<uint32_t>, size_t(Trap::Limit)>;
bool ModuleGenerator::linkCallSites() {
AutoCreatedBy acb(masm_, "linkCallSites");
diff --git a/js/src/wasm/WasmInitExpr.cpp b/js/src/wasm/WasmInitExpr.cpp
index e8c49cbc31..581eca2f62 100644
--- a/js/src/wasm/WasmInitExpr.cpp
+++ b/js/src/wasm/WasmInitExpr.cpp
@@ -74,20 +74,16 @@ class MOZ_STACK_CLASS InitExprInterpreter {
return stack.append(Val(RefType::func(), ref));
}
-#if defined(ENABLE_WASM_EXTENDED_CONST) || defined(ENABLE_WASM_GC)
int32_t popI32() {
uint32_t result = stack.back().i32();
stack.popBack();
return int32_t(result);
}
-#endif
-#ifdef ENABLE_WASM_EXTENDED_CONST
int64_t popI64() {
uint64_t result = stack.back().i64();
stack.popBack();
return int64_t(result);
}
-#endif
bool evalGlobalGet(JSContext* cx, uint32_t index) {
RootedVal val(cx);
@@ -107,7 +103,6 @@ class MOZ_STACK_CLASS InitExprInterpreter {
return pushFuncRef(func);
}
bool evalRefNull(RefType type) { return pushRef(type, AnyRef::null()); }
-#ifdef ENABLE_WASM_EXTENDED_CONST
bool evalI32Add() {
uint32_t b = popI32();
uint32_t a = popI32();
@@ -138,7 +133,6 @@ class MOZ_STACK_CLASS InitExprInterpreter {
uint64_t a = popI64();
return pushI64(a * b);
}
-#endif // ENABLE_WASM_EXTENDED_CONST
#ifdef ENABLE_WASM_GC
bool evalStructNew(JSContext* cx, uint32_t typeIndex) {
const TypeDef& typeDef = instance().metadata().types->type(typeIndex);
@@ -320,7 +314,6 @@ bool InitExprInterpreter::evaluate(JSContext* cx, Decoder& d) {
}
CHECK(evalRefNull(type));
}
-#ifdef ENABLE_WASM_EXTENDED_CONST
case uint16_t(Op::I32Add): {
if (!d.readBinary()) {
return false;
@@ -357,7 +350,6 @@ bool InitExprInterpreter::evaluate(JSContext* cx, Decoder& d) {
}
CHECK(evalI64Mul());
}
-#endif
#ifdef ENABLE_WASM_GC
case uint16_t(Op::GcPrefix): {
switch (op.b1) {
@@ -449,9 +441,7 @@ bool wasm::DecodeConstantExpression(Decoder& d, ModuleEnvironment* env,
return false;
}
-#if defined(ENABLE_WASM_EXTENDED_CONST) || defined(ENABLE_WASM_GC)
Nothing nothing;
-#endif
NothingVector nothings{};
ResultType unusedType;
@@ -542,13 +532,9 @@ bool wasm::DecodeConstantExpression(Decoder& d, ModuleEnvironment* env,
*literal = Some(LitVal(ValType(type)));
break;
}
-#ifdef ENABLE_WASM_EXTENDED_CONST
case uint16_t(Op::I32Add):
case uint16_t(Op::I32Sub):
case uint16_t(Op::I32Mul): {
- if (!env->extendedConstEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
if (!iter.readBinary(ValType::I32, &nothing, &nothing)) {
return false;
}
@@ -558,16 +544,12 @@ bool wasm::DecodeConstantExpression(Decoder& d, ModuleEnvironment* env,
case uint16_t(Op::I64Add):
case uint16_t(Op::I64Sub):
case uint16_t(Op::I64Mul): {
- if (!env->extendedConstEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
if (!iter.readBinary(ValType::I64, &nothing, &nothing)) {
return false;
}
*literal = Nothing();
break;
}
-#endif
#ifdef ENABLE_WASM_GC
case uint16_t(Op::GcPrefix): {
if (!env->gcEnabled()) {
@@ -663,6 +645,7 @@ bool InitExpr::decodeAndValidate(Decoder& d, ModuleEnvironment* env,
expr->type_ = expected;
if (literal) {
+ literal->unsafeSetType(expected);
expr->kind_ = InitExprKind::Literal;
expr->literal_ = *literal;
return true;
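
The InitExprInterpreter changes fold the extended-const arithmetic into the always-on path; the evaluator itself is a small stack machine. The sketch below shows that model for i32.add only, as an illustration rather than the interpreter's real code.

// Tiny stack-machine sketch of how a constant expression like
// (i32.add (i32.const 2) (i32.const 40)) evaluates; an illustration of the
// InitExprInterpreter model above, not its real code.
#include <cassert>
#include <cstdint>
#include <vector>

class ConstExprStack {
  std::vector<uint32_t> stack_;

 public:
  void pushI32(uint32_t v) { stack_.push_back(v); }
  uint32_t popI32() {
    uint32_t v = stack_.back();
    stack_.pop_back();
    return v;
  }
  void evalI32Add() {
    uint32_t b = popI32();
    uint32_t a = popI32();
    pushI32(a + b);
  }
};

int main() {
  ConstExprStack stack;
  stack.pushI32(2);
  stack.pushI32(40);
  stack.evalI32Add();
  assert(stack.popI32() == 42);
  return 0;
}
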
diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
index bf25b58c14..d025c02c16 100644
--- a/js/src/wasm/WasmInstance.cpp
+++ b/js/src/wasm/WasmInstance.cpp
@@ -1531,8 +1531,10 @@ static bool ArrayCopyFromData(JSContext* cx, Handle<WasmArrayObject*> arrayObj,
// Because `numBytesToCopy` is an in-range `CheckedUint32`, the cast to
// `size_t` is safe even on a 32-bit target.
- memcpy(arrayObj->data_, &seg->bytes[segByteOffset],
- size_t(numBytesToCopy.value()));
+ if (numElements != 0) {
+ memcpy(arrayObj->data_, &seg->bytes[segByteOffset],
+ size_t(numBytesToCopy.value()));
+ }
return true;
}
@@ -1948,35 +1950,42 @@ static bool ArrayCopyFromElem(JSContext* cx, Handle<WasmArrayObject*> arrayObj,
// take into account the enclosing recursion group of the type. This is
// temporary until builtin module functions can specify a precise array type
// for params/results.
-static WasmArrayObject* CastToI16Array(HandleAnyRef ref, bool needMutable) {
- if (!ref.isJSObject()) {
- return nullptr;
- }
+template <bool isMutable>
+static WasmArrayObject* UncheckedCastToArrayI16(HandleAnyRef ref) {
JSObject& object = ref.toJSObject();
- if (!object.is<WasmArrayObject>()) {
- return nullptr;
- }
WasmArrayObject& array = object.as<WasmArrayObject>();
- const ArrayType& type = array.typeDef().arrayType();
- if (type.elementType_ != StorageType::I16) {
- return nullptr;
+ DebugOnly<const ArrayType*> type(&array.typeDef().arrayType());
+ MOZ_ASSERT(type->elementType_ == StorageType::I16);
+ MOZ_ASSERT(type->isMutable_ == isMutable);
+ return &array;
+}
+
+/* static */
+int32_t Instance::stringTest(Instance* instance, void* stringArg) {
+ AnyRef string = AnyRef::fromCompiledCode(stringArg);
+ if (string.isNull() || !string.isJSString()) {
+ return 0;
}
- if (needMutable && !type.isMutable_) {
+ return 1;
+}
+
+/* static */
+void* Instance::stringCast(Instance* instance, void* stringArg) {
+ AnyRef string = AnyRef::fromCompiledCode(stringArg);
+ if (string.isNull() || !string.isJSString()) {
+ ReportTrapError(instance->cx(), JSMSG_WASM_BAD_CAST);
return nullptr;
}
- return &array;
+ return string.forCompiledCode();
}
/* static */
-void* Instance::stringFromWTF16Array(Instance* instance, void* arrayArg,
- uint32_t arrayStart, uint32_t arrayCount) {
+void* Instance::stringFromCharCodeArray(Instance* instance, void* arrayArg,
+ uint32_t arrayStart,
+ uint32_t arrayCount) {
JSContext* cx = instance->cx();
RootedAnyRef arrayRef(cx, AnyRef::fromCompiledCode(arrayArg));
- Rooted<WasmArrayObject*> array(cx);
- if (!(array = CastToI16Array(arrayRef, false))) {
- ReportTrapError(cx, JSMSG_WASM_BAD_CAST);
- return nullptr;
- }
+ Rooted<WasmArrayObject*> array(cx, UncheckedCastToArrayI16<true>(arrayRef));
CheckedUint32 lastIndexPlus1 =
CheckedUint32(arrayStart) + CheckedUint32(arrayCount);
@@ -1997,8 +2006,8 @@ void* Instance::stringFromWTF16Array(Instance* instance, void* arrayArg,
}
/* static */
-int32_t Instance::stringToWTF16Array(Instance* instance, void* stringArg,
- void* arrayArg, uint32_t arrayStart) {
+int32_t Instance::stringIntoCharCodeArray(Instance* instance, void* stringArg,
+ void* arrayArg, uint32_t arrayStart) {
JSContext* cx = instance->cx();
AnyRef stringRef = AnyRef::fromCompiledCode(stringArg);
if (!stringRef.isJSString()) {
@@ -2009,11 +2018,7 @@ int32_t Instance::stringToWTF16Array(Instance* instance, void* stringArg,
size_t stringLength = string->length();
RootedAnyRef arrayRef(cx, AnyRef::fromCompiledCode(arrayArg));
- Rooted<WasmArrayObject*> array(cx);
- if (!(array = CastToI16Array(arrayRef, true))) {
- ReportTrapError(cx, JSMSG_WASM_BAD_CAST);
- return -1;
- }
+ Rooted<WasmArrayObject*> array(cx, UncheckedCastToArrayI16<true>(arrayRef));
CheckedUint32 lastIndexPlus1 = CheckedUint32(arrayStart) + stringLength;
if (!lastIndexPlus1.isValid() ||
@@ -2120,8 +2125,8 @@ int32_t Instance::stringLength(Instance* instance, void* stringArg) {
return (int32_t)stringRef.toJSString()->length();
}
-void* Instance::stringConcatenate(Instance* instance, void* firstStringArg,
- void* secondStringArg) {
+void* Instance::stringConcat(Instance* instance, void* firstStringArg,
+ void* secondStringArg) {
JSContext* cx = instance->cx();
AnyRef firstStringRef = AnyRef::fromCompiledCode(firstStringArg);
@@ -2444,11 +2449,10 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports,
if (global.isIndirect()) {
// Initialize the cell
- wasm::GCPtrVal& cell = globalObjs[i]->val();
- cell = val.get();
+ globalObjs[i]->setVal(val);
+
// Link to the cell
- void* address = (void*)&cell.get().cell();
- *(void**)globalAddr = address;
+ *(void**)globalAddr = globalObjs[i]->addressOfCell();
} else {
val.get().writeToHeapLocation(globalAddr);
}
@@ -2539,6 +2543,7 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports,
size_t numWords = std::max<size_t>((numFuncs + 31) / 32, 1);
debugFilter_ = (uint32_t*)js_calloc(numWords, sizeof(uint32_t));
if (!debugFilter_) {
+ ReportOutOfMemory(cx);
return false;
}
}
@@ -2552,6 +2557,7 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports,
// Take references to the passive data segments
if (!passiveDataSegments_.resize(dataSegments.length())) {
+ ReportOutOfMemory(cx);
return false;
}
for (size_t i = 0; i < dataSegments.length(); i++) {
@@ -2563,6 +2569,7 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports,
// Create InstanceElemSegments for any passive element segments, since these
// are the ones available at runtime.
if (!passiveElemSegments_.resize(elemSegments.length())) {
+ ReportOutOfMemory(cx);
return false;
}
for (size_t i = 0; i < elemSegments.length(); i++) {
@@ -2571,6 +2578,7 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports,
passiveElemSegments_[i] = InstanceElemSegment();
InstanceElemSegment& instanceSeg = passiveElemSegments_[i];
if (!instanceSeg.reserve(seg.numElements())) {
+ ReportOutOfMemory(cx);
return false;
}
diff --git a/js/src/wasm/WasmInstance.h b/js/src/wasm/WasmInstance.h
index dcc586b14f..074c6212df 100644
--- a/js/src/wasm/WasmInstance.h
+++ b/js/src/wasm/WasmInstance.h
@@ -571,10 +571,13 @@ class alignas(16) Instance {
static int32_t intrI8VecMul(Instance* instance, uint32_t dest, uint32_t src1,
uint32_t src2, uint32_t len, uint8_t* memBase);
- static void* stringFromWTF16Array(Instance* instance, void* arrayArg,
- uint32_t arrayStart, uint32_t arrayCount);
- static int32_t stringToWTF16Array(Instance* instance, void* stringArg,
- void* arrayArg, uint32_t start);
+ static int32_t stringTest(Instance* instance, void* stringArg);
+ static void* stringCast(Instance* instance, void* stringArg);
+ static void* stringFromCharCodeArray(Instance* instance, void* arrayArg,
+ uint32_t arrayStart,
+ uint32_t arrayCount);
+ static int32_t stringIntoCharCodeArray(Instance* instance, void* stringArg,
+ void* arrayArg, uint32_t arrayStart);
static void* stringFromCharCode(Instance* instance, uint32_t charCode);
static void* stringFromCodePoint(Instance* instance, uint32_t codePoint);
static int32_t stringCharCodeAt(Instance* instance, void* stringArg,
@@ -582,8 +585,8 @@ class alignas(16) Instance {
static int32_t stringCodePointAt(Instance* instance, void* stringArg,
uint32_t index);
static int32_t stringLength(Instance* instance, void* stringArg);
- static void* stringConcatenate(Instance* instance, void* firstStringArg,
- void* secondStringArg);
+ static void* stringConcat(Instance* instance, void* firstStringArg,
+ void* secondStringArg);
static void* stringSubstring(Instance* instance, void* stringArg,
int32_t startIndex, int32_t endIndex);
static int32_t stringEquals(Instance* instance, void* firstStringArg,
diff --git a/js/src/wasm/WasmIonCompile.cpp b/js/src/wasm/WasmIonCompile.cpp
index 6fbfeb3809..0568a95804 100644
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -900,7 +900,7 @@ class FunctionCompiler {
return true;
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
[[nodiscard]] bool brOnNull(uint32_t relativeDepth, const DefVector& values,
const ResultType& type, MDefinition* condition) {
if (inDeadCode()) {
@@ -963,7 +963,7 @@ class FunctionCompiler {
return true;
}
-#endif // ENABLE_WASM_FUNCTION_REFERENCES
+#endif // ENABLE_WASM_GC
#ifdef ENABLE_WASM_GC
MDefinition* refI31(MDefinition* input) {
@@ -2006,10 +2006,10 @@ class FunctionCompiler {
MOZ_CRASH("Unknown ABIArg kind.");
}
- template <typename SpanT>
- [[nodiscard]] bool passArgs(const DefVector& argDefs, SpanT types,
+ template <typename VecT>
+ [[nodiscard]] bool passArgs(const DefVector& argDefs, const VecT& types,
CallCompileState* call) {
- MOZ_ASSERT(argDefs.length() == types.size());
+ MOZ_ASSERT(argDefs.length() == types.length());
for (uint32_t i = 0; i < argDefs.length(); i++) {
MDefinition* def = argDefs[i];
ValType type = types[i];
@@ -2447,7 +2447,7 @@ class FunctionCompiler {
return collectUnaryCallResult(builtin.retType, def);
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
[[nodiscard]] bool callRef(const FuncType& funcType, MDefinition* ref,
uint32_t lineOrBytecode,
const CallCompileState& call, DefVector* results) {
@@ -2489,7 +2489,7 @@ class FunctionCompiler {
# endif // ENABLE_WASM_TAIL_CALLS
-#endif // ENABLE_WASM_FUNCTION_REFERENCES
+#endif // ENABLE_WASM_GC
/*********************************************** Control flow generation */
@@ -2788,7 +2788,8 @@ class FunctionCompiler {
// patches around.
for (uint32_t depth = 0; depth < iter().controlStackDepth(); depth++) {
LabelKind kind = iter().controlKind(depth);
- if (kind != LabelKind::Try && kind != LabelKind::Body) {
+ if (kind != LabelKind::Try && kind != LabelKind::TryTable &&
+ kind != LabelKind::Body) {
continue;
}
Control& control = iter().controlItem(depth);
@@ -5440,7 +5441,7 @@ static bool EmitReturnCallIndirect(FunctionCompiler& f) {
}
#endif
-#if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_FUNCTION_REFERENCES)
+#if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_GC)
static bool EmitReturnCallRef(FunctionCompiler& f) {
uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
@@ -7090,7 +7091,11 @@ static bool EmitLoadSplatSimd128(FunctionCompiler& f, Scalar::Type viewType,
return false;
}
- f.iter().setResult(f.loadSplatSimd128(viewType, addr, splatOp));
+ auto* ins = f.loadSplatSimd128(viewType, addr, splatOp);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+ f.iter().setResult(ins);
return true;
}
@@ -7100,7 +7105,11 @@ static bool EmitLoadExtendSimd128(FunctionCompiler& f, wasm::SimdOp op) {
return false;
}
- f.iter().setResult(f.loadExtendSimd128(addr, op));
+ auto* ins = f.loadExtendSimd128(addr, op);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+ f.iter().setResult(ins);
return true;
}
@@ -7111,7 +7120,11 @@ static bool EmitLoadZeroSimd128(FunctionCompiler& f, Scalar::Type viewType,
return false;
}
- f.iter().setResult(f.loadZeroSimd128(viewType, numBytes, addr));
+ auto* ins = f.loadZeroSimd128(viewType, numBytes, addr);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+ f.iter().setResult(ins);
return true;
}
@@ -7123,7 +7136,11 @@ static bool EmitLoadLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
return false;
}
- f.iter().setResult(f.loadLaneSimd128(laneSize, addr, laneIndex, src));
+ auto* ins = f.loadLaneSimd128(laneSize, addr, laneIndex, src);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+ f.iter().setResult(ins);
return true;
}
@@ -7141,7 +7158,7 @@ static bool EmitStoreLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
#endif // ENABLE_WASM_SIMD
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
static bool EmitRefAsNonNull(FunctionCompiler& f) {
MDefinition* ref;
if (!f.iter().readRefAsNonNull(&ref)) {
@@ -7204,7 +7221,7 @@ static bool EmitCallRef(FunctionCompiler& f) {
return true;
}
-#endif // ENABLE_WASM_FUNCTION_REFERENCES
+#endif // ENABLE_WASM_GC
#ifdef ENABLE_WASM_GC
@@ -7917,18 +7934,18 @@ static bool EmitCallBuiltinModuleFunc(FunctionCompiler& f) {
}
uint32_t bytecodeOffset = f.readBytecodeOffset();
- const SymbolicAddressSignature& callee = builtinModuleFunc->signature;
+ const SymbolicAddressSignature& callee = *builtinModuleFunc->sig();
CallCompileState args;
if (!f.passInstance(callee.argTypes[0], &args)) {
return false;
}
- if (!f.passArgs(params, builtinModuleFunc->params, &args)) {
+ if (!f.passArgs(params, builtinModuleFunc->funcType()->args(), &args)) {
return false;
}
- if (builtinModuleFunc->usesMemory) {
+ if (builtinModuleFunc->usesMemory()) {
MDefinition* memoryBase = f.memoryBase(0);
if (!f.passArg(memoryBase, MIRType::Pointer, &args)) {
return false;
@@ -7939,7 +7956,7 @@ static bool EmitCallBuiltinModuleFunc(FunctionCompiler& f) {
return false;
}
- bool hasResult = builtinModuleFunc->result.isSome();
+ bool hasResult = !builtinModuleFunc->funcType()->results().empty();
MDefinition* result = nullptr;
MDefinition** resultOutParam = hasResult ? &result : nullptr;
if (!f.builtinInstanceMethodCall(callee, bytecodeOffset, args,
@@ -7996,37 +8013,19 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
case uint16_t(Op::Else):
CHECK(EmitElse(f));
case uint16_t(Op::Try):
- if (!f.moduleEnv().exceptionsEnabled()) {
- return f.iter().unrecognizedOpcode(&op);
- }
CHECK(EmitTry(f));
case uint16_t(Op::Catch):
- if (!f.moduleEnv().exceptionsEnabled()) {
- return f.iter().unrecognizedOpcode(&op);
- }
CHECK(EmitCatch(f));
case uint16_t(Op::CatchAll):
- if (!f.moduleEnv().exceptionsEnabled()) {
- return f.iter().unrecognizedOpcode(&op);
- }
CHECK(EmitCatchAll(f));
case uint16_t(Op::Delegate):
- if (!f.moduleEnv().exceptionsEnabled()) {
- return f.iter().unrecognizedOpcode(&op);
- }
if (!EmitDelegate(f)) {
return false;
}
break;
case uint16_t(Op::Throw):
- if (!f.moduleEnv().exceptionsEnabled()) {
- return f.iter().unrecognizedOpcode(&op);
- }
CHECK(EmitThrow(f));
case uint16_t(Op::Rethrow):
- if (!f.moduleEnv().exceptionsEnabled()) {
- return f.iter().unrecognizedOpcode(&op);
- }
CHECK(EmitRethrow(f));
case uint16_t(Op::ThrowRef):
if (!f.moduleEnv().exnrefEnabled()) {
@@ -8474,36 +8473,35 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
}
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case uint16_t(Op::RefAsNonNull):
- if (!f.moduleEnv().functionReferencesEnabled()) {
+ if (!f.moduleEnv().gcEnabled()) {
return f.iter().unrecognizedOpcode(&op);
}
CHECK(EmitRefAsNonNull(f));
case uint16_t(Op::BrOnNull): {
- if (!f.moduleEnv().functionReferencesEnabled()) {
+ if (!f.moduleEnv().gcEnabled()) {
return f.iter().unrecognizedOpcode(&op);
}
CHECK(EmitBrOnNull(f));
}
case uint16_t(Op::BrOnNonNull): {
- if (!f.moduleEnv().functionReferencesEnabled()) {
+ if (!f.moduleEnv().gcEnabled()) {
return f.iter().unrecognizedOpcode(&op);
}
CHECK(EmitBrOnNonNull(f));
}
case uint16_t(Op::CallRef): {
- if (!f.moduleEnv().functionReferencesEnabled()) {
+ if (!f.moduleEnv().gcEnabled()) {
return f.iter().unrecognizedOpcode(&op);
}
CHECK(EmitCallRef(f));
}
#endif
-#if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_FUNCTION_REFERENCES)
+#if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_GC)
case uint16_t(Op::ReturnCallRef): {
- if (!f.moduleEnv().functionReferencesEnabled() ||
- !f.moduleEnv().tailCallsEnabled()) {
+ if (!f.moduleEnv().gcEnabled() || !f.moduleEnv().tailCallsEnabled()) {
return f.iter().unrecognizedOpcode(&op);
}
CHECK(EmitReturnCallRef(f));
@@ -9025,114 +9023,91 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint32));
case uint32_t(ThreadOp::I32AtomicAdd):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
- AtomicFetchAddOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
- AtomicFetchAddOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicOp::Add));
case uint32_t(ThreadOp::I32AtomicAdd8U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
- AtomicFetchAddOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicOp::Add));
case uint32_t(ThreadOp::I32AtomicAdd16U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
- AtomicFetchAddOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I32, Scalar::Uint16, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd8U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
- AtomicFetchAddOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd16U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
- AtomicFetchAddOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint16, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd32U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
- AtomicFetchAddOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint32, AtomicOp::Add));
case uint32_t(ThreadOp::I32AtomicSub):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
- AtomicFetchSubOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
- AtomicFetchSubOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicOp::Sub));
case uint32_t(ThreadOp::I32AtomicSub8U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
- AtomicFetchSubOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicOp::Sub));
case uint32_t(ThreadOp::I32AtomicSub16U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
- AtomicFetchSubOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I32, Scalar::Uint16, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub8U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
- AtomicFetchSubOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub16U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
- AtomicFetchSubOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint16, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub32U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
- AtomicFetchSubOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint32, AtomicOp::Sub));
case uint32_t(ThreadOp::I32AtomicAnd):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
- AtomicFetchAndOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
- AtomicFetchAndOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicOp::And));
case uint32_t(ThreadOp::I32AtomicAnd8U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
- AtomicFetchAndOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicOp::And));
case uint32_t(ThreadOp::I32AtomicAnd16U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
- AtomicFetchAndOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I32, Scalar::Uint16, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd8U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
- AtomicFetchAndOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd16U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
- AtomicFetchAndOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint16, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd32U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
- AtomicFetchAndOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint32, AtomicOp::And));
case uint32_t(ThreadOp::I32AtomicOr):
- CHECK(
- EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr):
- CHECK(
- EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicOp::Or));
case uint32_t(ThreadOp::I32AtomicOr8U):
- CHECK(
- EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicOp::Or));
case uint32_t(ThreadOp::I32AtomicOr16U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
- AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr8U):
- CHECK(
- EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr16U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
- AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr32U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
- AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32, AtomicOp::Or));
case uint32_t(ThreadOp::I32AtomicXor):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
- AtomicFetchXorOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
- AtomicFetchXorOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicOp::Xor));
case uint32_t(ThreadOp::I32AtomicXor8U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
- AtomicFetchXorOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicOp::Xor));
case uint32_t(ThreadOp::I32AtomicXor16U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
- AtomicFetchXorOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I32, Scalar::Uint16, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor8U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
- AtomicFetchXorOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor16U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
- AtomicFetchXorOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint16, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor32U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
- AtomicFetchXorOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint32, AtomicOp::Xor));
case uint32_t(ThreadOp::I32AtomicXchg):
CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Int32));
@@ -9267,6 +9242,41 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
#undef CHECK
}
+static bool IonBuildMIR(Decoder& d, const ModuleEnvironment& moduleEnv,
+ const FuncCompileInput& func,
+ const ValTypeVector& locals, MIRGenerator& mir,
+ TryNoteVector& tryNotes, FeatureUsage* observedFeatures,
+ UniqueChars* error) {
+ // Initialize MIR global information used for optimization
+ if (moduleEnv.numMemories() > 0) {
+ if (moduleEnv.memories[0].indexType() == IndexType::I32) {
+ mir.initMinWasmMemory0Length(moduleEnv.memories[0].initialLength32());
+ } else {
+ mir.initMinWasmMemory0Length(moduleEnv.memories[0].initialLength64());
+ }
+ }
+
+ // Build MIR graph
+ FunctionCompiler f(moduleEnv, d, func, locals, mir, tryNotes);
+ if (!f.init()) {
+ return false;
+ }
+
+ if (!f.startBlock()) {
+ return false;
+ }
+
+ if (!EmitBodyExprs(f)) {
+ return false;
+ }
+
+ f.finish();
+
+ *observedFeatures = f.featureUsage();
+
+ return true;
+}
+
bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
const CompilerEnvironment& compilerEnv,
LifoAlloc& lifo,
@@ -9307,52 +9317,28 @@ bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
Decoder d(func.begin, func.end, func.lineOrBytecode, error);
// Build the local types vector.
-
- const FuncType& funcType = *moduleEnv.funcs[func.index].type;
ValTypeVector locals;
- if (!locals.appendAll(funcType.args())) {
- return false;
- }
- if (!DecodeLocalEntries(d, *moduleEnv.types, moduleEnv.features, &locals)) {
+ if (!DecodeLocalEntriesWithParams(d, moduleEnv, func.index, &locals)) {
return false;
}
// Set up for Ion compilation.
-
const JitCompileOptions options;
MIRGraph graph(&alloc);
CompileInfo compileInfo(locals.length());
MIRGenerator mir(nullptr, options, &alloc, &graph, &compileInfo,
IonOptimizations.get(OptimizationLevel::Wasm));
- if (moduleEnv.numMemories() > 0) {
- if (moduleEnv.memories[0].indexType() == IndexType::I32) {
- mir.initMinWasmMemory0Length(moduleEnv.memories[0].initialLength32());
- } else {
- mir.initMinWasmMemory0Length(moduleEnv.memories[0].initialLength64());
- }
- }
// Build MIR graph
- {
- FunctionCompiler f(moduleEnv, d, func, locals, mir, masm.tryNotes());
- if (!f.init()) {
- return false;
- }
-
- if (!f.startBlock()) {
- return false;
- }
-
- if (!EmitBodyExprs(f)) {
- return false;
- }
-
- f.finish();
-
- // Record observed feature usage
- code->featureUsage |= f.featureUsage();
+ FeatureUsage observedFeatures;
+ if (!IonBuildMIR(d, moduleEnv, func, locals, mir, masm.tryNotes(),
+ &observedFeatures, error)) {
+ return false;
}
+ // Record observed feature usage
+ code->featureUsage |= observedFeatures;
+
// Compile MIR graph
{
jit::SpewBeginWasmFunction(&mir, func.index);
@@ -9373,7 +9359,7 @@ bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
BytecodeOffset prologueTrapOffset(func.lineOrBytecode);
FuncOffsets offsets;
- ArgTypeVector args(funcType);
+ ArgTypeVector args(*moduleEnv.funcs[func.index].type);
if (!codegen.generateWasm(CallIndirectId::forFunc(moduleEnv, func.index),
prologueTrapOffset, args, trapExitLayout,
trapExitLayoutNumWords, &offsets,
@@ -9407,6 +9393,66 @@ bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
return code->swap(masm);
}
+bool wasm::IonDumpFunction(const ModuleEnvironment& moduleEnv,
+ const FuncCompileInput& func,
+ IonDumpContents contents, GenericPrinter& out,
+ UniqueChars* error) {
+ LifoAlloc lifo(TempAllocator::PreferredLifoChunkSize);
+ TempAllocator alloc(&lifo);
+ JitContext jitContext;
+ Decoder d(func.begin, func.end, func.lineOrBytecode, error);
+
+ // Decode the locals.
+ ValTypeVector locals;
+ if (!DecodeLocalEntriesWithParams(d, moduleEnv, func.index, &locals)) {
+ return false;
+ }
+
+ // Set up for Ion compilation.
+ const JitCompileOptions options;
+ MIRGraph graph(&alloc);
+ CompileInfo compileInfo(locals.length());
+ MIRGenerator mir(nullptr, options, &alloc, &graph, &compileInfo,
+ IonOptimizations.get(OptimizationLevel::Wasm));
+
+ // Build MIR graph
+ TryNoteVector tryNotes;
+ FeatureUsage observedFeatures;
+ if (!IonBuildMIR(d, moduleEnv, func, locals, mir, tryNotes, &observedFeatures,
+ error)) {
+ return false;
+ }
+
+ if (contents == IonDumpContents::UnoptimizedMIR) {
+ graph.dump(out);
+ return true;
+ }
+
+ // Optimize the MIR graph
+ if (!OptimizeMIR(&mir)) {
+ return false;
+ }
+
+ if (contents == IonDumpContents::OptimizedMIR) {
+ graph.dump(out);
+ return true;
+ }
+
+#ifdef JS_JITSPEW
+ // Generate the LIR graph
+ LIRGraph* lir = GenerateLIR(&mir);
+ if (!lir) {
+ return false;
+ }
+
+ MOZ_ASSERT(contents == IonDumpContents::LIR);
+ lir->dump(out);
+#else
+ out.printf("cannot dump LIR without --enable-jitspew");
+#endif
+ return true;
+}
+
bool js::wasm::IonPlatformSupport() {
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS64) || \
diff --git a/js/src/wasm/WasmIonCompile.h b/js/src/wasm/WasmIonCompile.h
index f583cbad1f..4597c08be1 100644
--- a/js/src/wasm/WasmIonCompile.h
+++ b/js/src/wasm/WasmIonCompile.h
@@ -22,6 +22,9 @@
#include "wasm/WasmGenerator.h"
namespace js {
+
+class GenericPrinter;
+
namespace wasm {
// Return whether IonCompileFunction() can generate code on the current device.
@@ -35,6 +38,11 @@ namespace wasm {
const FuncCompileInputVector& inputs,
CompiledCode* code, UniqueChars* error);
+[[nodiscard]] bool IonDumpFunction(const ModuleEnvironment& moduleEnv,
+ const FuncCompileInput& func,
+ IonDumpContents contents,
+ GenericPrinter& out, UniqueChars* error);
+
} // namespace wasm
} // namespace js
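The IonDumpFunction entry point declared above exposes the Ion pipeline's intermediate output without running codegen. A minimal sketch of a diagnostic caller follows; DumpAllStages is a hypothetical helper, and how moduleEnv, func, out, and error are obtained is assumed (only IonDumpFunction and the IonDumpContents stages come from this patch).

    // Sketch: dump every stage for a single function. Each call re-decodes the
    // body and rebuilds MIR via IonBuildMIR, so the stages are independent.
    static bool DumpAllStages(const js::wasm::ModuleEnvironment& moduleEnv,
                              const js::wasm::FuncCompileInput& func,
                              js::GenericPrinter& out, js::UniqueChars* error) {
      using js::wasm::IonDumpContents;
      for (IonDumpContents contents :
           {IonDumpContents::UnoptimizedMIR, IonDumpContents::OptimizedMIR,
            IonDumpContents::LIR}) {
        if (!js::wasm::IonDumpFunction(moduleEnv, func, contents, out, error)) {
          return false;  // OOM or a decode/compile failure; details in *error.
        }
      }
      return true;  // Note: the LIR stage prints a notice without JS_JITSPEW.
    }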
diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
index 6cc9528415..2eb5e355d9 100644
--- a/js/src/wasm/WasmJS.cpp
+++ b/js/src/wasm/WasmJS.cpp
@@ -768,8 +768,14 @@ static JSObject* GetWasmConstructorPrototype(JSContext* cx,
}
#ifdef ENABLE_WASM_TYPE_REFLECTIONS
-static JSString* UTF8CharsToString(JSContext* cx, const char* chars) {
- return NewStringCopyUTF8Z(cx, JS::ConstUTF8CharsZ(chars, strlen(chars)));
+template <typename T>
+static JSString* TypeToString(JSContext* cx, T type) {
+ UniqueChars chars = ToString(type, nullptr);
+ if (!chars) {
+ return nullptr;
+ }
+ return NewStringCopyUTF8Z(
+ cx, JS::ConstUTF8CharsZ(chars.get(), strlen(chars.get())));
}
[[nodiscard]] static JSObject* ValTypesToArray(JSContext* cx,
@@ -779,8 +785,7 @@ static JSString* UTF8CharsToString(JSContext* cx, const char* chars) {
return nullptr;
}
for (ValType valType : valTypes) {
- RootedString type(cx,
- UTF8CharsToString(cx, ToString(valType, nullptr).get()));
+ RootedString type(cx, TypeToString(cx, valType));
if (!type) {
return nullptr;
}
@@ -809,15 +814,14 @@ static JSObject* FuncTypeToObject(JSContext* cx, const FuncType& type) {
return nullptr;
}
- return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ return NewPlainObjectWithUniqueNames(cx, props);
}
static JSObject* TableTypeToObject(JSContext* cx, RefType type,
uint32_t initial, Maybe<uint32_t> maximum) {
Rooted<IdValueVector> props(cx, IdValueVector(cx));
- RootedString elementType(
- cx, UTF8CharsToString(cx, ToString(type, nullptr).get()));
+ RootedString elementType(cx, TypeToString(cx, type));
if (!elementType || !props.append(IdValuePair(NameToId(cx->names().element),
StringValue(elementType)))) {
ReportOutOfMemory(cx);
@@ -838,7 +842,7 @@ static JSObject* TableTypeToObject(JSContext* cx, RefType type,
return nullptr;
}
- return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ return NewPlainObjectWithUniqueNames(cx, props);
}
static JSObject* MemoryTypeToObject(JSContext* cx, bool shared,
@@ -892,7 +896,7 @@ static JSObject* MemoryTypeToObject(JSContext* cx, bool shared,
return nullptr;
}
- return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ return NewPlainObjectWithUniqueNames(cx, props);
}
static JSObject* GlobalTypeToObject(JSContext* cx, ValType type,
@@ -905,15 +909,14 @@ static JSObject* GlobalTypeToObject(JSContext* cx, ValType type,
return nullptr;
}
- RootedString valueType(cx,
- UTF8CharsToString(cx, ToString(type, nullptr).get()));
+ RootedString valueType(cx, TypeToString(cx, type));
if (!valueType || !props.append(IdValuePair(NameToId(cx->names().value),
StringValue(valueType)))) {
ReportOutOfMemory(cx);
return nullptr;
}
- return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ return NewPlainObjectWithUniqueNames(cx, props);
}
static JSObject* TagTypeToObject(JSContext* cx,
@@ -928,7 +931,7 @@ static JSObject* TagTypeToObject(JSContext* cx,
return nullptr;
}
- return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ return NewPlainObjectWithUniqueNames(cx, props);
}
#endif // ENABLE_WASM_TYPE_REFLECTIONS
@@ -1184,8 +1187,7 @@ bool WasmModuleObject::imports(JSContext* cx, unsigned argc, Value* vp) {
}
#endif // ENABLE_WASM_TYPE_REFLECTIONS
- JSObject* obj =
- NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ JSObject* obj = NewPlainObjectWithUniqueNames(cx, props);
if (!obj) {
return false;
}
@@ -1288,8 +1290,7 @@ bool WasmModuleObject::exports(JSContext* cx, unsigned argc, Value* vp) {
}
#endif // ENABLE_WASM_TYPE_REFLECTIONS
- JSObject* obj =
- NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ JSObject* obj = NewPlainObjectWithUniqueNames(cx, props);
if (!obj) {
return false;
}
@@ -3227,7 +3228,7 @@ void WasmGlobalObject::finalize(JS::GCContext* gcx, JSObject* obj) {
// Release the strong reference to the type definitions this global could
// be referencing.
global->type().Release();
- gcx->delete_(obj, &global->val(), MemoryUse::WasmGlobalCell);
+ gcx->delete_(obj, &global->mutableVal(), MemoryUse::WasmGlobalCell);
}
}
@@ -3253,7 +3254,9 @@ WasmGlobalObject* WasmGlobalObject::create(JSContext* cx, HandleVal value,
// It's simpler to initialize the cell after the object has been created,
// to avoid needing to root the cell before the object creation.
- obj->val() = value.get();
+  // We don't use `setVal` here because it assumes the cell has already
+  // been initialized.
+ obj->mutableVal() = value.get();
// Acquire a strong reference to a type definition this global could
// be referencing.
obj->type().AddRef();
@@ -3384,7 +3387,7 @@ bool WasmGlobalObject::valueSetterImpl(JSContext* cx, const CallArgs& args) {
if (!Val::fromJSValue(cx, global->type(), args.get(0), &val)) {
return false;
}
- global->val() = val.get();
+ global->setVal(val);
args.rval().setUndefined();
return true;
@@ -3417,10 +3420,23 @@ bool WasmGlobalObject::isMutable() const {
ValType WasmGlobalObject::type() const { return val().get().type(); }
-GCPtrVal& WasmGlobalObject::val() const {
+GCPtrVal& WasmGlobalObject::mutableVal() {
+ return *reinterpret_cast<GCPtrVal*>(getReservedSlot(VAL_SLOT).toPrivate());
+}
+
+const GCPtrVal& WasmGlobalObject::val() const {
return *reinterpret_cast<GCPtrVal*>(getReservedSlot(VAL_SLOT).toPrivate());
}
+void WasmGlobalObject::setVal(wasm::HandleVal value) {
+ MOZ_ASSERT(type() == value.get().type());
+ mutableVal() = value;
+}
+
+void* WasmGlobalObject::addressOfCell() const {
+ return (void*)&val().get().cell();
+}
+
#ifdef ENABLE_WASM_TYPE_REFLECTIONS
/* static */
bool WasmGlobalObject::typeImpl(JSContext* cx, const CallArgs& args) {
@@ -4652,6 +4668,10 @@ static bool WebAssembly_validate(JSContext* cx, unsigned argc, Value* vp) {
}
FeatureOptions options;
+ if (!options.init(cx, callArgs.get(1))) {
+ return false;
+ }
+
UniqueChars error;
bool validated = Validate(cx, *bytecode, options, &error);
@@ -5351,15 +5371,13 @@ static bool WebAssemblyClassFinish(JSContext* cx, HandleObject object,
}
}
- if (ExceptionsAvailable(cx)) {
- constexpr NameAndProtoKey exceptionEntries[] = {
- {"Tag", JSProto_WasmTag},
- {"Exception", JSProto_WasmException},
- };
- for (const auto& entry : exceptionEntries) {
- if (!WebAssemblyDefineConstructor(cx, wasm, entry, &ctorValue, &id)) {
- return false;
- }
+ constexpr NameAndProtoKey exceptionEntries[] = {
+ {"Tag", JSProto_WasmTag},
+ {"Exception", JSProto_WasmException},
+ };
+ for (const auto& entry : exceptionEntries) {
+ if (!WebAssemblyDefineConstructor(cx, wasm, entry, &ctorValue, &id)) {
+ return false;
}
}
diff --git a/js/src/wasm/WasmJS.h b/js/src/wasm/WasmJS.h
index 10c71b436b..27d49701a9 100644
--- a/js/src/wasm/WasmJS.h
+++ b/js/src/wasm/WasmJS.h
@@ -167,6 +167,8 @@ class WasmGlobalObject : public NativeObject {
static bool valueSetterImpl(JSContext* cx, const CallArgs& args);
static bool valueSetter(JSContext* cx, unsigned argc, Value* vp);
+ wasm::GCPtrVal& mutableVal();
+
public:
static const unsigned RESERVED_SLOTS = 2;
static const JSClass class_;
@@ -182,7 +184,9 @@ class WasmGlobalObject : public NativeObject {
bool isMutable() const;
wasm::ValType type() const;
- wasm::GCPtrVal& val() const;
+ const wasm::GCPtrVal& val() const;
+ void setVal(wasm::HandleVal value);
+ void* addressOfCell() const;
};
// The class of WebAssembly.Instance. Each WasmInstanceObject owns a
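With the accessor split above, reads keep using the const val() while every write must go through setVal(), which asserts the stored type matches; mutableVal() is private and reserved for cell initialization in create() and for finalization. A small sketch of the intended call pattern (UpdateGlobal and its arguments are assumptions; the accessors are the ones declared above):

    // Sketch only: update an already-created, correctly-typed global.
    static void UpdateGlobal(js::WasmGlobalObject* global,
                             js::wasm::HandleVal newValue) {
      // setVal() asserts global->type() == newValue.get().type() and stores
      // through the GC-barriered cell; ad-hoc writes via val() no longer
      // compile because val() is const.
      global->setVal(newValue);
      MOZ_ASSERT(global->val().get().type() == global->type());
    }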
diff --git a/js/src/wasm/WasmModule.cpp b/js/src/wasm/WasmModule.cpp
index c2de0429d3..a297e81ad3 100644
--- a/js/src/wasm/WasmModule.cpp
+++ b/js/src/wasm/WasmModule.cpp
@@ -867,7 +867,7 @@ static bool GetGlobalExport(JSContext* cx,
MOZ_RELEASE_ASSERT(!global.isImport());
RootedVal globalVal(cx);
instanceObj->instance().constantGlobalGet(globalIndex, &globalVal);
- globalObj->val() = globalVal;
+ globalObj->setVal(globalVal);
return true;
}
diff --git a/js/src/wasm/WasmOpIter.cpp b/js/src/wasm/WasmOpIter.cpp
index 102d39639c..d60a87dc12 100644
--- a/js/src/wasm/WasmOpIter.cpp
+++ b/js/src/wasm/WasmOpIter.cpp
@@ -25,14 +25,14 @@ using namespace js::jit;
using namespace js::wasm;
#ifdef ENABLE_WASM_GC
-# ifndef ENABLE_WASM_FUNCTION_REFERENCES
+# ifndef ENABLE_WASM_GC
# error "GC types require the function-references feature"
# endif
#endif
#ifdef DEBUG
-# ifdef ENABLE_WASM_FUNCTION_REFERENCES
+# ifdef ENABLE_WASM_GC
# define WASM_FUNCTION_REFERENCES_OP(code) return code
# else
# define WASM_FUNCTION_REFERENCES_OP(code) break
diff --git a/js/src/wasm/WasmOpIter.h b/js/src/wasm/WasmOpIter.h
index 1711cc3926..59d494bfbf 100644
--- a/js/src/wasm/WasmOpIter.h
+++ b/js/src/wasm/WasmOpIter.h
@@ -165,7 +165,7 @@ enum class OpKind {
ReturnCall,
CallIndirect,
ReturnCallIndirect,
-# ifdef ENABLE_WASM_FUNCTION_REFERENCES
+# ifdef ENABLE_WASM_GC
CallRef,
ReturnCallRef,
# endif
@@ -493,7 +493,8 @@ class MOZ_STACK_CLASS OpIter : private Policy {
[[nodiscard]] bool getControl(uint32_t relativeDepth, Control** controlEntry);
[[nodiscard]] bool checkBranchValueAndPush(uint32_t relativeDepth,
ResultType* type,
- ValueVector* values);
+ ValueVector* values,
+ bool rewriteStackTypes);
[[nodiscard]] bool checkBrTableEntryAndPush(uint32_t* relativeDepth,
ResultType prevBranchType,
ResultType* branchType,
@@ -533,7 +534,7 @@ class MOZ_STACK_CLASS OpIter : private Policy {
inline bool checkIsSubtypeOf(ResultType params, ResultType results);
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
inline bool checkIsSubtypeOf(uint32_t actualTypeIndex,
uint32_t expectedTypeIndex);
#endif
@@ -703,7 +704,7 @@ class MOZ_STACK_CLASS OpIter : private Policy {
uint32_t* tableIndex, Value* callee,
ValueVector* argValues);
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
[[nodiscard]] bool readCallRef(const FuncType** funcType, Value* callee,
ValueVector* argValues);
@@ -932,7 +933,7 @@ inline bool OpIter<Policy>::checkIsSubtypeOf(ResultType params,
return true;
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
template <typename Policy>
inline bool OpIter<Policy>::checkIsSubtypeOf(uint32_t actualTypeIndex,
uint32_t expectedTypeIndex) {
@@ -1480,14 +1481,15 @@ inline void OpIter<Policy>::popEnd() {
template <typename Policy>
inline bool OpIter<Policy>::checkBranchValueAndPush(uint32_t relativeDepth,
ResultType* type,
- ValueVector* values) {
+ ValueVector* values,
+ bool rewriteStackTypes) {
Control* block = nullptr;
if (!getControl(relativeDepth, &block)) {
return false;
}
*type = block->branchTargetType();
- return checkTopTypeMatches(*type, values, /*rewriteStackTypes=*/false);
+ return checkTopTypeMatches(*type, values, rewriteStackTypes);
}
template <typename Policy>
@@ -1499,7 +1501,8 @@ inline bool OpIter<Policy>::readBr(uint32_t* relativeDepth, ResultType* type,
return fail("unable to read br depth");
}
- if (!checkBranchValueAndPush(*relativeDepth, type, values)) {
+ if (!checkBranchValueAndPush(*relativeDepth, type, values,
+ /*rewriteStackTypes=*/false)) {
return false;
}
@@ -1520,7 +1523,8 @@ inline bool OpIter<Policy>::readBrIf(uint32_t* relativeDepth, ResultType* type,
return false;
}
- return checkBranchValueAndPush(*relativeDepth, type, values);
+ return checkBranchValueAndPush(*relativeDepth, type, values,
+ /*rewriteStackTypes=*/true);
}
#define UNKNOWN_ARITY UINT32_MAX
@@ -2392,10 +2396,10 @@ inline bool OpIter<Policy>::readRefFunc(uint32_t* funcIndex) {
"function index is not declared in a section before the code section");
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
// When function references enabled, push type index on the stack, e.g. for
// validation of the call_ref instruction.
- if (env_.functionReferencesEnabled()) {
+ if (env_.gcEnabled()) {
const uint32_t typeIndex = env_.funcs[*funcIndex].typeIndex;
const TypeDef& typeDef = env_.types->type(typeIndex);
return push(RefType::fromTypeDef(&typeDef, false));
@@ -2457,7 +2461,8 @@ inline bool OpIter<Policy>::readBrOnNull(uint32_t* relativeDepth,
return false;
}
- if (!checkBranchValueAndPush(*relativeDepth, type, values)) {
+ if (!checkBranchValueAndPush(*relativeDepth, type, values,
+ /*rewriteStackTypes=*/true)) {
return false;
}
@@ -2505,7 +2510,7 @@ inline bool OpIter<Policy>::readBrOnNonNull(uint32_t* relativeDepth,
}
// Check if the type stack matches the branch target type.
- if (!checkTopTypeMatches(*type, values, /*rewriteStackTypes=*/false)) {
+ if (!checkTopTypeMatches(*type, values, /*rewriteStackTypes=*/true)) {
return false;
}
@@ -2693,7 +2698,7 @@ inline bool OpIter<Policy>::readReturnCallIndirect(uint32_t* funcTypeIndex,
}
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
template <typename Policy>
inline bool OpIter<Policy>::readCallRef(const FuncType** funcType,
Value* callee, ValueVector* argValues) {
@@ -2719,7 +2724,7 @@ inline bool OpIter<Policy>::readCallRef(const FuncType** funcType,
}
#endif
-#if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_FUNCTION_REFERENCES)
+#if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_GC)
template <typename Policy>
inline bool OpIter<Policy>::readReturnCallRef(const FuncType** funcType,
Value* callee,
@@ -4001,7 +4006,7 @@ inline bool OpIter<Policy>::readBrOnCast(bool onSuccess,
fallthroughTypes[labelTypeNumValues - 1] = typeOnFallthrough;
return checkTopTypeMatches(ResultType::Vector(fallthroughTypes), values,
- /*rewriteStackTypes=*/false);
+ /*rewriteStackTypes=*/true);
}
template <typename Policy>
@@ -4228,18 +4233,18 @@ inline bool OpIter<Policy>::readCallBuiltinModuleFunc(
return fail("index out of range");
}
- *builtinModuleFunc = &BuiltinModuleFunc::getFromId(BuiltinModuleFuncId(id));
+ *builtinModuleFunc = &BuiltinModuleFuncs::getFromId(BuiltinModuleFuncId(id));
- if ((*builtinModuleFunc)->usesMemory && env_.numMemories() == 0) {
+ if ((*builtinModuleFunc)->usesMemory() && env_.numMemories() == 0) {
return fail("can't touch memory without memory");
}
- if (!popWithTypes((*builtinModuleFunc)->params, params)) {
+
+ const FuncType& funcType = *(*builtinModuleFunc)->funcType();
+ if (!popCallArgs(funcType.args(), params)) {
return false;
}
- if ((*builtinModuleFunc)->result.isNothing()) {
- return true;
- }
- return push(*(*builtinModuleFunc)->result);
+
+ return push(ResultType::Vector(funcType.results()));
}
} // namespace wasm
diff --git a/js/src/wasm/WasmProcess.cpp b/js/src/wasm/WasmProcess.cpp
index 427ba42d9d..0436e0a23f 100644
--- a/js/src/wasm/WasmProcess.cpp
+++ b/js/src/wasm/WasmProcess.cpp
@@ -26,10 +26,12 @@
#include "threading/ExclusiveData.h"
#include "vm/MutexIDs.h"
#include "vm/Runtime.h"
+#include "wasm/WasmBuiltinModule.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmInstance.h"
#include "wasm/WasmModuleTypes.h"
+#include "wasm/WasmStaticTypeDefs.h"
using namespace js;
using namespace wasm;
@@ -438,6 +440,15 @@ bool wasm::Init() {
oomUnsafe.crash("js::wasm::Init");
}
+ if (!StaticTypeDefs::init()) {
+ oomUnsafe.crash("js::wasm::Init");
+ }
+
+  // BuiltinModuleFuncs::init() uses StaticTypeDefs, so it must run after it.
+ if (!BuiltinModuleFuncs::init()) {
+ oomUnsafe.crash("js::wasm::Init");
+ }
+
sProcessCodeSegmentMap = map;
if (!InitTagForJSValue()) {
@@ -455,6 +466,8 @@ void wasm::ShutDown() {
return;
}
+ BuiltinModuleFuncs::destroy();
+ StaticTypeDefs::destroy();
PurgeCanonicalTypes();
if (sWrappedJSValueTagType) {
diff --git a/js/src/wasm/WasmSerialize.cpp b/js/src/wasm/WasmSerialize.cpp
index 62a68c5aff..35f437688c 100644
--- a/js/src/wasm/WasmSerialize.cpp
+++ b/js/src/wasm/WasmSerialize.cpp
@@ -957,7 +957,7 @@ CoderResult CodeSymbolicLinkArray(
template <CoderMode mode>
CoderResult CodeLinkData(Coder<mode>& coder,
CoderArg<mode, wasm::LinkData> item) {
- WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::LinkData, 8832);
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::LinkData, 8976);
if constexpr (mode == MODE_ENCODE) {
MOZ_ASSERT(item->tier == Tier::Serialized);
}
diff --git a/js/src/wasm/WasmStaticTypeDefs.cpp b/js/src/wasm/WasmStaticTypeDefs.cpp
new file mode 100644
index 0000000000..2306339087
--- /dev/null
+++ b/js/src/wasm/WasmStaticTypeDefs.cpp
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2023 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmStaticTypeDefs.h"
+
+#include "wasm/WasmTypeDef.h"
+
+using namespace js;
+using namespace js::wasm;
+
+const TypeDef* StaticTypeDefs::arrayMutI16 = nullptr;
+
+bool StaticTypeDefs::init() {
+ RefPtr<TypeContext> types = js_new<TypeContext>();
+ if (!types) {
+ return false;
+ }
+
+#ifdef ENABLE_WASM_GC
+ arrayMutI16 = types->addType(ArrayType(StorageType::I16, true));
+ if (!arrayMutI16) {
+ return false;
+ }
+ arrayMutI16->recGroup().AddRef();
+#endif
+
+ return true;
+}
+
+void StaticTypeDefs::destroy() {
+ if (arrayMutI16) {
+ arrayMutI16->recGroup().Release();
+ arrayMutI16 = nullptr;
+ }
+}
diff --git a/js/src/wasm/WasmStaticTypeDefs.h b/js/src/wasm/WasmStaticTypeDefs.h
new file mode 100644
index 0000000000..d3a01ad26c
--- /dev/null
+++ b/js/src/wasm/WasmStaticTypeDefs.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_static_type_defs
+#define wasm_static_type_defs
+
+namespace js {
+namespace wasm {
+
+class TypeDef;
+
+// Simple type definitions used in builtins with a static lifetime.
+//
+// TODO: this class is very simple and won't scale well with many type
+// definitions. Rethink this approach once we have more than a handful.
+struct StaticTypeDefs {
+ static const TypeDef* arrayMutI16;
+
+ [[nodiscard]] static bool init();
+ static void destroy();
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_static_type_defs
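The .cpp above establishes the registration pattern for a static type definition: build a throwaway TypeContext, addType() the definition, and AddRef() its recursion group so it outlives the context; destroy() later drops that reference. A hedged sketch of how a second definition could be added under the same pattern (arrayMutI8 is hypothetical; the calls mirror the arrayMutI16 code above):

    // Inside a hypothetical extended StaticTypeDefs::init():
    #ifdef ENABLE_WASM_GC
      arrayMutI8 = types->addType(ArrayType(StorageType::I8, true));
      if (!arrayMutI8) {
        return false;
      }
      // Pin the canonicalized recursion group until destroy() releases it.
      arrayMutI8->recGroup().AddRef();
    #endif

The matching `static const TypeDef* arrayMutI8` member and a Release() in destroy() would be needed as well, exactly as done for arrayMutI16.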
diff --git a/js/src/wasm/WasmTypeDef.h b/js/src/wasm/WasmTypeDef.h
index 7aedbed1f8..3426647095 100644
--- a/js/src/wasm/WasmTypeDef.h
+++ b/js/src/wasm/WasmTypeDef.h
@@ -1228,13 +1228,16 @@ class TypeContext : public AtomicRefCounted<TypeContext> {
}
template <typename T>
- [[nodiscard]] bool addType(T&& type) {
+ [[nodiscard]] const TypeDef* addType(T&& type) {
MutableRecGroup recGroup = startRecGroup(1);
if (!recGroup) {
- return false;
+ return nullptr;
}
recGroup->type(0) = std::move(type);
- return endRecGroup();
+ if (!endRecGroup()) {
+ return nullptr;
+ }
+ return &this->type(length() - 1);
}
const TypeDef& type(uint32_t index) const { return *types_[index]; }
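Since addType() now returns the canonicalized definition directly (nullptr on failure) instead of a bool, callers drop the follow-up index lookup. Roughly, comparing the removed pattern in CheckImportsAgainstBuiltinModules with the new one used by StaticTypeDefs::init():

    // Before: bool result, then re-fetch the last entry by index.
    //   if (!types->addType(std::move(funcType))) {
    //     return false;
    //   }
    //   const TypeDef& def = types->type(types->length() - 1);
    // After: the pointer comes straight back.
    const TypeDef* def = types->addType(ArrayType(StorageType::I16, true));
    if (!def) {
      return false;
    }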
diff --git a/js/src/wasm/WasmValType.cpp b/js/src/wasm/WasmValType.cpp
index d1874b7131..64ef8ff85a 100644
--- a/js/src/wasm/WasmValType.cpp
+++ b/js/src/wasm/WasmValType.cpp
@@ -150,94 +150,7 @@ enum class RefTypeResult {
Unparsed,
};
-static RefTypeResult MaybeToRefType(JSContext* cx, HandleObject obj,
- RefType* out) {
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
- if (!wasm::FunctionReferencesAvailable(cx)) {
- return RefTypeResult::Unparsed;
- }
-
- JSAtom* refAtom = Atomize(cx, "ref", strlen("ref"));
- if (!refAtom) {
- return RefTypeResult::Failure;
- }
- RootedId refId(cx, AtomToId(refAtom));
-
- RootedValue refVal(cx);
- if (!GetProperty(cx, obj, obj, refId, &refVal)) {
- return RefTypeResult::Failure;
- }
-
- RootedString typeStr(cx, ToString(cx, refVal));
- if (!typeStr) {
- return RefTypeResult::Failure;
- }
-
- Rooted<JSLinearString*> typeLinearStr(cx, typeStr->ensureLinear(cx));
- if (!typeLinearStr) {
- return RefTypeResult::Failure;
- }
-
- if (StringEqualsLiteral(typeLinearStr, "func")) {
- *out = RefType::func();
- } else if (StringEqualsLiteral(typeLinearStr, "extern")) {
- *out = RefType::extern_();
-# ifdef ENABLE_WASM_EXNREF
- } else if (ExnRefAvailable(cx) && StringEqualsLiteral(typeLinearStr, "exn")) {
- *out = RefType::exn();
-# endif
-# ifdef ENABLE_WASM_GC
- } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "any")) {
- *out = RefType::any();
- } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "eq")) {
- *out = RefType::eq();
- } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "i31")) {
- *out = RefType::i31();
- } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "struct")) {
- *out = RefType::struct_();
- } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "array")) {
- *out = RefType::array();
-# endif
- } else {
- return RefTypeResult::Unparsed;
- }
-
- JSAtom* nullableAtom = Atomize(cx, "nullable", strlen("nullable"));
- if (!nullableAtom) {
- return RefTypeResult::Failure;
- }
- RootedId nullableId(cx, AtomToId(nullableAtom));
- RootedValue nullableVal(cx);
- if (!GetProperty(cx, obj, obj, nullableId, &nullableVal)) {
- return RefTypeResult::Failure;
- }
-
- bool nullable = ToBoolean(nullableVal);
- if (!nullable) {
- *out = out->asNonNullable();
- }
- MOZ_ASSERT(out->isNullable() == nullable);
- return RefTypeResult::Parsed;
-#else
- return RefTypeResult::Unparsed;
-#endif
-}
-
bool wasm::ToValType(JSContext* cx, HandleValue v, ValType* out) {
- if (v.isObject()) {
- RootedObject obj(cx, &v.toObject());
- RefType refType;
- switch (MaybeToRefType(cx, obj, &refType)) {
- case RefTypeResult::Failure:
- return false;
- case RefTypeResult::Parsed:
- *out = ValType(refType);
- return true;
- case RefTypeResult::Unparsed:
- break;
- }
- }
-
RootedString typeStr(cx, ToString(cx, v));
if (!typeStr) {
return false;
@@ -274,18 +187,6 @@ bool wasm::ToValType(JSContext* cx, HandleValue v, ValType* out) {
}
bool wasm::ToRefType(JSContext* cx, HandleValue v, RefType* out) {
- if (v.isObject()) {
- RootedObject obj(cx, &v.toObject());
- switch (MaybeToRefType(cx, obj, out)) {
- case RefTypeResult::Failure:
- return false;
- case RefTypeResult::Parsed:
- return true;
- case RefTypeResult::Unparsed:
- break;
- }
- }
-
RootedString typeStr(cx, ToString(cx, v));
if (!typeStr) {
return false;
diff --git a/js/src/wasm/WasmValType.h b/js/src/wasm/WasmValType.h
index 0821ee5df9..c98eda28dd 100644
--- a/js/src/wasm/WasmValType.h
+++ b/js/src/wasm/WasmValType.h
@@ -479,7 +479,7 @@ class StorageTypeTraits {
case TypeCode::NullExternRef:
case TypeCode::NullAnyRef:
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case AbstractTypeRefCode:
#endif
return true;
@@ -557,7 +557,7 @@ class ValTypeTraits {
case TypeCode::NullExternRef:
case TypeCode::NullAnyRef:
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case AbstractTypeRefCode:
#endif
return true;
diff --git a/js/src/wasm/WasmValidate.cpp b/js/src/wasm/WasmValidate.cpp
index e964c11d04..98a1423a41 100644
--- a/js/src/wasm/WasmValidate.cpp
+++ b/js/src/wasm/WasmValidate.cpp
@@ -89,14 +89,19 @@ bool wasm::EncodeLocalEntries(Encoder& e, const ValTypeVector& locals) {
return true;
}
-bool wasm::DecodeLocalEntries(Decoder& d, const TypeContext& types,
- const FeatureArgs& features,
- ValTypeVector* locals) {
+bool wasm::DecodeLocalEntriesWithParams(Decoder& d,
+ const ModuleEnvironment& env,
+ uint32_t funcIndex,
+ ValTypeVector* locals) {
uint32_t numLocalEntries;
if (!d.readVarU32(&numLocalEntries)) {
return d.fail("failed to read number of local entries");
}
+ if (!locals->appendAll(env.funcs[funcIndex].type->args())) {
+ return false;
+ }
+
for (uint32_t i = 0; i < numLocalEntries; i++) {
uint32_t count;
if (!d.readVarU32(&count)) {
@@ -108,7 +113,7 @@ bool wasm::DecodeLocalEntries(Decoder& d, const TypeContext& types,
}
ValType type;
- if (!d.readValType(types, features, &type)) {
+ if (!d.readValType(*env.types, env.features, &type)) {
return false;
}
@@ -235,9 +240,9 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
&unusedArgs));
}
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case uint16_t(Op::CallRef): {
- if (!env.functionReferencesEnabled()) {
+ if (!env.gcEnabled()) {
return iter.unrecognizedOpcode(&op);
}
const FuncType* unusedType;
@@ -246,7 +251,7 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
}
# ifdef ENABLE_WASM_TAIL_CALLS
case uint16_t(Op::ReturnCallRef): {
- if (!env.functionReferencesEnabled() || !env.tailCallsEnabled()) {
+ if (!env.gcEnabled() || !env.tailCallsEnabled()) {
return iter.unrecognizedOpcode(&op);
}
const FuncType* unusedType;
@@ -1240,15 +1245,15 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
}
break;
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case uint16_t(Op::RefAsNonNull): {
- if (!env.functionReferencesEnabled()) {
+ if (!env.gcEnabled()) {
return iter.unrecognizedOpcode(&op);
}
CHECK(iter.readRefAsNonNull(&nothing));
}
case uint16_t(Op::BrOnNull): {
- if (!env.functionReferencesEnabled()) {
+ if (!env.gcEnabled()) {
return iter.unrecognizedOpcode(&op);
}
uint32_t unusedDepth;
@@ -1256,7 +1261,7 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
iter.readBrOnNull(&unusedDepth, &unusedType, &nothings, &nothing));
}
case uint16_t(Op::BrOnNonNull): {
- if (!env.functionReferencesEnabled()) {
+ if (!env.gcEnabled()) {
return iter.unrecognizedOpcode(&op);
}
uint32_t unusedDepth;
@@ -1285,31 +1290,19 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
CHECK(iter.readRefIsNull(&nothing));
}
case uint16_t(Op::Try):
- if (!env.exceptionsEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
CHECK(iter.readTry(&unusedType));
case uint16_t(Op::Catch): {
- if (!env.exceptionsEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
LabelKind unusedKind;
uint32_t unusedIndex;
CHECK(iter.readCatch(&unusedKind, &unusedIndex, &unusedType,
&unusedType, &nothings));
}
case uint16_t(Op::CatchAll): {
- if (!env.exceptionsEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
LabelKind unusedKind;
CHECK(iter.readCatchAll(&unusedKind, &unusedType, &unusedType,
&nothings));
}
case uint16_t(Op::Delegate): {
- if (!env.exceptionsEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
uint32_t unusedDepth;
if (!iter.readDelegate(&unusedDepth, &unusedType, &nothings)) {
return false;
@@ -1318,16 +1311,10 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
break;
}
case uint16_t(Op::Throw): {
- if (!env.exceptionsEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
uint32_t unusedIndex;
CHECK(iter.readThrow(&unusedIndex, &nothings));
}
case uint16_t(Op::Rethrow): {
- if (!env.exceptionsEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
uint32_t unusedDepth;
CHECK(iter.readRethrow(&unusedDepth));
}
@@ -1541,14 +1528,10 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
bool wasm::ValidateFunctionBody(const ModuleEnvironment& env,
uint32_t funcIndex, uint32_t bodySize,
Decoder& d) {
- ValTypeVector locals;
- if (!locals.appendAll(env.funcs[funcIndex].type->args())) {
- return false;
- }
-
const uint8_t* bodyBegin = d.currentPosition();
- if (!DecodeLocalEntries(d, *env.types, env.features, &locals)) {
+ ValTypeVector locals;
+ if (!DecodeLocalEntriesWithParams(d, env, funcIndex, &locals)) {
return false;
}
@@ -1624,7 +1607,7 @@ static bool DecodeFuncType(Decoder& d, ModuleEnvironment* env,
static bool DecodeStructType(Decoder& d, ModuleEnvironment* env,
StructType* structType) {
if (!env->gcEnabled()) {
- return d.fail("Structure types not enabled");
+ return d.fail("gc not enabled");
}
uint32_t numFields;
@@ -1668,7 +1651,7 @@ static bool DecodeStructType(Decoder& d, ModuleEnvironment* env,
static bool DecodeArrayType(Decoder& d, ModuleEnvironment* env,
ArrayType* arrayType) {
if (!env->gcEnabled()) {
- return d.fail("gc types not enabled");
+ return d.fail("gc not enabled");
}
StorageType elementType;
@@ -2247,13 +2230,6 @@ static bool CheckImportsAgainstBuiltinModules(Decoder& d,
return true;
}
- // Allocate a type context for builtin types so we can canonicalize them
- // and use them in type comparisons
- RefPtr<TypeContext> builtinTypes = js_new<TypeContext>();
- if (!builtinTypes) {
- return false;
- }
-
uint32_t importFuncIndex = 0;
for (auto& import : env->imports) {
Maybe<BuiltinModuleId> builtinModule =
@@ -2278,21 +2254,9 @@ static bool CheckImportsAgainstBuiltinModules(Decoder& d,
return d.fail("unrecognized builtin module field");
}
- // Get a canonicalized type definition for this builtin so we can
- // accurately compare it against the import type.
- FuncType builtinFuncType;
- if (!(*builtinFunc)->funcType(&builtinFuncType)) {
- return false;
- }
- if (!builtinTypes->addType(builtinFuncType)) {
- return false;
- }
- const TypeDef& builtinTypeDef =
- builtinTypes->type(builtinTypes->length() - 1);
-
const TypeDef& importTypeDef = (*env->types)[func.typeIndex];
- if (!TypeDef::isSubTypeOf(&builtinTypeDef, &importTypeDef)) {
- return d.failf("type mismatch in %s", (*builtinFunc)->exportName);
+ if (!TypeDef::isSubTypeOf((*builtinFunc)->typeDef(), &importTypeDef)) {
+ return d.failf("type mismatch in %s", (*builtinFunc)->exportName());
}
break;
}
@@ -2479,10 +2443,6 @@ static bool DecodeTagSection(Decoder& d, ModuleEnvironment* env) {
return true;
}
- if (!env->exceptionsEnabled()) {
- return d.fail("exceptions not enabled");
- }
-
uint32_t numDefs;
if (!d.readVarU32(&numDefs)) {
return d.fail("expected number of tags");
diff --git a/js/src/wasm/WasmValidate.h b/js/src/wasm/WasmValidate.h
index 3254e7b74a..8ba08fd088 100644
--- a/js/src/wasm/WasmValidate.h
+++ b/js/src/wasm/WasmValidate.h
@@ -285,11 +285,13 @@ using ValidatingOpIter = OpIter<ValidatingPolicy>;
Decoder& d,
ValTypeVector* locals);
-// This validates the entries.
+// This validates the entries. Function params are inserted before the locals
+// to generate the full local entries used during validation.
-[[nodiscard]] bool DecodeLocalEntries(Decoder& d, const TypeContext& types,
- const FeatureArgs& features,
- ValTypeVector* locals);
+[[nodiscard]] bool DecodeLocalEntriesWithParams(Decoder& d,
+ const ModuleEnvironment& env,
+ uint32_t funcIndex,
+ ValTypeVector* locals);
// Returns whether the given [begin, end) prefix of a module's bytecode starts a
// code section and, if so, returns the SectionRange of that code section.
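Folding the parameter-prepending step into the decoder means validation, Ion compilation, and the new dump path all build the locals vector identically. The call-site shape, condensed from the hunks above:

    // Old two-step pattern:
    //   ValTypeVector locals;
    //   if (!locals.appendAll(env.funcs[funcIndex].type->args())) { ... }
    //   if (!DecodeLocalEntries(d, *env.types, env.features, &locals)) { ... }
    // New single call; the function's params are appended internally before
    // the declared local runs are decoded.
    ValTypeVector locals;
    if (!DecodeLocalEntriesWithParams(d, env, funcIndex, &locals)) {
      return false;
    }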
diff --git a/js/src/wasm/WasmValue.cpp b/js/src/wasm/WasmValue.cpp
index 3798f6c3e8..6039b00517 100644
--- a/js/src/wasm/WasmValue.cpp
+++ b/js/src/wasm/WasmValue.cpp
@@ -642,7 +642,7 @@ bool wasm::ToWebAssemblyValue(JSContext* cx, HandleValue val, ValType type,
case ValType::V128:
break;
case ValType::Ref:
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
if (!type.isNullable() && val.isNull()) {
JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
JSMSG_WASM_BAD_REF_NONNULLABLE_VALUE);
diff --git a/js/src/wasm/WasmValue.h b/js/src/wasm/WasmValue.h
index 66de690537..79e20285b9 100644
--- a/js/src/wasm/WasmValue.h
+++ b/js/src/wasm/WasmValue.h
@@ -224,6 +224,11 @@ class LitVal {
Cell& cell() { return cell_; }
const Cell& cell() const { return cell_; }
+  // Updates the type of the LitVal. Does not check that the type is valid
+  // for the actual value, so callers must ensure the type is correct (for
+  // example via validation) before calling this.
+ void unsafeSetType(ValType type) { type_ = type; }
+
uint32_t i32() const {
MOZ_ASSERT(type_ == ValType::I32);
return cell_.i32_;
@@ -309,11 +314,6 @@ class MOZ_NON_PARAM Val : public LitVal {
return cell_.ref_;
}
- // Updates the type of the Val. Does not check that the type is valid for the
- // actual value, so make sure the type is definitely correct via validation or
- // something.
- void unsafeSetType(ValType type) { type_ = type; }
-
// Initialize from `loc` which is a rooted location and needs no barriers.
void initFromRootedLocation(ValType type, const void* loc);
void initFromHeapLocation(ValType type, const void* loc);
diff --git a/js/src/wasm/moz.build b/js/src/wasm/moz.build
index 8aa23e3516..83fea3b81b 100644
--- a/js/src/wasm/moz.build
+++ b/js/src/wasm/moz.build
@@ -45,6 +45,7 @@ UNIFIED_SOURCES += [
"WasmRealm.cpp",
"WasmSerialize.cpp",
"WasmSignalHandlers.cpp",
+ "WasmStaticTypeDefs.cpp",
"WasmStubs.cpp",
"WasmSummarizeInsn.cpp",
"WasmTable.cpp",
diff --git a/js/xpconnect/idl/xpccomponents.idl b/js/xpconnect/idl/xpccomponents.idl
index 0235228663..1d4155ed74 100644
--- a/js/xpconnect/idl/xpccomponents.idl
+++ b/js/xpconnect/idl/xpccomponents.idl
@@ -324,6 +324,9 @@ interface nsIXPCComponents_Utils : nsISupports
* pointing to the same file will not cause the module to be re-evaluated,
* but the symbols in EXPORTED_SYMBOLS will be exported into the
* specified target object and the global object returned as above.
+ *
+ * TODO: Remove this once m-c, c-c, and out-of-tree code migrations finish
+ * (bug 1776175, bug 1881888).
*/
[implicit_jscontext,optional_argc]
jsval import(in AUTF8String aResourceURI, [optional] in jsval targetObj);
@@ -344,6 +347,9 @@ interface nsIXPCComponents_Utils : nsISupports
/**
* Returns true if the JSM is loaded into the system global previously via
* the import method above. Returns false otherwise.
+ *
+ * TODO: Remove this once m-c, c-c, and out-of-tree code migrations finish
+ * (bug 1776175, bug 1881888).
*/
boolean isJSModuleLoaded(in AUTF8String aResourceURI);
@@ -360,6 +366,9 @@ interface nsIXPCComponents_Utils : nsISupports
* imported then this method will do nothing.
*
* @param resourceURI A resource:// URI string to unload the module from.
+ *
+ * TODO: Remove this once m-c, c-c, and out-of-tree code migrations finish
+ * (bug 1776175, bug 1881888).
*/
void unload(in AUTF8String registryLocation);
diff --git a/js/xpconnect/loader/ChromeScriptLoader.cpp b/js/xpconnect/loader/ChromeScriptLoader.cpp
index 5c7115c997..d67c41d014 100644
--- a/js/xpconnect/loader/ChromeScriptLoader.cpp
+++ b/js/xpconnect/loader/ChromeScriptLoader.cpp
@@ -17,8 +17,8 @@
#include "js/CompileOptions.h" // JS::CompileOptions, JS::OwningCompileOptions
#include "js/CompilationAndEvaluation.h"
#include "js/experimental/CompileScript.h" // JS::CompileGlobalScriptToStencil, JS::NewFrontendContext, JS::DestroyFrontendContext, JS::SetNativeStackQuota, JS::ThreadStackQuotaForSize, JS::HadFrontendErrors, JS::ConvertFrontendErrorsToRuntimeErrors
-#include "js/experimental/JSStencil.h" // JS::Stencil, JS::CompileGlobalScriptToStencil, JS::InstantiateGlobalStencil, JS::CompilationStorage
-#include "js/SourceText.h" // JS::SourceText
+#include "js/experimental/JSStencil.h" // JS::Stencil, JS::CompileGlobalScriptToStencil, JS::InstantiateGlobalStencil
+#include "js/SourceText.h" // JS::SourceText
#include "js/Utility.h"
#include "mozilla/AlreadyAddRefed.h" // already_AddRefed
@@ -119,9 +119,8 @@ class AsyncScriptCompileTask final : public Task {
JS::SetNativeStackQuota(mFrontendContext,
JS::ThreadStackQuotaForSize(stackSize));
- JS::CompilationStorage compileStorage;
- mStencil = JS::CompileGlobalScriptToStencil(mFrontendContext, mOptions,
- mSrcBuf, compileStorage);
+ mStencil =
+ JS::CompileGlobalScriptToStencil(mFrontendContext, mOptions, mSrcBuf);
}
// Cancel the task.
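For callers of the experimental stencil API, the visible change in this hunk is that JS::CompileGlobalScriptToStencil no longer takes a JS::CompilationStorage argument. A rough before/after sketch, assuming a frontend context fc, compile options, and a source buffer are already prepared as in AsyncScriptCompileTask above:

    // Before: a CompilationStorage had to be threaded through.
    //   JS::CompilationStorage compileStorage;
    //   stencil = JS::CompileGlobalScriptToStencil(fc, options, srcBuf,
    //                                              compileStorage);
    // After: the storage parameter is gone.
    RefPtr<JS::Stencil> stencil =
        JS::CompileGlobalScriptToStencil(fc, options, srcBuf);
    if (!stencil) {
      // Errors stay on the frontend context (see JS::HadFrontendErrors) until
      // they are converted to runtime errors on a JSContext later.
      return nullptr;
    }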
diff --git a/js/xpconnect/loader/mozJSModuleLoader.cpp b/js/xpconnect/loader/mozJSModuleLoader.cpp
index f20306f0a0..017ac32b3b 100644
--- a/js/xpconnect/loader/mozJSModuleLoader.cpp
+++ b/js/xpconnect/loader/mozJSModuleLoader.cpp
@@ -454,6 +454,12 @@ void mozJSModuleLoader::InitStatics() {
MOZ_ASSERT(!sSelf);
sSelf = new mozJSModuleLoader();
RegisterWeakMemoryReporter(sSelf);
+
+ dom::AutoJSAPI jsapi;
+ jsapi.Init();
+ JSContext* cx = jsapi.cx();
+ sSelf->InitSharedGlobal(cx);
+
NonSharedGlobalSyncModuleLoaderScope::InitStatics();
}
@@ -486,12 +492,16 @@ void mozJSModuleLoader::ShutdownLoaders() {
}
}
-mozJSModuleLoader* mozJSModuleLoader::GetOrCreateDevToolsLoader() {
+mozJSModuleLoader* mozJSModuleLoader::GetOrCreateDevToolsLoader(
+ JSContext* aCx) {
if (sDevToolsLoader) {
return sDevToolsLoader;
}
sDevToolsLoader = new mozJSModuleLoader();
RegisterWeakMemoryReporter(sDevToolsLoader);
+
+ sDevToolsLoader->InitSharedGlobal(aCx);
+
return sDevToolsLoader;
}
@@ -682,26 +692,22 @@ void mozJSModuleLoader::CreateLoaderGlobal(JSContext* aCx,
aGlobal.set(global);
}
-JSObject* mozJSModuleLoader::GetSharedGlobal(JSContext* aCx) {
- if (!mLoaderGlobal) {
- JS::RootedObject globalObj(aCx);
+void mozJSModuleLoader::InitSharedGlobal(JSContext* aCx) {
+ JS::RootedObject globalObj(aCx);
- CreateLoaderGlobal(
- aCx, IsDevToolsLoader() ? "DevTools global"_ns : "shared JSM global"_ns,
- &globalObj);
+ CreateLoaderGlobal(
+ aCx, IsDevToolsLoader() ? "DevTools global"_ns : "shared JSM global"_ns,
+ &globalObj);
- // If we fail to create a module global this early, we're not going to
- // get very far, so just bail out now.
- MOZ_RELEASE_ASSERT(globalObj);
- mLoaderGlobal = globalObj;
-
- // AutoEntryScript required to invoke debugger hook, which is a
- // Gecko-specific concept at present.
- dom::AutoEntryScript aes(globalObj, "module loader report global");
- JS_FireOnNewGlobalObject(aes.cx(), globalObj);
- }
+ // If we fail to create a module global this early, we're not going to
+ // get very far, so just bail out now.
+ MOZ_RELEASE_ASSERT(globalObj);
+ mLoaderGlobal = globalObj;
- return mLoaderGlobal;
+ // AutoEntryScript required to invoke debugger hook, which is a
+ // Gecko-specific concept at present.
+ dom::AutoEntryScript aes(globalObj, "module loader report global");
+ JS_FireOnNewGlobalObject(aes.cx(), globalObj);
}
// Read script file on the main thread and pass it back to worker.
@@ -970,8 +976,8 @@ JSObject* mozJSModuleLoader::PrepareObjectForLocation(JSContext* aCx,
nsIFile* aModuleFile,
nsIURI* aURI,
bool aRealFile) {
- RootedObject globalObj(aCx, GetSharedGlobal(aCx));
- NS_ENSURE_TRUE(globalObj, nullptr);
+ RootedObject globalObj(aCx, GetSharedGlobal());
+ MOZ_ASSERT(globalObj);
JSAutoRealm ar(aCx, globalObj);
// |thisObj| is the object we set properties on for a particular .jsm.
@@ -2018,8 +2024,8 @@ nsresult mozJSModuleLoader::ImportESModule(
MarkerInnerWindowIdFromJSContext(aCx)),
Substring(aLocation, 0, std::min(size_t(128), aLocation.Length())));
- RootedObject globalObj(aCx, GetSharedGlobal(aCx));
- NS_ENSURE_TRUE(globalObj, NS_ERROR_FAILURE);
+ RootedObject globalObj(aCx, GetSharedGlobal());
+ MOZ_ASSERT(globalObj);
MOZ_ASSERT_IF(NS_IsMainThread(),
xpc::Scriptability::Get(globalObj).Allowed());
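Because the loader global is now created eagerly, GetOrCreateDevToolsLoader() needs a JSContext at call time. A hedged sketch of an updated caller; the AutoJSAPI setup mirrors InitStatics above, though real call sites may already have a context in hand:

    dom::AutoJSAPI jsapi;
    jsapi.Init();
    mozJSModuleLoader* devToolsLoader =
        mozJSModuleLoader::GetOrCreateDevToolsLoader(jsapi.cx());
    // The shared global is guaranteed to exist once the loader is returned.
    MOZ_ASSERT(devToolsLoader->GetSharedGlobal());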
diff --git a/js/xpconnect/loader/mozJSModuleLoader.h b/js/xpconnect/loader/mozJSModuleLoader.h
index 0b8e5f85d9..ac118c507d 100644
--- a/js/xpconnect/loader/mozJSModuleLoader.h
+++ b/js/xpconnect/loader/mozJSModuleLoader.h
@@ -76,9 +76,14 @@ class mozJSModuleLoader final : public nsIMemoryReporter {
return sSelf;
}
- JSObject* GetSharedGlobal(JSContext* aCx);
+ JSObject* GetSharedGlobal() {
+ MOZ_ASSERT(mLoaderGlobal);
+ return mLoaderGlobal;
+ }
private:
+ void InitSharedGlobal(JSContext* aCx);
+
void InitSyncModuleLoaderForGlobal(nsIGlobalObject* aGlobal);
void DisconnectSyncModuleLoaderFromGlobal();
@@ -86,7 +91,7 @@ class mozJSModuleLoader final : public nsIMemoryReporter {
public:
static mozJSModuleLoader* GetDevToolsLoader() { return sDevToolsLoader; }
- static mozJSModuleLoader* GetOrCreateDevToolsLoader();
+ static mozJSModuleLoader* GetOrCreateDevToolsLoader(JSContext* aCx);
nsresult ImportInto(const nsACString& aResourceURI,
JS::HandleValue aTargetObj, JSContext* aCx, uint8_t aArgc,
diff --git a/js/xpconnect/loader/nsImportModule.cpp b/js/xpconnect/loader/nsImportModule.cpp
index a313c44388..3720a444a6 100644
--- a/js/xpconnect/loader/nsImportModule.cpp
+++ b/js/xpconnect/loader/nsImportModule.cpp
@@ -39,44 +39,11 @@ static void AnnotateCrashReportWithJSException(JSContext* aCx,
aURI, file.get(), line, column,
NS_ConvertUTF16toUTF8(msg).get());
- CrashReporter::AnnotateCrashReport(
+ CrashReporter::RecordAnnotationNSCString(
CrashReporter::Annotation::JSModuleLoadError, errorString);
}
}
-nsresult ImportModule(const char* aURI, const char* aExportName,
- const nsIID& aIID, void** aResult, bool aInfallible) {
- AutoJSAPI jsapi;
- MOZ_ALWAYS_TRUE(jsapi.Init(xpc::PrivilegedJunkScope()));
- JSContext* cx = jsapi.cx();
-
- JS::RootedObject global(cx);
- JS::RootedObject exports(cx);
- nsresult rv = mozJSModuleLoader::Get()->Import(cx, nsDependentCString(aURI),
- &global, &exports);
- if (NS_WARN_IF(NS_FAILED(rv))) {
- if (aInfallible) {
- AnnotateCrashReportWithJSException(cx, aURI);
-
- MOZ_CRASH_UNSAFE_PRINTF("Failed to load critical module \"%s\"", aURI);
- }
- return rv;
- }
-
- if (aExportName) {
- JS::RootedValue namedExport(cx);
- if (!JS_GetProperty(cx, exports, aExportName, &namedExport)) {
- return NS_ERROR_FAILURE;
- }
- if (!namedExport.isObject()) {
- return NS_ERROR_XPC_BAD_CONVERT_JS;
- }
- exports.set(&namedExport.toObject());
- }
-
- return nsXPConnect::XPConnect()->WrapJS(cx, exports, aIID, aResult);
-}
-
nsresult ImportESModule(const char* aURI, const char* aExportName,
const nsIID& aIID, void** aResult, bool aInfallible) {
AutoJSAPI jsapi;
diff --git a/js/xpconnect/loader/nsImportModule.h b/js/xpconnect/loader/nsImportModule.h
index 31f6f8c7c1..16a5c40a88 100644
--- a/js/xpconnect/loader/nsImportModule.h
+++ b/js/xpconnect/loader/nsImportModule.h
@@ -16,124 +16,12 @@
namespace mozilla {
namespace loader {
-nsresult ImportModule(const char* aURI, const char* aExportName,
- const nsIID& aIID, void** aResult, bool aInfallible);
-
nsresult ImportESModule(const char* aURI, const char* aExportName,
const nsIID& aIID, void** aResult, bool aInfallible);
} // namespace loader
} // namespace mozilla
-class MOZ_STACK_CLASS nsImportModule final : public nsCOMPtr_helper {
- public:
- nsImportModule(const char* aURI, const char* aExportName, nsresult* aErrorPtr,
- bool aInfallible)
- : mURI(aURI),
- mExportName(aExportName),
- mErrorPtr(aErrorPtr),
- mInfallible(aInfallible) {
- MOZ_ASSERT_IF(mErrorPtr, !mInfallible);
- }
-
- virtual nsresult NS_FASTCALL operator()(const nsIID& aIID,
- void** aResult) const override {
- nsresult rv = ::mozilla::loader::ImportModule(mURI, mExportName, aIID,
- aResult, mInfallible);
- if (mErrorPtr) {
- *mErrorPtr = rv;
- }
- return rv;
- }
-
- private:
- const char* mURI;
- const char* mExportName;
- nsresult* mErrorPtr;
- bool mInfallible;
-};
-
-/**
- * These helpers make it considerably easier for C++ code to import a JS module
- * and wrap it in an appropriately-defined XPIDL interface for its exports.
- * Typical usage is something like:
- *
- * Foo.jsm:
- *
- * var EXPORTED_SYMBOLS = ["foo"];
- *
- * function foo(bar) {
- * return bar.toString();
- * }
- *
- * mozIFoo.idl:
- *
- * interface mozIFoo : nsISupports {
- * AString foo(double meh);
- * }
- *
- * Thing.cpp:
- *
- * nsCOMPtr<mozIFoo> foo = do_ImportModule(
- * "resource://meh/Foo.jsm");
- *
- * MOZ_TRY(foo->Foo(42));
- *
- * For JS modules which export all fields within a single named object, a second
- * argument can be passed naming that object.
- *
- * Foo.jsm:
- *
- * var EXPORTED_SYMBOLS = ["Foo"];
- *
- * var Foo = {
- * function foo(bar) {
- * return bar.toString();
- * }
- * };
- *
- * Thing.cpp:
- *
- * nsCOMPtr<mozIFoo> foo = do_ImportModule(
- * "resource:://meh/Foo.jsm", "Foo");
- */
-
-template <size_t N>
-inline nsImportModule do_ImportModule(const char (&aURI)[N]) {
- return {aURI, nullptr, nullptr, /* infallible */ true};
-}
-
-template <size_t N>
-inline nsImportModule do_ImportModule(const char (&aURI)[N],
- const mozilla::fallible_t&) {
- return {aURI, nullptr, nullptr, /* infallible */ false};
-}
-
-template <size_t N>
-inline nsImportModule do_ImportModule(const char (&aURI)[N], nsresult* aRv) {
- return {aURI, nullptr, aRv, /* infallible */ false};
-}
-
-template <size_t N, size_t N2>
-inline nsImportModule do_ImportModule(const char (&aURI)[N],
- const char (&aExportName)[N2]) {
- return {aURI, aExportName, nullptr, /* infallible */ true};
-}
-
-template <size_t N, size_t N2>
-inline nsImportModule do_ImportModule(const char (&aURI)[N],
- const char (&aExportName)[N2],
- const mozilla::fallible_t&) {
- return {aURI, aExportName, nullptr, /* infallible */ false};
-}
-
-template <size_t N, size_t N2>
-inline nsImportModule do_ImportModule(const char (&aURI)[N],
- const char (&aExportName)[N2],
- nsresult* aRv) {
- return {aURI, aExportName, aRv, /* infallible */ false};
-}
-
class MOZ_STACK_CLASS nsImportESModule final : public nsCOMPtr_helper {
public:
nsImportESModule(const char* aURI, const char* aExportName,
diff --git a/js/xpconnect/src/Sandbox.cpp b/js/xpconnect/src/Sandbox.cpp
index 77dbf2d02e..3e931320a9 100644
--- a/js/xpconnect/src/Sandbox.cpp
+++ b/js/xpconnect/src/Sandbox.cpp
@@ -50,6 +50,7 @@
#include "mozilla/dom/DOMParserBinding.h"
#include "mozilla/dom/DOMTokenListBinding.h"
#include "mozilla/dom/ElementBinding.h"
+#include "mozilla/dom/ElementInternalsBinding.h"
#include "mozilla/dom/EventBinding.h"
#include "mozilla/dom/Exceptions.h"
#include "mozilla/dom/IndexedDatabaseManager.h"
@@ -901,6 +902,8 @@ bool xpc::GlobalProperties::Parse(JSContext* cx, JS::HandleObject obj) {
CSS = true;
} else if (JS_LinearStringEqualsLiteral(nameStr, "CSSRule")) {
CSSRule = true;
+ } else if (JS_LinearStringEqualsLiteral(nameStr, "CustomStateSet")) {
+ CustomStateSet = true;
} else if (JS_LinearStringEqualsLiteral(nameStr, "Document")) {
Document = true;
} else if (JS_LinearStringEqualsLiteral(nameStr, "Directory")) {
@@ -1027,6 +1030,11 @@ bool xpc::GlobalProperties::Define(JSContext* cx, JS::HandleObject obj) {
return false;
}
+ if (CustomStateSet &&
+ !dom::CustomStateSet_Binding::GetConstructorObject(cx)) {
+ return false;
+ }
+
if (Directory && !dom::Directory_Binding::GetConstructorObject(cx))
return false;
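
The two Sandbox.cpp hunks above add "CustomStateSet" to xpc::GlobalProperties, the table behind the sandbox wantGlobalProperties option (the matching bitfield is added in xpcprivate.h further down). A minimal sketch of how privileged JS could opt into the new constructor once this lands; the sandbox name is illustrative and not part of the patch:

    // Expose the CustomStateSet constructor on a system-principal sandbox.
    const principal = Services.scriptSecurityManager.getSystemPrincipal();
    const sandbox = Cu.Sandbox(principal, {
      sandboxName: "custom-state-set-demo",      // illustrative name
      wantGlobalProperties: ["CustomStateSet"],  // parsed by GlobalProperties::Parse above
    });
    // GlobalProperties::Define now installs the constructor on the sandbox global.
    Cu.evalInSandbox("typeof CustomStateSet;", sandbox); // "function"
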
diff --git a/js/xpconnect/src/XPCComponents.cpp b/js/xpconnect/src/XPCComponents.cpp
index 9fa4e629aa..25feaf851c 100644
--- a/js/xpconnect/src/XPCComponents.cpp
+++ b/js/xpconnect/src/XPCComponents.cpp
@@ -1777,7 +1777,7 @@ nsXPCComponents_Utils::GetFunctionSourceLocation(HandleValue funcValue,
NS_ENSURE_TRUE(func, NS_ERROR_INVALID_ARG);
RootedScript script(cx, JS_GetFunctionScript(cx, func));
- NS_ENSURE_TRUE(func, NS_ERROR_FAILURE);
+ NS_ENSURE_TRUE(script, NS_ERROR_FAILURE);
AppendUTF8toUTF16(nsDependentCString(JS_GetScriptFilename(script)),
filename);
diff --git a/js/xpconnect/src/XPCConvert.cpp b/js/xpconnect/src/XPCConvert.cpp
index 560ac375d7..c11e4ccdfb 100644
--- a/js/xpconnect/src/XPCConvert.cpp
+++ b/js/xpconnect/src/XPCConvert.cpp
@@ -250,27 +250,18 @@ bool XPCConvert::NativeData2JS(JSContext* cx, MutableHandleValue d,
// almost always ASCII, so the inexact allocations below
// should be fine.
- if (IsUtf8Latin1(*utf8String)) {
- using UniqueLatin1Chars =
- js::UniquePtr<JS::Latin1Char[], JS::FreePolicy>;
-
- UniqueLatin1Chars buffer(static_cast<JS::Latin1Char*>(
- JS_string_malloc(cx, allocLen.value())));
- if (!buffer) {
+  // Check whether the string buffer is already valid latin1 (i.e. it is ASCII).
+  //
+  // NOTE: XPCStringConvert::UTF8ToJSVal cannot be used here because
+  // it requires a valid UTF-8 sequence.
+ if (mozilla::IsAscii(*utf8String)) {
+ nsStringBuffer* buf;
+ if (!XPCStringConvert::Latin1ToJSVal(cx, *utf8String, &buf, d)) {
return false;
}
-
- size_t written = LossyConvertUtf8toLatin1(
- *utf8String, Span(reinterpret_cast<char*>(buffer.get()), len));
- buffer[written] = 0;
-
- // written can never exceed len, so the truncation is OK.
- JSString* str = JS_NewLatin1String(cx, std::move(buffer), written);
- if (!str) {
- return false;
+ if (buf) {
+ buf->AddRef();
}
-
- d.setString(str);
return true;
}
@@ -670,24 +661,7 @@ bool XPCConvert::JSData2Native(JSContext* cx, void* d, HandleValue s,
return true;
}
- JSLinearString* linear = JS_EnsureLinearString(cx, str);
- if (!linear) {
- return false;
- }
-
- size_t utf8Length = JS::GetDeflatedUTF8StringLength(linear);
- if (!rs->SetLength(utf8Length, fallible)) {
- if (pErr) {
- *pErr = NS_ERROR_OUT_OF_MEMORY;
- }
- return false;
- }
-
- mozilla::DebugOnly<size_t> written = JS::DeflateStringToUTF8Buffer(
- linear, mozilla::Span(rs->BeginWriting(), utf8Length));
- MOZ_ASSERT(written == utf8Length);
-
- return true;
+ return AssignJSString(cx, *rs, str);
}
case nsXPTType::T_CSTRING: {
diff --git a/js/xpconnect/src/XPCJSContext.cpp b/js/xpconnect/src/XPCJSContext.cpp
index 7bf574f675..8f3621f9c5 100644
--- a/js/xpconnect/src/XPCJSContext.cpp
+++ b/js/xpconnect/src/XPCJSContext.cpp
@@ -812,11 +812,6 @@ void xpc::SetPrefableContextOptions(JS::ContextOptions& options) {
.setWasmIon(Preferences::GetBool(JS_OPTIONS_DOT_STR "wasm_optimizingjit"))
.setWasmBaseline(
Preferences::GetBool(JS_OPTIONS_DOT_STR "wasm_baselinejit"))
-#define WASM_FEATURE(NAME, LOWER_NAME, STAGE, COMPILE_PRED, COMPILER_PRED, \
- FLAG_PRED, FLAG_FORCE_ON, FLAG_FUZZ_ON, SHELL, PREF) \
- .setWasm##NAME(Preferences::GetBool(JS_OPTIONS_DOT_STR "wasm_" PREF))
- JS_FOR_WASM_FEATURES(WASM_FEATURE)
-#undef WASM_FEATURE
.setWasmVerbose(Preferences::GetBool(JS_OPTIONS_DOT_STR "wasm_verbose"))
.setAsyncStack(Preferences::GetBool(JS_OPTIONS_DOT_STR "asyncstack"))
.setAsyncStackCaptureDebuggeeOnly(Preferences::GetBool(
diff --git a/js/xpconnect/src/XPCJSRuntime.cpp b/js/xpconnect/src/XPCJSRuntime.cpp
index c4d272b950..fd495ec964 100644
--- a/js/xpconnect/src/XPCJSRuntime.cpp
+++ b/js/xpconnect/src/XPCJSRuntime.cpp
@@ -8,13 +8,13 @@
#include "mozilla/ArrayUtils.h"
#include "mozilla/AutoRestore.h"
+#include "mozilla/AppShutdown.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/UniquePtr.h"
#include "xpcprivate.h"
#include "xpcpublic.h"
#include "XPCMaps.h"
-#include "XPCWrapper.h"
#include "XPCJSMemoryReporter.h"
#include "XrayWrapper.h"
#include "WrapperFactory.h"
@@ -28,11 +28,9 @@
#include "nsIObserverService.h"
#include "mozilla/dom/Document.h"
#include "nsIRunnable.h"
-#include "nsIPlatformInfo.h"
#include "nsPIDOMWindow.h"
#include "nsPrintfCString.h"
#include "nsScriptSecurityManager.h"
-#include "nsThreadPool.h"
#include "nsWindowSizes.h"
#include "mozilla/BasePrincipal.h"
#include "mozilla/Preferences.h"
@@ -40,6 +38,7 @@
#include "mozilla/Services.h"
#include "mozilla/dom/ScriptLoader.h"
#include "mozilla/dom/ScriptSettings.h"
+#include "mozilla/glean/GleanMetrics.h"
#include "nsContentUtils.h"
#include "nsCCUncollectableMarker.h"
@@ -613,9 +612,13 @@ JSObject* NACScope(JSObject* global) {
return scope;
}
-JSObject* PrivilegedJunkScope() { return XPCJSRuntime::Get()->LoaderGlobal(); }
+JSObject* PrivilegedJunkScope() {
+ return mozJSModuleLoader::Get()->GetSharedGlobal();
+}
-JSObject* CompilationScope() { return XPCJSRuntime::Get()->LoaderGlobal(); }
+JSObject* CompilationScope() {
+ return mozJSModuleLoader::Get()->GetSharedGlobal();
+}
nsGlobalWindowInner* WindowOrNull(JSObject* aObj) {
MOZ_ASSERT(aObj);
@@ -1454,6 +1457,9 @@ static void ReportZoneStats(const JS::ZoneStats& zStats,
zStats.regExpSharedsMallocHeap,
"Shared compiled regexp data.");
+ ZRREPORT_BYTES(pathPrefix + "zone-object"_ns, zStats.zoneObject,
+ "The JS::Zone object itself.");
+
ZRREPORT_BYTES(pathPrefix + "regexp-zone"_ns, zStats.regexpZone,
"The regexp zone and regexp data.");
@@ -2638,9 +2644,6 @@ static void SetUseCounterCallback(JSObject* obj, JSUseCounter counter) {
case JSUseCounter::WASM_LEGACY_EXCEPTIONS:
SetUseCounter(obj, eUseCounter_custom_JS_wasm_legacy_exceptions);
break;
- case JSUseCounter::LATE_WEEKDAY:
- SetUseCounter(obj, eUseCounter_custom_JS_late_weekday);
- break;
default:
MOZ_ASSERT_UNREACHABLE("Unexpected JSUseCounter id");
}
@@ -2905,8 +2908,6 @@ void ConstructUbiNode(void* storage, JSObject* ptr) {
}
void XPCJSRuntime::Initialize(JSContext* cx) {
- mLoaderGlobal.init(cx, nullptr);
-
// these jsids filled in later when we have a JSContext to work with.
mStrIDs[0] = JS::PropertyKey::Void();
@@ -3191,20 +3192,6 @@ void XPCJSRuntime::DeleteSingletonScopes() {
sandbox->ReleaseWrapper(sandbox);
mUnprivilegedJunkScope = nullptr;
}
- mLoaderGlobal = nullptr;
-}
-
-JSObject* XPCJSRuntime::LoaderGlobal() {
- if (!mLoaderGlobal) {
- RefPtr loader = mozJSModuleLoader::Get();
-
- dom::AutoJSAPI jsapi;
- jsapi.Init();
-
- mLoaderGlobal = loader->GetSharedGlobal(jsapi.cx());
- MOZ_RELEASE_ASSERT(!JS_IsExceptionPending(jsapi.cx()));
- }
- return mLoaderGlobal;
}
uint32_t GetAndClampCPUCount() {
diff --git a/js/xpconnect/src/XPCShellImpl.cpp b/js/xpconnect/src/XPCShellImpl.cpp
index 38a02e9b2f..b36ba56aed 100644
--- a/js/xpconnect/src/XPCShellImpl.cpp
+++ b/js/xpconnect/src/XPCShellImpl.cpp
@@ -1084,6 +1084,10 @@ int XRE_XPCShellMain(int argc, char** argv, char** envp,
// stability, we should instantiate COM ASAP so that we can ensure that these
// global settings are configured before anything can interfere.
mscom::ProcessRuntime mscom;
+
+# ifdef MOZ_SANDBOX
+ nsAutoString binDirPath;
+# endif
#endif
// The provider needs to outlive the call to shutting down XPCOM.
@@ -1103,6 +1107,11 @@ int XRE_XPCShellMain(int argc, char** argv, char** envp,
return 1;
}
+#if defined(XP_WIN) && defined(MOZ_SANDBOX)
+  // We need the binary directory to initialize the Windows sandbox.
+ MOZ_ALWAYS_SUCCEEDS(appDir->GetPath(binDirPath));
+#endif
+
dirprovider.SetAppFile(appFile);
nsCOMPtr<nsIFile> greDir;
@@ -1301,7 +1310,7 @@ int XRE_XPCShellMain(int argc, char** argv, char** envp,
# if defined(MOZ_SANDBOX)
// Required for sandboxed child processes.
if (aShellData->sandboxBrokerServices) {
- SandboxBroker::Initialize(aShellData->sandboxBrokerServices);
+ SandboxBroker::Initialize(aShellData->sandboxBrokerServices, binDirPath);
SandboxBroker::GeckoDependentInitialize();
} else {
NS_WARNING(
diff --git a/js/xpconnect/src/xpcprivate.h b/js/xpconnect/src/xpcprivate.h
index 57a4b1e02e..1e873d9c05 100644
--- a/js/xpconnect/src/xpcprivate.h
+++ b/js/xpconnect/src/xpcprivate.h
@@ -560,7 +560,6 @@ class XPCJSRuntime final : public mozilla::CycleCollectedJSRuntime {
JSObject* UnprivilegedJunkScope(const mozilla::fallible_t&);
bool IsUnprivilegedJunkScope(JSObject*);
- JSObject* LoaderGlobal();
void DeleteSingletonScopes();
@@ -610,7 +609,6 @@ class XPCJSRuntime final : public mozilla::CycleCollectedJSRuntime {
JS::GCSliceCallback mPrevGCSliceCallback;
JS::DoCycleCollectionCallback mPrevDoCycleCollectionCallback;
mozilla::WeakPtr<SandboxPrivate> mUnprivilegedJunkScope;
- JS::PersistentRootedObject mLoaderGlobal;
RefPtr<AsyncFreeSnowWhite> mAsyncSnowWhiteFreer;
friend class XPCJSContext;
@@ -2193,6 +2191,7 @@ struct GlobalProperties {
bool ChromeUtils : 1;
bool CSS : 1;
bool CSSRule : 1;
+ bool CustomStateSet : 1;
bool Directory : 1;
bool Document : 1;
bool DOMException : 1;
diff --git a/js/xpconnect/tests/browser/browser.toml b/js/xpconnect/tests/browser/browser.toml
index c7c72c71e7..59b8e08657 100644
--- a/js/xpconnect/tests/browser/browser.toml
+++ b/js/xpconnect/tests/browser/browser.toml
@@ -9,8 +9,6 @@ support-files = [
"browser_promise_userInteractionHandling.html"
]
-["browser_date_telemetry.js"]
-
["browser_dead_object.js"]
["browser_exception_leak.js"]
@@ -28,4 +26,3 @@ support-files = [
["browser_weak_xpcwjs.js"]
["browser_weak_xpcwn.js"]
-
diff --git a/js/xpconnect/tests/browser/browser_date_telemetry.js b/js/xpconnect/tests/browser/browser_date_telemetry.js
deleted file mode 100644
index b9c653db53..0000000000
--- a/js/xpconnect/tests/browser/browser_date_telemetry.js
+++ /dev/null
@@ -1,70 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/.
- */
-
-const triggers = [
- "Sep 26 Tues 1995",
- "Sep 26 1995 Tues",
- "Sep 26 1995 Tues 09:30",
- "Sep 26 1995 09:Tues:30",
- "Sep 26 1995 09:30 Tues GMT",
- "Sep 26 1995 09:30 GMT Tues",
-
- "26 Tues Sep 1995",
- "26 Sep Tues 1995",
- "26 Sep 1995 Tues",
-
- "1995-09-26 Tues",
-
- // Multiple occurences should only trigger 1 counter
- "Sep 26 Tues 1995 Tues",
-];
-const nonTriggers = [
- "Sep 26 1995",
- "Tues Sep 26 1995",
- "Sep Tues 26 1995",
-
- // Invalid format shouldn't trigger the counter
- "Sep 26 Tues 1995 foo",
-];
-
-function getCount() {
- return Glean.useCounterPage.jsLateWeekday.testGetValue() ?? 0;
-}
-
-/**
- * Opens and closes a browser tab with minimal JS code which parses
- * the given Date format.
- */
-async function parseFormat(format, call = "new Date") {
- let newTab = await BrowserTestUtils.openNewForegroundTab(
- gBrowser,
- `data:text/html;charset=utf-8,<script>${call}("${format}")</script>`
- );
- BrowserTestUtils.removeTab(newTab);
-}
-
-add_task(async function test_date_telemetry() {
- let sum = getCount();
-
- // waitForCondition cannot be used to test if nothing has changed,
- // so these tests aren't as reliable as the ones in the next loop.
- // If you encounter an inexplicable failure in any of these tests,
- // debug by adding a delay to the end of the parseFormat function.
- for (const format of nonTriggers) {
- await parseFormat(format);
- const count = getCount();
- is(count, sum, `${format} should not trigger telemetry`);
- sum = count;
- }
-
- for (const [i, format] of triggers.entries()) {
- // Alternate between Date constructor and Date.parse
- await parseFormat(format, ["new Date", "Date.parse"][i % 2]);
- await BrowserTestUtils.waitForCondition(() => getCount() > sum);
- const count = getCount();
- is(count, sum + 1, `${format} should trigger telemetry`);
- sum = count;
- }
-});
diff --git a/js/xpconnect/tests/browser/browser_dead_object.js b/js/xpconnect/tests/browser/browser_dead_object.js
index b8b2dd0688..a79b46bc36 100644
--- a/js/xpconnect/tests/browser/browser_dead_object.js
+++ b/js/xpconnect/tests/browser/browser_dead_object.js
@@ -19,13 +19,10 @@ add_task(async function test() {
let { TestUtils } = ChromeUtils.importESModule(
"resource://testing-common/TestUtils.sys.mjs"
);
- let promise = TestUtils.topicObserved(
- "inner-window-nuked",
- (subject, data) => {
- let id = subject.QueryInterface(Ci.nsISupportsPRUint64).data;
- return id == args.innerWindowId;
- }
- );
+ let promise = TestUtils.topicObserved("inner-window-nuked", subject => {
+ let id = subject.QueryInterface(Ci.nsISupportsPRUint64).data;
+ return id == args.innerWindowId;
+ });
content.location = "http://mochi.test:8888/";
await promise;
return Cu.isDeadWrapper(doc);
diff --git a/js/xpconnect/tests/browser/browser_exception_leak.js b/js/xpconnect/tests/browser/browser_exception_leak.js
index be860355bc..80358758ec 100644
--- a/js/xpconnect/tests/browser/browser_exception_leak.js
+++ b/js/xpconnect/tests/browser/browser_exception_leak.js
@@ -40,13 +40,10 @@ add_task(async function test() {
// eslint-disable-next-line no-unused-vars
let doc = content.document;
- let promise = TestUtils.topicObserved(
- "inner-window-nuked",
- (subject, data) => {
- let id = subject.QueryInterface(Ci.nsISupportsPRUint64).data;
- return id == args.innerWindowId;
- }
- );
+ let promise = TestUtils.topicObserved("inner-window-nuked", subject => {
+ let id = subject.QueryInterface(Ci.nsISupportsPRUint64).data;
+ return id == args.innerWindowId;
+ });
content.location = "http://mochi.test:8888/";
await promise;
diff --git a/js/xpconnect/tests/chrome/test_bug799348.xhtml b/js/xpconnect/tests/chrome/test_bug799348.xhtml
index 91de48164f..99e36eaae9 100644
--- a/js/xpconnect/tests/chrome/test_bug799348.xhtml
+++ b/js/xpconnect/tests/chrome/test_bug799348.xhtml
@@ -22,10 +22,10 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=799348
var gCalledOnload = false;
var myObserver = {
QueryInterface: ChromeUtils.generateQI(["nsIObserver"]),
- observe(win, topic, data) {
+ observe(win, topic) {
if (topic == "domwindowopened") {
ok(!gCalledOnload, "domwindowopened notification fired before onload");
- win.addEventListener("load", function(evt) {
+ win.addEventListener("load", function() {
gCalledOnload = true;
win.close();
});
diff --git a/js/xpconnect/tests/chrome/test_cows.xhtml b/js/xpconnect/tests/chrome/test_cows.xhtml
index 69d7d3e9e6..c6e27a2c47 100644
--- a/js/xpconnect/tests/chrome/test_cows.xhtml
+++ b/js/xpconnect/tests/chrome/test_cows.xhtml
@@ -91,7 +91,7 @@ function COWTests() {
});
// Test function objects.
- var func = function(x) { return 42; };
+ var func = function() { return 42; };
func.foo = "foo property";
var funcCOW = getCOW(func);
try {
diff --git a/js/xpconnect/tests/chrome/test_windowProxyDeadWrapper.html b/js/xpconnect/tests/chrome/test_windowProxyDeadWrapper.html
index a6f0ac95be..234a38ebeb 100644
--- a/js/xpconnect/tests/chrome/test_windowProxyDeadWrapper.html
+++ b/js/xpconnect/tests/chrome/test_windowProxyDeadWrapper.html
@@ -51,7 +51,7 @@ async function go() {
// once the window is destroyed.
frame.remove();
- TestUtils.topicObserved("outer-window-nuked", (subject, data) => {
+ TestUtils.topicObserved("outer-window-nuked", (subject) => {
let id = subject.QueryInterface(Ci.nsISupportsPRUint64).data;
return id == winID;
}).then(() => {
diff --git a/js/xpconnect/tests/chrome/test_xrayToJS.xhtml b/js/xpconnect/tests/chrome/test_xrayToJS.xhtml
index cc009a2d55..9943055aea 100644
--- a/js/xpconnect/tests/chrome/test_xrayToJS.xhtml
+++ b/js/xpconnect/tests/chrome/test_xrayToJS.xhtml
@@ -360,7 +360,7 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=933681
return typedArrayClasses.includes(classname);
}
- function propertyIsGetter(obj, name, classname) {
+ function propertyIsGetter(obj, name) {
return !!Object.getOwnPropertyDescriptor(obj, name).get;
}
@@ -800,7 +800,7 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=933681
checkThrows(function() { trickyObject.hasOwnProperty = 33; }, /shadow/,
"Should reject shadowing of pre-existing inherited properties over Xrays");
- checkThrows(function() { Object.defineProperty(trickyObject, 'rejectedProp', { get() {}}); },
+ checkThrows(function() { Object.defineProperty(trickyObject, 'rejectedProp', { get() { return undefined; }}); },
/accessor property/, "Should reject accessor property definition");
}
@@ -1058,7 +1058,7 @@ for (var prop of props) {
is(t.delete(null), true, "Key null can be deleted");
let values = [];
- t.forEach((value, key) => values.push(value));
+ t.forEach((value) => values.push(value));
is(values.toString(), "a,5", "forEach enumerates values correctly");
t.clear();
diff --git a/js/xpconnect/tests/components/native/moz.build b/js/xpconnect/tests/components/native/moz.build
index ba3d227c5b..662cfbcc85 100644
--- a/js/xpconnect/tests/components/native/moz.build
+++ b/js/xpconnect/tests/components/native/moz.build
@@ -14,7 +14,6 @@ UNIFIED_SOURCES += [
"xpctest_esmreturncode.cpp",
"xpctest_module.cpp",
"xpctest_params.cpp",
- "xpctest_returncode.cpp",
]
LOCAL_INCLUDES += [
diff --git a/js/xpconnect/tests/components/native/xpctest_module.cpp b/js/xpconnect/tests/components/native/xpctest_module.cpp
index 42b4259d5b..11200240b1 100644
--- a/js/xpconnect/tests/components/native/xpctest_module.cpp
+++ b/js/xpconnect/tests/components/native/xpctest_module.cpp
@@ -35,8 +35,6 @@ nsresult xpcTestRegisterComponents() {
"@mozilla.org/js/xpc/test/native/ObjectReadWrite;1"));
MOZ_TRY(RegisterFactory<nsXPCTestParams>(
"@mozilla.org/js/xpc/test/native/Params;1"));
- MOZ_TRY(RegisterFactory<nsXPCTestReturnCodeParent>(
- "@mozilla.org/js/xpc/test/native/ReturnCodeParent;1"));
MOZ_TRY(RegisterFactory<nsXPCTestESMReturnCodeParent>(
"@mozilla.org/js/xpc/test/native/ESMReturnCodeParent;1"));
MOZ_TRY(RegisterFactory<xpcTestCEnums>(
diff --git a/js/xpconnect/tests/components/native/xpctest_private.h b/js/xpconnect/tests/components/native/xpctest_private.h
index c5d7bc86cf..d2d12d4ef8 100644
--- a/js/xpconnect/tests/components/native/xpctest_private.h
+++ b/js/xpconnect/tests/components/native/xpctest_private.h
@@ -67,17 +67,6 @@ class nsXPCTestParams final : public nsIXPCTestParams {
~nsXPCTestParams() = default;
};
-class nsXPCTestReturnCodeParent final : public nsIXPCTestReturnCodeParent {
- public:
- NS_DECL_ISUPPORTS
- NS_DECL_NSIXPCTESTRETURNCODEPARENT
-
- nsXPCTestReturnCodeParent() = default;
-
- private:
- ~nsXPCTestReturnCodeParent() = default;
-};
-
class nsXPCTestESMReturnCodeParent final : public nsIXPCTestReturnCodeParent {
public:
NS_DECL_ISUPPORTS
diff --git a/js/xpconnect/tests/components/native/xpctest_returncode.cpp b/js/xpconnect/tests/components/native/xpctest_returncode.cpp
deleted file mode 100644
index 3a52f616d9..0000000000
--- a/js/xpconnect/tests/components/native/xpctest_returncode.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "xpctest_private.h"
-#include "nsComponentManagerUtils.h"
-#include "nsImportModule.h"
-
-NS_IMPL_ISUPPORTS(nsXPCTestReturnCodeParent, nsIXPCTestReturnCodeParent)
-
-NS_IMETHODIMP nsXPCTestReturnCodeParent::CallChild(int32_t childBehavior,
- nsresult* _retval) {
- nsresult rv;
- nsCOMPtr<nsIXPCTestReturnCodeChild> child(do_ImportModule(
- "resource://test/ReturnCodeChild.jsm", "ReturnCodeChild", &rv));
- NS_ENSURE_SUCCESS(rv, rv);
- rv = child->DoIt(childBehavior);
- *_retval = rv;
- return NS_OK;
-}
diff --git a/js/xpconnect/tests/idl/xpctest_esmreturncode.idl b/js/xpconnect/tests/idl/xpctest_esmreturncode.idl
deleted file mode 100644
index ac17feda3f..0000000000
--- a/js/xpconnect/tests/idl/xpctest_esmreturncode.idl
+++ /dev/null
@@ -1,45 +0,0 @@
-/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-/**
- * Test the use of Components.returnCode with system ESM
- *
- * This ("parent") interface defines a method that in-turn calls another
- * ("child") interface implemented in JS, and returns the nsresult from that
- * child interface. The child interface manages the return code by way of
- * Components.returnCode.
- */
-
-#include "nsISupports.idl"
-
-
-[scriptable, uuid(494f9336-ad06-46ad-bbb4-b0010e27e12d)]
-interface nsIXPCTestESMReturnCodeParent : nsISupports {
- // Calls the "child" interface with the specified behavior flag. Returns
- // the NSRESULT from the child interface.
- nsresult callChild(in long childBehavior);
-};
-
-[scriptable, uuid(dee07408-75d8-4968-a37c-fe0d48ccd1ac)]
-interface nsIXPCTestESMReturnCodeChild : nsISupports {
- void doIt(in long behavior);
-
- // Flags to control that the child does.
- // child will throw a JS exception
- const long CHILD_SHOULD_THROW = 0;
-
- // child will just return normally
- const long CHILD_SHOULD_RETURN_SUCCESS = 1;
-
- // child will return after setting Components.returnCode to NS_ERROR_FAILURE
- const long CHILD_SHOULD_RETURN_RESULTCODE = 2;
-
- // child will set Components.returnCode to NS_ERROR_UNEXPECTED, then create
- // a new component that sets Components.returnCode to NS_ERROR_FAILURE.
- // Our caller should see the NS_ERROR_UNEXPECTED we set rather than the
- // value set later by the "inner" child.
- const long CHILD_SHOULD_NEST_RESULTCODES = 3;
-};
diff --git a/js/xpconnect/tests/idl/xpctest_utils.idl b/js/xpconnect/tests/idl/xpctest_utils.idl
index e59814272b..db135fcbe1 100644
--- a/js/xpconnect/tests/idl/xpctest_utils.idl
+++ b/js/xpconnect/tests/idl/xpctest_utils.idl
@@ -17,3 +17,26 @@ interface nsIXPCTestFunctionInterface : nsISupports {
interface nsIXPCTestUtils : nsISupports {
nsIXPCTestFunctionInterface doubleWrapFunction(in nsIXPCTestFunctionInterface f);
};
+
+/*
+ * Test that non-[scriptable] interfaces and [noscript] members are not
+ * generated for TypeScript bindings.
+ */
+
+[uuid(ddf64cfb-668a-4571-a900-0fe2babb6249)]
+interface nsIXPCTestNotScriptable : nsISupports {
+ // Empty.
+};
+
+[scriptable, uuid(1bbfe703-c67d-4995-b061-564c8a1c39d7)]
+interface nsIXPCTestNoScriptMembers : nsISupports {
+ [noscript]
+ attribute long noscriptProp;
+
+ attribute long exposedProp;
+
+ [noscript]
+ void noscriptMethod(in long arg);
+
+ void exposedMethod(in long arg);
+};
diff --git a/js/xpconnect/tests/unit/ReturnCodeChild.jsm b/js/xpconnect/tests/unit/ReturnCodeChild.jsm
deleted file mode 100644
index bf74453969..0000000000
--- a/js/xpconnect/tests/unit/ReturnCodeChild.jsm
+++ /dev/null
@@ -1,51 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-var EXPORTED_SYMBOLS = ["ReturnCodeChild"];
-
-function xpcWrap(obj, iface) {
- let ifacePointer = Cc[
- "@mozilla.org/supports-interface-pointer;1"
- ].createInstance(Ci.nsISupportsInterfacePointer);
-
- ifacePointer.data = obj;
- return ifacePointer.data.QueryInterface(iface);
-}
-
-var ReturnCodeChild = {
- QueryInterface: ChromeUtils.generateQI(["nsIXPCTestReturnCodeChild"]),
-
- doIt(behaviour) {
- switch (behaviour) {
- case Ci.nsIXPCTestReturnCodeChild.CHILD_SHOULD_THROW:
- throw(new Error("a requested error"));
- case Ci.nsIXPCTestReturnCodeChild.CHILD_SHOULD_RETURN_SUCCESS:
- return;
- case Ci.nsIXPCTestReturnCodeChild.CHILD_SHOULD_RETURN_RESULTCODE:
- Components.returnCode = Cr.NS_ERROR_FAILURE;
- return;
- case Ci.nsIXPCTestReturnCodeChild.CHILD_SHOULD_NEST_RESULTCODES:
- // Use xpconnect to create another instance of *this* component and
- // call that. This way we have crossed the xpconnect bridge twice.
-
- // We set *our* return code early - this should be what is returned
- // to our caller, even though our "inner" component will set it to
- // a different value that we will see (but our caller should not)
- Components.returnCode = Cr.NS_ERROR_UNEXPECTED;
- // call the child asking it to do the .returnCode set.
- let sub = xpcWrap(ReturnCodeChild, Ci.nsIXPCTestReturnCodeChild);
- let childResult = Cr.NS_OK;
- try {
- sub.doIt(Ci.nsIXPCTestReturnCodeChild.CHILD_SHOULD_RETURN_RESULTCODE);
- } catch (ex) {
- childResult = ex.result;
- }
- // write it to the console so the test can check it.
- let consoleService = Cc["@mozilla.org/consoleservice;1"]
- .getService(Ci.nsIConsoleService);
- consoleService.logStringMessage("nested child returned " + childResult);
- return;
- }
- }
-};
diff --git a/js/xpconnect/tests/unit/es6module_devtoolsLoader.sys.mjs b/js/xpconnect/tests/unit/es6module_devtoolsLoader.sys.mjs
index c7de54c82f..c8262f5f5c 100644
--- a/js/xpconnect/tests/unit/es6module_devtoolsLoader.sys.mjs
+++ b/js/xpconnect/tests/unit/es6module_devtoolsLoader.sys.mjs
@@ -7,23 +7,88 @@ export function increment() {
import { object } from "resource://test/es6module_devtoolsLoader.js";
export const importedObject = object;
-const importTrue = ChromeUtils.importESModule("resource://test/es6module_devtoolsLoader.js", { loadInDevToolsLoader : true });
-export const importESModuleTrue = importTrue.object;
+const importDevTools = ChromeUtils.importESModule("resource://test/es6module_devtoolsLoader.js", { global: "devtools" });
+export const importESModuleDevTools = importDevTools.object;
-const importFalse = ChromeUtils.importESModule("resource://test/es6module_devtoolsLoader.js", { loadInDevToolsLoader : false });
-export const importESModuleFalse = importFalse.object;
+const importShared = ChromeUtils.importESModule("resource://test/es6module_devtoolsLoader.js", { global: "shared" });
+export const importESModuleShared = importShared.object;
-const importNull = ChromeUtils.importESModule("resource://test/es6module_devtoolsLoader.js", {});
-export const importESModuleNull = importNull.object;
+const importCurrent = ChromeUtils.importESModule("resource://test/es6module_devtoolsLoader.js", { global: "current" });
+export const importESModuleCurrent = importCurrent.object;
-const importNull2 = ChromeUtils.importESModule("resource://test/es6module_devtoolsLoader.js");
-export const importESModuleNull2 = importNull2.object;
+const importContextual = ChromeUtils.importESModule("resource://test/es6module_devtoolsLoader.js", { global: "contextual" });
+export const importESModuleContextual = importContextual.object;
-const lazy = {};
-ChromeUtils.defineESModuleGetters(lazy, {
+let caught = false;
+try {
+ ChromeUtils.importESModule("resource://test/es6module_devtoolsLoader.js");
+} catch (e) {
+ caught = true;
+}
+export const importESModuleNoOptionFailed1 = caught;
+
+caught = false;
+try {
+ ChromeUtils.importESModule("resource://test/es6module_devtoolsLoader.js", {});
+} catch (e) {
+ caught = true;
+}
+export const importESModuleNoOptionFailed2 = caught;
+
+const lazyDevTools = {};
+ChromeUtils.defineESModuleGetters(lazyDevTools, {
+ object: "resource://test/es6module_devtoolsLoader.js",
+}, { global: "devtools" });
+
+export function importLazyDevTools() {
+ return lazyDevTools.object;
+}
+
+const lazyShared = {};
+ChromeUtils.defineESModuleGetters(lazyShared, {
+ object: "resource://test/es6module_devtoolsLoader.js",
+}, { global: "shared" });
+
+export function importLazyShared() {
+ return lazyShared.object;
+}
+
+const lazyCurrent = {};
+ChromeUtils.defineESModuleGetters(lazyCurrent, {
+ object: "resource://test/es6module_devtoolsLoader.js",
+}, { global: "current" });
+
+export function importLazyCurrent() {
+ return lazyCurrent.object;
+}
+
+const lazyContextual = {};
+ChromeUtils.defineESModuleGetters(lazyContextual, {
object: "resource://test/es6module_devtoolsLoader.js",
-});
+}, { global: "contextual" });
+
+export function importLazyContextual() {
+ return lazyContextual.object;
+}
+
+caught = false;
+try {
+ let lazy = {};
+  ChromeUtils.defineESModuleGetters(lazy, {
+ object: "resource://test/es6module_devtoolsLoader.js",
+ });
+} catch (e) {
+ caught = true;
+}
+export const importLazyNoOptionFailed1 = caught;
-export function importLazy() {
- return lazy.object;
+caught = false;
+try {
+ let lazy = {};
+  ChromeUtils.defineESModuleGetters(lazy, {
+ object: "resource://test/es6module_devtoolsLoader.js",
+ }, {});
+} catch (e) {
+ caught = true;
}
+export const importLazyNoOptionFailed2 = caught;
diff --git a/js/xpconnect/tests/unit/import_shared_in_worker.js b/js/xpconnect/tests/unit/import_shared_in_worker.js
index bc92fe26a6..170254fb21 100644
--- a/js/xpconnect/tests/unit/import_shared_in_worker.js
+++ b/js/xpconnect/tests/unit/import_shared_in_worker.js
@@ -24,13 +24,5 @@ onmessage = event => {
caught3 = true;
}
- let caught4 = false;
- try {
- ChromeUtils.importESModule("resource://test/esmified-1.sys.mjs", {
- loadInDevToolsLoader: true,
- });
- } catch (e) {
- caught4 = true;
- }
- postMessage({ caught1, caught2, caught3, caught4 });
+ postMessage({ caught1, caught2, caught3 });
};
diff --git a/js/xpconnect/tests/unit/lazy_shared_in_worker.js b/js/xpconnect/tests/unit/lazy_shared_in_worker.js
index 148cdefb3e..91114e61c4 100644
--- a/js/xpconnect/tests/unit/lazy_shared_in_worker.js
+++ b/js/xpconnect/tests/unit/lazy_shared_in_worker.js
@@ -36,17 +36,5 @@ onmessage = event => {
caught3 = true;
}
- let caught4 = false;
- try {
- const lazy = {};
- ChromeUtils.defineESModuleGetters(lazy, {
- obj: "resource://test/esmified-1.sys.mjs"
- }, {
- loadInDevToolsLoader: true,
- });
- lazy.obj;
- } catch (e) {
- caught4 = true;
- }
- postMessage({ caught1, caught2, caught3, caught4 });
+ postMessage({ caught1, caught2, caught3 });
};
diff --git a/js/xpconnect/tests/unit/test_defineESModuleGetters_options.js b/js/xpconnect/tests/unit/test_defineESModuleGetters_options.js
index 11d282e511..b01580dfa1 100644
--- a/js/xpconnect/tests/unit/test_defineESModuleGetters_options.js
+++ b/js/xpconnect/tests/unit/test_defineESModuleGetters_options.js
@@ -25,29 +25,21 @@ add_task(async function testShared() {
});
add_task(async function testDevTools() {
- const lazy1 = {};
- const lazy2 = {};
-
- ChromeUtils.defineESModuleGetters(lazy1, {
- GetX: "resource://test/esm_lazy-1.sys.mjs",
- }, {
- loadInDevToolsLoader: true,
- });
+ const lazy = {};
- ChromeUtils.defineESModuleGetters(lazy2, {
+ ChromeUtils.defineESModuleGetters(lazy, {
GetX: "resource://test/esm_lazy-1.sys.mjs",
}, {
global: "devtools",
});
- Assert.equal(lazy1.GetX, lazy2.GetX);
+ lazy.GetX; // delazify before import.
const ns = ChromeUtils.importESModule("resource://test/esm_lazy-1.sys.mjs", {
- loadInDevToolsLoader: true,
+ global: "devtools",
});
- Assert.equal(ns.GetX, lazy1.GetX);
- Assert.equal(ns.GetX, lazy2.GetX);
+ Assert.equal(ns.GetX, lazy.GetX);
});
add_task(async function testSandbox() {
diff --git a/js/xpconnect/tests/unit/test_defineESModuleGetters_options_worker.js b/js/xpconnect/tests/unit/test_defineESModuleGetters_options_worker.js
index f1eab22d2b..a21030ac6a 100644
--- a/js/xpconnect/tests/unit/test_defineESModuleGetters_options_worker.js
+++ b/js/xpconnect/tests/unit/test_defineESModuleGetters_options_worker.js
@@ -29,5 +29,4 @@ add_task(async function testSharedInWorker() {
Assert.equal(result.caught1, true);
Assert.equal(result.caught2, true);
Assert.equal(result.caught3, true);
- Assert.equal(result.caught4, true);
});
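
The rewritten testDevTools task above shows the replacement for the removed loadInDevToolsLoader flag on ChromeUtils.defineESModuleGetters: callers now pass a global option naming the target loader. A small sketch of the new call shape, reusing the module URL from the test:

    const lazy = {};
    ChromeUtils.defineESModuleGetters(
      lazy,
      { GetX: "resource://test/esm_lazy-1.sys.mjs" },
      { global: "devtools" }  // previously: { loadInDevToolsLoader: true }
    );
    lazy.GetX; // first access imports the module in the DevTools loader global
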
diff --git a/js/xpconnect/tests/unit/test_import_devtools_loader.js b/js/xpconnect/tests/unit/test_import_devtools_loader.js
index d7e6fe42f6..f3518ca301 100644
--- a/js/xpconnect/tests/unit/test_import_devtools_loader.js
+++ b/js/xpconnect/tests/unit/test_import_devtools_loader.js
@@ -39,19 +39,27 @@ add_task(async function testDevToolsModuleLoader() {
dbg.addDebuggee(nsGlobal);
Assert.ok(true, "The global is accepted by the Debugger API");
- const ns1 = ChromeUtils.importESModule(ESM_URL, { loadInDevToolsLoader : false });
- Assert.equal(ns1, ns, "Passing loadInDevToolsLoader=false from the shared JSM global is equivalent to regular importESModule");
+ const ns1 = ChromeUtils.importESModule(ESM_URL, { global: "shared" });
+ Assert.equal(ns1, ns, "Passing global: 'shared' from the shared JSM global is equivalent to regular importESModule");
info("Test importing in the devtools loader");
- const ns2 = ChromeUtils.importESModule(ESM_URL, { loadInDevToolsLoader: true });
+ const ns2 = ChromeUtils.importESModule(ESM_URL, { global: "devtools" });
Assert.equal(ns2.x, 0, "We get a new module instance with a new incremented number");
Assert.notEqual(ns2, ns, "We imported a new instance of the module");
Assert.notEqual(ns2.importedObject, ns.importedObject, "The two module instances expose distinct objects");
- Assert.equal(ns2.importESModuleTrue, ns2.importedObject, "When using loadInDevToolsLoader:true from a devtools global, we keep loading in the same loader");
- Assert.equal(ns2.importESModuleNull, ns2.importedObject, "When having an undefined loadInDevToolsLoader from a devtools global, we keep loading in the same loader");
- Assert.equal(ns2.importESModuleNull2, ns2.importedObject, "When having no optional argument at all, we keep loading in the same loader");
- Assert.equal(ns2.importESModuleFalse, ns.importedObject, "When passing an explicit loadInDevToolsLoader:false, we load in the shared global, even from a devtools global");
- Assert.equal(ns2.importLazy(), ns2.importedObject, "ChromeUtils.defineESModuleGetters imports will follow the contextual loader");
+ Assert.equal(ns2.importESModuleDevTools, ns2.importedObject, "When using global: 'devtools' from a devtools global, we keep loading in the same loader");
+ Assert.equal(ns2.importESModuleCurrent, ns2.importedObject, "When using global: 'current' from a devtools global, we keep loading in the same loader");
+ Assert.equal(ns2.importESModuleContextual, ns2.importedObject, "When using global: 'contextual' from a devtools global, we keep loading in the same loader");
+ Assert.ok(ns2.importESModuleNoOptionFailed1, "global option is required in DevTools global");
+ Assert.ok(ns2.importESModuleNoOptionFailed2, "global option is required in DevTools global");
+ Assert.equal(ns2.importESModuleShared, ns.importedObject, "When passing global: 'shared', we load in the shared global, even from a devtools global");
+
+ Assert.equal(ns2.importLazyDevTools(), ns2.importedObject, "When using global: 'devtools' from a devtools global, we keep loading in the same loader");
+ Assert.equal(ns2.importLazyCurrent(), ns2.importedObject, "When using global: 'current' from a devtools global, we keep loading in the same loader");
+ Assert.equal(ns2.importLazyContextual(), ns2.importedObject, "When using global: 'contextual' from a devtools global, we keep loading in the same loader");
+ Assert.ok(ns2.importLazyNoOptionFailed1, "global option is required in DevTools global");
+ Assert.ok(ns2.importLazyNoOptionFailed2, "global option is required in DevTools global");
+ Assert.equal(ns2.importLazyShared(), ns.importedObject, "When passing global: 'shared', we load in the shared global, even from a devtools global");
info("When using the devtools loader, we load in a distinct global, but the same compartment");
const ns2Global = Cu.getGlobalForObject(ns2);
@@ -63,12 +71,12 @@ add_task(async function testDevToolsModuleLoader() {
"Global os ESM loaded in the devtools loader can't be inspected by the Debugee");
info("Re-import the same module in the devtools loader");
- const ns3 = ChromeUtils.importESModule(ESM_URL, { loadInDevToolsLoader: true });
+ const ns3 = ChromeUtils.importESModule(ESM_URL, { global: "devtools" });
Assert.equal(ns3, ns2, "We import the exact same module");
Assert.equal(ns3.importedObject, ns2.importedObject, "The two module expose the same objects");
info("Import a module only from the devtools loader");
- const ns4 = ChromeUtils.importESModule("resource://test/es6module_devtoolsLoader_only.js", { loadInDevToolsLoader: true });
+ const ns4 = ChromeUtils.importESModule("resource://test/es6module_devtoolsLoader_only.js", { global: "devtools" });
const ns4Global = Cu.getGlobalForObject(ns4);
Assert.equal(ns4Global, ns2Global, "The module is loaded in the same devtools global");
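
Taken together, these assertions document the ChromeUtils.importESModule migration in this patch: the boolean loadInDevToolsLoader option is removed, callers pick a loader explicitly via global ("shared", "devtools", "current", or "contextual"), and from a DevTools global omitting the option now throws rather than reusing the current loader. A before/after sketch using the test module URL:

    const URL = "resource://test/es6module_devtoolsLoader.js";

    // Before this patch (option removed):
    //   ChromeUtils.importESModule(URL, { loadInDevToolsLoader: true });

    // After: name the target loader global explicitly.
    const devtoolsNS = ChromeUtils.importESModule(URL, { global: "devtools" });
    const sharedNS = ChromeUtils.importESModule(URL, { global: "shared" });
    // "current" and "contextual" resolve to the caller's own loader instead.
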
diff --git a/js/xpconnect/tests/unit/test_import_global.js b/js/xpconnect/tests/unit/test_import_global.js
index 9ad4522854..97dbaac90d 100644
--- a/js/xpconnect/tests/unit/test_import_global.js
+++ b/js/xpconnect/tests/unit/test_import_global.js
@@ -14,16 +14,14 @@ add_task(async function testShared() {
});
add_task(async function testDevTools() {
- const ns1 = ChromeUtils.importESModule("resource://test/esmified-1.sys.mjs", {
- loadInDevToolsLoader: true,
- });
+ const ns1 = ChromeUtils.importESModule("resource://test/esmified-1.sys.mjs");
const ns2 = ChromeUtils.importESModule("resource://test/esmified-1.sys.mjs", {
global: "devtools",
});
- Assert.equal(ns1, ns2);
- Assert.equal(ns1.obj, ns2.obj);
+ Assert.notEqual(ns1, ns2);
+ Assert.notEqual(ns1.obj, ns2.obj);
});
add_task(async function testInvalidOptions() {
diff --git a/js/xpconnect/tests/unit/test_import_global_worker.js b/js/xpconnect/tests/unit/test_import_global_worker.js
index 16359a4da4..9000358b67 100644
--- a/js/xpconnect/tests/unit/test_import_global_worker.js
+++ b/js/xpconnect/tests/unit/test_import_global_worker.js
@@ -17,5 +17,4 @@ add_task(async function testSharedInWorker() {
Assert.equal(result.caught1, true);
Assert.equal(result.caught2, true);
Assert.equal(result.caught3, true);
- Assert.equal(result.caught4, true);
});
diff --git a/js/xpconnect/tests/unit/test_returncode.js b/js/xpconnect/tests/unit/test_returncode.js
index de4289c013..31997eb4ad 100644
--- a/js/xpconnect/tests/unit/test_returncode.js
+++ b/js/xpconnect/tests/unit/test_returncode.js
@@ -14,10 +14,6 @@ function run_test() {
// Load the component manifests.
registerXPCTestComponents();
- // and the tests.
- test_simple("@mozilla.org/js/xpc/test/native/ReturnCodeParent;1");
- test_nested("@mozilla.org/js/xpc/test/native/ReturnCodeParent;1");
-
test_simple("@mozilla.org/js/xpc/test/native/ESMReturnCodeParent;1");
test_nested("@mozilla.org/js/xpc/test/native/ESMReturnCodeParent;1");
}
diff --git a/js/xpconnect/tests/unit/xpcshell.toml b/js/xpconnect/tests/unit/xpcshell.toml
index 97b2dbe559..37274eba96 100644
--- a/js/xpconnect/tests/unit/xpcshell.toml
+++ b/js/xpconnect/tests/unit/xpcshell.toml
@@ -15,7 +15,6 @@ support-files = [
"importer.jsm",
"recursive_importA.jsm",
"recursive_importB.jsm",
- "ReturnCodeChild.jsm",
"ReturnCodeChild.sys.mjs",
"syntax_error.jsm",
"uninitialized_lexical.jsm",