summaryrefslogtreecommitdiffstats
path: root/js/src/jit
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--js/src/jit-test/lib/pretenure.js18
-rw-r--r--js/src/jit-test/tests/arrays/sort-trampoline.js153
-rw-r--r--js/src/jit-test/tests/basic/bug1875795.js7
-rw-r--r--js/src/jit-test/tests/basic/bug1888746.js12
-rw-r--r--js/src/jit-test/tests/basic/bug1890200.js12
-rw-r--r--js/src/jit-test/tests/cacheir/bug1888346.js8
-rw-r--r--js/src/jit-test/tests/collections/bug-1884927.js10
-rw-r--r--js/src/jit-test/tests/collections/bug-1885775.js12
-rw-r--r--js/src/jit-test/tests/collections/bug-1887939-1.js7
-rw-r--r--js/src/jit-test/tests/collections/bug-1887939-2.js7
-rw-r--r--js/src/jit-test/tests/debug/Debugger-onNativeCall-03.js18
-rw-r--r--js/src/jit-test/tests/debug/Environment-methods-toPrimitive.js21
-rw-r--r--js/src/jit-test/tests/debug/Frame-onStep-21.js19
-rw-r--r--js/src/jit-test/tests/debug/private-methods-eval-in-frame.js9
-rw-r--r--js/src/jit-test/tests/errors/bug-1886940-2.js6
-rw-r--r--js/src/jit-test/tests/errors/bug-1886940.js2
-rw-r--r--js/src/jit-test/tests/fuses/optimized-getiterator-invalidation.js37
-rw-r--r--js/src/jit-test/tests/gc/alllcation-metadata-builder-over-recursion.js22
-rw-r--r--js/src/jit-test/tests/gc/bug-1568740.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1569840.js2
-rw-r--r--js/src/jit-test/tests/gc/bug-1885819-2.js12
-rw-r--r--js/src/jit-test/tests/gc/bug-1885819.js10
-rw-r--r--js/src/jit-test/tests/gc/bug-1886466.js5
-rw-r--r--js/src/jit-test/tests/gc/bug-1888717.js3
-rw-r--r--js/src/jit-test/tests/gc/dedupe-03.js66
-rw-r--r--js/src/jit-test/tests/gc/deduplicateTenuringStrings.js1
-rw-r--r--js/src/jit-test/tests/gc/gcparam.js1
-rw-r--r--js/src/jit-test/tests/gc/pretenuring.js1
-rw-r--r--js/src/jit-test/tests/heap-analysis/byteSize-of-string.js87
-rw-r--r--js/src/jit-test/tests/ion/apply-native-arguments-object.js46
-rw-r--r--js/src/jit-test/tests/ion/apply-native-arguments.js39
-rw-r--r--js/src/jit-test/tests/ion/apply-native-array.js39
-rw-r--r--js/src/jit-test/tests/ion/apply-native-spreadcall-arguments.js39
-rw-r--r--js/src/jit-test/tests/ion/apply-native-spreadcall-array.js39
-rw-r--r--js/src/jit-test/tests/ion/apply-native-spreadcall-rest.js39
-rw-r--r--js/src/jit-test/tests/ion/apply-native-spreadnew-arguments.js39
-rw-r--r--js/src/jit-test/tests/ion/apply-native-spreadnew-array.js39
-rw-r--r--js/src/jit-test/tests/ion/apply-native-spreadnew-newtarget.js66
-rw-r--r--js/src/jit-test/tests/ion/apply-native-spreadnew-rest.js39
-rw-r--r--js/src/jit-test/tests/ion/recover-atomics-islockfree.js25
-rw-r--r--js/src/jit-test/tests/ion/recover-string-from-charcode.js13
-rw-r--r--js/src/jit-test/tests/modules/bug-1888902.js16
-rw-r--r--js/src/jit-test/tests/modules/dynamic-import-error.js2
-rw-r--r--js/src/jit-test/tests/modules/dynamic-import-module.js2
-rw-r--r--js/src/jit-test/tests/modules/inline-data-2.js12
-rw-r--r--js/src/jit-test/tests/modules/inline-data.js13
-rw-r--r--js/src/jit-test/tests/modules/shell-wrapper.js7
-rw-r--r--js/src/jit-test/tests/parser/bug1887176.js46
-rw-r--r--js/src/jit-test/tests/parser/dumpStencil-02.js8
-rw-r--r--js/src/jit-test/tests/parser/module-filename.js13
-rw-r--r--js/src/jit-test/tests/profiler/native-trampoline-2.js7
-rw-r--r--js/src/jit-test/tests/profiler/native-trampoline-3.js32
-rw-r--r--js/src/jit-test/tests/profiler/native-trampoline.js40
-rw-r--r--js/src/jit-test/tests/profiler/wasm-to-js-1.js20
-rw-r--r--js/src/jit-test/tests/profiler/wasm-to-js-2.js19
-rw-r--r--js/src/jit-test/tests/promise/allSettled-dead.js20
-rw-r--r--js/src/jit-test/tests/promise/jobqueue-interrupt-01.js23
-rw-r--r--js/src/jit-test/tests/promise/jobqueue-interrupt-02.js14
-rw-r--r--js/src/jit-test/tests/proxy/bug1885774.js25
-rw-r--r--js/src/jit-test/tests/structured-clone/bug1888727.js21
-rw-r--r--js/src/jit-test/tests/structured-clone/tenuring.js3
-rw-r--r--js/src/jit-test/tests/typedarray/resizable-typedarray-from-pinned-buffer.js9
-rw-r--r--js/src/jit-test/tests/warp/bug1876425.js62
-rw-r--r--js/src/jit-test/tests/wasm/directiveless/bug1877358.js2
-rw-r--r--js/src/jit-test/tests/wasm/gc/casting.js69
-rw-r--r--js/src/jit-test/tests/wasm/gc/i31ref.js18
-rw-r--r--js/src/jit-test/tests/wasm/regress/bug1886870.js8
-rw-r--r--js/src/jit-test/tests/wasm/regress/bug1887535.js25
-rw-r--r--js/src/jit-test/tests/wasm/regress/bug1887596.js14
-rw-r--r--js/src/jit/ABIFunctionList-inl.h2
-rw-r--r--js/src/jit/BaselineBailouts.cpp26
-rw-r--r--js/src/jit/BaselineFrame.h4
-rw-r--r--js/src/jit/CacheIR.cpp15
-rw-r--r--js/src/jit/CacheIR.h6
-rw-r--r--js/src/jit/CacheIRCompiler.cpp39
-rw-r--r--js/src/jit/CacheIROps.yaml3
-rw-r--r--js/src/jit/CacheIRReader.h24
-rw-r--r--js/src/jit/CodeGenerator.cpp552
-rw-r--r--js/src/jit/CodeGenerator.h38
-rw-r--r--js/src/jit/Ion.cpp16
-rw-r--r--js/src/jit/JSJitFrameIter.cpp93
-rw-r--r--js/src/jit/JSJitFrameIter.h49
-rw-r--r--js/src/jit/JitFrames.cpp68
-rw-r--r--js/src/jit/JitFrames.h11
-rw-r--r--js/src/jit/JitOptions.cpp3
-rw-r--r--js/src/jit/JitRuntime.h30
-rw-r--r--js/src/jit/LIROps.yaml18
-rw-r--r--js/src/jit/Lowering.cpp134
-rw-r--r--js/src/jit/MIR.cpp65
-rw-r--r--js/src/jit/MIR.h3
-rw-r--r--js/src/jit/MIROps.yaml12
-rw-r--r--js/src/jit/MacroAssembler.cpp20
-rw-r--r--js/src/jit/MacroAssembler.h1
-rw-r--r--js/src/jit/PerfSpewer.cpp6
-rw-r--r--js/src/jit/ProcessExecutableMemory.cpp12
-rw-r--r--js/src/jit/Recover.cpp370
-rw-r--r--js/src/jit/Recover.h9
-rw-r--r--js/src/jit/Trampoline.cpp36
-rw-r--r--js/src/jit/TrampolineNatives.cpp274
-rw-r--r--js/src/jit/TrampolineNatives.h60
-rw-r--r--js/src/jit/VMFunctionList-inl.h1
-rw-r--r--js/src/jit/VMFunctions.cpp35
-rw-r--r--js/src/jit/VMFunctions.h4
-rw-r--r--js/src/jit/WarpCacheIRTranspiler.cpp10
-rw-r--r--js/src/jit/arm/Architecture-arm.h2
-rw-r--r--js/src/jit/arm64/Architecture-arm64.h2
-rw-r--r--js/src/jit/arm64/vixl/Cpu-vixl.cpp2
-rw-r--r--js/src/jit/loong64/Architecture-loong64.h2
-rw-r--r--js/src/jit/mips32/Architecture-mips32.h2
-rw-r--r--js/src/jit/mips64/Architecture-mips64.h2
-rw-r--r--js/src/jit/moz.build1
-rw-r--r--js/src/jit/none/Architecture-none.h2
-rw-r--r--js/src/jit/riscv64/Architecture-riscv64.h2
-rw-r--r--js/src/jit/shared/LIR-shared.h182
-rw-r--r--js/src/jit/wasm32/Architecture-wasm32.h2
-rw-r--r--js/src/jit/x86-shared/Architecture-x86-shared.h4
116 files changed, 3234 insertions, 602 deletions
diff --git a/js/src/jit-test/lib/pretenure.js b/js/src/jit-test/lib/pretenure.js
index 214f1d44d1..c1184fefc4 100644
--- a/js/src/jit-test/lib/pretenure.js
+++ b/js/src/jit-test/lib/pretenure.js
@@ -22,8 +22,12 @@ function setupPretenureTest() {
gczeal(0);
// Restrict nursery size so we can fill it quicker, and ensure it is resized.
- gcparam("minNurseryBytes", 1024 * 1024);
- gcparam("maxNurseryBytes", 1024 * 1024);
+ let size = 1024 * 1024;
+ if (gcparam("semispaceNurseryEnabled")) {
+ size *= 2;
+ }
+ gcparam("minNurseryBytes", size);
+ gcparam("maxNurseryBytes", size);
// Limit allocation threshold so we trigger major GCs sooner.
gcparam("allocationThreshold", 1 /* MB */);
@@ -67,8 +71,14 @@ function allocateArrays(count, longLived) {
}
function gcCounts() {
- return { minor: gcparam("minorGCNumber"),
- major: gcparam("majorGCNumber") };
+ let major = gcparam("majorGCNumber")
+ let minor = gcparam("minorGCNumber");
+
+ // Only report minor collections that didn't happen as part of a major GC.
+ assertEq(minor >= major, true);
+ minor -= major;
+
+ return { minor, major };
}
function runTestAndCountCollections(thunk) {
diff --git a/js/src/jit-test/tests/arrays/sort-trampoline.js b/js/src/jit-test/tests/arrays/sort-trampoline.js
new file mode 100644
index 0000000000..b81d4628b0
--- /dev/null
+++ b/js/src/jit-test/tests/arrays/sort-trampoline.js
@@ -0,0 +1,153 @@
+function testGC() {
+ var arr = [{n: 1}, {n: 3}, {n: 0}, {n: 5}];
+ for (var i = 0; i < 20; i++) {
+ arr.sort((x, y) => {
+ if (i === 17) {
+ gc();
+ }
+ return x.n - y.n;
+ });
+ }
+ assertEq(arr.map(x => x.n).join(""), "0135");
+}
+testGC();
+
+function testException() {
+ var arr = [{n: 1}, {n: 3}, {n: 0}, {n: 5}];
+ var ex;
+ try {
+ for (var i = 0; i < 20; i++) {
+ arr.sort((x, y) => {
+ if (i === 17) {
+ throw "fit";
+ }
+ return x.n - y.n;
+ });
+ }
+ } catch (e) {
+ ex = e;
+ }
+ assertEq(ex, "fit");
+ assertEq(i, 17);
+}
+testException();
+
+function testRectifier() {
+ var arr = [{n: 1}, {n: 3}, {n: 0}, {n: 5}];
+ for (var i = 0; i < 20; i++) {
+ arr.sort(function(x, y, a) {
+ assertEq(arguments.length, 2);
+ assertEq(a, undefined);
+ return y.n - x.n;
+ });
+ }
+ assertEq(arr.map(x => x.n).join(""), "5310");
+}
+testRectifier();
+
+function testClassConstructor() {
+ var normal = (x, y) => x.n - y.n;
+ var dummy = {};
+ var ctor = (class { constructor(x, y) {
+ assertEq(x, dummy);
+ }});
+ // Warm up the constructor.
+ for (var i = 0; i < 20; i++) {
+ new ctor(dummy, dummy);
+ }
+ for (var i = 0; i < 20; i++) {
+ var arr = [{n: 1}, {n: 3}, {n: 0}, {n: 5}];
+ var ex;
+ try {
+ arr.sort(i < 17 ? normal : ctor);
+ } catch (e) {
+ ex = e;
+ }
+ assertEq(ex instanceof TypeError, i >= 17);
+ assertEq(arr.map(x => x.n).join(""), i >= 17 ? "1305" : "0135");
+ }
+}
+testClassConstructor();
+
+function testSwitchRealms() {
+ var arr = [{n: 1}, {n: 3}, {n: 0}, {n: 5}];
+ var g = newGlobal({sameCompartmentAs: this});
+ g.foo = 123;
+ var comp = g.evaluate(`((x, y) => {
+ assertEq(foo, 123);
+ return x.n - y.n;
+ })`);
+ for (var i = 0; i < 20; i++) {
+ arr.sort(comp);
+ }
+ assertEq(arr.map(x => x.n).join(""), "0135");
+}
+testSwitchRealms();
+
+function testCrossCompartment() {
+ var arr = [{n: 1}, {n: 3}, {n: 0}, {n: 5}];
+ var g = newGlobal({newCompartment: true});
+ var wrapper = g.evaluate(`((x, y) => {
+ return x.n - y.n;
+ })`);
+ for (var i = 0; i < 20; i++) {
+ arr.sort(wrapper);
+ }
+ assertEq(arr.map(x => x.n).join(""), "0135");
+}
+testCrossCompartment();
+
+function testBound() {
+ var arr = [{n: 1}, {n: 3}, {n: 0}, {n: 5}];
+ var fun = (function(a, x, y) {
+ "use strict";
+ assertEq(this, null);
+ assertEq(a, 1);
+ return x.n - y.n;
+ }).bind(null, 1);
+ for (var i = 0; i < 20; i++) {
+ arr.sort(fun);
+ }
+ assertEq(arr.map(x => x.n).join(""), "0135");
+}
+testBound();
+
+function testExtraArgs() {
+ var arr = [{n: 1}, {n: 3}, {n: 0}, {n: 5}];
+ var cmp = (x, y) => x.n - y.n;
+ for (var i = 0; i < 20; i++) {
+ arr.sort(cmp, cmp, cmp, cmp, cmp, cmp, cmp);
+ }
+ assertEq(arr.map(x => x.n).join(""), "0135");
+}
+testExtraArgs();
+
+function testBailout() {
+ var arr = [{n: 1}, {n: 3}, {n: 0}, {n: 5}];
+ for (var i = 0; i < 110; i++) {
+ arr.sort(function(x, y) {
+ if (i === 108) {
+ bailout();
+ }
+ return x.n - y.n;
+ });
+ }
+ assertEq(arr.map(x => x.n).join(""), "0135");
+}
+testBailout();
+
+function testExceptionHandlerSwitchRealm() {
+ var g = newGlobal({sameCompartmentAs: this});
+ for (var i = 0; i < 25; i++) {
+ var ex = null;
+ try {
+ g.Array.prototype.toSorted.call([2, 3], () => {
+ throw "fit";
+ });
+ } catch (e) {
+ ex = e;
+ }
+ assertEq(ex, "fit");
+ }
+}
+testExceptionHandlerSwitchRealm();
diff --git a/js/src/jit-test/tests/basic/bug1875795.js b/js/src/jit-test/tests/basic/bug1875795.js
new file mode 100644
index 0000000000..1a5b54acfe
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug1875795.js
@@ -0,0 +1,7 @@
+// |jit-test| --fast-warmup; --no-threads; skip-if: !('oomTest' in this)
+oomTest(function() {
+ var o = {};
+ for (var p in this) {
+ o[p] = 1;
+ }
+});
diff --git a/js/src/jit-test/tests/basic/bug1888746.js b/js/src/jit-test/tests/basic/bug1888746.js
new file mode 100644
index 0000000000..8e6d0cd0b9
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug1888746.js
@@ -0,0 +1,12 @@
+function comparator(x, y) {
+ saveStack();
+ return {valueOf: function() {
+ saveStack();
+ return x - y;
+ }};
+}
+for (let i = 0; i < 20; i++) {
+ let arr = [3, 1, 2];
+ arr.sort(comparator);
+ assertEq(arr.toString(), "1,2,3");
+}
diff --git a/js/src/jit-test/tests/basic/bug1890200.js b/js/src/jit-test/tests/basic/bug1890200.js
new file mode 100644
index 0000000000..caa97fcece
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug1890200.js
@@ -0,0 +1,12 @@
+let triggerGC = false;
+let proxy = new Proxy({}, {get: function(target, key) {
+ if (key === "sameCompartmentAs" || key === "sameZoneAs") {
+ triggerGC = true;
+ return newGlobal({newCompartment: true});
+ }
+ if (triggerGC) {
+ gc();
+ triggerGC = false;
+ }
+}});
+newGlobal(proxy);
diff --git a/js/src/jit-test/tests/cacheir/bug1888346.js b/js/src/jit-test/tests/cacheir/bug1888346.js
new file mode 100644
index 0000000000..8e63d86089
--- /dev/null
+++ b/js/src/jit-test/tests/cacheir/bug1888346.js
@@ -0,0 +1,8 @@
+setJitCompilerOption("ion.frequent-bailout-threshold", 1);
+for (let i = 0; i < 49; i++) {
+ (function () {
+ let x = new (function () {})();
+ Object.defineProperty(x, "z", {});
+ x.z;
+ })();
+}
diff --git a/js/src/jit-test/tests/collections/bug-1884927.js b/js/src/jit-test/tests/collections/bug-1884927.js
new file mode 100644
index 0000000000..263d9df8a0
--- /dev/null
+++ b/js/src/jit-test/tests/collections/bug-1884927.js
@@ -0,0 +1,10 @@
+// |jit-test| --enable-symbols-as-weakmap-keys; skip-if: getBuildConfiguration("release_or_beta")
+for (x=0; x<10000; ++x) {
+ try {
+ m13 = new WeakMap;
+ sym = Symbol();
+ m13.set(sym, new Debugger);
+ startgc(1, );
+ } catch (exc) {}
+}
+
diff --git a/js/src/jit-test/tests/collections/bug-1885775.js b/js/src/jit-test/tests/collections/bug-1885775.js
new file mode 100644
index 0000000000..bc14c6d58b
--- /dev/null
+++ b/js/src/jit-test/tests/collections/bug-1885775.js
@@ -0,0 +1,12 @@
+// |jit-test| --enable-symbols-as-weakmap-keys; skip-if: getBuildConfiguration("release_or_beta")
+var code = `
+var m58 = new WeakMap;
+var sym = Symbol();
+m58.set(sym, ({ entry16: 0, length: 1 }));
+function testCompacting() {
+ gcslice(50000);
+}
+testCompacting(2, 100000, 50000);
+`;
+for (x = 0; x < 10000; ++x)
+ evaluate(code);
diff --git a/js/src/jit-test/tests/collections/bug-1887939-1.js b/js/src/jit-test/tests/collections/bug-1887939-1.js
new file mode 100644
index 0000000000..292c44d492
--- /dev/null
+++ b/js/src/jit-test/tests/collections/bug-1887939-1.js
@@ -0,0 +1,7 @@
+var map = new WeakMap();
+var sym = Symbol();
+try {
+ map.set(sym, 1);
+} catch (e) {
+ assertEq(!!e.message.match(/an unregistered symbol/), false);
+}
diff --git a/js/src/jit-test/tests/collections/bug-1887939-2.js b/js/src/jit-test/tests/collections/bug-1887939-2.js
new file mode 100644
index 0000000000..2ec4e4c585
--- /dev/null
+++ b/js/src/jit-test/tests/collections/bug-1887939-2.js
@@ -0,0 +1,7 @@
+// |jit-test| --enable-symbols-as-weakmap-keys; skip-if: getBuildConfiguration("release_or_beta")
+var map = new WeakMap();
+try {
+ map.set(1, 1);
+} catch (e) {
+ assertEq(!!e.message.match(/an unregistered symbol/), true);
+}
diff --git a/js/src/jit-test/tests/debug/Debugger-onNativeCall-03.js b/js/src/jit-test/tests/debug/Debugger-onNativeCall-03.js
index 7e9c0b280a..27ab5b2b23 100644
--- a/js/src/jit-test/tests/debug/Debugger-onNativeCall-03.js
+++ b/js/src/jit-test/tests/debug/Debugger-onNativeCall-03.js
@@ -1,4 +1,5 @@
-// Test onNativeCall's behavior when used with self-hosted functions.
+// Test onNativeCall's behavior when used with self-hosted functions
+// and trampoline natives.
load(libdir + 'eqArrayHelper.js');
@@ -18,13 +19,22 @@ dbg.onNativeCall = f => {
gdbg.executeInGlobal(`
var x = [1,3,2];
+ x.forEach((a) => {print(a)});
x.sort((a, b) => {print(a)});
+ x.sort(print);
`);
assertEqArray(rv, [
- "EnterFrame", "sort",
- "ArraySortCompare/<",
+ "EnterFrame", "forEach",
"EnterFrame", "print",
- "ArraySortCompare/<",
"EnterFrame", "print",
+ "EnterFrame", "print",
+
+ "sort",
+ "EnterFrame","print",
+ "EnterFrame","print",
+
+ "sort",
+ "print",
+ "print"
]);
diff --git a/js/src/jit-test/tests/debug/Environment-methods-toPrimitive.js b/js/src/jit-test/tests/debug/Environment-methods-toPrimitive.js
new file mode 100644
index 0000000000..106728901d
--- /dev/null
+++ b/js/src/jit-test/tests/debug/Environment-methods-toPrimitive.js
@@ -0,0 +1,21 @@
+// removeDebuggee can be called through ToPrimitive while converting the argument
+// passed to Debugger.Environment.{find,getVariable,setVariable} to string.
+
+var g = newGlobal({newCompartment: true});
+g.eval("function f() { debugger; }");
+var dbg = new Debugger();
+var oddball = {[Symbol.toPrimitive]: () => dbg.removeDebuggee(g)};
+
+for (var method of ["find", "getVariable", "setVariable"]) {
+ dbg.addDebuggee(g);
+ dbg.onDebuggerStatement = frame => {
+ var ex;
+ try {
+ frame.environment[method](oddball, oddball);
+ } catch (e) {
+ ex = e;
+ }
+ assertEq(ex.message, "Debugger.Environment is not a debuggee environment");
+ };
+ g.f();
+}
diff --git a/js/src/jit-test/tests/debug/Frame-onStep-21.js b/js/src/jit-test/tests/debug/Frame-onStep-21.js
new file mode 100644
index 0000000000..7bea2e3a95
--- /dev/null
+++ b/js/src/jit-test/tests/debug/Frame-onStep-21.js
@@ -0,0 +1,19 @@
+// |jit-test| error: too much recursion
+
+// Generator closed due to over-recursion shouldn't cause crash around onStep.
+
+async function* foo() {
+ const g = this.newGlobal({sameZoneAs: this});
+ g.Debugger(this).getNewestFrame().onStep = g.evaluate(`(function() {})`);
+ return {};
+}
+function f() {
+ try {
+ f.apply(undefined, f);
+ } catch {
+ drainJobQueue();
+ foo().next();
+ }
+}
+foo().next();
+f();
diff --git a/js/src/jit-test/tests/debug/private-methods-eval-in-frame.js b/js/src/jit-test/tests/debug/private-methods-eval-in-frame.js
index 5122cfa56b..318a36f614 100644
--- a/js/src/jit-test/tests/debug/private-methods-eval-in-frame.js
+++ b/js/src/jit-test/tests/debug/private-methods-eval-in-frame.js
@@ -150,13 +150,6 @@ if ('dis' in this) {
assertEq(b.ef(`var x = () => { return this.#priv(); }; x()`), 12);
assertEq(b.ef(`function x(o) { function y(o) { return o.#priv(); }; return y(o); } x(this)`), 12);
-assertEq(b.ef("B.#smethod()"), 14)
-assertEq(b.ef("B.#unusedmethod()"), 19);
-assertEq(b.ef("B.#unusedgetter"), 10);
-
-b.ef("B.#unusedsetter = 19");
-assertEq(B.setter, 19);
-
assertEq(B.f(), 14);
assertEq(B.sef(`this.#smethod()`), 14);
assertEq(B.sLayerEf(`this.#smethod()`), 14);
@@ -215,4 +208,4 @@ var x = () => {
})();
};
x()
-`), 12); \ No newline at end of file
+`), 12);
diff --git a/js/src/jit-test/tests/errors/bug-1886940-2.js b/js/src/jit-test/tests/errors/bug-1886940-2.js
new file mode 100644
index 0000000000..654071be04
--- /dev/null
+++ b/js/src/jit-test/tests/errors/bug-1886940-2.js
@@ -0,0 +1,6 @@
+oomTest(function () {
+ (function () {
+ var x = [disassemble, new Int8Array(2 ** 8 + 1)];
+ x.shift().apply([], x);
+ })();
+});
diff --git a/js/src/jit-test/tests/errors/bug-1886940.js b/js/src/jit-test/tests/errors/bug-1886940.js
new file mode 100644
index 0000000000..f8d3020d8c
--- /dev/null
+++ b/js/src/jit-test/tests/errors/bug-1886940.js
@@ -0,0 +1,2 @@
+// |jit-test| error: RangeError
+[].with(Symbol.hasInstance);
diff --git a/js/src/jit-test/tests/fuses/optimized-getiterator-invalidation.js b/js/src/jit-test/tests/fuses/optimized-getiterator-invalidation.js
new file mode 100644
index 0000000000..6505f8d023
--- /dev/null
+++ b/js/src/jit-test/tests/fuses/optimized-getiterator-invalidation.js
@@ -0,0 +1,37 @@
+
+const ITERS = 1000;
+
+// A function which when warp compiled should use
+// OptimizedGetIterator elision, and rely on invalidation
+function f(x) {
+ let sum = 0;
+ for (let i = 0; i < ITERS; i++) {
+ const [a, b, c] = x
+ sum = a + b + c;
+ }
+ return sum
+}
+
+// Run the function f 1000 times to warp compile it. Use 4 elements here to ensure
+// the return property of the ArrayIteratorPrototype is called.
+let arr = [1, 2, 3, 4];
+for (let i = 0; i < 1000; i++) {
+ f(arr);
+}
+
+// Initialize the globally scoped counter
+let counter = 0;
+const ArrayIteratorPrototype = Object.getPrototypeOf([][Symbol.iterator]());
+
+// Setting the return property should invalidate the warp script here.
+ArrayIteratorPrototype.return = function () {
+ counter++;
+ return { done: true };
+};
+
+
+// Call f one more time
+f(arr);
+
+// Use assertEq to check the value of counter.
+assertEq(counter, ITERS);
diff --git a/js/src/jit-test/tests/gc/alllcation-metadata-builder-over-recursion.js b/js/src/jit-test/tests/gc/alllcation-metadata-builder-over-recursion.js
new file mode 100644
index 0000000000..67093026b4
--- /dev/null
+++ b/js/src/jit-test/tests/gc/alllcation-metadata-builder-over-recursion.js
@@ -0,0 +1,22 @@
+// |jit-test| allow-unhandlable-oom
+
+// Over-recursion should suppress alloation metadata builder, to avoid another
+// over-recursion while generating an error object for the first over-recursion.
+//
+// This test should catch the error for the "load" testing function's arguments,
+// or crash with unhandlable OOM inside allocation metadata builder.
+
+const g = newGlobal();
+g.enableShellAllocationMetadataBuilder();
+function run() {
+ const g_load = g.load;
+ g_load.toString = run;
+ return g_load(g_load);
+}
+let caught = false;
+try {
+ run();
+} catch (e) {
+ caught = true;
+}
+assertEq(caught, true);
diff --git a/js/src/jit-test/tests/gc/bug-1568740.js b/js/src/jit-test/tests/gc/bug-1568740.js
index 6cc003cb94..5c311b855d 100644
--- a/js/src/jit-test/tests/gc/bug-1568740.js
+++ b/js/src/jit-test/tests/gc/bug-1568740.js
@@ -1,11 +1,11 @@
gczeal(0);
+gcparam("semispaceNurseryEnabled", 0);
function setAndTest(param, value) {
gcparam(param, value);
assertEq(gcparam(param), value);
}
-
// Set a large nursery size.
setAndTest("maxNurseryBytes", 1024*1024);
setAndTest("minNurseryBytes", 1024*1024);
diff --git a/js/src/jit-test/tests/gc/bug-1569840.js b/js/src/jit-test/tests/gc/bug-1569840.js
index 70d28add73..45df339405 100644
--- a/js/src/jit-test/tests/gc/bug-1569840.js
+++ b/js/src/jit-test/tests/gc/bug-1569840.js
@@ -1,5 +1,5 @@
-
gczeal(0);
+gcparam("semispaceNurseryEnabled", 0);
gcparam("maxNurseryBytes", 1024*1024);
gcparam("minNurseryBytes", 1024*1024);
diff --git a/js/src/jit-test/tests/gc/bug-1885819-2.js b/js/src/jit-test/tests/gc/bug-1885819-2.js
new file mode 100644
index 0000000000..a87e4c701a
--- /dev/null
+++ b/js/src/jit-test/tests/gc/bug-1885819-2.js
@@ -0,0 +1,12 @@
+let g = newGlobal();
+function f() {
+ var o = {};
+ o["prop" + Date.now()] = 1;
+ gc();
+ schedulezone("atoms");
+ schedulezone(g);
+ gc("zone");
+ let [x] = [0];
+}
+f();
+oomTest(f);
diff --git a/js/src/jit-test/tests/gc/bug-1885819.js b/js/src/jit-test/tests/gc/bug-1885819.js
new file mode 100644
index 0000000000..8341c3ff52
--- /dev/null
+++ b/js/src/jit-test/tests/gc/bug-1885819.js
@@ -0,0 +1,10 @@
+function f() {
+ var o = {};
+ o["prop" + Date.now()] = 1;
+ gc();
+ schedulezone("atoms");
+ gc("zone");
+ let [x] = [0];
+}
+f();
+oomTest(f);
diff --git a/js/src/jit-test/tests/gc/bug-1886466.js b/js/src/jit-test/tests/gc/bug-1886466.js
new file mode 100644
index 0000000000..4347ea3e6b
--- /dev/null
+++ b/js/src/jit-test/tests/gc/bug-1886466.js
@@ -0,0 +1,5 @@
+gczeal(7, 6)
+a = new WeakSet
+for (let i = 0; i < 200000; i++) {
+ a.add({})
+}
diff --git a/js/src/jit-test/tests/gc/bug-1888717.js b/js/src/jit-test/tests/gc/bug-1888717.js
new file mode 100644
index 0000000000..7e54543994
--- /dev/null
+++ b/js/src/jit-test/tests/gc/bug-1888717.js
@@ -0,0 +1,3 @@
+// |jit-test| --no-ggc
+gcparam("semispaceNurseryEnabled", 1);
+let o = {};
diff --git a/js/src/jit-test/tests/gc/dedupe-03.js b/js/src/jit-test/tests/gc/dedupe-03.js
new file mode 100644
index 0000000000..4e9b4c1bbc
--- /dev/null
+++ b/js/src/jit-test/tests/gc/dedupe-03.js
@@ -0,0 +1,66 @@
+// |jit-test| skip-if: !hasFunction.stringRepresentation
+
+// Test handling of tenured dependent strings pointing to nursery base strings.
+
+gczeal(0);
+
+function makeExtensibleStrFrom(str) {
+ var left = str.substr(0, str.length/2);
+ var right = str.substr(str.length/2, str.length);
+ var ropeStr = left + right;
+ return ensureLinearString(ropeStr);
+}
+
+function repr(s) {
+ return JSON.parse(stringRepresentation(s));
+}
+
+function dependsOn(s1, s2) {
+ const rep1 = JSON.parse(stringRepresentation(s1));
+ const rep2 = JSON.parse(stringRepresentation(s2));
+ return rep1.base && rep1.base.address == rep2.address;
+}
+
+// Make a string to deduplicate to.
+var original = makeExtensibleStrFrom('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklm');
+
+// Construct T1 -> Nbase.
+var Nbase = makeExtensibleStrFrom('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklm');
+var T1 = newDependentString(Nbase, 0, 60, { tenured: true });
+
+// Get prevented from creating T2 -> T1 -> Nbase
+// (will be T2 -> Nbase instead to avoid dependency chains).
+var T2 = newDependentString(T1, 30, { tenured: true });
+
+assertEq(dependsOn(T2, Nbase), "expect: T2 -> base");
+
+// Construct T1 -> Ndep1 (was Nbase) -> Nbase2.
+var Nbase2 = newRope(Nbase, "ABC");
+ensureLinearString(Nbase2);
+var Ndep1 = Nbase;
+
+assertEq(dependsOn(T1, Ndep1), "expect: T1 -> Ndep1");
+assertEq(dependsOn(Ndep1, Nbase2), "expect: Ndep1 -> Nbase2");
+
+// Fail to construct T3 -> Tbase3 -> Nbase4. It will refuse because T3 would be using
+// chars from Nbase4 that can't be updated since T3 is not in the store buffer. Instead,
+// it will allocate a new buffer for the rope root, leaving Tbase3 alone and keeping
+// T3 -> Tbase3.
+var Tbase3 = makeExtensibleStrFrom('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklm');
+minorgc();
+var T3 = newDependentString(Tbase3, 0, 30, { tenured: true });
+var Nbase4 = newRope(Tbase3, "DEF");
+ensureLinearString(Nbase4);
+assertEq(repr(Tbase3).isTenured, true, "Tbase3 is tenured");
+assertEq(repr(Tbase3).flags.includes("EXTENSIBLE"), true, "Tbase3 is extensible");
+assertEq(repr(Nbase4).flags.includes("DEPENDENT_BIT"), false, "expect: Nbase4 is not a dependent string")
+assertEq(repr(T3).flags.includes("DEPENDENT_BIT"), true, "expect: T3 is a dependent string")
+assertEq(dependsOn(T3, Tbase3), "expect: T3 -> Tbase3");
+
+function bug1879918() {
+ const s = JSON.parse('["abcdefabcdefabcdefabcdefabcdefabcdefabcdef"]')[0];
+ const dep = newDependentString(s, 1, { tenured: true });
+ minorgc();
+ assertEq(dep, "bcdefabcdefabcdefabcdefabcdefabcdefabcdef");
+}
+bug1879918();
diff --git a/js/src/jit-test/tests/gc/deduplicateTenuringStrings.js b/js/src/jit-test/tests/gc/deduplicateTenuringStrings.js
index 1b8259cc15..de2fb0c028 100644
--- a/js/src/jit-test/tests/gc/deduplicateTenuringStrings.js
+++ b/js/src/jit-test/tests/gc/deduplicateTenuringStrings.js
@@ -13,6 +13,7 @@
// We require predictable GC timing to make sure the correct
// strings are tenured together.
gczeal(0);
+gcparam('semispaceNurseryEnabled', 0);
var helperCode = `
function makeInlineStr(isLatin1) {
diff --git a/js/src/jit-test/tests/gc/gcparam.js b/js/src/jit-test/tests/gc/gcparam.js
index 05e0359088..c57b400642 100644
--- a/js/src/jit-test/tests/gc/gcparam.js
+++ b/js/src/jit-test/tests/gc/gcparam.js
@@ -60,3 +60,4 @@ testChangeParam("mallocThresholdBase");
testChangeParam("urgentThreshold");
testChangeParam("helperThreadRatio");
testChangeParam("maxHelperThreads");
+testChangeParam("semispaceNurseryEnabled");
diff --git a/js/src/jit-test/tests/gc/pretenuring.js b/js/src/jit-test/tests/gc/pretenuring.js
index 6f20706e9b..30156b0e98 100644
--- a/js/src/jit-test/tests/gc/pretenuring.js
+++ b/js/src/jit-test/tests/gc/pretenuring.js
@@ -1,6 +1,7 @@
// Test nursery string allocation and pretenuring.
gczeal(0);
+gcparam("semispaceNurseryEnabled", 0);
gcparam("minNurseryBytes", 4096 * 1024);
gcparam("maxNurseryBytes", 4096 * 1024);
diff --git a/js/src/jit-test/tests/heap-analysis/byteSize-of-string.js b/js/src/jit-test/tests/heap-analysis/byteSize-of-string.js
index aaac0c4f1f..b4cfdffb04 100644
--- a/js/src/jit-test/tests/heap-analysis/byteSize-of-string.js
+++ b/js/src/jit-test/tests/heap-analysis/byteSize-of-string.js
@@ -12,6 +12,7 @@
// stable.
gczeal(0); // Need to control when tenuring happens
+gcparam('semispaceNurseryEnabled', 0);
// Hack to skip this test if strings are not allocated in the nursery.
{
@@ -75,16 +76,20 @@ function tByteSize(str) {
// JSExternalString - limited by MaxStringLength - E
// JSThinInlineString 8 4 16 8 T
// JSFatInlineString 24 12 24 12 F
+// ThinInlineAtom 12 6 20 10 T
+// FatInlineAtom 20 10 20 10 F
// JSExtensibleString - limited by MaxStringLength - X
// Notes:
// - labels are suffixed with A for atoms and N for non-atoms
-// - atoms are 8 bytes larger than non-atoms, to store the atom's hash code.
+// - atoms store a 4 byte hash code, and some add to the size to adjust
// - Nursery-allocated strings require a header that stores the zone.
// Expected sizes based on type of string
const m32 = (getBuildConfiguration("pointer-byte-size") == 4);
-const TA = m32 ? 24 : 32; // ThinInlineString atom, includes a hash value
+const TA = m32 ? 24 : 32; // ThinInlineAtom (includes a hash value)
+const FA = m32 ? 32 : 32; // FatInlineAtom (includes a hash value)
+const NA = m32 ? 24 : 32; // NormalAtom
const TN = m32 ? 16 : 24; // ThinInlineString
const FN = m32 ? 32 : 32; // FatInlineString
const XN = m32 ? 16 : 24; // ExtensibleString, has additional storage buffer
@@ -95,8 +100,8 @@ const EN = m32 ? 16 : 24; // ExternalString
// A function that pads out a tenured size to the nursery size. We store a zone
// pointer in the nursery just before the string (4 bytes on 32-bit, 8 bytes on
// 64-bit), and the string struct itself must be 8-byte aligned (resulting in
-// +4 bytes on 32-bit, +0 bytes on 64-bit). The end result? Nursery strings are
-// 8 bytes larger.
+// +4 bytes on 32-bit, +0 bytes on 64-bit). The end result is that nursery
+// strings are 8 bytes larger.
const Nursery = m32 ? s => s + 4 + 4 : s => s + 8 + 0;
// Latin-1
@@ -130,6 +135,23 @@ assertEq(nByteSize("123456789.123456789.123456789.1"), s(Nursery(
assertEq(nByteSize("123456789.123456789.123456789.12"), s(Nursery(XN)+32,Nursery(XN)+32));
assertEq(nByteSize("123456789.123456789.123456789.123"), s(Nursery(XN)+64,Nursery(XN)+64));
+function Atom(s) { return Object.keys({ [s]: true })[0]; }
+assertEq(byteSize(Atom("1234567")), s(TA, TA));
+assertEq(byteSize(Atom("12345678")), s(TA, FA));
+assertEq(byteSize(Atom("123456789.12")), s(TA, FA));
+assertEq(byteSize(Atom("123456789.123")), s(FA, FA));
+assertEq(byteSize(Atom("123456789.12345")), s(FA, FA));
+assertEq(byteSize(Atom("123456789.123456")), s(FA, FA));
+assertEq(byteSize(Atom("123456789.1234567")), s(FA, FA));
+assertEq(byteSize(Atom("123456789.123456789.")), s(FA, FA));
+assertEq(byteSize(Atom("123456789.123456789.1")), s(NA+32, NA+32));
+assertEq(byteSize(Atom("123456789.123456789.123")), s(NA+32, NA+32));
+assertEq(byteSize(Atom("123456789.123456789.1234")), s(NA+32, NA+32));
+assertEq(byteSize(Atom("123456789.123456789.12345")), s(NA+32, NA+32));
+assertEq(byteSize(Atom("123456789.123456789.123456789.1")), s(NA+32, NA+32));
+assertEq(byteSize(Atom("123456789.123456789.123456789.12")), s(NA+32, NA+32));
+assertEq(byteSize(Atom("123456789.123456789.123456789.123")), s(NA+48, NA+48));
+
// Inline char16_t atoms.
// "Impassionate gods have never seen the red that is the Tatsuta River."
// - Ariwara no Narihira
@@ -183,20 +205,43 @@ assertEq(byteSize(rope8), s(Nurser
minorgc();
assertEq(byteSize(rope8), s(RN, RN));
var matches8 = rope8.match(/(de cuyo nombre no quiero acordarme)/);
-assertEq(byteSize(rope8), s(XN + 65536, XN + 65536));
+assertEq(byteSize(rope8), s(XN + 64 * 1024, XN + 64 * 1024));
+var ext8 = rope8; // Stop calling it what it's not (though it'll change again soon.)
// Test extensible strings.
//
// Appending another copy of the fragment should yield another rope.
//
-// Flatting that should turn the original rope into a dependent string, and
+// Flattening that should turn the original rope into a dependent string, and
// yield a new linear string, of the same size as the original.
-rope8a = rope8 + fragment8;
+var rope8a = ext8 + fragment8;
assertEq(byteSize(rope8a), s(Nursery(RN), Nursery(RN)));
rope8a.match(/x/, function() { assertEq(true, false); });
assertEq(byteSize(rope8a), s(Nursery(XN) + 65536, Nursery(XN) + 65536));
+assertEq(byteSize(ext8), s(DN, DN));
+
+// Latin-1 dependent strings in the nursery.
+assertEq(byteSize(ext8.substr(1000, 2000)), s(Nursery(DN), Nursery(DN)));
+assertEq(byteSize(matches8[0]), s(Nursery(DN), Nursery(DN)));
+assertEq(byteSize(matches8[1]), s(Nursery(DN), Nursery(DN)));
+
+// Tenure everything and do it again.
+ext8 = copyString(ext8);
+rope8a = ext8 + fragment8;
+minorgc();
+assertEq(byteSize(rope8a), s(RN, RN));
+rope8a.match(/x/, function() { assertEq(true, false); });
+assertEq(byteSize(rope8a), s(XN + 65536, XN + 65536));
assertEq(byteSize(rope8), s(RN, RN));
+// Latin-1 tenured dependent strings.
+function tenure(s) {
+ minorgc();
+ return s;
+}
+assertEq(byteSize(tenure(rope8.substr(1000, 2000))), s(DN, DN));
+assertEq(byteSize(matches8[0]), s(DN, DN));
+assertEq(byteSize(matches8[1]), s(DN, DN));
// A char16_t rope. This changes size when flattened.
// "From the Heliconian Muses let us begin to sing"
@@ -207,13 +252,11 @@ for (var i = 0; i < 10; i++) // 1024 repetitions
rope16 = rope16 + rope16;
assertEq(byteSize(rope16), s(Nursery(RN), Nursery(RN)));
let matches16 = rope16.match(/(Ἑλικωνιάδων ἀρχώμεθ᾽)/);
-assertEq(byteSize(rope16), s(Nursery(RN) + 131072, Nursery(RN) + 131072));
+assertEq(byteSize(rope16), s(Nursery(XN) + 128 * 1024, Nursery(XN) + 128 * 1024));
+var ext16 = rope16;
-// Latin-1 and char16_t dependent strings.
-assertEq(byteSize(rope8.substr(1000, 2000)), s(Nursery(DN), Nursery(DN)));
-assertEq(byteSize(rope16.substr(1000, 2000)), s(Nursery(DN), Nursery(DN)));
-assertEq(byteSize(matches8[0]), s(Nursery(DN), Nursery(DN)));
-assertEq(byteSize(matches8[1]), s(Nursery(DN), Nursery(DN)));
+// char16_t dependent strings in the nursery.
+assertEq(byteSize(ext16.substr(1000, 2000)), s(Nursery(DN), Nursery(DN)));
assertEq(byteSize(matches16[0]), s(Nursery(DN), Nursery(DN)));
assertEq(byteSize(matches16[1]), s(Nursery(DN), Nursery(DN)));
@@ -221,13 +264,23 @@ assertEq(byteSize(matches16[1]), s(Nurser
//
// Appending another copy of the fragment should yield another rope.
//
-// Flatting that should turn the original rope into a dependent string, and
+// Flattening that should turn the original rope into a dependent string, and
// yield a new linear string, of the some size as the original.
-rope16a = rope16 + fragment16;
+rope16a = ext16 + fragment16;
assertEq(byteSize(rope16a), s(Nursery(RN), Nursery(RN)));
rope16a.match(/x/, function() { assertEq(true, false); });
-assertEq(byteSize(rope16a), s(Nursery(XN) + 131072, Nursery(XN) + 131072));
-assertEq(byteSize(rope16), s(Nursery(XN), Nursery(XN)));
+assertEq(byteSize(rope16a), s(Nursery(XN) + 128 * 1024, Nursery(XN) + 128 * 1024));
+assertEq(byteSize(ext16), s(Nursery(DN), Nursery(DN)));
+
+// Tenure everything and try again. This time it should steal the extensible
+// characters and convert the root into an extensible string using them.
+ext16 = copyString(ext16);
+rope16a = ext16 + fragment16;
+minorgc();
+assertEq(byteSize(rope16a), s(RN, RN));
+rope16a.match(/x/, function() { assertEq(true, false); });
+assertEq(byteSize(rope16a), s(XN + 128 * 1024, XN + 128 * 1024));
+assertEq(byteSize(ext16), s(RN, RN));
// Test external strings.
//
diff --git a/js/src/jit-test/tests/ion/apply-native-arguments-object.js b/js/src/jit-test/tests/ion/apply-native-arguments-object.js
new file mode 100644
index 0000000000..e06a5e0965
--- /dev/null
+++ b/js/src/jit-test/tests/ion/apply-native-arguments-object.js
@@ -0,0 +1,46 @@
+load(libdir + "array-compare.js");
+
+const xs = [
+ // Zero arguments.
+ [],
+
+ // Single argument.
+ [1],
+
+ // Few arguments. Even number of arguments.
+ [1, 2],
+
+ // Few arguments. Odd number of arguments.
+ [1, 2, 3],
+
+ // Many arguments. Even number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
+
+ // Many arguments. Odd number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9],
+];
+
+function escape() {
+ with ({}) ;
+}
+
+function f() {
+ // Let |arguments| escape to force the allocation of an arguments object.
+ escape(arguments);
+
+ // FunApply to a native function with an arguments object.
+ return Array.apply(null, arguments);
+}
+
+// Don't inline |f| into the top-level script.
+with ({}) ;
+
+for (let i = 0; i < 400; ++i) {
+ let x = xs[i % xs.length];
+
+ // NB: Array(1) creates the array `[,]`.
+ let expected = x.length !== 1 ? x : [,];
+
+ let result = f.apply(null, x);
+ assertEq(arraysEqual(result, expected), true);
+}
diff --git a/js/src/jit-test/tests/ion/apply-native-arguments.js b/js/src/jit-test/tests/ion/apply-native-arguments.js
new file mode 100644
index 0000000000..3d6729ca76
--- /dev/null
+++ b/js/src/jit-test/tests/ion/apply-native-arguments.js
@@ -0,0 +1,39 @@
+load(libdir + "array-compare.js");
+
+const xs = [
+ // Zero arguments.
+ [],
+
+ // Single argument.
+ [1],
+
+ // Few arguments. Even number of arguments.
+ [1, 2],
+
+ // Few arguments. Odd number of arguments.
+ [1, 2, 3],
+
+ // Many arguments. Even number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
+
+ // Many arguments. Odd number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9],
+];
+
+function f() {
+ // FunApply to a native function with frame arguments.
+ return Array.apply(null, arguments);
+}
+
+// Don't inline |f| into the top-level script.
+with ({}) ;
+
+for (let i = 0; i < 400; ++i) {
+ let x = xs[i % xs.length];
+
+ // NB: Array(1) creates the array `[,]`.
+ let expected = x.length !== 1 ? x : [,];
+
+ let result = f.apply(null, x);
+ assertEq(arraysEqual(result, expected), true);
+}
diff --git a/js/src/jit-test/tests/ion/apply-native-array.js b/js/src/jit-test/tests/ion/apply-native-array.js
new file mode 100644
index 0000000000..0dfa2df947
--- /dev/null
+++ b/js/src/jit-test/tests/ion/apply-native-array.js
@@ -0,0 +1,39 @@
+load(libdir + "array-compare.js");
+
+const xs = [
+ // Zero arguments.
+ [],
+
+ // Single argument.
+ [1],
+
+ // Few arguments. Even number of arguments.
+ [1, 2],
+
+ // Few arguments. Odd number of arguments.
+ [1, 2, 3],
+
+ // Many arguments. Even number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
+
+ // Many arguments. Odd number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9],
+];
+
+function f(x) {
+ // FunApply to a native function with an array.
+ return Array.apply(null, x);
+}
+
+// Don't inline |f| into the top-level script.
+with ({}) ;
+
+for (let i = 0; i < 400; ++i) {
+ let x = xs[i % xs.length];
+
+ // NB: Array(1) creates the array `[,]`.
+ let expected = x.length !== 1 ? x : [,];
+
+ let result = f(x);
+ assertEq(arraysEqual(result, expected), true);
+}
diff --git a/js/src/jit-test/tests/ion/apply-native-spreadcall-arguments.js b/js/src/jit-test/tests/ion/apply-native-spreadcall-arguments.js
new file mode 100644
index 0000000000..9f769e4a59
--- /dev/null
+++ b/js/src/jit-test/tests/ion/apply-native-spreadcall-arguments.js
@@ -0,0 +1,39 @@
+load(libdir + "array-compare.js");
+
+const xs = [
+ // Zero arguments.
+ [],
+
+ // Single argument.
+ [1],
+
+ // Few arguments. Even number of arguments.
+ [1, 2],
+
+ // Few arguments. Odd number of arguments.
+ [1, 2, 3],
+
+ // Many arguments. Even number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
+
+ // Many arguments. Odd number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9],
+];
+
+function f() {
+ // SpreadCall to a native function with frame arguments.
+ return Array(...arguments);
+}
+
+// Don't inline |f| into the top-level script.
+with ({}) ;
+
+for (let i = 0; i < 400; ++i) {
+ let x = xs[i % xs.length];
+
+ // NB: Array(1) creates the array `[,]`.
+ let expected = x.length !== 1 ? x : [,];
+
+ let result = f.apply(null, x);
+ assertEq(arraysEqual(result, expected), true);
+}
diff --git a/js/src/jit-test/tests/ion/apply-native-spreadcall-array.js b/js/src/jit-test/tests/ion/apply-native-spreadcall-array.js
new file mode 100644
index 0000000000..24e5621484
--- /dev/null
+++ b/js/src/jit-test/tests/ion/apply-native-spreadcall-array.js
@@ -0,0 +1,39 @@
+load(libdir + "array-compare.js");
+
+const xs = [
+ // Zero arguments.
+ [],
+
+ // Single argument.
+ [1],
+
+ // Few arguments. Even number of arguments.
+ [1, 2],
+
+ // Few arguments. Odd number of arguments.
+ [1, 2, 3],
+
+ // Many arguments. Even number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
+
+ // Many arguments. Odd number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9],
+];
+
+function f(x) {
+ // SpreadCall to a native function with an array.
+ return Array(...x);
+}
+
+// Don't inline |f| into the top-level script.
+with ({}) ;
+
+for (let i = 0; i < 400; ++i) {
+ let x = xs[i % xs.length];
+
+ // NB: Array(1) creates the array `[,]`.
+ let expected = x.length !== 1 ? x : [,];
+
+ let result = f(x);
+ assertEq(arraysEqual(result, expected), true);
+}
diff --git a/js/src/jit-test/tests/ion/apply-native-spreadcall-rest.js b/js/src/jit-test/tests/ion/apply-native-spreadcall-rest.js
new file mode 100644
index 0000000000..ba7038244d
--- /dev/null
+++ b/js/src/jit-test/tests/ion/apply-native-spreadcall-rest.js
@@ -0,0 +1,39 @@
+load(libdir + "array-compare.js");
+
+const xs = [
+ // Zero arguments.
+ [],
+
+ // Single argument.
+ [1],
+
+ // Few arguments. Even number of arguments.
+ [1, 2],
+
+ // Few arguments. Odd number of arguments.
+ [1, 2, 3],
+
+ // Many arguments. Even number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
+
+ // Many arguments. Odd number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9],
+];
+
+function f(...x) {
+ // SpreadCall to a native function with rest-args.
+ return Array(...x);
+}
+
+// Don't inline |f| into the top-level script.
+with ({}) ;
+
+for (let i = 0; i < 400; ++i) {
+ let x = xs[i % xs.length];
+
+ // NB: Array(1) creates the array `[,]`.
+ let expected = x.length !== 1 ? x : [,];
+
+ let result = f.apply(null, x);
+ assertEq(arraysEqual(result, expected), true);
+}
diff --git a/js/src/jit-test/tests/ion/apply-native-spreadnew-arguments.js b/js/src/jit-test/tests/ion/apply-native-spreadnew-arguments.js
new file mode 100644
index 0000000000..7e31cdcbd6
--- /dev/null
+++ b/js/src/jit-test/tests/ion/apply-native-spreadnew-arguments.js
@@ -0,0 +1,39 @@
+load(libdir + "array-compare.js");
+
+const xs = [
+ // Zero arguments.
+ [],
+
+ // Single argument.
+ [1],
+
+ // Few arguments. Even number of arguments.
+ [1, 2],
+
+ // Few arguments. Odd number of arguments.
+ [1, 2, 3],
+
+ // Many arguments. Even number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
+
+ // Many arguments. Odd number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9],
+];
+
+function f() {
+ // SpreadNew to a native function with frame arguments.
+ return new Array(...arguments);
+}
+
+// Don't inline |f| into the top-level script.
+with ({}) ;
+
+for (let i = 0; i < 400; ++i) {
+ let x = xs[i % xs.length];
+
+ // NB: Array(1) creates the array `[,]`.
+ let expected = x.length !== 1 ? x : [,];
+
+ let result = f.apply(null, x);
+ assertEq(arraysEqual(result, expected), true);
+}
diff --git a/js/src/jit-test/tests/ion/apply-native-spreadnew-array.js b/js/src/jit-test/tests/ion/apply-native-spreadnew-array.js
new file mode 100644
index 0000000000..5c716f48b4
--- /dev/null
+++ b/js/src/jit-test/tests/ion/apply-native-spreadnew-array.js
@@ -0,0 +1,39 @@
+load(libdir + "array-compare.js");
+
+const xs = [
+ // Zero arguments.
+ [],
+
+ // Single argument.
+ [1],
+
+ // Few arguments. Even number of arguments.
+ [1, 2],
+
+ // Few arguments. Odd number of arguments.
+ [1, 2, 3],
+
+ // Many arguments. Even number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
+
+ // Many arguments. Odd number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9],
+];
+
+function f(x) {
+ // SpreadNew to a native function with an array.
+ return new Array(...x);
+}
+
+// Don't inline |f| into the top-level script.
+with ({}) ;
+
+for (let i = 0; i < 400; ++i) {
+ let x = xs[i % xs.length];
+
+ // NB: Array(1) creates the array `[,]`.
+ let expected = x.length !== 1 ? x : [,];
+
+ let result = f(x);
+ assertEq(arraysEqual(result, expected), true);
+}
diff --git a/js/src/jit-test/tests/ion/apply-native-spreadnew-newtarget.js b/js/src/jit-test/tests/ion/apply-native-spreadnew-newtarget.js
new file mode 100644
index 0000000000..9ffe53277b
--- /dev/null
+++ b/js/src/jit-test/tests/ion/apply-native-spreadnew-newtarget.js
@@ -0,0 +1,66 @@
+load(libdir + "array-compare.js");
+
+const xs = [
+ // Zero arguments.
+ [],
+
+ // Single argument.
+ [1],
+
+ // Few arguments. Even number of arguments.
+ [1, 2],
+
+ // Few arguments. Odd number of arguments.
+ [1, 2, 3],
+
+ // Many arguments. Even number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
+
+ // Many arguments. Odd number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9],
+];
+
+class ArrayWithExplicitConstructor extends Array {
+ constructor(...args) {
+ super(...args);
+ }
+}
+
+class ArrayWithImplicitConstructor extends Array {
+ constructor(...args) {
+ super(...args);
+ }
+}
+
+function f(...x) {
+ return new ArrayWithExplicitConstructor(...x);
+}
+
+function g(...x) {
+ return new ArrayWithImplicitConstructor(...x);
+}
+
+// Don't inline |f| and |g| into the top-level script.
+with ({}) ;
+
+for (let i = 0; i < 400; ++i) {
+ let x = xs[i % xs.length];
+
+ // NB: Array(1) creates the array `[,]`.
+ let expected = x.length !== 1 ? x : [,];
+
+ let result = f.apply(null, x);
+ assertEq(arraysEqual(result, expected), true);
+ assertEq(Object.getPrototypeOf(result), ArrayWithExplicitConstructor.prototype);
+}
+
+for (let i = 0; i < 400; ++i) {
+ let x = xs[i % xs.length];
+
+ // NB: Array(1) creates the array `[,]`.
+ let expected = x.length !== 1 ? x : [,];
+
+ let result = g.apply(null, x);
+ assertEq(arraysEqual(result, expected), true);
+ assertEq(Object.getPrototypeOf(result), ArrayWithImplicitConstructor.prototype);
+}
diff --git a/js/src/jit-test/tests/ion/apply-native-spreadnew-rest.js b/js/src/jit-test/tests/ion/apply-native-spreadnew-rest.js
new file mode 100644
index 0000000000..58de8fa239
--- /dev/null
+++ b/js/src/jit-test/tests/ion/apply-native-spreadnew-rest.js
@@ -0,0 +1,39 @@
+load(libdir + "array-compare.js");
+
+const xs = [
+ // Zero arguments.
+ [],
+
+ // Single argument.
+ [1],
+
+ // Few arguments. Even number of arguments.
+ [1, 2],
+
+ // Few arguments. Odd number of arguments.
+ [1, 2, 3],
+
+ // Many arguments. Even number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 0],
+
+ // Many arguments. Odd number of arguments.
+ [1, 2, 3, 4, 5, 6, 7, 8, 9],
+];
+
+function f(...x) {
+ // SpreadNew to a native function with rest-args.
+ return new Array(...x);
+}
+
+// Don't inline |f| into the top-level script.
+with ({}) ;
+
+for (let i = 0; i < 400; ++i) {
+ let x = xs[i % xs.length];
+
+ // NB: Array(1) creates the array `[,]`.
+ let expected = x.length !== 1 ? x : [,];
+
+ let result = f.apply(null, x);
+ assertEq(arraysEqual(result, expected), true);
+}
diff --git a/js/src/jit-test/tests/ion/recover-atomics-islockfree.js b/js/src/jit-test/tests/ion/recover-atomics-islockfree.js
new file mode 100644
index 0000000000..2a57afd49b
--- /dev/null
+++ b/js/src/jit-test/tests/ion/recover-atomics-islockfree.js
@@ -0,0 +1,25 @@
+// |jit-test| --fast-warmup; --ion-offthread-compile=off
+
+function foo(n, trigger) {
+ let result = Atomics.isLockFree(n * -1);
+ if (trigger) {
+ assertEq(result, false);
+ }
+}
+
+for (var i = 0; i < 100; i++) {
+ foo(-50, false);
+}
+foo(0, true);
+
+function bar(n, trigger) {
+ let result = Atomics.isLockFree(n * 4);
+ if (trigger) {
+ assertEq(result, false);
+ }
+}
+
+for (var i = 0; i < 100; i++) {
+ bar(1, false);
+}
+bar(0x40000001, true);
diff --git a/js/src/jit-test/tests/ion/recover-string-from-charcode.js b/js/src/jit-test/tests/ion/recover-string-from-charcode.js
new file mode 100644
index 0000000000..be060be8e7
--- /dev/null
+++ b/js/src/jit-test/tests/ion/recover-string-from-charcode.js
@@ -0,0 +1,13 @@
+// |jit-test| --fast-warmup; --ion-offthread-compile=off
+
+function foo(n, trigger) {
+ let result = String.fromCharCode(n * -1);
+ if (trigger) {
+ assertEq(result, "\0");
+ }
+}
+
+for (var i = 0; i < 100; i++) {
+ foo(-50, false);
+}
+foo(0, true);
diff --git a/js/src/jit-test/tests/modules/bug-1888902.js b/js/src/jit-test/tests/modules/bug-1888902.js
new file mode 100644
index 0000000000..7804bef98a
--- /dev/null
+++ b/js/src/jit-test/tests/modules/bug-1888902.js
@@ -0,0 +1,16 @@
+// |jit-test| error:Error
+
+const v0 = `
+ function F1() {
+ const v11 = registerModule("module1", parseModule(\`import {} from "module2";
+ import {} from "module3";\`));
+ const v13 = "await 1;";
+ drainJobQueue();
+ registerModule("module2", parseModule(v13));
+ registerModule("module3", parseModule(v0));
+ moduleLink(v11);
+ moduleEvaluate(v11);
+ }
+ F1();
+`;
+eval(v0);
diff --git a/js/src/jit-test/tests/modules/dynamic-import-error.js b/js/src/jit-test/tests/modules/dynamic-import-error.js
index 98a6af75d0..56713c8485 100644
--- a/js/src/jit-test/tests/modules/dynamic-import-error.js
+++ b/js/src/jit-test/tests/modules/dynamic-import-error.js
@@ -1,5 +1,3 @@
-// |jit-test| module
-
let result = null;
let error = null;
let promise = import("nonexistent.js");
diff --git a/js/src/jit-test/tests/modules/dynamic-import-module.js b/js/src/jit-test/tests/modules/dynamic-import-module.js
index 3c004258a3..fa19b74303 100644
--- a/js/src/jit-test/tests/modules/dynamic-import-module.js
+++ b/js/src/jit-test/tests/modules/dynamic-import-module.js
@@ -1,5 +1,3 @@
-// |jit-test| module
-
function testImport(path, name, value) {
let result = null;
let error = null;
diff --git a/js/src/jit-test/tests/modules/inline-data-2.js b/js/src/jit-test/tests/modules/inline-data-2.js
new file mode 100644
index 0000000000..8dbf92574d
--- /dev/null
+++ b/js/src/jit-test/tests/modules/inline-data-2.js
@@ -0,0 +1,12 @@
+let result = null;
+let error = null;
+let promise = import("javascript: export let b = 100;");
+promise.then((ns) => {
+ result = ns;
+}).catch((e) => {
+ error = e;
+});
+
+drainJobQueue();
+assertEq(error, null);
+assertEq(result.b, 100);
diff --git a/js/src/jit-test/tests/modules/inline-data.js b/js/src/jit-test/tests/modules/inline-data.js
index 9c56856f8d..d81da0efe4 100644
--- a/js/src/jit-test/tests/modules/inline-data.js
+++ b/js/src/jit-test/tests/modules/inline-data.js
@@ -2,16 +2,3 @@
import { a } from "javascript: export let a = 42;";
assertEq(a, 42);
-
-let result = null;
-let error = null;
-let promise = import("javascript: export let b = 100;");
-promise.then((ns) => {
- result = ns;
-}).catch((e) => {
- error = e;
-});
-
-drainJobQueue();
-assertEq(error, null);
-assertEq(result.b, 100);
diff --git a/js/src/jit-test/tests/modules/shell-wrapper.js b/js/src/jit-test/tests/modules/shell-wrapper.js
index 1be1c486c6..058e574d4e 100644
--- a/js/src/jit-test/tests/modules/shell-wrapper.js
+++ b/js/src/jit-test/tests/modules/shell-wrapper.js
@@ -1,4 +1,3 @@
-// |jit-test| module
// Test shell ModuleObject wrapper's accessors and methods
load(libdir + "asserts.js");
@@ -49,10 +48,8 @@ const d = registerModule('d', parseModule(`
f();
`));
moduleLink(d);
-try {
- await moduleEvaluate(d);
-} catch (e) {
-}
+moduleEvaluate(d).catch(e => undefined);
+drainJobQueue();
assertEq(d.evaluationError instanceof ReferenceError, true);
testGetter(d, "evaluationError");
diff --git a/js/src/jit-test/tests/parser/bug1887176.js b/js/src/jit-test/tests/parser/bug1887176.js
new file mode 100644
index 0000000000..bea2db519b
--- /dev/null
+++ b/js/src/jit-test/tests/parser/bug1887176.js
@@ -0,0 +1,46 @@
+
+// This tests a case where TokenStreamAnyChars::fillExceptingContext
+// mishandled a wasm frame, leading to an assertion failure.
+
+if (!wasmIsSupported())
+ quit();
+
+const v0 = `
+ const o6 = {
+ f() {
+ function F2() {
+ if (!new.target) { throw 'must be called with new'; }
+ }
+ return F2();
+ return {}; // This can be anything, but it must be present
+ },
+ };
+
+ const o7 = {
+ "main": o6,
+ };
+
+ const v15 = new WebAssembly.Module(wasmTextToBinary(\`
+ (module
+ (import "main" "f" (func))
+ (func (export "go")
+ call 0
+ )
+ )\`));
+ const v16 = new WebAssembly.Instance(v15, o7);
+ v16.exports.go();
+`;
+
+const o27 = {
+ // Both "fileName" and null are necessary
+ "fileName": null,
+};
+
+let caught = false;
+try {
+ evaluate(v0, o27);
+} catch (e) {
+ assertEq(e, "must be called with new");
+ caught = true;
+}
+assertEq(caught, true);
diff --git a/js/src/jit-test/tests/parser/dumpStencil-02.js b/js/src/jit-test/tests/parser/dumpStencil-02.js
new file mode 100644
index 0000000000..e21962c36b
--- /dev/null
+++ b/js/src/jit-test/tests/parser/dumpStencil-02.js
@@ -0,0 +1,8 @@
+let caught = false;
+try {
+ dumpStencil("export var z;", { module : true, lineNumber: 0 });
+} catch (e) {
+ caught = true;
+ assertEq(e.message.includes("Module cannot be compiled with lineNumber == 0"), true);
+}
+assertEq(caught, true);
diff --git a/js/src/jit-test/tests/parser/module-filename.js b/js/src/jit-test/tests/parser/module-filename.js
new file mode 100644
index 0000000000..59017dd674
--- /dev/null
+++ b/js/src/jit-test/tests/parser/module-filename.js
@@ -0,0 +1,13 @@
+load(libdir + "asserts.js");
+
+compileToStencil("", { fileName: "", module: true });
+assertThrowsInstanceOf(() => {
+ compileToStencil("", { fileName: null, module: true });
+}, Error);
+
+if (helperThreadCount() > 0) {
+ offThreadCompileModuleToStencil("", { fileName: "", module: true });
+ assertThrowsInstanceOf(() => {
+ offThreadCompileModuleToStencil("", { fileName: null, module: true });
+ }, Error);
+}
diff --git a/js/src/jit-test/tests/profiler/native-trampoline-2.js b/js/src/jit-test/tests/profiler/native-trampoline-2.js
new file mode 100644
index 0000000000..a85913431b
--- /dev/null
+++ b/js/src/jit-test/tests/profiler/native-trampoline-2.js
@@ -0,0 +1,7 @@
+let arr = [1, 2, 3, 4, 5, 6, 7, 8];
+arr.sort((x, y) => {
+ enableGeckoProfilingWithSlowAssertions();
+ readGeckoProfilingStack();
+ return y - x;
+});
+assertEq(arr.toString(), "8,7,6,5,4,3,2,1");
diff --git a/js/src/jit-test/tests/profiler/native-trampoline-3.js b/js/src/jit-test/tests/profiler/native-trampoline-3.js
new file mode 100644
index 0000000000..16fe547051
--- /dev/null
+++ b/js/src/jit-test/tests/profiler/native-trampoline-3.js
@@ -0,0 +1,32 @@
+// |jit-test| skip-if: !wasmIsSupported()
+
+// Use a Wasm module to get the following stack frames:
+//
+// .. => array sort trampoline => wasmfunc comparator (Wasm) => comparator (JS)
+
+let binary = wasmTextToBinary(`
+(module
+ (import "" "comparator" (func $comparator (param i32) (param i32) (result i32)))
+ (func $wasmfunc
+ (export "wasmfunc")
+ (param $x i32)
+ (param $y i32)
+ (result i32)
+ (return (call $comparator (local.get $x) (local.get $y)))
+ )
+)`);
+let mod = new WebAssembly.Module(binary);
+let instance = new WebAssembly.Instance(mod, {"": {comparator}});
+
+function comparator(x, y) {
+ readGeckoProfilingStack();
+ return y - x;
+}
+
+enableGeckoProfilingWithSlowAssertions();
+
+for (let i = 0; i < 20; i++) {
+ let arr = [3, 1, 2, -1, 0, 4];
+ arr.sort(instance.exports.wasmfunc);
+ assertEq(arr.toString(), "4,3,2,1,0,-1");
+}
diff --git a/js/src/jit-test/tests/profiler/native-trampoline.js b/js/src/jit-test/tests/profiler/native-trampoline.js
new file mode 100644
index 0000000000..e140874a15
--- /dev/null
+++ b/js/src/jit-test/tests/profiler/native-trampoline.js
@@ -0,0 +1,40 @@
+enableGeckoProfilingWithSlowAssertions();
+
+function testBasic() {
+ var arr = [2, -1];
+ var cmp = function(x, y) {
+ readGeckoProfilingStack();
+ return x - y;
+ };
+ for (var i = 0; i < 20; i++) {
+ arr.sort(cmp);
+ }
+}
+testBasic();
+
+function testRectifierFrame() {
+ var arr = [2, -1];
+ var cmp = function(x, y, z, a) {
+ readGeckoProfilingStack();
+ return x - y;
+ };
+ for (var i = 0; i < 20; i++) {
+ arr.sort(cmp);
+ }
+}
+testRectifierFrame();
+
+function testRectifierFrameCaller() {
+ var o = {};
+ var calls = 0;
+ Object.defineProperty(o, "length", {get: function() {
+ calls++;
+ readGeckoProfilingStack();
+ return 0;
+ }});
+ for (var i = 0; i < 20; i++) {
+ Array.prototype.sort.call(o);
+ }
+ assertEq(calls, 20);
+}
+testRectifierFrameCaller();
diff --git a/js/src/jit-test/tests/profiler/wasm-to-js-1.js b/js/src/jit-test/tests/profiler/wasm-to-js-1.js
new file mode 100644
index 0000000000..2ce48f391c
--- /dev/null
+++ b/js/src/jit-test/tests/profiler/wasm-to-js-1.js
@@ -0,0 +1,20 @@
+// |jit-test| skip-if: !wasmIsSupported(); --fast-warmup
+function sample() {
+ enableGeckoProfiling();
+ readGeckoProfilingStack();
+ disableGeckoProfiling();
+}
+const text = `(module
+ (import "m" "f" (func $f))
+ (func (export "test")
+ (call $f)
+))`;
+const bytes = wasmTextToBinary(text);
+const mod = new WebAssembly.Module(bytes);
+const imports = {"m": {"f": sample}};
+const instance = new WebAssembly.Instance(mod, imports);
+sample();
+for (let i = 0; i < 5; i++) {
+ gc(this, "shrinking");
+ instance.exports.test();
+}
diff --git a/js/src/jit-test/tests/profiler/wasm-to-js-2.js b/js/src/jit-test/tests/profiler/wasm-to-js-2.js
new file mode 100644
index 0000000000..3949c3a587
--- /dev/null
+++ b/js/src/jit-test/tests/profiler/wasm-to-js-2.js
@@ -0,0 +1,19 @@
+// |jit-test| skip-if: !wasmIsSupported()
+// Ensure readGeckoProfilingStack finds at least 1 Wasm frame on the stack.
+function calledFromWasm() {
+ let frames = readGeckoProfilingStack().flat();
+ assertEq(frames.filter(f => f.kind === "wasm").length >= 1, true);
+}
+enableGeckoProfiling();
+const text = `(module
+ (import "m" "f" (func $f))
+ (func (export "test")
+ (call $f)
+))`;
+const bytes = wasmTextToBinary(text);
+const mod = new WebAssembly.Module(bytes);
+const imports = {"m": {"f": calledFromWasm}};
+const instance = new WebAssembly.Instance(mod, imports);
+for (let i = 0; i < 150; i++) {
+ instance.exports.test();
+}
diff --git a/js/src/jit-test/tests/promise/allSettled-dead.js b/js/src/jit-test/tests/promise/allSettled-dead.js
new file mode 100644
index 0000000000..8ae8e53d6b
--- /dev/null
+++ b/js/src/jit-test/tests/promise/allSettled-dead.js
@@ -0,0 +1,20 @@
+newGlobal();
+const g = newGlobal({
+ "newCompartment": true,
+});
+const p1 = g.eval(`
+Promise.resolve();
+`);
+const p2 = p1.then();
+nukeAllCCWs();
+ignoreUnhandledRejections();
+Promise.resolve = function() {
+ return p2;
+};
+let caught = false;
+Promise.allSettled([1]).catch(e => {
+ caught = true;
+ assertEq(e.message.includes("dead object"), true);
+});
+drainJobQueue();
+assertEq(caught, true);
diff --git a/js/src/jit-test/tests/promise/jobqueue-interrupt-01.js b/js/src/jit-test/tests/promise/jobqueue-interrupt-01.js
new file mode 100644
index 0000000000..758680e031
--- /dev/null
+++ b/js/src/jit-test/tests/promise/jobqueue-interrupt-01.js
@@ -0,0 +1,23 @@
+// catchTermination should undo the quit() operation and let the remaining jobs
+// run.
+
+evaluate(`
+ quit();
+`, {
+ catchTermination : true
+});
+
+const global = newGlobal({ newCompartment: true });
+
+let called = false;
+const dbg = new Debugger(global);
+dbg.onDebuggerStatement = function (frame) {
+ Promise.resolve(42).then(v => { called = true; });
+};
+global.eval(`
+ debugger;
+`);
+
+drainJobQueue();
+
+assertEq(called, true);
diff --git a/js/src/jit-test/tests/promise/jobqueue-interrupt-02.js b/js/src/jit-test/tests/promise/jobqueue-interrupt-02.js
new file mode 100644
index 0000000000..8d8f27ef91
--- /dev/null
+++ b/js/src/jit-test/tests/promise/jobqueue-interrupt-02.js
@@ -0,0 +1,14 @@
+// quit() while draining job queue leaves the remaining jobs untouched.
+
+const global = newGlobal({ newCompartment:true });
+const dbg = Debugger(global);
+dbg.onDebuggerStatement = function() {
+ Promise.resolve().then(() => {
+ quit();
+ });
+ Promise.resolve().then(() => {
+ // This shouldn't be called.
+ assertEq(true, false);
+ });
+};
+global.eval("debugger");
diff --git a/js/src/jit-test/tests/proxy/bug1885774.js b/js/src/jit-test/tests/proxy/bug1885774.js
new file mode 100644
index 0000000000..fa88cbf823
--- /dev/null
+++ b/js/src/jit-test/tests/proxy/bug1885774.js
@@ -0,0 +1,25 @@
+// |jit-test| --no-threads; --fast-warmup
+
+var {proxy, revoke} = Proxy.revocable({x:1}, {});
+
+function foo(o) {
+ var res = 0;
+ for (var i = 0; i < 2; i++) {
+ res += o.x;
+ }
+ return res;
+}
+
+with ({}) {}
+for (var i = 0; i < 100; i++) {
+ assertEq(foo(proxy), 2);
+}
+
+revoke();
+var caught = false;
+try {
+ foo(proxy);
+} catch {
+ caught = true;
+}
+assertEq(caught, true);
diff --git a/js/src/jit-test/tests/structured-clone/bug1888727.js b/js/src/jit-test/tests/structured-clone/bug1888727.js
new file mode 100644
index 0000000000..7958781c92
--- /dev/null
+++ b/js/src/jit-test/tests/structured-clone/bug1888727.js
@@ -0,0 +1,21 @@
+function test() {
+ // Construct a structured clone of a random BigInt value.
+ const n = 0xfeeddeadbeef2dadfeeddeadbeef2dadfeeddeadbeef2dadfeeddeadbeef2dadn;
+ const s = serialize(n, [], {scope: 'DifferentProcess'});
+ assertEq(deserialize(s), n);
+
+ // Truncate it by chopping off the last 8 bytes.
+ s.clonebuffer = s.arraybuffer.slice(0, -8);
+
+ // Deserialization should now throw a catchable exception.
+ try {
+ deserialize(s);
+ // The bug was throwing an uncatchable error, so this next assertion won't
+ // be reached in either the buggy or fixed code.
+ assertEq(true, false, "should have thrown truncation error");
+ } catch (e) {
+ assertEq(e.message.includes("truncated"), true);
+ }
+}
+
+test();
diff --git a/js/src/jit-test/tests/structured-clone/tenuring.js b/js/src/jit-test/tests/structured-clone/tenuring.js
index 0fffa064fa..cec53a6956 100644
--- a/js/src/jit-test/tests/structured-clone/tenuring.js
+++ b/js/src/jit-test/tests/structured-clone/tenuring.js
@@ -1,4 +1,4 @@
-// Check that we switch to allocating in the tenure heap after the first
+// Check that we switch to allocating in the tenured heap after the first
// nursery collection.
function buildObjectTree(depth) {
@@ -82,6 +82,7 @@ function countHeapLocations(tree, objectTree, counts) {
gczeal(0);
gcparam('minNurseryBytes', 1024 * 1024);
gcparam('maxNurseryBytes', 1024 * 1024);
+gcparam('semispaceNurseryEnabled', 0);
gc();
testRoundTrip(1, true, true);
diff --git a/js/src/jit-test/tests/typedarray/resizable-typedarray-from-pinned-buffer.js b/js/src/jit-test/tests/typedarray/resizable-typedarray-from-pinned-buffer.js
new file mode 100644
index 0000000000..b17c7c0157
--- /dev/null
+++ b/js/src/jit-test/tests/typedarray/resizable-typedarray-from-pinned-buffer.js
@@ -0,0 +1,9 @@
+// |jit-test| --enable-arraybuffer-resizable
+
+let ab = new ArrayBuffer(8, {maxByteLength: 10});
+
+pinArrayBufferOrViewLength(ab);
+
+let ta = new Int8Array(ab);
+
+assertEq(ta.length, 8);
diff --git a/js/src/jit-test/tests/warp/bug1876425.js b/js/src/jit-test/tests/warp/bug1876425.js
new file mode 100644
index 0000000000..aca528aac6
--- /dev/null
+++ b/js/src/jit-test/tests/warp/bug1876425.js
@@ -0,0 +1,62 @@
+// 1) Trial inline f1 => g (g1) => h.
+// 2) Set g to g2, to fail the f1 => g1 call site.
+// 3) Set g to g1 again.
+// 4) Make g1's generic ICScript trial inline a different callee, h2.
+// 5) Bail out from f1 => g1 => h.
+//
+// The bailout must not confuse the ICScripts of h1 and h2.
+
+function noninlined1(x) {
+ with (this) {};
+ if (x === 4002) {
+ // Step 4.
+ f2();
+ // Step 5.
+ return true;
+ }
+ return false;
+}
+function noninlined2(x) {
+ with (this) {};
+ if (x === 4000) {
+ // Step 2.
+ g = (h, x) => {
+ return x + 1;
+ };
+ }
+ if (x === 4001) {
+ // Step 3.
+ g = g1;
+ }
+}
+var h = function(x) {
+ if (noninlined1(x)) {
+ // Step 5.
+ bailout();
+ }
+ return x + 1;
+};
+var g = function(callee, x) {
+ return callee(x) + 1;
+};
+var g1 = g;
+
+function f2() {
+ var h2 = x => x + 1;
+ for (var i = 0; i < 300; i++) {
+ var x = (i % 2 === 0) ? "foo" : i; // Force trial inlining.
+ g1(h2, x);
+ }
+}
+
+function f1() {
+ for (var i = 0; i < 4200; i++) {
+ var x = (i < 900 && i % 2 === 0) ? "foo" : i; // Force trial inlining.
+ g(h, x);
+ noninlined2(i);
+ if (i === 200) {
+ trialInline();
+ }
+ }
+}
+f1();
diff --git a/js/src/jit-test/tests/wasm/directiveless/bug1877358.js b/js/src/jit-test/tests/wasm/directiveless/bug1877358.js
index 10cb54398a..8d512efcfe 100644
--- a/js/src/jit-test/tests/wasm/directiveless/bug1877358.js
+++ b/js/src/jit-test/tests/wasm/directiveless/bug1877358.js
@@ -1,4 +1,4 @@
-// |jit-test| -P wasm_exceptions=false; include:wasm.js
+// |jit-test| include:wasm.js
let {test} = wasmEvalText(`(module
(func $m (import "" "m"))
diff --git a/js/src/jit-test/tests/wasm/gc/casting.js b/js/src/jit-test/tests/wasm/gc/casting.js
index a71a589db8..3b550e6415 100644
--- a/js/src/jit-test/tests/wasm/gc/casting.js
+++ b/js/src/jit-test/tests/wasm/gc/casting.js
@@ -114,3 +114,72 @@ function testAllCasts(types) {
}
}
testAllCasts(TYPES);
+
+// Test that combinations of ref.test and ref.cast compile correctly.
+// (These can be optimized together.)
+{
+ const { make, test1, test2, test3, test4 } = wasmEvalText(`(module
+ (type $a (array i32))
+ (func (export "make") (param i32) (result anyref)
+ local.get 0
+ local.get 0
+ array.new_fixed $a 2
+ )
+ (func (export "test1") (param anyref) (result i32)
+ (if (ref.test (ref $a) (local.get 0))
+ (then
+ (ref.cast (ref $a) (local.get 0))
+ (array.get $a (i32.const 0))
+ return
+ )
+ )
+ i32.const -1
+ )
+ (func (export "test2") (param anyref) (result i32)
+ (if (ref.test (ref $a) (local.get 0))
+ (then)
+ (else
+ (ref.cast (ref $a) (local.get 0))
+ (array.get $a (i32.const 0))
+ return
+ )
+ )
+ i32.const -1
+ )
+ (func (export "test3") (param anyref) (result i32)
+ (if (ref.test (ref $a) (local.get 0))
+ (then
+ (if (ref.test (ref $a) (local.get 0))
+ (then)
+ (else
+ (ref.cast (ref $a) (local.get 0))
+ (array.get $a (i32.const 0))
+ return
+ )
+ )
+ )
+ )
+ i32.const -1
+ )
+ (func (export "test4") (param anyref) (result i32)
+ (if (ref.test (ref $a) (local.get 0))
+ (then
+ (if (ref.test (ref $a) (local.get 0))
+ (then
+ local.get 0
+ ref.cast (ref $a)
+ ref.cast (ref $a)
+ (array.get $a (i32.const 0))
+ return
+ )
+ )
+ )
+ )
+ i32.const -1
+ )
+ )`).exports;
+ assertEq(test1(make(99)), 99);
+ assertEq(test2(make(99)), -1);
+ assertEq(test3(make(99)), -1);
+ assertEq(test4(make(99)), 99);
+}
diff --git a/js/src/jit-test/tests/wasm/gc/i31ref.js b/js/src/jit-test/tests/wasm/gc/i31ref.js
index 65f2fccc3f..298447e848 100644
--- a/js/src/jit-test/tests/wasm/gc/i31ref.js
+++ b/js/src/jit-test/tests/wasm/gc/i31ref.js
@@ -149,6 +149,24 @@ for (const {input, expected} of bigI32Tests) {
assertEq(getElem(), expected);
}
+// Test that (ref.i31 (i32 const value)) optimization is correct
+for (let value of WasmI31refValues) {
+ let {compare} = wasmEvalText(`(module
+ (func $innerCompare (param i32) (param i31ref) (result i32)
+ (ref.eq
+ (ref.i31 local.get 0)
+ local.get 1
+ )
+ )
+ (func (export "compare") (result i32)
+ i32.const ${value}
+ (ref.i31 i32.const ${value})
+ call $innerCompare
+ )
+)`).exports;
+ assertEq(compare(value), 1);
+}
+
const { i31GetU_null, i31GetS_null } = wasmEvalText(`(module
(func (export "i31GetU_null") (result i32)
ref.null i31
diff --git a/js/src/jit-test/tests/wasm/regress/bug1886870.js b/js/src/jit-test/tests/wasm/regress/bug1886870.js
new file mode 100644
index 0000000000..a4947bd91a
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/regress/bug1886870.js
@@ -0,0 +1,8 @@
+// Check proper handling of OOM after toQuotedString().
+
+oomTest(function () {
+ new WebAssembly.Instance(
+ new WebAssembly.Module(wasmTextToBinary('(import "m" "f" (func $f))')),
+ {}
+ );
+});
diff --git a/js/src/jit-test/tests/wasm/regress/bug1887535.js b/js/src/jit-test/tests/wasm/regress/bug1887535.js
new file mode 100644
index 0000000000..e2793831bf
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/regress/bug1887535.js
@@ -0,0 +1,25 @@
+// |jit-test| slow;
+
+// Tests the exception handling works during stack overflow.
+const v1 = newGlobal({sameZoneAs: this});
+class C2 {
+ static { }
+}
+
+function f() { v1.constructor; }
+
+const { test } = wasmEvalText(`
+(module
+ (import "" "f" (func $f))
+ (export "test" (func $f))
+)`, { "": { f, },}).exports;
+
+
+function f4() {
+ try {
+ f4();
+ } catch(_) {
+ test(); test();
+ }
+}
+f4();
diff --git a/js/src/jit-test/tests/wasm/regress/bug1887596.js b/js/src/jit-test/tests/wasm/regress/bug1887596.js
new file mode 100644
index 0000000000..8ff579fc35
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/regress/bug1887596.js
@@ -0,0 +1,14 @@
+const t = `
+ (module
+ (func $f (result f32)
+ f32.const 1.25
+ )
+ (table (export "table") 10 funcref)
+ (elem (i32.const 0) $f)
+ )`;
+const i = new WebAssembly.Instance(new WebAssembly.Module(wasmTextToBinary(t)));
+const f = i.exports.table.get(0);
+
+// These FP equality comparisons are safe because 1.25 is representable exactly.
+assertEq(1.25, f());
+assertEq(1.25, this.wasmLosslessInvoke(f).value);
diff --git a/js/src/jit/ABIFunctionList-inl.h b/js/src/jit/ABIFunctionList-inl.h
index f8a52beeff..eb2123f7a2 100644
--- a/js/src/jit/ABIFunctionList-inl.h
+++ b/js/src/jit/ABIFunctionList-inl.h
@@ -103,6 +103,8 @@ namespace jit {
_(js::ArgumentsObject::finishForIonPure) \
_(js::ArgumentsObject::finishInlineForIonPure) \
_(js::ArrayShiftMoveElements) \
+ _(js::ArraySortData::sortWithComparator) \
+ _(js::ArraySortFromJit) \
_(js::ecmaAtan2) \
_(js::ecmaHypot) \
_(js::ecmaPow) \
diff --git a/js/src/jit/BaselineBailouts.cpp b/js/src/jit/BaselineBailouts.cpp
index d916d000ed..150e16b618 100644
--- a/js/src/jit/BaselineBailouts.cpp
+++ b/js/src/jit/BaselineBailouts.cpp
@@ -517,7 +517,12 @@ void BaselineStackBuilder::setNextCallee(
//
// Also use the callee's own ICScript if we purged callee ICScripts.
icScript_ = nextCallee->nonLazyScript()->jitScript()->icScript();
+
if (trialInliningState != TrialInliningState::MonomorphicInlined) {
+ // Don't use specialized ICScripts for any of the callees if we had an
+ // inlining failure. We're now using the generic ICScript but compilation
+ // might have used the trial-inlined ICScript and these can have very
+ // different inlining graphs.
canUseTrialInlinedICScripts_ = false;
}
}
@@ -1567,6 +1572,7 @@ bool jit::BailoutIonToBaseline(JSContext* cx, JitActivation* activation,
prevFrameType == FrameType::IonJS ||
prevFrameType == FrameType::BaselineStub ||
prevFrameType == FrameType::Rectifier ||
+ prevFrameType == FrameType::TrampolineNative ||
prevFrameType == FrameType::IonICCall ||
prevFrameType == FrameType::BaselineJS ||
prevFrameType == FrameType::BaselineInterpreterEntry);
@@ -1965,14 +1971,6 @@ bool jit::FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfoArg) {
UnwindEnvironment(cx, ei, bailoutInfo->tryPC);
}
- // Check for interrupts now because we might miss an interrupt check in JIT
- // code when resuming in the prologue, after the stack/interrupt check.
- if (!cx->isExceptionPending()) {
- if (!CheckForInterrupt(cx)) {
- return false;
- }
- }
-
BailoutKind bailoutKind = *bailoutInfo->bailoutKind;
JitSpew(JitSpew_BaselineBailouts,
" Restored outerScript=(%s:%u:%u,%u) innerScript=(%s:%u:%u,%u) "
@@ -2169,7 +2167,17 @@ bool jit::FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfoArg) {
ionScript->incNumFixableBailouts();
if (ionScript->shouldInvalidate()) {
#ifdef DEBUG
- if (saveFailedICHash && !JitOptions.disableBailoutLoopCheck) {
+ // To detect bailout loops, we save a hash of the CacheIR used to
+ // compile this script, and assert that we don't recompile with the
+ // exact same inputs. Some of our bailout detection strategies, like
+ // LICM and stub folding, rely on bailing out, updating some state
+ // when we hit the baseline fallback, and using that information when
+ // we invalidate. If the frequentBailoutThreshold is set too low, we
+ // will instead invalidate the first time we bail out, so we don't
+ // have the chance to make those decisions. That doesn't happen in
+ // regular code, so we just skip bailout loop detection in that case.
+ if (saveFailedICHash && !JitOptions.disableBailoutLoopCheck &&
+ JitOptions.frequentBailoutThreshold > 1) {
outerScript->jitScript()->setFailedICHash(ionScript->icHash());
}
#endif
diff --git a/js/src/jit/BaselineFrame.h b/js/src/jit/BaselineFrame.h
index 6138332c81..f2a6811177 100644
--- a/js/src/jit/BaselineFrame.h
+++ b/js/src/jit/BaselineFrame.h
@@ -109,7 +109,9 @@ class BaselineFrame {
bool isConstructing() const {
return CalleeTokenIsConstructing(calleeToken());
}
- JSScript* script() const { return ScriptFromCalleeToken(calleeToken()); }
+ JSScript* script() const {
+ return MaybeForwardedScriptFromCalleeToken(calleeToken());
+ }
JSFunction* callee() const { return CalleeTokenToFunction(calleeToken()); }
Value calleev() const { return ObjectValue(*callee()); }
diff --git a/js/src/jit/CacheIR.cpp b/js/src/jit/CacheIR.cpp
index 68dbd6bfee..03eae14140 100644
--- a/js/src/jit/CacheIR.cpp
+++ b/js/src/jit/CacheIR.cpp
@@ -1199,7 +1199,8 @@ static ObjOperandId GuardAndLoadWindowProxyWindow(CacheIRWriter& writer,
ObjOperandId objId,
GlobalObject* windowObj) {
writer.guardClass(objId, GuardClassKind::WindowProxy);
- ObjOperandId windowObjId = writer.loadWrapperTarget(objId);
+ ObjOperandId windowObjId = writer.loadWrapperTarget(objId,
+ /*fallible = */ false);
writer.guardSpecificObject(windowObjId, windowObj);
return windowObjId;
}
@@ -1357,7 +1358,8 @@ AttachDecision GetPropIRGenerator::tryAttachCrossCompartmentWrapper(
writer.guardHasProxyHandler(objId, Wrapper::wrapperHandler(obj));
// Load the object wrapped by the CCW
- ObjOperandId wrapperTargetId = writer.loadWrapperTarget(objId);
+ ObjOperandId wrapperTargetId =
+ writer.loadWrapperTarget(objId, /*fallible = */ false);
// If the compartment of the wrapped object is different we should fail.
writer.guardCompartment(wrapperTargetId, wrappedTargetGlobal,
@@ -1468,7 +1470,8 @@ AttachDecision GetPropIRGenerator::tryAttachXrayCrossCompartmentWrapper(
writer.guardHasProxyHandler(objId, GetProxyHandler(obj));
// Load the object wrapped by the CCW
- ObjOperandId wrapperTargetId = writer.loadWrapperTarget(objId);
+ ObjOperandId wrapperTargetId =
+ writer.loadWrapperTarget(objId, /*fallible = */ false);
// Test the wrapped object's class. The properties held by xrays or their
// prototypes will be invariant for objects of a given class, except for
@@ -1578,9 +1581,9 @@ AttachDecision GetPropIRGenerator::tryAttachScriptedProxy(
writer.guardIsProxy(objId);
writer.guardHasProxyHandler(objId, &ScriptedProxyHandler::singleton);
- ValOperandId handlerValId = writer.loadScriptedProxyHandler(objId);
- ObjOperandId handlerObjId = writer.guardToObject(handlerValId);
- ObjOperandId targetObjId = writer.loadWrapperTarget(objId);
+ ObjOperandId handlerObjId = writer.loadScriptedProxyHandler(objId);
+ ObjOperandId targetObjId =
+ writer.loadWrapperTarget(objId, /*fallible =*/true);
writer.guardIsNativeObject(targetObjId);
diff --git a/js/src/jit/CacheIR.h b/js/src/jit/CacheIR.h
index 9bedbb7ddc..132070d535 100644
--- a/js/src/jit/CacheIR.h
+++ b/js/src/jit/CacheIR.h
@@ -321,6 +321,12 @@ class CallFlags {
CallFlags() = default;
explicit CallFlags(ArgFormat format) : argFormat_(format) {}
+ CallFlags(ArgFormat format, bool isConstructing, bool isSameRealm,
+ bool needsUninitializedThis)
+ : argFormat_(format),
+ isConstructing_(isConstructing),
+ isSameRealm_(isSameRealm),
+ needsUninitializedThis_(needsUninitializedThis) {}
CallFlags(bool isConstructing, bool isSpread, bool isSameRealm = false,
bool needsUninitializedThis = false)
: argFormat_(isSpread ? Spread : Standard),
diff --git a/js/src/jit/CacheIRCompiler.cpp b/js/src/jit/CacheIRCompiler.cpp
index 1467cebe08..9a26b0816c 100644
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -2379,19 +2379,23 @@ bool CacheIRCompiler::emitGuardDynamicSlotValue(ObjOperandId objId,
return true;
}
-bool CacheIRCompiler::emitLoadScriptedProxyHandler(ValOperandId resultId,
+bool CacheIRCompiler::emitLoadScriptedProxyHandler(ObjOperandId resultId,
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
- ValueOperand output = allocator.defineValueRegister(masm, resultId);
+ Register output = allocator.defineRegister(masm, resultId);
+
+ FailurePath* failure;
+ if (!addFailurePath(&failure)) {
+ return false;
+ }
+
+ masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), output);
+ Address handlerAddr(output, js::detail::ProxyReservedSlots::offsetOfSlot(
+ ScriptedProxyHandler::HANDLER_EXTRA));
+ masm.fallibleUnboxObject(handlerAddr, output, failure->label());
- masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
- output.scratchReg());
- masm.loadValue(
- Address(output.scratchReg(), js::detail::ProxyReservedSlots::offsetOfSlot(
- ScriptedProxyHandler::HANDLER_EXTRA)),
- output);
return true;
}
@@ -2937,14 +2941,27 @@ bool CacheIRCompiler::emitLoadEnclosingEnvironment(ObjOperandId objId,
}
bool CacheIRCompiler::emitLoadWrapperTarget(ObjOperandId objId,
- ObjOperandId resultId) {
+ ObjOperandId resultId,
+ bool fallible) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register reg = allocator.defineRegister(masm, resultId);
+ FailurePath* failure;
+ if (fallible && !addFailurePath(&failure)) {
+ return false;
+ }
+
masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), reg);
- masm.unboxObject(
- Address(reg, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()), reg);
+
+ Address targetAddr(reg,
+ js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
+ if (fallible) {
+ masm.fallibleUnboxObject(targetAddr, reg, failure->label());
+ } else {
+ masm.unboxObject(targetAddr, reg);
+ }
+
return true;
}
diff --git a/js/src/jit/CacheIROps.yaml b/js/src/jit/CacheIROps.yaml
index 974404d5c0..2f3097dfd8 100644
--- a/js/src/jit/CacheIROps.yaml
+++ b/js/src/jit/CacheIROps.yaml
@@ -708,7 +708,7 @@
transpile: true
cost_estimate: 1
args:
- result: ValId
+ result: ObjId
obj: ObjId
- name: IdToStringOrSymbol
@@ -837,6 +837,7 @@
args:
obj: ObjId
result: ObjId
+ fallible: BoolImm
- name: LoadValueTag
shared: true
diff --git a/js/src/jit/CacheIRReader.h b/js/src/jit/CacheIRReader.h
index 54b298c999..59483424a3 100644
--- a/js/src/jit/CacheIRReader.h
+++ b/js/src/jit/CacheIRReader.h
@@ -129,21 +129,15 @@ class MOZ_RAII CacheIRReader {
bool isSameRealm = encoded & CallFlags::IsSameRealm;
bool needsUninitializedThis = encoded & CallFlags::NeedsUninitializedThis;
MOZ_ASSERT_IF(needsUninitializedThis, isConstructing);
- switch (format) {
- case CallFlags::Unknown:
- MOZ_CRASH("Unexpected call flags");
- case CallFlags::Standard:
- return CallFlags(isConstructing, /*isSpread =*/false, isSameRealm,
- needsUninitializedThis);
- case CallFlags::Spread:
- return CallFlags(isConstructing, /*isSpread =*/true, isSameRealm,
- needsUninitializedThis);
- default:
- // The existing non-standard argument formats (FunCall and FunApply)
- // can't be constructors.
- MOZ_ASSERT(!isConstructing);
- return CallFlags(format);
- }
+
+ // FunCall and FunApply can't be constructors.
+ MOZ_ASSERT_IF(format == CallFlags::FunCall, !isConstructing);
+ MOZ_ASSERT_IF(format == CallFlags::FunApplyArgsObj, !isConstructing);
+ MOZ_ASSERT_IF(format == CallFlags::FunApplyArray, !isConstructing);
+ MOZ_ASSERT_IF(format == CallFlags::FunApplyNullUndefined, !isConstructing);
+
+ return CallFlags(format, isConstructing, isSameRealm,
+ needsUninitializedThis);
}
uint8_t readByte() { return buffer_.readByte(); }
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
index 10a69f0cb3..559ac50cc7 100644
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -4205,13 +4205,23 @@ void CodeGenerator::visitGuardShape(LGuardShape* guard) {
}
void CodeGenerator::visitGuardFuse(LGuardFuse* guard) {
+ auto fuseIndex = guard->mir()->fuseIndex();
+ switch (fuseIndex) {
+ case RealmFuses::FuseIndex::OptimizeGetIteratorFuse:
+ addOptimizeGetIteratorFuseDependency();
+ return;
+ default:
+ // validateAndRegisterFuseDependencies doesn't have
+ // handling for this yet, actively check fuse instead.
+ break;
+ }
+
Register temp = ToRegister(guard->temp0());
Label bail;
// Bake specific fuse address for Ion code, because we won't share this code
// across realms.
- GuardFuse* fuse =
- mirGen().realm->realmFuses().getFuseByIndex(guard->mir()->fuseIndex());
+ GuardFuse* fuse = mirGen().realm->realmFuses().getFuseByIndex(fuseIndex);
masm.loadPtr(AbsoluteAddress(fuse->fuseRef()), temp);
masm.branchPtr(Assembler::NotEqual, temp, ImmPtr(nullptr), &bail);
@@ -6269,7 +6279,8 @@ void CodeGenerator::visitCallKnown(LCallKnown* call) {
UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
WrappedFunction* target = call->getSingleTarget();
- // Native single targets (except wasm) are handled by LCallNative.
+ // Native single targets (except Wasm and TrampolineNative functions) are
+ // handled by LCallNative.
MOZ_ASSERT(target->hasJitEntry());
// Missing arguments must have been explicitly appended by WarpBuilder.
@@ -6341,12 +6352,7 @@ void CodeGenerator::visitCallKnown(LCallKnown* call) {
template <typename T>
void CodeGenerator::emitCallInvokeFunction(T* apply) {
- Register objreg = ToRegister(apply->getTempObject());
-
- // Push the space used by the arguments.
- masm.moveStackPtrTo(objreg);
-
- pushArg(objreg); // argv.
+ pushArg(masm.getStackPointer()); // argv.
pushArg(ToRegister(apply->getArgc())); // argc.
pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
@@ -6370,7 +6376,13 @@ void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
"Stack padding assumes that the frameSize is correct");
MOZ_ASSERT(JitStackValueAlignment == 2);
Label noPaddingNeeded;
- // if the number of arguments is odd, then we do not need any padding.
+ // If the number of arguments is odd, then we do not need any padding.
+ //
+ // Note: The |JitStackValueAlignment == 2| condition requires that the
+ // overall number of values on the stack is even. When we have an odd number
+ // of arguments, we don't need any padding, because the |thisValue| is
+ // pushed after the arguments, so the overall number of values on the stack
+ // is even.
masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
masm.addPtr(Imm32(1), scratch);
masm.bind(&noPaddingNeeded);
@@ -6382,13 +6394,13 @@ void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
masm.subFromStackPtr(scratch);
#ifdef DEBUG
- // Put a magic value in the space reserved for padding. Note, this code
- // cannot be merged with the previous test, as not all architectures can
- // write below their stack pointers.
+ // Put a magic value in the space reserved for padding. Note, this code cannot
+ // be merged with the previous test, as not all architectures can write below
+ // their stack pointers.
if (JitStackValueAlignment > 1) {
MOZ_ASSERT(JitStackValueAlignment == 2);
Label noPaddingNeeded;
- // if the number of arguments is odd, then we do not need any padding.
+ // If the number of arguments is odd, then we do not need any padding.
masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
@@ -6403,8 +6415,8 @@ void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
Register argcreg, Register newTargetAndScratch) {
// Align the JitFrameLayout on the JitStackAlignment. Contrary to
// |emitAllocateSpaceForApply()|, we're always pushing a magic value, because
- // we can't write to |newTargetAndScratch| before |new.target| has
- // been pushed onto the stack.
+ // we can't write to |newTargetAndScratch| before |new.target| has been pushed
+ // onto the stack.
if (JitStackValueAlignment > 1) {
MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
"Stack padding assumes that the frameSize is correct");
@@ -6412,6 +6424,12 @@ void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
Label noPaddingNeeded;
// If the number of arguments is even, then we do not need any padding.
+ //
+ // Note: The |JitStackValueAlignment == 2| condition requires that the
+ // overall number of values on the stack is even. When we have an even
+ // number of arguments, we don't need any padding, because |new.target| is
+ // is pushed before the arguments and |thisValue| is pushed after all
+ // arguments, so the overall number of values on the stack is even.
masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
masm.pushValue(MagicValue(JS_ARG_POISON));
masm.bind(&noPaddingNeeded);
@@ -6437,9 +6455,8 @@ void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
Label loop;
masm.bind(&loop);
- // As argvIndex is off by 1, and we use the decBranchPtr instruction
- // to loop back, we have to substract the size of the word which are
- // copied.
+ // As argvIndex is off by 1, and we use the decBranchPtr instruction to loop
+ // back, we have to substract the size of the word which are copied.
BaseValueIndex srcPtr(argvSrcBase, argvIndex,
int32_t(argvSrcOffset) - sizeof(void*));
BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
@@ -6488,6 +6505,9 @@ void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
// clang-format on
// Compute the source and destination offsets into the stack.
+ //
+ // The |extraFormals| parameter is used when copying rest-parameters and
+ // allows to skip the initial parameters before the actual rest-parameters.
Register argvSrcBase = FramePointer;
size_t argvSrcOffset =
JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
@@ -6500,17 +6520,18 @@ void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
argvDstOffset);
- // Join with all arguments copied and the extra stack usage computed.
+ // Join with all arguments copied.
masm.bind(&end);
}
-void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
- Register scratch) {
- // Holds the function nargs. Initially the number of args to the caller.
+void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply) {
+ // Holds the function nargs.
Register argcreg = ToRegister(apply->getArgc());
Register copyreg = ToRegister(apply->getTempObject());
+ Register scratch = ToRegister(apply->getTempForArgCopy());
uint32_t extraFormals = apply->numExtraFormals();
+ // Allocate space on the stack for arguments.
emitAllocateSpaceForApply(argcreg, scratch);
emitPushArguments(argcreg, scratch, copyreg, extraFormals);
@@ -6519,22 +6540,21 @@ void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
}
-void CodeGenerator::emitPushArguments(LApplyArgsObj* apply, Register scratch) {
- // argc and argsObj are mapped to the same calltemp register.
- MOZ_ASSERT(apply->getArgsObj() == apply->getArgc());
-
- Register tmpArgc = ToRegister(apply->getTempObject());
+void CodeGenerator::emitPushArguments(LApplyArgsObj* apply) {
Register argsObj = ToRegister(apply->getArgsObj());
+ Register tmpArgc = ToRegister(apply->getTempObject());
+ Register scratch = ToRegister(apply->getTempForArgCopy());
+
+ // argc and argsObj are mapped to the same calltemp register.
+ MOZ_ASSERT(argsObj == ToRegister(apply->getArgc()));
// Load argc into tmpArgc.
- Address lengthAddr(argsObj, ArgumentsObject::getInitialLengthSlotOffset());
- masm.unboxInt32(lengthAddr, tmpArgc);
- masm.rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), tmpArgc);
+ masm.loadArgumentsObjectLength(argsObj, tmpArgc);
- // Allocate space on the stack for arguments. This modifies scratch.
+ // Allocate space on the stack for arguments.
emitAllocateSpaceForApply(tmpArgc, scratch);
- // Load arguments data
+ // Load arguments data.
masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
argsObj);
size_t argsSrcOffset = ArgumentsData::offsetOfArgs();
@@ -6543,6 +6563,7 @@ void CodeGenerator::emitPushArguments(LApplyArgsObj* apply, Register scratch) {
// After this call, the argsObj register holds the argument count instead.
emitPushArrayAsArguments(tmpArgc, argsObj, scratch, argsSrcOffset);
+ // Push |this|.
masm.pushValue(ToValue(apply, LApplyArgsObj::ThisIndex));
}
@@ -6566,69 +6587,72 @@ void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
// Skip the copy of arguments if there are none.
masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
+ {
+ // Copy the values. This code is skipped entirely if there are no values.
+ size_t argvDstOffset = 0;
- // Copy the values. This code is skipped entirely if there are
- // no values.
- size_t argvDstOffset = 0;
-
- Register argvSrcBase = srcBaseAndArgc;
- Register copyreg = scratch;
-
- masm.push(tmpArgc);
- Register argvIndex = tmpArgc;
- argvDstOffset += sizeof(void*);
+ Register argvSrcBase = srcBaseAndArgc;
- // Copy
- emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
- argvDstOffset);
+ // Stash away |tmpArgc| and adjust argvDstOffset accordingly.
+ masm.push(tmpArgc);
+ Register argvIndex = tmpArgc;
+ argvDstOffset += sizeof(void*);
- // Restore.
- masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
- masm.jump(&epilogue);
+ // Copy
+ emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
+ argvDstOffset);
- // Clear argc if we skipped the copy step.
+ // Restore.
+ masm.pop(srcBaseAndArgc); // srcBaseAndArgc now contains argc.
+ masm.jump(&epilogue);
+ }
masm.bind(&noCopy);
- masm.movePtr(ImmWord(0), srcBaseAndArgc);
+ {
+ // Clear argc if we skipped the copy step.
+ masm.movePtr(ImmWord(0), srcBaseAndArgc);
+ }
- // Join with all arguments copied and the extra stack usage computed.
+ // Join with all arguments copied.
// Note, "srcBase" has become "argc".
masm.bind(&epilogue);
}
-void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply,
- Register scratch) {
+void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply) {
+ Register elements = ToRegister(apply->getElements());
Register tmpArgc = ToRegister(apply->getTempObject());
- Register elementsAndArgc = ToRegister(apply->getElements());
+ Register scratch = ToRegister(apply->getTempForArgCopy());
+
+ // argc and elements are mapped to the same calltemp register.
+ MOZ_ASSERT(elements == ToRegister(apply->getArgc()));
// Invariants guarded in the caller:
// - the array is not too long
// - the array length equals its initialized length
// The array length is our argc for the purposes of allocating space.
- Address length(ToRegister(apply->getElements()),
- ObjectElements::offsetOfLength());
- masm.load32(length, tmpArgc);
+ masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);
// Allocate space for the values.
emitAllocateSpaceForApply(tmpArgc, scratch);
// After this call "elements" has become "argc".
size_t elementsOffset = 0;
- emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
+ emitPushArrayAsArguments(tmpArgc, elements, scratch, elementsOffset);
// Push |this|.
masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
}
-void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct,
- Register scratch) {
- MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
-
- // Holds the function nargs. Initially the number of args to the caller.
+void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct) {
+ // Holds the function nargs.
Register argcreg = ToRegister(construct->getArgc());
Register copyreg = ToRegister(construct->getTempObject());
+ Register scratch = ToRegister(construct->getTempForArgCopy());
uint32_t extraFormals = construct->numExtraFormals();
+ // newTarget and scratch are mapped to the same calltemp register.
+ MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
+
// Allocate space for the values.
// After this call "newTarget" has become "scratch".
emitAllocateSpaceForConstructAndPushNewTarget(argcreg, scratch);
@@ -6639,29 +6663,31 @@ void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct,
masm.pushValue(ToValue(construct, LConstructArgsGeneric::ThisIndex));
}
-void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct,
- Register scratch) {
- MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
-
+void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct) {
+ Register elements = ToRegister(construct->getElements());
Register tmpArgc = ToRegister(construct->getTempObject());
- Register elementsAndArgc = ToRegister(construct->getElements());
+ Register scratch = ToRegister(construct->getTempForArgCopy());
+
+ // argc and elements are mapped to the same calltemp register.
+ MOZ_ASSERT(elements == ToRegister(construct->getArgc()));
+
+ // newTarget and scratch are mapped to the same calltemp register.
+ MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));
// Invariants guarded in the caller:
// - the array is not too long
// - the array length equals its initialized length
// The array length is our argc for the purposes of allocating space.
- Address length(ToRegister(construct->getElements()),
- ObjectElements::offsetOfLength());
- masm.load32(length, tmpArgc);
+ masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);
// Allocate space for the values.
+ // After this call "newTarget" has become "scratch".
emitAllocateSpaceForConstructAndPushNewTarget(tmpArgc, scratch);
- // After this call "elements" has become "argc" and "newTarget" has become
- // "scratch".
+ // After this call "elements" has become "argc".
size_t elementsOffset = 0;
- emitPushArrayAsArguments(tmpArgc, elementsAndArgc, scratch, elementsOffset);
+ emitPushArrayAsArguments(tmpArgc, elements, scratch, elementsOffset);
// Push |this|.
masm.pushValue(ToValue(construct, LConstructArrayGeneric::ThisIndex));
@@ -6682,43 +6708,24 @@ void CodeGenerator::emitApplyGeneric(T* apply) {
// Copy the arguments of the current function.
//
- // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also
- // compute argc. The argc register and the elements/argsObj register
- // are the same; argc must not be referenced before the call to
- // emitPushArguments() and elements/argsObj must not be referenced
- // after it returns.
+ // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also compute
+ // argc. The argc register and the elements/argsObj register are the same;
+ // argc must not be referenced before the call to emitPushArguments() and
+ // elements/argsObj must not be referenced after it returns.
//
- // In the case of ConstructArray or ConstructArgs, also overwrite newTarget
- // with scratch; newTarget must not be referenced after this point.
+ // In the case of ConstructArray or ConstructArgs, also overwrite newTarget;
+ // newTarget must not be referenced after this point.
//
// objreg is dead across this call.
- emitPushArguments(apply, scratch);
+ emitPushArguments(apply);
masm.checkStackAlignment();
bool constructing = apply->mir()->isConstructing();
- // If the function is native, only emit the call to InvokeFunction.
- if (apply->hasSingleTarget() &&
- apply->getSingleTarget()->isNativeWithoutJitEntry()) {
- emitCallInvokeFunction(apply);
-
-#ifdef DEBUG
- // Native constructors are guaranteed to return an Object value, so we never
- // have to replace a primitive result with the previously allocated Object
- // from CreateThis.
- if (constructing) {
- Label notPrimitive;
- masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
- &notPrimitive);
- masm.assumeUnreachable("native constructors don't return primitives");
- masm.bind(&notPrimitive);
- }
-#endif
-
- emitRestoreStackPointerFromFP();
- return;
- }
+ // If the function is native, the call is compiled through emitApplyNative.
+ MOZ_ASSERT_IF(apply->hasSingleTarget(),
+ !apply->getSingleTarget()->isNativeWithoutJitEntry());
Label end, invoke;
@@ -6812,8 +6819,8 @@ void CodeGenerator::emitApplyGeneric(T* apply) {
masm.bind(&end);
- // If the return value of the constructing function is Primitive,
- // replace the return value with the Object from CreateThis.
+ // If the return value of the constructing function is Primitive, replace the
+ // return value with the Object from CreateThis.
if (constructing) {
Label notPrimitive;
masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
@@ -6833,17 +6840,200 @@ void CodeGenerator::emitApplyGeneric(T* apply) {
emitRestoreStackPointerFromFP();
}
-void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
// Call into the VM (jit::InvokeNativeFunction) to invoke a native target of
// an apply/construct-style call. Expects that the callee, |this|, and the
// arguments have already been pushed onto the stack (see emitApplyNative).
//
// The VM arguments are pushed in reverse order of the Fn signature below;
// |argv| is the current stack pointer, which points at the pushed callee.
template <typename T>
void CodeGenerator::emitCallInvokeNativeFunction(T* apply) {
  pushArg(masm.getStackPointer());                      // argv.
  pushArg(ToRegister(apply->getArgc()));                // argc.
  pushArg(Imm32(apply->mir()->ignoresReturnValue()));   // ignoresReturnValue.
  pushArg(Imm32(apply->mir()->isConstructing()));       // isConstructing.

  using Fn =
      bool (*)(JSContext*, bool, bool, uint32_t, Value*, MutableHandleValue);
  callVM<Fn, jit::InvokeNativeFunction>(apply);
}
+
// Push the arguments for an apply/construct call of a native function when
// the arguments come from this frame's actual arguments. Copies |argc|
// values, starting at actual argument |extraFormals|, from the caller's
// frame onto the stack.
template <typename T>
void CodeGenerator::emitPushNativeArguments(T* apply) {
  Register argc = ToRegister(apply->getArgc());
  Register tmpArgc = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());
  uint32_t extraFormals = apply->numExtraFormals();

  // Push arguments. Skip the copy entirely when there are none.
  Label noCopy;
  masm.branchTestPtr(Assembler::Zero, argc, argc, &noCopy);
  {
    // Use scratch register to calculate stack space (no padding needed).
    masm.movePtr(argc, scratch);

    // Reserve space for copying the arguments.
    NativeObject::elementsSizeMustNotOverflow();
    masm.lshiftPtr(Imm32(ValueShift), scratch);
    masm.subFromStackPtr(scratch);

    // Compute the source and destination offsets into the stack.
    Register argvSrcBase = FramePointer;
    size_t argvSrcOffset =
        JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
    size_t argvDstOffset = 0;

    // |argc| must stay live for the VM call that follows, so use |tmpArgc|
    // as the (mutable) loop counter for the copy.
    Register argvIndex = tmpArgc;
    masm.move32(argc, argvIndex);

    // Copy arguments.
    emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
                           argvDstOffset);
  }
  masm.bind(&noCopy);
}
+
// Push the arguments for an apply/construct call of a native function when
// the arguments come from a packed array: the array's elements are copied
// onto the stack and its length becomes argc.
template <typename T>
void CodeGenerator::emitPushArrayAsNativeArguments(T* apply) {
  Register argc = ToRegister(apply->getArgc());
  Register elements = ToRegister(apply->getElements());
  Register tmpArgc = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());

  // NB: argc and elements are mapped to the same register.
  MOZ_ASSERT(argc == elements);

  // Invariants guarded in the caller:
  // - the array is not too long
  // - the array length equals its initialized length

  // The array length is our argc.
  masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);

  // Skip the copy of arguments if there are none.
  Label noCopy;
  masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
  {
    // Push the elements from last to first, so the first element ends up at
    // the lowest stack address.
    //
    // |tmpArgc| is off-by-one as an element index, so adjust the offset
    // accordingly.
    BaseObjectElementIndex srcPtr(elements, tmpArgc,
                                  -int32_t(sizeof(JS::Value)));

    Label loop;
    masm.bind(&loop);
    masm.pushValue(srcPtr, scratch);
    masm.decBranchPtr(Assembler::NonZero, tmpArgc, Imm32(1), &loop);
  }
  masm.bind(&noCopy);

  // Set argc in preparation for emitCallInvokeNativeFunction. (|tmpArgc| was
  // consumed as the loop counter, so reload the length; |elements| is still
  // valid here and aliases |argc|.)
  masm.load32(Address(elements, ObjectElements::offsetOfLength()), argc);
}
+
// LApplyArgsNative: the arguments come from this frame's actual arguments.
void CodeGenerator::emitPushArguments(LApplyArgsNative* apply) {
  emitPushNativeArguments(apply);
}
+
// LApplyArrayNative: the arguments come from a packed array.
void CodeGenerator::emitPushArguments(LApplyArrayNative* apply) {
  emitPushArrayAsNativeArguments(apply);
}
+
// LConstructArgsNative: constructing variant; the arguments come from this
// frame's actual arguments.
void CodeGenerator::emitPushArguments(LConstructArgsNative* construct) {
  emitPushNativeArguments(construct);
}
+
// LConstructArrayNative: constructing variant; the arguments come from a
// packed array.
void CodeGenerator::emitPushArguments(LConstructArrayNative* construct) {
  emitPushArrayAsNativeArguments(construct);
}
+
// Push the arguments for an apply call of a native function when the
// arguments come from an arguments object.
void CodeGenerator::emitPushArguments(LApplyArgsObjNative* apply) {
  Register argc = ToRegister(apply->getArgc());
  Register argsObj = ToRegister(apply->getArgsObj());
  Register tmpArgc = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());

  // NB: argc and argsObj are mapped to the same register. |argsObj| is
  // clobbered below (both as argvSrcBase and by the final pop into argc), so
  // it must not be used after the copy.
  MOZ_ASSERT(argc == argsObj);

  // Load argc into tmpArgc.
  masm.loadArgumentsObjectLength(argsObj, tmpArgc);

  // Push arguments.
  Label noCopy, epilogue;
  masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
  {
    // Use scratch register to calculate stack space (no padding needed).
    masm.movePtr(tmpArgc, scratch);

    // Reserve space for copying the arguments.
    NativeObject::elementsSizeMustNotOverflow();
    masm.lshiftPtr(Imm32(ValueShift), scratch);
    masm.subFromStackPtr(scratch);

    // Load arguments data.
    Register argvSrcBase = argsObj;
    masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                     argvSrcBase);
    size_t argvSrcOffset = ArgumentsData::offsetOfArgs();
    size_t argvDstOffset = 0;

    // Stash away |tmpArgc| and adjust argvDstOffset accordingly: the push
    // moved the stack pointer one word down, so destination offsets relative
    // to it must grow by one word to still address the reserved space.
    masm.push(tmpArgc);
    argvDstOffset += sizeof(void*);

    // Copy the values.
    emitCopyValuesForApply(argvSrcBase, tmpArgc, scratch, argvSrcOffset,
                           argvDstOffset);

    // Set argc in preparation for emitCallInvokeNativeFunction. The pop also
    // restores the stack pointer to the base of the copied arguments.
    masm.pop(argc);
    masm.jump(&epilogue);
  }
  masm.bind(&noCopy);
  {
    // Set argc in preparation for emitCallInvokeNativeFunction.
    masm.movePtr(ImmWord(0), argc);
  }
  masm.bind(&epilogue);
}
+
// Emit an apply/construct-style call of a known native function by building
// the value frame on the stack and calling into the VM.
//
// Push order is newTarget (if constructing), arguments, |this|, callee, so
// that from the stack pointer upwards the layout is: callee, |this|,
// arguments, [newTarget] — the usual |vp| layout for a native call.
template <typename T>
void CodeGenerator::emitApplyNative(T* apply) {
  MOZ_ASSERT(apply->mir()->getSingleTarget()->isNativeWithoutJitEntry());

  constexpr bool isConstructing = T::isConstructing();
  MOZ_ASSERT(isConstructing == apply->mir()->isConstructing(),
             "isConstructing condition must be consistent");

  // Push newTarget.
  if constexpr (isConstructing) {
    masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
  }

  // Push arguments.
  emitPushArguments(apply);

  // Push |this|. When constructing, |this| is the is-constructing magic
  // value; the native is responsible for creating the result object.
  if constexpr (isConstructing) {
    masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
  } else {
    masm.pushValue(ToValue(apply, T::ThisIndex));
  }

  // Push callee.
  masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getFunction()));

  // Call the native function.
  emitCallInvokeNativeFunction(apply);

  // Pop arguments and continue.
  emitRestoreStackPointerFromFP();
}
+
+template <typename T>
+void CodeGenerator::emitApplyArgsGuard(T* apply) {
LSnapshot* snapshot = apply->snapshot();
Register argcreg = ToRegister(apply->getArgc());
// Ensure that we have a reasonable number of arguments.
bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
-
- emitApplyGeneric(apply);
}
-void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
+template <typename T>
+void CodeGenerator::emitApplyArgsObjGuard(T* apply) {
Register argsObj = ToRegister(apply->getArgsObj());
Register temp = ToRegister(apply->getTempObject());
@@ -6851,16 +7041,15 @@ void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
masm.loadArgumentsObjectLength(argsObj, temp, &bail);
masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
bailoutFrom(&bail, apply->snapshot());
-
- emitApplyGeneric(apply);
}
-void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
+template <typename T>
+void CodeGenerator::emitApplyArrayGuard(T* apply) {
LSnapshot* snapshot = apply->snapshot();
+ Register elements = ToRegister(apply->getElements());
Register tmp = ToRegister(apply->getTempObject());
- Address length(ToRegister(apply->getElements()),
- ObjectElements::offsetOfLength());
+ Address length(elements, ObjectElements::offsetOfLength());
masm.load32(length, tmp);
// Ensure that we have a reasonable number of arguments.
@@ -6868,43 +7057,60 @@ void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
// Ensure that the array does not contain an uninitialized tail.
- Address initializedLength(ToRegister(apply->getElements()),
+ Address initializedLength(elements,
ObjectElements::offsetOfInitializedLength());
masm.sub32(initializedLength, tmp);
bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
+}
// Apply with arguments taken from this frame's actuals, generic (non-native)
// target: bail out on an unreasonable argc, then emit the generic call path.
void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
  emitApplyArgsGuard(apply);
  emitApplyGeneric(apply);
}
-void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
- LSnapshot* snapshot = lir->snapshot();
- Register argcreg = ToRegister(lir->getArgc());
// Apply with arguments taken from an arguments object, generic target: bail
// out on an unreasonable length, then emit the generic call path.
void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
  emitApplyArgsObjGuard(apply);
  emitApplyGeneric(apply);
}
- // Ensure that we have a reasonable number of arguments.
- bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
// Apply with arguments spread from an array, generic target: bail out if the
// array is too long or has an uninitialized tail, then emit the generic call
// path.
void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
  emitApplyArrayGuard(apply);
  emitApplyGeneric(apply);
}
// Construct with arguments taken from this frame's actuals, generic target:
// bail out on an unreasonable argc, then emit the generic call path.
void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
  emitApplyArgsGuard(lir);
  emitApplyGeneric(lir);
}
void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
- LSnapshot* snapshot = lir->snapshot();
- Register tmp = ToRegister(lir->getTempObject());
+ emitApplyArrayGuard(lir);
+ emitApplyGeneric(lir);
+}
- Address length(ToRegister(lir->getElements()),
- ObjectElements::offsetOfLength());
- masm.load32(length, tmp);
// Apply with arguments taken from this frame's actuals when the single
// target is a native function: guard argc, then call through the native
// (VM-invoking) path.
void CodeGenerator::visitApplyArgsNative(LApplyArgsNative* lir) {
  emitApplyArgsGuard(lir);
  emitApplyNative(lir);
}
- // Ensure that we have a reasonable number of arguments.
- bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
// Apply with arguments taken from an arguments object when the single target
// is a native function: guard the length, then call through the native path.
void CodeGenerator::visitApplyArgsObjNative(LApplyArgsObjNative* lir) {
  emitApplyArgsObjGuard(lir);
  emitApplyNative(lir);
}
- // Ensure that the array does not contain an uninitialized tail.
// Apply with arguments spread from an array when the single target is a
// native function: guard the array, then call through the native path.
void CodeGenerator::visitApplyArrayNative(LApplyArrayNative* lir) {
  emitApplyArrayGuard(lir);
  emitApplyNative(lir);
}
- Address initializedLength(ToRegister(lir->getElements()),
- ObjectElements::offsetOfInitializedLength());
- masm.sub32(initializedLength, tmp);
- bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
// Construct with arguments taken from this frame's actuals when the single
// target is a native function: guard argc, then call through the native path.
void CodeGenerator::visitConstructArgsNative(LConstructArgsNative* lir) {
  emitApplyArgsGuard(lir);
  emitApplyNative(lir);
}
- emitApplyGeneric(lir);
// Construct with arguments spread from an array when the single target is a
// native function: guard the array, then call through the native path.
void CodeGenerator::visitConstructArrayNative(LConstructArrayNative* lir) {
  emitApplyArrayGuard(lir);
  emitApplyNative(lir);
}
void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }
@@ -15460,15 +15666,37 @@ void CodeGenerator::validateAndRegisterFuseDependencies(JSContext* cx,
if (!hasSeenObjectEmulateUndefinedFuse.intact()) {
JitSpew(JitSpew_Codegen,
- "tossing compilation; fuse dependency no longer valid\n");
+ "tossing compilation; hasSeenObjectEmulateUndefinedFuse fuse "
+ "dependency no longer valid\n");
*isValid = false;
return;
}
if (!hasSeenObjectEmulateUndefinedFuse.addFuseDependency(cx, script)) {
- JitSpew(
- JitSpew_Codegen,
- "tossing compilation; failed to register script dependency\n");
+ JitSpew(JitSpew_Codegen,
+ "tossing compilation; failed to register "
+ "hasSeenObjectEmulateUndefinedFuse script dependency\n");
+ *isValid = false;
+ return;
+ }
+ break;
+ }
+
+ case FuseDependencyKind::OptimizeGetIteratorFuse: {
+ auto& optimizeGetIteratorFuse =
+ cx->realm()->realmFuses.optimizeGetIteratorFuse;
+ if (!optimizeGetIteratorFuse.intact()) {
+ JitSpew(JitSpew_Codegen,
+ "tossing compilation; optimizeGetIteratorFuse fuse "
+ "dependency no longer valid\n");
+ *isValid = false;
+ return;
+ }
+
+ if (!optimizeGetIteratorFuse.addFuseDependency(cx, script)) {
+ JitSpew(JitSpew_Codegen,
+ "tossing compilation; failed to register "
+ "optimizeGetIteratorFuse script dependency\n");
*isValid = false;
return;
}
@@ -15837,15 +16065,16 @@ void CodeGenerator::visitMegamorphicSetElement(LMegamorphicSetElement* lir) {
void CodeGenerator::visitLoadScriptedProxyHandler(
LLoadScriptedProxyHandler* ins) {
- const Register obj = ToRegister(ins->getOperand(0));
- ValueOperand output = ToOutValue(ins);
+ Register obj = ToRegister(ins->getOperand(0));
+ Register output = ToRegister(ins->output());
- masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
- output.scratchReg());
- masm.loadValue(
- Address(output.scratchReg(), js::detail::ProxyReservedSlots::offsetOfSlot(
- ScriptedProxyHandler::HANDLER_EXTRA)),
- output);
+ masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), output);
+
+ Label bail;
+ Address handlerAddr(output, js::detail::ProxyReservedSlots::offsetOfSlot(
+ ScriptedProxyHandler::HANDLER_EXTRA));
+ masm.fallibleUnboxObject(handlerAddr, output, &bail);
+ bailoutFrom(&bail, ins->snapshot());
}
#ifdef JS_PUNBOX64
@@ -19861,9 +20090,17 @@ void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
Register output = ToRegister(lir->output());
masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);
- masm.unboxObject(
- Address(output, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
- output);
+
+ // Bail for revoked proxies.
+ Label bail;
+ Address targetAddr(output,
+ js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
+ if (lir->mir()->fallible()) {
+ masm.fallibleUnboxObject(targetAddr, output, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+ } else {
+ masm.unboxObject(targetAddr, output);
+ }
}
void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
@@ -20642,9 +20879,20 @@ void CodeGenerator::visitWasmAnyRefFromJSString(LWasmAnyRefFromJSString* lir) {
}
void CodeGenerator::visitWasmNewI31Ref(LWasmNewI31Ref* lir) {
- Register value = ToRegister(lir->value());
- Register output = ToRegister(lir->output());
- masm.truncate32ToWasmI31Ref(value, output);
+ if (lir->value()->isConstant()) {
+ // i31ref are often created with constants. If that's the case we will
+ // do the operation statically here. This is similar to what is done
+ // in masm.truncate32ToWasmI31Ref.
+ Register output = ToRegister(lir->output());
+ uint32_t value =
+ static_cast<uint32_t>(lir->value()->toConstant()->toInt32());
+ uintptr_t ptr = wasm::AnyRef::fromUint32Truncate(value).rawValue();
+ masm.movePtr(ImmWord(ptr), output);
+ } else {
+ Register value = ToRegister(lir->value());
+ Register output = ToRegister(lir->output());
+ masm.truncate32ToWasmI31Ref(value, output);
+ }
}
void CodeGenerator::visitWasmI31RefGet(LWasmI31RefGet* lir) {
diff --git a/js/src/jit/CodeGenerator.h b/js/src/jit/CodeGenerator.h
index 274c876e4d..282771a79e 100644
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -239,11 +239,34 @@ class CodeGenerator final : public CodeGeneratorSpecific {
uint32_t extraFormals);
void emitPushArrayAsArguments(Register tmpArgc, Register srcBaseAndArgc,
Register scratch, size_t argvSrcOffset);
- void emitPushArguments(LApplyArgsGeneric* apply, Register scratch);
- void emitPushArguments(LApplyArgsObj* apply, Register scratch);
- void emitPushArguments(LApplyArrayGeneric* apply, Register scratch);
- void emitPushArguments(LConstructArgsGeneric* construct, Register scratch);
- void emitPushArguments(LConstructArrayGeneric* construct, Register scratch);
+ void emitPushArguments(LApplyArgsGeneric* apply);
+ void emitPushArguments(LApplyArgsObj* apply);
+ void emitPushArguments(LApplyArrayGeneric* apply);
+ void emitPushArguments(LConstructArgsGeneric* construct);
+ void emitPushArguments(LConstructArrayGeneric* construct);
+
+ template <typename T>
+ void emitApplyNative(T* apply);
+ template <typename T>
+ void emitCallInvokeNativeFunction(T* apply);
+ template <typename T>
+ void emitPushNativeArguments(T* apply);
+ template <typename T>
+ void emitPushArrayAsNativeArguments(T* apply);
+ void emitPushArguments(LApplyArgsNative* apply);
+ void emitPushArguments(LApplyArgsObjNative* apply);
+ void emitPushArguments(LApplyArrayNative* apply);
+ void emitPushArguments(LConstructArgsNative* construct);
+ void emitPushArguments(LConstructArrayNative* construct);
+
+ template <typename T>
+ void emitApplyArgsGuard(T* apply);
+
+ template <typename T>
+ void emitApplyArgsObjGuard(T* apply);
+
+ template <typename T>
+ void emitApplyArrayGuard(T* apply);
template <class GetInlinedArgument>
void emitGetInlinedArgument(GetInlinedArgument* lir, Register index,
@@ -439,6 +462,7 @@ class CodeGenerator final : public CodeGeneratorSpecific {
// be mapped to an actual fuse by validateAndRegisterFuseDependencies.
enum class FuseDependencyKind {
HasSeenObjectEmulateUndefinedFuse,
+ OptimizeGetIteratorFuse,
};
// The set of fuses this code generation depends on.
@@ -449,6 +473,10 @@ class CodeGenerator final : public CodeGeneratorSpecific {
fuseDependencies += FuseDependencyKind::HasSeenObjectEmulateUndefinedFuse;
}
+ void addOptimizeGetIteratorFuseDependency() {
+ fuseDependencies += FuseDependencyKind::OptimizeGetIteratorFuse;
+ }
+
// Called during linking on main-thread: Ensures that the fuses are still
// intact, and registers a script dependency on a specific fuse before
// finishing compilation.
diff --git a/js/src/jit/Ion.cpp b/js/src/jit/Ion.cpp
index 85008006e1..e209ace846 100644
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -253,6 +253,10 @@ bool JitRuntime::generateTrampolines(JSContext* cx) {
generateIonGenericCallStub(masm, IonGenericCallKind::Construct);
rangeRecorder.recordOffset("Trampoline: IonGenericConstruct");
+ JitSpew(JitSpew_Codegen, "# Emitting trampoline natives");
+ TrampolineNativeJitEntryOffsets nativeOffsets;
+ generateTrampolineNatives(masm, nativeOffsets, rangeRecorder);
+
Linker linker(masm);
trampolineCode_ = linker.newCode(cx, CodeKind::Other);
if (!trampolineCode_) {
@@ -264,6 +268,14 @@ bool JitRuntime::generateTrampolines(JSContext* cx) {
vtune::MarkStub(trampolineCode_, "Trampolines");
#endif
+ // Initialize TrampolineNative JitEntry array.
+ for (size_t i = 0; i < size_t(TrampolineNative::Count); i++) {
+ TrampolineNative native = TrampolineNative(i);
+ uint32_t offset = nativeOffsets[native];
+ MOZ_ASSERT(offset > 0 && offset < trampolineCode_->instructionsSize());
+ trampolineNativeJitEntries_[native] = trampolineCode_->raw() + offset;
+ }
+
return true;
}
@@ -2346,6 +2358,10 @@ static void InvalidateActivation(JS::GCContext* gcx,
JitSpew(JitSpew_IonInvalidate, "#%zu rectifier frame @ %p", frameno,
frame.fp());
break;
+ case FrameType::TrampolineNative:
+ JitSpew(JitSpew_IonInvalidate, "#%zu TrampolineNative frame @ %p",
+ frameno, frame.fp());
+ break;
case FrameType::IonICCall:
JitSpew(JitSpew_IonInvalidate, "#%zu ion IC call frame @ %p", frameno,
frame.fp());
diff --git a/js/src/jit/JSJitFrameIter.cpp b/js/src/jit/JSJitFrameIter.cpp
index 89d3de3128..fbfef8f210 100644
--- a/js/src/jit/JSJitFrameIter.cpp
+++ b/js/src/jit/JSJitFrameIter.cpp
@@ -78,7 +78,7 @@ CalleeToken JSJitFrameIter::calleeToken() const {
}
JSFunction* JSJitFrameIter::callee() const {
- MOZ_ASSERT(isScripted());
+ MOZ_ASSERT(isScripted() || isTrampolineNative());
MOZ_ASSERT(isFunctionFrame());
return CalleeTokenToFunction(calleeToken());
}
@@ -110,7 +110,7 @@ bool JSJitFrameIter::isFunctionFrame() const {
JSScript* JSJitFrameIter::script() const {
MOZ_ASSERT(isScripted());
- JSScript* script = ScriptFromCalleeToken(calleeToken());
+ JSScript* script = MaybeForwardedScriptFromCalleeToken(calleeToken());
MOZ_ASSERT(script);
return script;
}
@@ -383,6 +383,10 @@ void JSJitFrameIter::dump() const {
fprintf(stderr, " Rectifier frame\n");
fprintf(stderr, " Caller frame ptr: %p\n", current()->callerFramePtr());
break;
+ case FrameType::TrampolineNative:
+ fprintf(stderr, " TrampolineNative frame\n");
+ fprintf(stderr, " Caller frame ptr: %p\n", current()->callerFramePtr());
+ break;
case FrameType::IonICCall:
fprintf(stderr, " Ion IC call\n");
fprintf(stderr, " Caller frame ptr: %p\n", current()->callerFramePtr());
@@ -707,47 +711,47 @@ void JSJitProfilingFrameIterator::moveToNextFrame(CommonFrameLayout* frame) {
* |
* ^--- WasmToJSJit <---- (other wasm frames, not handled by this iterator)
* |
- * ^--- Arguments Rectifier
- * | ^
- * | |
- * | ^--- Ion
- * | |
- * | ^--- Baseline Stub <---- Baseline
- * | |
- * | ^--- WasmToJSJit <--- (other wasm frames)
- * | |
- * | ^--- Entry Frame (CppToJSJit)
+ * ^--- Entry Frame (BaselineInterpreter) (unwrapped)
* |
- * ^--- Entry Frame (CppToJSJit)
+ * ^--- Arguments Rectifier (unwrapped)
+ * |
+ * ^--- Trampoline Native (unwrapped)
* |
- * ^--- Entry Frame (BaselineInterpreter)
- * | ^
- * | |
- * | ^--- Ion
- * | |
- * | ^--- Baseline Stub <---- Baseline
- * | |
- * | ^--- WasmToJSJit <--- (other wasm frames)
- * | |
- * | ^--- Entry Frame (CppToJSJit)
- * | |
- * | ^--- Arguments Rectifier
+ * ^--- Entry Frame (CppToJSJit)
*
* NOTE: Keep this in sync with JitRuntime::generateProfilerExitFrameTailStub!
*/
- // Unwrap baseline interpreter entry frame.
- if (frame->prevType() == FrameType::BaselineInterpreterEntry) {
- frame = GetPreviousRawFrame<BaselineInterpreterEntryFrameLayout*>(frame);
- }
+ while (true) {
+ // Unwrap baseline interpreter entry frame.
+ if (frame->prevType() == FrameType::BaselineInterpreterEntry) {
+ frame = GetPreviousRawFrame<BaselineInterpreterEntryFrameLayout*>(frame);
+ continue;
+ }
+
+ // Unwrap rectifier frames.
+ if (frame->prevType() == FrameType::Rectifier) {
+ frame = GetPreviousRawFrame<RectifierFrameLayout*>(frame);
+ MOZ_ASSERT(frame->prevType() == FrameType::IonJS ||
+ frame->prevType() == FrameType::BaselineStub ||
+ frame->prevType() == FrameType::TrampolineNative ||
+ frame->prevType() == FrameType::WasmToJSJit ||
+ frame->prevType() == FrameType::CppToJSJit);
+ continue;
+ }
- // Unwrap rectifier frames.
- if (frame->prevType() == FrameType::Rectifier) {
- frame = GetPreviousRawFrame<RectifierFrameLayout*>(frame);
- MOZ_ASSERT(frame->prevType() == FrameType::IonJS ||
- frame->prevType() == FrameType::BaselineStub ||
- frame->prevType() == FrameType::WasmToJSJit ||
- frame->prevType() == FrameType::CppToJSJit);
+ // Unwrap TrampolineNative frames.
+ if (frame->prevType() == FrameType::TrampolineNative) {
+ frame = GetPreviousRawFrame<TrampolineNativeFrameLayout*>(frame);
+ MOZ_ASSERT(frame->prevType() == FrameType::IonJS ||
+ frame->prevType() == FrameType::BaselineStub ||
+ frame->prevType() == FrameType::Rectifier ||
+ frame->prevType() == FrameType::WasmToJSJit ||
+ frame->prevType() == FrameType::CppToJSJit);
+ continue;
+ }
+
+ break;
}
FrameType prevType = frame->prevType();
@@ -773,24 +777,31 @@ void JSJitProfilingFrameIterator::moveToNextFrame(CommonFrameLayout* frame) {
}
case FrameType::WasmToJSJit:
- // No previous js jit frame, this is a transition frame, used to
- // pass a wasm iterator the correct value of FP.
+ // No previous JS JIT frame. Set fp_ to nullptr to indicate the
+ // JSJitProfilingFrameIterator is done(). Also set wasmCallerFP_ so that
+ // the caller can pass it to a Wasm frame iterator.
resumePCinCurrentFrame_ = nullptr;
- fp_ = GetPreviousRawFrame<uint8_t*>(frame);
+ fp_ = nullptr;
type_ = FrameType::WasmToJSJit;
- MOZ_ASSERT(!done());
+ MOZ_ASSERT(!wasmCallerFP_);
+ wasmCallerFP_ = GetPreviousRawFrame<uint8_t*>(frame);
+ MOZ_ASSERT(wasmCallerFP_);
+ MOZ_ASSERT(done());
return;
case FrameType::CppToJSJit:
- // No previous frame, set to nullptr to indicate that
+ // No previous JS JIT frame. Set fp_ to nullptr to indicate the
// JSJitProfilingFrameIterator is done().
resumePCinCurrentFrame_ = nullptr;
fp_ = nullptr;
type_ = FrameType::CppToJSJit;
+ MOZ_ASSERT(!wasmCallerFP_);
+ MOZ_ASSERT(done());
return;
case FrameType::BaselineInterpreterEntry:
case FrameType::Rectifier:
+ case FrameType::TrampolineNative:
case FrameType::Exit:
case FrameType::Bailout:
case FrameType::JSJitToWasm:
diff --git a/js/src/jit/JSJitFrameIter.h b/js/src/jit/JSJitFrameIter.h
index d40a533a20..03fd06852e 100644
--- a/js/src/jit/JSJitFrameIter.h
+++ b/js/src/jit/JSJitFrameIter.h
@@ -73,6 +73,10 @@ enum class FrameType {
// wasm, and is a special kind of exit frame that doesn't have the exit
// footer. From the point of view of the jit, it can be skipped as an exit.
JSJitToWasm,
+
+ // Frame for a TrampolineNative, a JS builtin implemented with a JIT
+ // trampoline. See jit/TrampolineNatives.h.
+ TrampolineNative,
};
enum class ReadFrameArgsBehavior {
@@ -173,6 +177,9 @@ class JSJitFrameIter {
return type_ == FrameType::BaselineInterpreterEntry;
}
bool isRectifier() const { return type_ == FrameType::Rectifier; }
  // Whether the current frame is for a TrampolineNative, a JS builtin
  // implemented with a JIT trampoline (see jit/TrampolineNatives.h).
  bool isTrampolineNative() const {
    return type_ == FrameType::TrampolineNative;
  }
bool isBareExit() const;
bool isUnwoundJitExit() const;
template <typename T>
@@ -263,6 +270,7 @@ class JitcodeGlobalTable;
class JSJitProfilingFrameIterator {
uint8_t* fp_;
+ uint8_t* wasmCallerFP_ = nullptr;
// See JS::ProfilingFrameIterator::endStackAddress_ comment.
void* endStackAddress_ = nullptr;
FrameType type_;
@@ -290,6 +298,11 @@ class JSJitProfilingFrameIterator {
MOZ_ASSERT(!done());
return fp_;
}
  // Only valid once iteration is done(). Non-null exactly when iteration
  // stopped at a WasmToJSJit transition frame, in which case it holds the
  // wasm caller's frame pointer for handing off to a wasm frame iterator.
  void* wasmCallerFP() const {
    MOZ_ASSERT(done());
    MOZ_ASSERT(bool(wasmCallerFP_) == (type_ == FrameType::WasmToJSJit));
    return wasmCallerFP_;
  }
inline JitFrameLayout* framePtr() const;
void* stackAddress() const { return fp(); }
FrameType frameType() const {
@@ -491,6 +504,42 @@ class SnapshotIterator {
Value read() { return allocationValue(readAllocation()); }
+ int32_t readInt32() {
+ Value val = read();
+ MOZ_RELEASE_ASSERT(val.isInt32());
+ return val.toInt32();
+ }
+
+ double readNumber() {
+ Value val = read();
+ MOZ_RELEASE_ASSERT(val.isNumber());
+ return val.toNumber();
+ }
+
+ JSString* readString() {
+ Value val = read();
+ MOZ_RELEASE_ASSERT(val.isString());
+ return val.toString();
+ }
+
+ JS::BigInt* readBigInt() {
+ Value val = read();
+ MOZ_RELEASE_ASSERT(val.isBigInt());
+ return val.toBigInt();
+ }
+
+ JSObject* readObject() {
+ Value val = read();
+ MOZ_RELEASE_ASSERT(val.isObject());
+ return &val.toObject();
+ }
+
+ JS::GCCellPtr readGCCellPtr() {
+ Value val = read();
+ MOZ_RELEASE_ASSERT(val.isGCThing());
+ return val.toGCCellPtr();
+ }
+
// Read the |Normal| value unless it is not available and that the snapshot
// provides a |Default| value. This is useful to avoid invalidations of the
// frame while we are only interested in a few properties which are provided
diff --git a/js/src/jit/JitFrames.cpp b/js/src/jit/JitFrames.cpp
index 176b988e05..45ac1f5def 100644
--- a/js/src/jit/JitFrames.cpp
+++ b/js/src/jit/JitFrames.cpp
@@ -83,6 +83,27 @@ static uint32_t NumArgAndLocalSlots(const InlineFrameIterator& frame) {
return CountArgSlots(script, frame.maybeCalleeTemplate()) + script->nfixed();
}
// Map a TrampolineNative frame back to its TrampolineNative enum value by
// looking up the callee function's JIT entry in the runtime's trampoline
// table.
static TrampolineNative TrampolineNativeForFrame(
    JSRuntime* rt, TrampolineNativeFrameLayout* layout) {
  JSFunction* nativeFun = CalleeTokenToFunction(layout->calleeToken());
  MOZ_ASSERT(nativeFun->isBuiltinNative());
  void** jitEntry = nativeFun->nativeJitEntry();
  return rt->jitRuntime()->trampolineNativeForJitEntry(jitEntry);
}
+
// Release any malloc'ed state owned by a TrampolineNative frame before
// exception handling pops it; called from HandleException.
static void UnwindTrampolineNativeFrame(JSRuntime* rt,
                                        const JSJitFrameIter& frame) {
  auto* layout = (TrampolineNativeFrameLayout*)frame.fp();
  TrampolineNative native = TrampolineNativeForFrame(rt, layout);
  switch (native) {
    case TrampolineNative::ArraySort:
      layout->getFrameData<ArraySortData>()->freeMallocData();
      break;
    case TrampolineNative::Count:
      // Not a real native; no default case so new natives must be handled.
      MOZ_CRASH("Invalid value");
  }
}
+
static void CloseLiveIteratorIon(JSContext* cx,
const InlineFrameIterator& frame,
const TryNote* tn) {
@@ -739,7 +760,7 @@ void HandleException(ResumeFromException* rfe) {
// JIT code can enter same-compartment realms, so reset cx->realm to
// this frame's realm.
- if (frame.isScripted()) {
+ if (frame.isScripted() || frame.isTrampolineNative()) {
cx->setRealmForJitExceptionHandler(iter.realm());
}
@@ -809,6 +830,8 @@ void HandleException(ResumeFromException* rfe) {
if (rfe->kind == ExceptionResumeKind::ForcedReturnBaseline) {
return;
}
+ } else if (frame.isTrampolineNative()) {
+ UnwindTrampolineNativeFrame(cx->runtime(), frame);
}
prevJitFrame = frame.current();
@@ -910,12 +933,15 @@ static inline uintptr_t ReadAllocation(const JSJitFrameIter& frame,
static void TraceThisAndArguments(JSTracer* trc, const JSJitFrameIter& frame,
JitFrameLayout* layout) {
- // Trace |this| and any extra actual arguments for an Ion frame. Tracing
- // of formal arguments is taken care of by the frame's safepoint/snapshot,
- // except when the script might have lazy arguments or rest, in which case
- // we trace them as well. We also have to trace formals if we have a
- // LazyLink frame or an InterpreterStub frame or a special JSJit to wasm
- // frame (since wasm doesn't use snapshots).
+ // Trace |this| and the actual and formal arguments of a JIT frame.
+ //
+ // Tracing of formal arguments of an Ion frame is taken care of by the frame's
+ // safepoint/snapshot. We skip tracing formal arguments if the script doesn't
+ // use |arguments| or rest, because the register allocator can spill values to
+ // argument slots in this case.
+ //
+ // For other frames such as LazyLink frames or InterpreterStub frames, we
+ // always trace all actual and formal arguments.
if (!CalleeTokenIsFunction(layout->calleeToken())) {
return;
@@ -927,8 +953,7 @@ static void TraceThisAndArguments(JSTracer* trc, const JSJitFrameIter& frame,
size_t numArgs = std::max(layout->numActualArgs(), numFormals);
size_t firstArg = 0;
- if (frame.type() != FrameType::JSJitToWasm &&
- !frame.isExitFrameLayout<CalledFromJitExitFrameLayout>() &&
+ if (frame.isIonScripted() &&
!fun->nonLazyScript()->mayReadFrameArgsDirectly()) {
firstArg = numFormals;
}
@@ -936,17 +961,17 @@ static void TraceThisAndArguments(JSTracer* trc, const JSJitFrameIter& frame,
Value* argv = layout->thisAndActualArgs();
// Trace |this|.
- TraceRoot(trc, argv, "ion-thisv");
+ TraceRoot(trc, argv, "jit-thisv");
// Trace arguments. Note + 1 for thisv.
for (size_t i = firstArg; i < numArgs; i++) {
- TraceRoot(trc, &argv[i + 1], "ion-argv");
+ TraceRoot(trc, &argv[i + 1], "jit-argv");
}
// Always trace the new.target from the frame. It's not in the snapshots.
// +1 to pass |this|
if (CalleeTokenIsConstructing(layout->calleeToken())) {
- TraceRoot(trc, &argv[1 + numArgs], "ion-newTarget");
+ TraceRoot(trc, &argv[1 + numArgs], "jit-newTarget");
}
}
@@ -1397,6 +1422,22 @@ static void TraceJSJitToWasmFrame(JSTracer* trc, const JSJitFrameIter& frame) {
TraceThisAndArguments(trc, frame, layout);
}
// Trace the GC things reachable from a TrampolineNative frame: the callee
// token, |this| and the arguments, plus any native-specific data stored
// below the frame layout (e.g. ArraySortData for the array sort trampoline).
static void TraceTrampolineNativeFrame(JSTracer* trc,
                                       const JSJitFrameIter& frame) {
  auto* layout = (TrampolineNativeFrameLayout*)frame.fp();
  layout->replaceCalleeToken(TraceCalleeToken(trc, layout->calleeToken()));
  TraceThisAndArguments(trc, frame, layout);

  TrampolineNative native = TrampolineNativeForFrame(trc->runtime(), layout);
  switch (native) {
    case TrampolineNative::ArraySort:
      layout->getFrameData<ArraySortData>()->trace(trc);
      break;
    case TrampolineNative::Count:
      // Not a real native; no default case so new natives must be handled.
      MOZ_CRASH("Invalid value");
  }
}
+
static void TraceJitActivation(JSTracer* trc, JitActivation* activation) {
#ifdef CHECK_OSIPOINT_REGISTERS
if (JitOptions.checkOsiPointRegisters) {
@@ -1439,6 +1480,9 @@ static void TraceJitActivation(JSTracer* trc, JitActivation* activation) {
case FrameType::Rectifier:
TraceRectifierFrame(trc, jitFrame);
break;
+ case FrameType::TrampolineNative:
+ TraceTrampolineNativeFrame(trc, jitFrame);
+ break;
case FrameType::IonICCall:
TraceIonICCallFrame(trc, jitFrame);
break;
diff --git a/js/src/jit/JitFrames.h b/js/src/jit/JitFrames.h
index ab882e7986..47c176492b 100644
--- a/js/src/jit/JitFrames.h
+++ b/js/src/jit/JitFrames.h
@@ -299,6 +299,17 @@ class RectifierFrameLayout : public JitFrameLayout {
static inline size_t Size() { return sizeof(RectifierFrameLayout); }
};
// Frame layout for calls into a TrampolineNative builtin. The trampoline may
// keep per-call state (e.g. ArraySortData) in the stack space immediately
// below this layout; getFrameData() returns a pointer to that state.
class TrampolineNativeFrameLayout : public JitFrameLayout {
 public:
  static inline size_t Size() { return sizeof(TrampolineNativeFrameLayout); }

  // Return a pointer to a T stored directly below the frame layout on the
  // stack. The trampoline must have reserved sizeof(T) bytes there.
  template <typename T>
  T* getFrameData() {
    uint8_t* raw = reinterpret_cast<uint8_t*>(this) - sizeof(T);
    return reinterpret_cast<T*>(raw);
  }
};
+
class WasmToJSJitFrameLayout : public JitFrameLayout {
public:
static inline size_t Size() { return sizeof(WasmToJSJitFrameLayout); }
diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp
index e9d389cf60..053cf868a7 100644
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -447,7 +447,8 @@ void DefaultJitOptions::resetNormalIonWarmUpThreshold() {
void DefaultJitOptions::maybeSetWriteProtectCode(bool val) {
#ifdef JS_USE_APPLE_FAST_WX
- // On Apple Silicon we always use pthread_jit_write_protect_np.
+ // On Apple Silicon we always use pthread_jit_write_protect_np, or
+ // be_memory_inline_jit_restrict_*.
MOZ_ASSERT(!writeProtectCode);
#else
writeProtectCode = val;
diff --git a/js/src/jit/JitRuntime.h b/js/src/jit/JitRuntime.h
index 7d038ed0e2..383efca437 100644
--- a/js/src/jit/JitRuntime.h
+++ b/js/src/jit/JitRuntime.h
@@ -27,6 +27,7 @@
#include "jit/JitCode.h"
#include "jit/JitHints.h"
#include "jit/shared/Assembler-shared.h"
+#include "jit/TrampolineNatives.h"
#include "js/AllocPolicy.h"
#include "js/ProfilingFrameIterator.h"
#include "js/TypeDecls.h"
@@ -234,6 +235,13 @@ class JitRuntime {
MainThreadData<IonCompileTaskList> ionLazyLinkList_;
MainThreadData<size_t> ionLazyLinkListSize_{0};
+ // Pointer to trampoline code for each TrampolineNative. The JSFunction has
+ // a JitEntry pointer that points to an item in this array.
+ using TrampolineNativeJitEntryArray =
+ mozilla::EnumeratedArray<TrampolineNative, void*,
+ size_t(TrampolineNative::Count)>;
+ TrampolineNativeJitEntryArray trampolineNativeJitEntries_{};
+
#ifdef DEBUG
// Flag that can be set from JIT code to indicate it's invalid to call
// arbitrary JS code in a particular region. This is checked in RunScript.
@@ -293,6 +301,14 @@ class JitRuntime {
void generateBaselineInterpreterEntryTrampoline(MacroAssembler& masm);
void generateInterpreterEntryTrampoline(MacroAssembler& masm);
+ using TrampolineNativeJitEntryOffsets =
+ mozilla::EnumeratedArray<TrampolineNative, uint32_t,
+ size_t(TrampolineNative::Count)>;
+ void generateTrampolineNatives(MacroAssembler& masm,
+ TrampolineNativeJitEntryOffsets& offsets,
+ PerfSpewerRangeRecorder& rangeRecorder);
+ uint32_t generateArraySortTrampoline(MacroAssembler& masm);
+
void bindLabelToOffset(Label* label, uint32_t offset) {
MOZ_ASSERT(!trampolineCode_);
label->bind(offset);
@@ -418,6 +434,20 @@ class JitRuntime {
return trampolineCode(ionGenericCallStubOffset_[kind]);
}
+ void** trampolineNativeJitEntry(TrampolineNative native) {
+ void** jitEntry = &trampolineNativeJitEntries_[native];
+ MOZ_ASSERT(*jitEntry >= trampolineCode_->raw());
+ MOZ_ASSERT(*jitEntry <
+ trampolineCode_->raw() + trampolineCode_->instructionsSize());
+ return jitEntry;
+ }
+ TrampolineNative trampolineNativeForJitEntry(void** entry) {
+ MOZ_RELEASE_ASSERT(entry >= trampolineNativeJitEntries_.begin());
+ size_t index = entry - trampolineNativeJitEntries_.begin();
+ MOZ_RELEASE_ASSERT(index < size_t(TrampolineNative::Count));
+ return TrampolineNative(index);
+ }
+
bool hasJitcodeGlobalTable() const { return jitcodeGlobalTable_ != nullptr; }
JitcodeGlobalTable* getJitcodeGlobalTable() {
diff --git a/js/src/jit/LIROps.yaml b/js/src/jit/LIROps.yaml
index f13c4b0745..880e756f74 100644
--- a/js/src/jit/LIROps.yaml
+++ b/js/src/jit/LIROps.yaml
@@ -632,6 +632,21 @@
- name: ConstructArrayGeneric
gen_boilerplate: false
+- name: ApplyArgsNative
+ gen_boilerplate: false
+
+- name: ApplyArgsObjNative
+ gen_boilerplate: false
+
+- name: ApplyArrayNative
+ gen_boilerplate: false
+
+- name: ConstructArgsNative
+ gen_boilerplate: false
+
+- name: ConstructArrayNative
+ gen_boilerplate: false
+
- name: TestIAndBranch
gen_boilerplate: false
@@ -2189,7 +2204,7 @@
mir_op: ClampToUint8
- name: LoadScriptedProxyHandler
- result_type: BoxedValue
+ result_type: WordSized
operands:
object: WordSized
mir_op: true
@@ -3694,6 +3709,7 @@
result_type: WordSized
operands:
object: WordSized
+ mir_op: true
- name: GuardHasGetterSetter
operands:
diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp
index b0007a114d..f7b898f240 100644
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -654,12 +654,23 @@ void LIRGenerator::visitApplyArgs(MApplyArgs* apply) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- LApplyArgsGeneric* lir = new (alloc()) LApplyArgsGeneric(
- useFixedAtStart(apply->getFunction(), CallTempReg3),
- useFixedAtStart(apply->getArgc(), CallTempReg0),
- useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
- tempFixed(CallTempReg1), // object register
- tempFixed(CallTempReg2)); // stack counter register
+ auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
+ auto argc = useFixedAtStart(apply->getArgc(), CallTempReg0);
+ auto thisValue =
+ useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5);
+ auto tempObj = tempFixed(CallTempReg1); // object register
+ auto tempCopy = tempFixed(CallTempReg2); // copy register
+
+ auto* target = apply->getSingleTarget();
+
+ LInstruction* lir;
+ if (target && target->isNativeWithoutJitEntry()) {
+ lir = new (alloc())
+ LApplyArgsNative(function, argc, thisValue, tempObj, tempCopy);
+ } else {
+ lir = new (alloc())
+ LApplyArgsGeneric(function, argc, thisValue, tempObj, tempCopy);
+ }
// Bailout is needed in the case of too many values in the arguments array.
assignSnapshot(lir, apply->bailoutKind());
@@ -675,12 +686,23 @@ void LIRGenerator::visitApplyArgsObj(MApplyArgsObj* apply) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- LApplyArgsObj* lir = new (alloc()) LApplyArgsObj(
- useFixedAtStart(apply->getFunction(), CallTempReg3),
- useFixedAtStart(apply->getArgsObj(), CallTempReg0),
- useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
- tempFixed(CallTempReg1), // object register
- tempFixed(CallTempReg2)); // stack counter register
+ auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
+ auto argsObj = useFixedAtStart(apply->getArgsObj(), CallTempReg0);
+ auto thisValue =
+ useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5);
+ auto tempObj = tempFixed(CallTempReg1); // object register
+ auto tempCopy = tempFixed(CallTempReg2); // copy register
+
+ auto* target = apply->getSingleTarget();
+
+ LInstruction* lir;
+ if (target && target->isNativeWithoutJitEntry()) {
+ lir = new (alloc())
+ LApplyArgsObjNative(function, argsObj, thisValue, tempObj, tempCopy);
+ } else {
+ lir = new (alloc())
+ LApplyArgsObj(function, argsObj, thisValue, tempObj, tempCopy);
+ }
// Bailout is needed in the case of too many values in the arguments array.
assignSnapshot(lir, apply->bailoutKind());
@@ -696,12 +718,23 @@ void LIRGenerator::visitApplyArray(MApplyArray* apply) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- LApplyArrayGeneric* lir = new (alloc()) LApplyArrayGeneric(
- useFixedAtStart(apply->getFunction(), CallTempReg3),
- useFixedAtStart(apply->getElements(), CallTempReg0),
- useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
- tempFixed(CallTempReg1), // object register
- tempFixed(CallTempReg2)); // stack counter register
+ auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
+ auto elements = useFixedAtStart(apply->getElements(), CallTempReg0);
+ auto thisValue =
+ useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5);
+ auto tempObj = tempFixed(CallTempReg1); // object register
+ auto tempCopy = tempFixed(CallTempReg2); // copy register
+
+ auto* target = apply->getSingleTarget();
+
+ LInstruction* lir;
+ if (target && target->isNativeWithoutJitEntry()) {
+ lir = new (alloc())
+ LApplyArrayNative(function, elements, thisValue, tempObj, tempCopy);
+ } else {
+ lir = new (alloc())
+ LApplyArrayGeneric(function, elements, thisValue, tempObj, tempCopy);
+ }
// Bailout is needed in the case of too many values in the array, or empty
// space at the end of the array.
@@ -721,12 +754,26 @@ void LIRGenerator::visitConstructArgs(MConstructArgs* mir) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- auto* lir = new (alloc()) LConstructArgsGeneric(
- useFixedAtStart(mir->getFunction(), CallTempReg3),
- useFixedAtStart(mir->getArgc(), CallTempReg0),
- useFixedAtStart(mir->getNewTarget(), CallTempReg1),
- useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5),
- tempFixed(CallTempReg2));
+ auto function = useFixedAtStart(mir->getFunction(), CallTempReg3);
+ auto argc = useFixedAtStart(mir->getArgc(), CallTempReg0);
+ auto newTarget = useFixedAtStart(mir->getNewTarget(), CallTempReg1);
+ auto temp = tempFixed(CallTempReg2);
+
+ auto* target = mir->getSingleTarget();
+
+ LInstruction* lir;
+ if (target && target->isNativeWithoutJitEntry()) {
+ auto temp2 = tempFixed(CallTempReg4);
+
+ lir = new (alloc())
+ LConstructArgsNative(function, argc, newTarget, temp, temp2);
+ } else {
+ auto thisValue =
+ useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5);
+
+ lir = new (alloc())
+ LConstructArgsGeneric(function, argc, newTarget, thisValue, temp);
+ }
// Bailout is needed in the case of too many values in the arguments array.
assignSnapshot(lir, mir->bailoutKind());
@@ -745,12 +792,26 @@ void LIRGenerator::visitConstructArray(MConstructArray* mir) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
- auto* lir = new (alloc()) LConstructArrayGeneric(
- useFixedAtStart(mir->getFunction(), CallTempReg3),
- useFixedAtStart(mir->getElements(), CallTempReg0),
- useFixedAtStart(mir->getNewTarget(), CallTempReg1),
- useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5),
- tempFixed(CallTempReg2));
+ auto function = useFixedAtStart(mir->getFunction(), CallTempReg3);
+ auto elements = useFixedAtStart(mir->getElements(), CallTempReg0);
+ auto newTarget = useFixedAtStart(mir->getNewTarget(), CallTempReg1);
+ auto temp = tempFixed(CallTempReg2);
+
+ auto* target = mir->getSingleTarget();
+
+ LInstruction* lir;
+ if (target && target->isNativeWithoutJitEntry()) {
+ auto temp2 = tempFixed(CallTempReg4);
+
+ lir = new (alloc())
+ LConstructArrayNative(function, elements, newTarget, temp, temp2);
+ } else {
+ auto thisValue =
+ useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5);
+
+ lir = new (alloc())
+ LConstructArrayGeneric(function, elements, newTarget, thisValue, temp);
+ }
// Bailout is needed in the case of too many values in the array, or empty
// space at the end of the array.
@@ -3241,7 +3302,9 @@ void LIRGenerator::visitWasmAnyRefFromJSString(MWasmAnyRefFromJSString* ins) {
}
void LIRGenerator::visitWasmNewI31Ref(MWasmNewI31Ref* ins) {
- LWasmNewI31Ref* lir = new (alloc()) LWasmNewI31Ref(useRegister(ins->input()));
+ // If it's a constant, it will be put directly into the register.
+ LWasmNewI31Ref* lir =
+ new (alloc()) LWasmNewI31Ref(useRegisterOrConstant(ins->input()));
define(lir, ins);
}
@@ -4686,7 +4749,8 @@ void LIRGenerator::visitLoadScriptedProxyHandler(
MLoadScriptedProxyHandler* ins) {
LLoadScriptedProxyHandler* lir = new (alloc())
LLoadScriptedProxyHandler(useRegisterAtStart(ins->object()));
- defineBox(lir, ins);
+ assignSnapshot(lir, ins->bailoutKind());
+ define(lir, ins);
}
void LIRGenerator::visitIdToStringOrSymbol(MIdToStringOrSymbol* ins) {
@@ -6750,7 +6814,11 @@ void LIRGenerator::visitLoadWrapperTarget(MLoadWrapperTarget* ins) {
MDefinition* object = ins->object();
MOZ_ASSERT(object->type() == MIRType::Object);
- define(new (alloc()) LLoadWrapperTarget(useRegisterAtStart(object)), ins);
+ auto* lir = new (alloc()) LLoadWrapperTarget(useRegisterAtStart(object));
+ if (ins->fallible()) {
+ assignSnapshot(lir, ins->bailoutKind());
+ }
+ define(lir, ins);
}
void LIRGenerator::visitGuardHasGetterSetter(MGuardHasGetterSetter* ins) {
diff --git a/js/src/jit/MIR.cpp b/js/src/jit/MIR.cpp
index c6daecb166..a74406567b 100644
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -689,7 +689,62 @@ MDefinition* MTest::foldsNeedlessControlFlow(TempAllocator& alloc) {
return MGoto::New(alloc, ifTrue());
}
+// If a test is dominated by either the true or false path of a previous test of
+// the same condition, then the test is redundant and can be converted into a
+// goto true or goto false, respectively.
+MDefinition* MTest::foldsRedundantTest(TempAllocator& alloc) {
+ MBasicBlock* myBlock = this->block();
+ MDefinition* originalInput = getOperand(0);
+
+ // Handle single and double negatives. This ensures that we do not miss a
+ // folding opportunity due to a condition being inverted.
+ MDefinition* newInput = input();
+ bool inverted = false;
+ if (originalInput->isNot()) {
+ newInput = originalInput->toNot()->input();
+ inverted = true;
+ if (originalInput->toNot()->input()->isNot()) {
+ newInput = originalInput->toNot()->input()->toNot()->input();
+ inverted = false;
+ }
+ }
+
+ // The specific order of traversal does not matter. If there are multiple
+ // dominating redundant tests, they will either agree on direction (in which
+ // case we will prune the same way regardless of order), or they will
+ // disagree, in which case we will eventually be marked entirely dead by the
+ // folding of the redundant parent.
+ for (MUseIterator i(newInput->usesBegin()), e(newInput->usesEnd()); i != e;
+ ++i) {
+ if (!i->consumer()->isDefinition()) {
+ continue;
+ }
+ if (!i->consumer()->toDefinition()->isTest()) {
+ continue;
+ }
+ MTest* otherTest = i->consumer()->toDefinition()->toTest();
+ if (otherTest == this) {
+ continue;
+ }
+
+ if (otherTest->ifFalse()->dominates(myBlock)) {
+ // This test cannot be true, so fold to a goto false.
+ return MGoto::New(alloc, inverted ? ifTrue() : ifFalse());
+ }
+ if (otherTest->ifTrue()->dominates(myBlock)) {
+ // This test cannot be false, so fold to a goto true.
+ return MGoto::New(alloc, inverted ? ifFalse() : ifTrue());
+ }
+ }
+
+ return nullptr;
+}
+
MDefinition* MTest::foldsTo(TempAllocator& alloc) {
+ if (MDefinition* def = foldsRedundantTest(alloc)) {
+ return def;
+ }
+
if (MDefinition* def = foldsDoubleNegation(alloc)) {
return def;
}
@@ -7187,6 +7242,16 @@ AliasSet MLoadWrapperTarget::getAliasSet() const {
return AliasSet::Load(AliasSet::Any);
}
+bool MLoadWrapperTarget::congruentTo(const MDefinition* ins) const {
+ if (!ins->isLoadWrapperTarget()) {
+ return false;
+ }
+ if (ins->toLoadWrapperTarget()->fallible() != fallible()) {
+ return false;
+ }
+ return congruentIfOperandsEqual(ins);
+}
+
AliasSet MGuardHasGetterSetter::getAliasSet() const {
return AliasSet::Load(AliasSet::ObjectFields);
}
diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
index d882665a65..c672092f04 100644
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -1890,6 +1890,7 @@ class MTest : public MAryControlInstruction<1, 2>, public TestPolicy::Data {
MDefinition* foldsConstant(TempAllocator& alloc);
MDefinition* foldsTypes(TempAllocator& alloc);
MDefinition* foldsNeedlessControlFlow(TempAllocator& alloc);
+ MDefinition* foldsRedundantTest(TempAllocator& alloc);
MDefinition* foldsTo(TempAllocator& alloc) override;
#ifdef DEBUG
@@ -2309,7 +2310,7 @@ class WrappedFunction : public TempObject {
return nativeFun_->nativeUnchecked();
}
bool hasJitInfo() const {
- return flags_.isBuiltinNative() && nativeFun_->jitInfoUnchecked();
+ return flags_.canHaveJitInfo() && nativeFun_->jitInfoUnchecked();
}
const JSJitInfo* jitInfo() const {
MOZ_ASSERT(hasJitInfo());
diff --git a/js/src/jit/MIROps.yaml b/js/src/jit/MIROps.yaml
index 7f0df52742..78ab989221 100644
--- a/js/src/jit/MIROps.yaml
+++ b/js/src/jit/MIROps.yaml
@@ -539,7 +539,8 @@
- name: LoadScriptedProxyHandler
operands:
object: Object
- result_type: Value
+ result_type: Object
+ guard: true
congruent_to: if_operands_equal
alias_set: none
@@ -1421,8 +1422,6 @@
index: Int32
type_policy: none
alias_set: custom
- # By default no, unless built as a recovered instruction.
- can_recover: custom
# Load the function length. Bails for functions with lazy scripts or a
# resolved "length" property.
@@ -2810,13 +2809,16 @@
alias_set: none
# Load the target object from a proxy wrapper. The target is stored in the
-# proxy object's private slot.
+# proxy object's private slot. This operation is fallible if the proxy can
+# be revoked.
- name: LoadWrapperTarget
operands:
object: Object
+ arguments:
+ fallible: bool
result_type: Object
movable: true
- congruent_to: if_operands_equal
+ congruent_to: custom
# Can't use |AliasSet::None| because the target changes on navigation.
# TODO: Investigate using a narrower or a custom alias set.
alias_set: custom
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
index 3b094d49dc..9fc4b96830 100644
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -3169,6 +3169,8 @@ void MacroAssembler::emitMegamorphicCachedSetSlot(
passABIArg(scratch2);
callWithABI<Fn, NativeObject::growSlotsPure>();
storeCallPointerResult(scratch2);
+
+ MOZ_ASSERT(!save.has(scratch2));
PopRegsInMask(save);
branchIfFalseBool(scratch2, &cacheMiss);
@@ -7803,6 +7805,24 @@ void MacroAssembler::loadArgumentsObjectLength(Register obj, Register output,
rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), output);
}
+void MacroAssembler::loadArgumentsObjectLength(Register obj, Register output) {
+ // Get initial length value.
+ unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()),
+ output);
+
+#ifdef DEBUG
+ // Assert length hasn't been overridden.
+ Label ok;
+ branchTest32(Assembler::Zero, output,
+ Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT), &ok);
+ assumeUnreachable("arguments object length has been overridden");
+ bind(&ok);
+#endif
+
+ // Shift out arguments length and return it.
+ rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), output);
+}
+
void MacroAssembler::branchTestArgumentsObjectFlags(Register obj, Register temp,
uint32_t flags,
Condition cond,
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
index 361de3ac5f..114aaa47d7 100644
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -5291,6 +5291,7 @@ class MacroAssembler : public MacroAssemblerSpecific {
Label* fail);
void loadArgumentsObjectLength(Register obj, Register output, Label* fail);
+ void loadArgumentsObjectLength(Register obj, Register output);
void branchTestArgumentsObjectFlags(Register obj, Register temp,
uint32_t flags, Condition cond,
diff --git a/js/src/jit/PerfSpewer.cpp b/js/src/jit/PerfSpewer.cpp
index 81954f3d92..c9d9cc8d88 100644
--- a/js/src/jit/PerfSpewer.cpp
+++ b/js/src/jit/PerfSpewer.cpp
@@ -23,7 +23,7 @@
# define gettid() static_cast<pid_t>(syscall(__NR_gettid))
#endif
-#if defined(JS_ION_PERF) && (defined(ANDROID) || defined(XP_MACOSX))
+#if defined(JS_ION_PERF) && (defined(ANDROID) || defined(XP_DARWIN))
# include <limits.h>
# include <stdlib.h>
# include <unistd.h>
@@ -42,7 +42,7 @@ char* get_current_dir_name() {
}
#endif
-#if defined(JS_ION_PERF) && defined(XP_MACOSX)
+#if defined(JS_ION_PERF) && defined(XP_DARWIN)
# include <pthread.h>
# include <unistd.h>
@@ -128,7 +128,7 @@ static uint64_t GetMonotonicTimestamp() {
return TimeStamp::Now().RawClockMonotonicNanosecondsSinceBoot();
# elif XP_WIN
return TimeStamp::Now().RawQueryPerformanceCounterValue().value();
-# elif XP_MACOSX
+# elif XP_DARWIN
return TimeStamp::Now().RawMachAbsoluteTimeNanoseconds();
# else
MOZ_CRASH("no timestamp");
diff --git a/js/src/jit/ProcessExecutableMemory.cpp b/js/src/jit/ProcessExecutableMemory.cpp
index 830d15f7fb..0c00b17c73 100644
--- a/js/src/jit/ProcessExecutableMemory.cpp
+++ b/js/src/jit/ProcessExecutableMemory.cpp
@@ -46,6 +46,10 @@
# include <valgrind/valgrind.h>
#endif
+#if defined(XP_IOS)
+# include <BrowserEngineCore/BEMemory.h>
+#endif
+
using namespace js;
using namespace js::jit;
@@ -990,11 +994,19 @@ bool js::jit::ReprotectRegion(void* start, size_t size,
#ifdef JS_USE_APPLE_FAST_WX
void js::jit::AutoMarkJitCodeWritableForThread::markExecutable(
bool executable) {
+# if defined(XP_IOS)
+ if (executable) {
+ be_memory_inline_jit_restrict_rwx_to_rx_with_witness();
+ } else {
+ be_memory_inline_jit_restrict_rwx_to_rw_with_witness();
+ }
+# else
if (__builtin_available(macOS 11.0, *)) {
pthread_jit_write_protect_np(executable);
} else {
MOZ_CRASH("pthread_jit_write_protect_np must be available");
}
+# endif
}
#endif
diff --git a/js/src/jit/Recover.cpp b/js/src/jit/Recover.cpp
index 220ffe7bb2..4c1ff56436 100644
--- a/js/src/jit/Recover.cpp
+++ b/js/src/jit/Recover.cpp
@@ -6,6 +6,8 @@
#include "jit/Recover.h"
+#include "mozilla/Casting.h"
+
#include "jsmath.h"
#include "builtin/Object.h"
@@ -495,16 +497,15 @@ bool MBigIntAdd::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntAdd::RBigIntAdd(CompactBufferReader& reader) {}
bool RBigIntAdd::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::AddValues(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::add(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -517,16 +518,15 @@ bool MBigIntSub::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntSub::RBigIntSub(CompactBufferReader& reader) {}
bool RBigIntSub::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::SubValues(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::sub(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -539,16 +539,15 @@ bool MBigIntMul::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntMul::RBigIntMul(CompactBufferReader& reader) {}
bool RBigIntMul::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::MulValues(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::mul(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -561,18 +560,17 @@ bool MBigIntDiv::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntDiv::RBigIntDiv(CompactBufferReader& reader) {}
bool RBigIntDiv::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
-
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- MOZ_ASSERT(!rhs.toBigInt()->isZero(),
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
+ MOZ_ASSERT(!rhs->isZero(),
"division by zero throws and therefore can't be recovered");
- if (!js::DivValues(cx, &lhs, &rhs, &result)) {
+
+ BigInt* result = BigInt::div(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -585,18 +583,17 @@ bool MBigIntMod::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntMod::RBigIntMod(CompactBufferReader& reader) {}
bool RBigIntMod::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
-
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- MOZ_ASSERT(!rhs.toBigInt()->isZero(),
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
+ MOZ_ASSERT(!rhs->isZero(),
"division by zero throws and therefore can't be recovered");
- if (!js::ModValues(cx, &lhs, &rhs, &result)) {
+
+ BigInt* result = BigInt::mod(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -609,18 +606,17 @@ bool MBigIntPow::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntPow::RBigIntPow(CompactBufferReader& reader) {}
bool RBigIntPow::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
-
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- MOZ_ASSERT(!rhs.toBigInt()->isNegative(),
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
+ MOZ_ASSERT(!rhs->isNegative(),
"negative exponent throws and therefore can't be recovered");
- if (!js::PowValues(cx, &lhs, &rhs, &result)) {
+
+ BigInt* result = BigInt::pow(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -633,16 +629,15 @@ bool MBigIntBitAnd::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntBitAnd::RBigIntBitAnd(CompactBufferReader& reader) {}
bool RBigIntBitAnd::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::BitAnd(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::bitAnd(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -655,16 +650,15 @@ bool MBigIntBitOr::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntBitOr::RBigIntBitOr(CompactBufferReader& reader) {}
bool RBigIntBitOr::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::BitOr(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::bitOr(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -677,16 +671,15 @@ bool MBigIntBitXor::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntBitXor::RBigIntBitXor(CompactBufferReader& reader) {}
bool RBigIntBitXor::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::BitXor(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::bitXor(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -699,16 +692,15 @@ bool MBigIntLsh::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntLsh::RBigIntLsh(CompactBufferReader& reader) {}
bool RBigIntLsh::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::BitLsh(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::lsh(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -721,16 +713,15 @@ bool MBigIntRsh::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntRsh::RBigIntRsh(CompactBufferReader& reader) {}
bool RBigIntRsh::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue lhs(cx, iter.read());
- RootedValue rhs(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> lhs(cx, iter.readBigInt());
+ Rooted<BigInt*> rhs(cx, iter.readBigInt());
- MOZ_ASSERT(lhs.isBigInt() && rhs.isBigInt());
- if (!js::BitRsh(cx, &lhs, &rhs, &result)) {
+ BigInt* result = BigInt::rsh(cx, lhs, rhs);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -743,15 +734,14 @@ bool MBigIntIncrement::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntIncrement::RBigIntIncrement(CompactBufferReader& reader) {}
bool RBigIntIncrement::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue operand(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> operand(cx, iter.readBigInt());
- MOZ_ASSERT(operand.isBigInt());
- if (!js::IncOperation(cx, operand, &result)) {
+ BigInt* result = BigInt::inc(cx, operand);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -764,15 +754,14 @@ bool MBigIntDecrement::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntDecrement::RBigIntDecrement(CompactBufferReader& reader) {}
bool RBigIntDecrement::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue operand(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> operand(cx, iter.readBigInt());
- MOZ_ASSERT(operand.isBigInt());
- if (!js::DecOperation(cx, operand, &result)) {
+ BigInt* result = BigInt::dec(cx, operand);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -785,15 +774,14 @@ bool MBigIntNegate::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntNegate::RBigIntNegate(CompactBufferReader& reader) {}
bool RBigIntNegate::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue operand(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> operand(cx, iter.readBigInt());
- MOZ_ASSERT(operand.isBigInt());
- if (!js::NegOperation(cx, &operand, &result)) {
+ BigInt* result = BigInt::neg(cx, operand);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -806,15 +794,14 @@ bool MBigIntBitNot::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntBitNot::RBigIntBitNot(CompactBufferReader& reader) {}
bool RBigIntBitNot::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedValue operand(cx, iter.read());
- RootedValue result(cx);
+ Rooted<BigInt*> operand(cx, iter.readBigInt());
- MOZ_ASSERT(operand.isBigInt());
- if (!js::BitNot(cx, &operand, &result)) {
+ BigInt* result = BigInt::bitNot(cx, operand);
+ if (!result) {
return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(BigIntValue(result));
return true;
}
@@ -910,7 +897,7 @@ bool RConcat::recover(JSContext* cx, SnapshotIterator& iter) const {
RStringLength::RStringLength(CompactBufferReader& reader) {}
bool RStringLength::recover(JSContext* cx, SnapshotIterator& iter) const {
- JSString* string = iter.read().toString();
+ JSString* string = iter.readString();
static_assert(JSString::MAX_LENGTH <= INT32_MAX,
"Can cast string length to int32_t");
@@ -953,7 +940,7 @@ bool MFloor::writeRecoverData(CompactBufferWriter& writer) const {
RFloor::RFloor(CompactBufferReader& reader) {}
bool RFloor::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_floor_impl(num);
iter.storeInstructionResult(NumberValue(result));
@@ -969,7 +956,7 @@ bool MCeil::writeRecoverData(CompactBufferWriter& writer) const {
RCeil::RCeil(CompactBufferReader& reader) {}
bool RCeil::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_ceil_impl(num);
iter.storeInstructionResult(NumberValue(result));
@@ -985,7 +972,7 @@ bool MRound::writeRecoverData(CompactBufferWriter& writer) const {
RRound::RRound(CompactBufferReader& reader) {}
bool RRound::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_round_impl(num);
iter.storeInstructionResult(NumberValue(result));
@@ -1001,7 +988,7 @@ bool MTrunc::writeRecoverData(CompactBufferWriter& writer) const {
RTrunc::RTrunc(CompactBufferReader& reader) {}
bool RTrunc::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_trunc_impl(num);
iter.storeInstructionResult(NumberValue(result));
@@ -1017,21 +1004,18 @@ bool MCharCodeAt::writeRecoverData(CompactBufferWriter& writer) const {
RCharCodeAt::RCharCodeAt(CompactBufferReader& reader) {}
bool RCharCodeAt::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedString string(cx, iter.read().toString());
- int32_t index = iter.read().toInt32();
+ JSString* string = iter.readString();
- RootedValue result(cx);
- if (0 <= index && size_t(index) < string->length()) {
- char16_t c;
- if (!string->getChar(cx, index, &c)) {
- return false;
- }
- result.setInt32(c);
- } else {
- result.setNaN();
+ // Int32 because |index| is computed from MBoundsCheck.
+ int32_t index = iter.readInt32();
+ MOZ_RELEASE_ASSERT(0 <= index && size_t(index) < string->length());
+
+ char16_t c;
+ if (!string->getChar(cx, index, &c)) {
+ return false;
}
- iter.storeInstructionResult(result);
+ iter.storeInstructionResult(Int32Value(c));
return true;
}
@@ -1044,7 +1028,8 @@ bool MFromCharCode::writeRecoverData(CompactBufferWriter& writer) const {
RFromCharCode::RFromCharCode(CompactBufferReader& reader) {}
bool RFromCharCode::recover(JSContext* cx, SnapshotIterator& iter) const {
- int32_t charCode = iter.read().toInt32();
+ // Number because |charCode| is computed from (recoverable) user input.
+ int32_t charCode = JS::ToInt32(iter.readNumber());
JSString* str = StringFromCharCode(cx, charCode);
if (!str) {
@@ -1068,7 +1053,8 @@ RFromCharCodeEmptyIfNegative::RFromCharCodeEmptyIfNegative(
bool RFromCharCodeEmptyIfNegative::recover(JSContext* cx,
SnapshotIterator& iter) const {
- int32_t charCode = iter.read().toInt32();
+ // Int32 because |charCode| is computed from MCharCodeAtOrNegative.
+ int32_t charCode = iter.readInt32();
JSString* str;
if (charCode < 0) {
@@ -1093,8 +1079,8 @@ bool MPow::writeRecoverData(CompactBufferWriter& writer) const {
RPow::RPow(CompactBufferReader& reader) {}
bool RPow::recover(JSContext* cx, SnapshotIterator& iter) const {
- double base = iter.read().toNumber();
- double power = iter.read().toNumber();
+ double base = iter.readNumber();
+ double power = iter.readNumber();
double result = ecmaPow(base, power);
iter.storeInstructionResult(NumberValue(result));
@@ -1110,7 +1096,7 @@ bool MPowHalf::writeRecoverData(CompactBufferWriter& writer) const {
RPowHalf::RPowHalf(CompactBufferReader& reader) {}
bool RPowHalf::recover(JSContext* cx, SnapshotIterator& iter) const {
- double base = iter.read().toNumber();
+ double base = iter.readNumber();
double power = 0.5;
double result = ecmaPow(base, power);
@@ -1128,8 +1114,8 @@ bool MMinMax::writeRecoverData(CompactBufferWriter& writer) const {
RMinMax::RMinMax(CompactBufferReader& reader) { isMax_ = reader.readByte(); }
bool RMinMax::recover(JSContext* cx, SnapshotIterator& iter) const {
- double x = iter.read().toNumber();
- double y = iter.read().toNumber();
+ double x = iter.readNumber();
+ double y = iter.readNumber();
double result;
if (isMax_) {
@@ -1151,7 +1137,7 @@ bool MAbs::writeRecoverData(CompactBufferWriter& writer) const {
RAbs::RAbs(CompactBufferReader& reader) {}
bool RAbs::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_abs_impl(num);
iter.storeInstructionResult(NumberValue(result));
@@ -1170,7 +1156,7 @@ RSqrt::RSqrt(CompactBufferReader& reader) {
}
bool RSqrt::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_sqrt_impl(num);
// MIRType::Float32 is a specialization embedding the fact that the result is
@@ -1192,8 +1178,8 @@ bool MAtan2::writeRecoverData(CompactBufferWriter& writer) const {
RAtan2::RAtan2(CompactBufferReader& reader) {}
bool RAtan2::recover(JSContext* cx, SnapshotIterator& iter) const {
- double y = iter.read().toNumber();
- double x = iter.read().toNumber();
+ double y = iter.readNumber();
+ double x = iter.readNumber();
double result = js::ecmaAtan2(y, x);
iter.storeInstructionResult(DoubleValue(result));
@@ -1218,7 +1204,7 @@ bool RHypot::recover(JSContext* cx, SnapshotIterator& iter) const {
}
for (uint32_t i = 0; i < numOperands_; ++i) {
- vec.infallibleAppend(iter.read());
+ vec.infallibleAppend(NumberValue(iter.readNumber()));
}
RootedValue result(cx);
@@ -1265,7 +1251,7 @@ bool MSign::writeRecoverData(CompactBufferWriter& writer) const {
RSign::RSign(CompactBufferReader& reader) {}
bool RSign::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::math_sign_impl(num);
iter.storeInstructionResult(NumberValue(result));
@@ -1322,7 +1308,7 @@ RMathFunction::RMathFunction(CompactBufferReader& reader) {
}
bool RMathFunction::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result;
switch (function_) {
@@ -1431,8 +1417,8 @@ bool MStringSplit::writeRecoverData(CompactBufferWriter& writer) const {
RStringSplit::RStringSplit(CompactBufferReader& reader) {}
bool RStringSplit::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedString str(cx, iter.read().toString());
- RootedString sep(cx, iter.read().toString());
+ RootedString str(cx, iter.readString());
+ RootedString sep(cx, iter.readString());
JSObject* res = StringSplitString(cx, str, sep, INT32_MAX);
if (!res) {
@@ -1452,7 +1438,7 @@ bool MNaNToZero::writeRecoverData(CompactBufferWriter& writer) const {
RNaNToZero::RNaNToZero(CompactBufferReader& reader) {}
bool RNaNToZero::recover(JSContext* cx, SnapshotIterator& iter) const {
- double v = iter.read().toNumber();
+ double v = iter.readNumber();
if (std::isnan(v) || mozilla::IsNegativeZero(v)) {
v = 0.0;
}
@@ -1470,9 +1456,11 @@ bool MRegExpMatcher::writeRecoverData(CompactBufferWriter& writer) const {
RRegExpMatcher::RRegExpMatcher(CompactBufferReader& reader) {}
bool RRegExpMatcher::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject regexp(cx, &iter.read().toObject());
- RootedString input(cx, iter.read().toString());
- int32_t lastIndex = iter.read().toInt32();
+ RootedObject regexp(cx, iter.readObject());
+ RootedString input(cx, iter.readString());
+
+ // Int32 because |lastIndex| is computed from transpiled self-hosted call.
+ int32_t lastIndex = iter.readInt32();
RootedValue result(cx);
if (!RegExpMatcherRaw(cx, regexp, input, lastIndex, nullptr, &result)) {
@@ -1507,7 +1495,8 @@ bool MTypeOfName::writeRecoverData(CompactBufferWriter& writer) const {
RTypeOfName::RTypeOfName(CompactBufferReader& reader) {}
bool RTypeOfName::recover(JSContext* cx, SnapshotIterator& iter) const {
- int32_t type = iter.read().toInt32();
+ // Int32 because |type| is computed from MTypeOf.
+ int32_t type = iter.readInt32();
MOZ_ASSERT(JSTYPE_UNDEFINED <= type && type < JSTYPE_LIMIT);
JSString* name = TypeName(JSType(type), *cx->runtime()->commonNames);
@@ -1548,7 +1537,7 @@ bool MToFloat32::writeRecoverData(CompactBufferWriter& writer) const {
RToFloat32::RToFloat32(CompactBufferReader& reader) {}
bool RToFloat32::recover(JSContext* cx, SnapshotIterator& iter) const {
- double num = iter.read().toNumber();
+ double num = iter.readNumber();
double result = js::RoundFloat32(num);
iter.storeInstructionResult(DoubleValue(result));
@@ -1588,7 +1577,7 @@ bool MNewObject::writeRecoverData(CompactBufferWriter& writer) const {
RNewObject::RNewObject(CompactBufferReader& reader) {}
bool RNewObject::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject templateObject(cx, &iter.read().toObject());
+ RootedObject templateObject(cx, iter.readObject());
// See CodeGenerator::visitNewObjectVMCall.
// Note that recover instructions are only used if mode == ObjectCreate.
@@ -1622,8 +1611,7 @@ RNewPlainObject::RNewPlainObject(CompactBufferReader& reader) {
}
bool RNewPlainObject::recover(JSContext* cx, SnapshotIterator& iter) const {
- Rooted<SharedShape*> shape(cx,
- &iter.read().toGCCellPtr().as<Shape>().asShared());
+ Rooted<SharedShape*> shape(cx, &iter.readGCCellPtr().as<Shape>().asShared());
// See CodeGenerator::visitNewPlainObject.
JSObject* resultObject =
@@ -1676,7 +1664,7 @@ bool MNewTypedArray::writeRecoverData(CompactBufferWriter& writer) const {
RNewTypedArray::RNewTypedArray(CompactBufferReader& reader) {}
bool RNewTypedArray::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject templateObject(cx, &iter.read().toObject());
+ RootedObject templateObject(cx, iter.readObject());
size_t length = templateObject.as<FixedLengthTypedArrayObject>()->length();
MOZ_ASSERT(length <= INT32_MAX,
@@ -1704,7 +1692,7 @@ RNewArray::RNewArray(CompactBufferReader& reader) {
}
bool RNewArray::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject templateObject(cx, &iter.read().toObject());
+ RootedObject templateObject(cx, iter.readObject());
Rooted<Shape*> shape(cx, templateObject->shape());
ArrayObject* resultObject = NewArrayWithShape(cx, count_, shape);
@@ -1728,7 +1716,7 @@ RNewIterator::RNewIterator(CompactBufferReader& reader) {
}
bool RNewIterator::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject templateObject(cx, &iter.read().toObject());
+ RootedObject templateObject(cx, iter.readObject());
JSObject* resultObject = nullptr;
switch (MNewIterator::Type(type_)) {
@@ -1760,8 +1748,8 @@ bool MLambda::writeRecoverData(CompactBufferWriter& writer) const {
RLambda::RLambda(CompactBufferReader& reader) {}
bool RLambda::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject scopeChain(cx, &iter.read().toObject());
- RootedFunction fun(cx, &iter.read().toObject().as<JSFunction>());
+ RootedObject scopeChain(cx, iter.readObject());
+ RootedFunction fun(cx, &iter.readObject()->as<JSFunction>());
JSObject* resultObject = js::Lambda(cx, fun, scopeChain);
if (!resultObject) {
@@ -1781,9 +1769,9 @@ bool MFunctionWithProto::writeRecoverData(CompactBufferWriter& writer) const {
RFunctionWithProto::RFunctionWithProto(CompactBufferReader& reader) {}
bool RFunctionWithProto::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject scopeChain(cx, &iter.read().toObject());
- RootedObject prototype(cx, &iter.read().toObject());
- RootedFunction fun(cx, &iter.read().toObject().as<JSFunction>());
+ RootedObject scopeChain(cx, iter.readObject());
+ RootedObject prototype(cx, iter.readObject());
+ RootedFunction fun(cx, &iter.readObject()->as<JSFunction>());
JSObject* resultObject =
js::FunWithProtoOperation(cx, fun, scopeChain, prototype);
@@ -1804,7 +1792,7 @@ bool MNewCallObject::writeRecoverData(CompactBufferWriter& writer) const {
RNewCallObject::RNewCallObject(CompactBufferReader& reader) {}
bool RNewCallObject::recover(JSContext* cx, SnapshotIterator& iter) const {
- Rooted<CallObject*> templateObj(cx, &iter.read().toObject().as<CallObject>());
+ Rooted<CallObject*> templateObj(cx, &iter.readObject()->as<CallObject>());
Rooted<SharedShape*> shape(cx, templateObj->sharedShape());
@@ -1832,7 +1820,7 @@ bool MObjectKeys::writeRecoverData(CompactBufferWriter& writer) const {
RObjectKeys::RObjectKeys(CompactBufferReader& reader) {}
bool RObjectKeys::recover(JSContext* cx, SnapshotIterator& iter) const {
- Rooted<JSObject*> obj(cx, &iter.read().toObject());
+ Rooted<JSObject*> obj(cx, iter.readObject());
JSObject* resultKeys = ObjectKeys(cx, obj);
if (!resultKeys) {
@@ -1855,7 +1843,7 @@ RObjectState::RObjectState(CompactBufferReader& reader) {
}
bool RObjectState::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedObject object(cx, &iter.read().toObject());
+ RootedObject object(cx, iter.readObject());
Handle<NativeObject*> nativeObject = object.as<NativeObject>();
MOZ_ASSERT(!Watchtower::watchesPropertyModification(nativeObject));
MOZ_ASSERT(nativeObject->slotSpan() == numSlots());
@@ -1881,8 +1869,10 @@ RArrayState::RArrayState(CompactBufferReader& reader) {
}
bool RArrayState::recover(JSContext* cx, SnapshotIterator& iter) const {
- ArrayObject* object = &iter.read().toObject().as<ArrayObject>();
- uint32_t initLength = iter.read().toInt32();
+ ArrayObject* object = &iter.readObject()->as<ArrayObject>();
+
+ // Int32 because |initLength| is computed from MConstant.
+ uint32_t initLength = iter.readInt32();
MOZ_ASSERT(object->getDenseInitializedLength() == 0,
"initDenseElement call below relies on this");
@@ -1903,37 +1893,6 @@ bool RArrayState::recover(JSContext* cx, SnapshotIterator& iter) const {
return true;
}
-bool MSetArrayLength::writeRecoverData(CompactBufferWriter& writer) const {
- MOZ_ASSERT(canRecoverOnBailout());
- // For simplicity, we capture directly the object instead of the elements
- // pointer.
- MOZ_ASSERT(elements()->type() != MIRType::Elements);
- writer.writeUnsigned(uint32_t(RInstruction::Recover_SetArrayLength));
- return true;
-}
-
-bool MSetArrayLength::canRecoverOnBailout() const {
- return isRecoveredOnBailout();
-}
-
-RSetArrayLength::RSetArrayLength(CompactBufferReader& reader) {}
-
-bool RSetArrayLength::recover(JSContext* cx, SnapshotIterator& iter) const {
- Rooted<ArrayObject*> obj(cx, &iter.read().toObject().as<ArrayObject>());
- RootedValue len(cx, iter.read());
-
- RootedId id(cx, NameToId(cx->names().length));
- Rooted<PropertyDescriptor> desc(
- cx, PropertyDescriptor::Data(len, JS::PropertyAttribute::Writable));
- ObjectOpResult error;
- if (!ArraySetLength(cx, obj, id, desc, error)) {
- return false;
- }
-
- iter.storeInstructionResult(ObjectValue(*obj));
- return true;
-}
-
bool MAssertRecoveredOnBailout::writeRecoverData(
CompactBufferWriter& writer) const {
MOZ_ASSERT(canRecoverOnBailout());
@@ -1966,9 +1925,9 @@ RStringReplace::RStringReplace(CompactBufferReader& reader) {
}
bool RStringReplace::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedString string(cx, iter.read().toString());
- RootedString pattern(cx, iter.read().toString());
- RootedString replace(cx, iter.read().toString());
+ RootedString string(cx, iter.readString());
+ RootedString pattern(cx, iter.readString());
+ RootedString replace(cx, iter.readString());
JSString* result =
isFlatReplacement_
@@ -1992,9 +1951,20 @@ bool MSubstr::writeRecoverData(CompactBufferWriter& writer) const {
RSubstr::RSubstr(CompactBufferReader& reader) {}
bool RSubstr::recover(JSContext* cx, SnapshotIterator& iter) const {
- RootedString str(cx, iter.read().toString());
- int32_t begin = iter.read().toInt32();
- int32_t length = iter.read().toInt32();
+ RootedString str(cx, iter.readString());
+
+ // Int32 because |begin| is computed from MStringTrimStartIndex, MConstant,
+ // or CallSubstringKernelResult.
+ int32_t begin = iter.readInt32();
+
+ // |length| is computed from MSub(truncated), MStringTrimEndIndex, or
+ // CallSubstringKernelResult. The current MSub inputs won't overflow, so when
+ // RSub recovers the MSub instruction, the input will be representable as an
+ // Int32. This is only true as long as RSub calls |js::SubOperation|, which in
+ // turn calls |JS::Value::setNumber|. We don't want to rely on this exact call
+ // sequence, so instead use |readNumber| here and then release-assert the
+ // number is exactly representable as an Int32.
+ int32_t length = mozilla::ReleaseAssertedCast<int32_t>(iter.readNumber());
JSString* result = SubstringKernel(cx, str, begin, length);
if (!result) {
@@ -2014,10 +1984,11 @@ bool MAtomicIsLockFree::writeRecoverData(CompactBufferWriter& writer) const {
RAtomicIsLockFree::RAtomicIsLockFree(CompactBufferReader& reader) {}
bool RAtomicIsLockFree::recover(JSContext* cx, SnapshotIterator& iter) const {
- Value operand = iter.read();
- MOZ_ASSERT(operand.isInt32());
+ double dsize = JS::ToInteger(iter.readNumber());
- bool result = AtomicOperations::isLockfreeJS(operand.toInt32());
+ int32_t size;
+ bool result = mozilla::NumberEqualsInt32(dsize, &size) &&
+ AtomicOperations::isLockfreeJS(size);
iter.storeInstructionResult(BooleanValue(result));
return true;
}
@@ -2031,10 +2002,12 @@ bool MBigIntAsIntN::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntAsIntN::RBigIntAsIntN(CompactBufferReader& reader) {}
bool RBigIntAsIntN::recover(JSContext* cx, SnapshotIterator& iter) const {
- int32_t bits = iter.read().toInt32();
- RootedBigInt input(cx, iter.read().toBigInt());
-
+ // Int32 because |bits| is computed from MGuardInt32IsNonNegative.
+ int32_t bits = iter.readInt32();
MOZ_ASSERT(bits >= 0);
+
+ RootedBigInt input(cx, iter.readBigInt());
+
BigInt* result = BigInt::asIntN(cx, input, bits);
if (!result) {
return false;
@@ -2053,10 +2026,12 @@ bool MBigIntAsUintN::writeRecoverData(CompactBufferWriter& writer) const {
RBigIntAsUintN::RBigIntAsUintN(CompactBufferReader& reader) {}
bool RBigIntAsUintN::recover(JSContext* cx, SnapshotIterator& iter) const {
- int32_t bits = iter.read().toInt32();
- RootedBigInt input(cx, iter.read().toBigInt());
-
+ // Int32 because |bits| is computed from MGuardInt32IsNonNegative.
+ int32_t bits = iter.readInt32();
MOZ_ASSERT(bits >= 0);
+
+ RootedBigInt input(cx, iter.readBigInt());
+
BigInt* result = BigInt::asUintN(cx, input, bits);
if (!result) {
return false;
@@ -2077,7 +2052,7 @@ RCreateArgumentsObject::RCreateArgumentsObject(CompactBufferReader& reader) {}
bool RCreateArgumentsObject::recover(JSContext* cx,
SnapshotIterator& iter) const {
- RootedObject callObject(cx, &iter.read().toObject());
+ RootedObject callObject(cx, iter.readObject());
RootedObject result(
cx, ArgumentsObject::createForIon(cx, iter.frame(), callObject));
if (!result) {
@@ -2104,8 +2079,8 @@ RCreateInlinedArgumentsObject::RCreateInlinedArgumentsObject(
bool RCreateInlinedArgumentsObject::recover(JSContext* cx,
SnapshotIterator& iter) const {
- RootedObject callObject(cx, &iter.read().toObject());
- RootedFunction callee(cx, &iter.read().toObject().as<JSFunction>());
+ RootedObject callObject(cx, iter.readObject());
+ RootedFunction callee(cx, &iter.readObject()->as<JSFunction>());
JS::RootedValueArray<ArgumentsObject::MaxInlinedArgs> argsArray(cx);
for (uint32_t i = 0; i < numActuals_; i++) {
@@ -2136,7 +2111,8 @@ RRest::RRest(CompactBufferReader& reader) {
bool RRest::recover(JSContext* cx, SnapshotIterator& iter) const {
JitFrameLayout* frame = iter.frame();
- uint32_t numActuals = iter.read().toInt32();
+ // Int32 because |numActuals| is computed from MArgumentsLength.
+ uint32_t numActuals = iter.readInt32();
MOZ_ASSERT(numActuals == frame->numActualArgs());
uint32_t numFormals = numFormals_;
diff --git a/js/src/jit/Recover.h b/js/src/jit/Recover.h
index 7cc46c636d..878204de83 100644
--- a/js/src/jit/Recover.h
+++ b/js/src/jit/Recover.h
@@ -129,7 +129,6 @@ namespace jit {
_(ObjectKeys) \
_(ObjectState) \
_(ArrayState) \
- _(SetArrayLength) \
_(AtomicIsLockFree) \
_(BigIntAsIntN) \
_(BigIntAsUintN) \
@@ -882,14 +881,6 @@ class RArrayState final : public RInstruction {
SnapshotIterator& iter) const override;
};
-class RSetArrayLength final : public RInstruction {
- public:
- RINSTRUCTION_HEADER_NUM_OP_(SetArrayLength, 2)
-
- [[nodiscard]] bool recover(JSContext* cx,
- SnapshotIterator& iter) const override;
-};
-
class RAtomicIsLockFree final : public RInstruction {
public:
RINSTRUCTION_HEADER_NUM_OP_(AtomicIsLockFree, 1)
diff --git a/js/src/jit/Trampoline.cpp b/js/src/jit/Trampoline.cpp
index 85661784a7..e6d0cd31c9 100644
--- a/js/src/jit/Trampoline.cpp
+++ b/js/src/jit/Trampoline.cpp
@@ -96,18 +96,13 @@ void JitRuntime::generateProfilerExitFrameTailStub(MacroAssembler& masm,
// |
// ^--- IonICCall <---- Ion
// |
- // ^--- Arguments Rectifier
- // | ^
- // | |
- // | ^--- Ion
- // | |
- // | ^--- Baseline Stub <---- Baseline
- // | |
- // | ^--- Entry Frame (CppToJSJit or WasmToJSJit)
+ // ^--- Entry Frame (BaselineInterpreter) (unwrapped)
// |
- // ^--- Entry Frame (CppToJSJit or WasmToJSJit)
+ // ^--- Arguments Rectifier (unwrapped)
+ // |
+ // ^--- Trampoline Native (unwrapped)
// |
- // ^--- Entry Frame (BaselineInterpreter)
+ // ^--- Entry Frame (CppToJSJit or WasmToJSJit)
//
// NOTE: Keep this in sync with JSJitProfilingFrameIterator::moveToNextFrame!
@@ -153,6 +148,7 @@ void JitRuntime::generateProfilerExitFrameTailStub(MacroAssembler& masm,
Label handle_BaselineOrIonJS;
Label handle_BaselineStub;
Label handle_Rectifier;
+ Label handle_TrampolineNative;
Label handle_BaselineInterpreterEntry;
Label handle_IonICCall;
Label handle_Entry;
@@ -176,6 +172,8 @@ void JitRuntime::generateProfilerExitFrameTailStub(MacroAssembler& masm,
&handle_BaselineOrIonJS);
masm.branch32(Assembler::Equal, scratch, Imm32(FrameType::IonICCall),
&handle_IonICCall);
+ masm.branch32(Assembler::Equal, scratch, Imm32(FrameType::TrampolineNative),
+ &handle_TrampolineNative);
masm.branch32(Assembler::Equal, scratch, Imm32(FrameType::WasmToJSJit),
&handle_Entry);
@@ -237,9 +235,21 @@ void JitRuntime::generateProfilerExitFrameTailStub(MacroAssembler& masm,
// There can be multiple previous frame types so just "unwrap" the arguments
// rectifier frame and try again.
masm.loadPtr(Address(fpScratch, CallerFPOffset), fpScratch);
- emitAssertPrevFrameType(fpScratch, scratch,
- {FrameType::IonJS, FrameType::BaselineStub,
- FrameType::CppToJSJit, FrameType::WasmToJSJit});
+ emitAssertPrevFrameType(
+ fpScratch, scratch,
+ {FrameType::IonJS, FrameType::BaselineStub, FrameType::TrampolineNative,
+ FrameType::CppToJSJit, FrameType::WasmToJSJit});
+ masm.jump(&again);
+ }
+
+ masm.bind(&handle_TrampolineNative);
+ {
+ // Unwrap this frame, similar to arguments rectifier frames.
+ masm.loadPtr(Address(fpScratch, CallerFPOffset), fpScratch);
+ emitAssertPrevFrameType(
+ fpScratch, scratch,
+ {FrameType::IonJS, FrameType::BaselineStub, FrameType::Rectifier,
+ FrameType::CppToJSJit, FrameType::WasmToJSJit});
masm.jump(&again);
}
diff --git a/js/src/jit/TrampolineNatives.cpp b/js/src/jit/TrampolineNatives.cpp
new file mode 100644
index 0000000000..0bde6d9985
--- /dev/null
+++ b/js/src/jit/TrampolineNatives.cpp
@@ -0,0 +1,274 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/TrampolineNatives.h"
+
+#include "jit/CalleeToken.h"
+#include "jit/Ion.h"
+#include "jit/JitCommon.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "jit/PerfSpewer.h"
+#include "js/CallArgs.h"
+#include "js/experimental/JitInfo.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "vm/Activation-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+#define ADD_NATIVE(native) \
+ const JSJitInfo js::jit::JitInfo_##native{ \
+ {nullptr}, \
+ {uint16_t(TrampolineNative::native)}, \
+ {0}, \
+ JSJitInfo::TrampolineNative};
+TRAMPOLINE_NATIVE_LIST(ADD_NATIVE)
+#undef ADD_NATIVE
+
+void js::jit::SetTrampolineNativeJitEntry(JSContext* cx, JSFunction* fun,
+ TrampolineNative native) {
+ if (!cx->runtime()->jitRuntime()) {
+ // No JIT support so there's no trampoline.
+ return;
+ }
+ void** entry = cx->runtime()->jitRuntime()->trampolineNativeJitEntry(native);
+ MOZ_ASSERT(entry);
+ MOZ_ASSERT(*entry);
+ fun->setTrampolineNativeJitEntry(entry);
+}
+
+uint32_t JitRuntime::generateArraySortTrampoline(MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateArraySortTrampoline");
+
+ const uint32_t offset = startTrampolineCode(masm);
+
+ // The stack for the trampoline frame will look like this:
+ //
+ // [TrampolineNativeFrameLayout]
+ // * this and arguments passed by the caller
+ // * CalleeToken
+ // * Descriptor
+ // * Return Address
+ // * Saved frame pointer <= FramePointer
+ // [ArraySortData]
+ // * ...
+ // * Comparator this + argument Values --+ -> comparator JitFrameLayout
+ // * Comparator (CalleeToken) |
+ // * Descriptor ----+ <= StackPointer
+ //
+ // The call to the comparator pushes the return address and the frame pointer,
+ // so we check the alignment after pushing these two pointers.
+ constexpr size_t FrameSize = sizeof(ArraySortData);
+ constexpr size_t PushedByCall = 2 * sizeof(void*);
+ static_assert((FrameSize + PushedByCall) % JitStackAlignment == 0);
+
+ // Assert ArraySortData comparator data matches JitFrameLayout.
+ static_assert(PushedByCall + ArraySortData::offsetOfDescriptor() ==
+ JitFrameLayout::offsetOfDescriptor());
+ static_assert(PushedByCall + ArraySortData::offsetOfComparator() ==
+ JitFrameLayout::offsetOfCalleeToken());
+ static_assert(PushedByCall + ArraySortData::offsetOfComparatorThis() ==
+ JitFrameLayout::offsetOfThis());
+ static_assert(PushedByCall + ArraySortData::offsetOfComparatorArgs() ==
+ JitFrameLayout::offsetOfActualArgs());
+ static_assert(CalleeToken_Function == 0,
+ "JSFunction* is valid CalleeToken for non-constructor calls");
+
+ // Compute offsets from FramePointer.
+ constexpr int32_t ComparatorOffset =
+ -int32_t(FrameSize) + ArraySortData::offsetOfComparator();
+ constexpr int32_t RvalOffset =
+ -int32_t(FrameSize) + ArraySortData::offsetOfComparatorReturnValue();
+ constexpr int32_t DescriptorOffset =
+ -int32_t(FrameSize) + ArraySortData::offsetOfDescriptor();
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+ masm.push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.takeUnchecked(ReturnReg);
+ regs.takeUnchecked(JSReturnOperand);
+ Register temp0 = regs.takeAny();
+ Register temp1 = regs.takeAny();
+ Register temp2 = regs.takeAny();
+
+ // Reserve space and check alignment of the comparator frame.
+ masm.reserveStack(FrameSize);
+ masm.assertStackAlignment(JitStackAlignment, PushedByCall);
+
+ // Trampoline control flow looks like this:
+ //
+ // call ArraySortFromJit
+ // goto checkReturnValue
+ // call_comparator:
+ // call comparator
+ // call ArraySortData::sortWithComparator
+ // checkReturnValue:
+ // check return value, jump to call_comparator if needed
+ // return rval
+
+ auto pushExitFrame = [&](Register cxReg, Register scratchReg) {
+ MOZ_ASSERT(masm.framePushed() == FrameSize);
+ masm.PushFrameDescriptor(FrameType::TrampolineNative);
+ masm.Push(ImmWord(0)); // Fake return address.
+ masm.Push(FramePointer);
+ masm.enterFakeExitFrame(cxReg, scratchReg, ExitFrameType::Bare);
+ };
+
+ // Call ArraySortFromJit.
+ using Fn1 = ArraySortResult (*)(JSContext* cx,
+ jit::TrampolineNativeFrameLayout* frame);
+ masm.loadJSContext(temp0);
+ pushExitFrame(temp0, temp1);
+ masm.setupAlignedABICall();
+ masm.passABIArg(temp0);
+ masm.passABIArg(FramePointer);
+ masm.callWithABI<Fn1, ArraySortFromJit>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Check return value.
+ Label checkReturnValue;
+ masm.jump(&checkReturnValue);
+ masm.setFramePushed(FrameSize);
+
+ // Call the comparator. Store the frame descriptor before each call to ensure
+ // the HASCACHEDSAVEDFRAME_BIT flag from a previous call is cleared.
+ uintptr_t jitCallDescriptor = MakeFrameDescriptorForJitCall(
+ jit::FrameType::TrampolineNative, ArraySortData::ComparatorActualArgs);
+ Label callDone, jitCallFast, jitCallSlow;
+ masm.bind(&jitCallFast);
+ {
+ masm.storePtr(ImmWord(jitCallDescriptor),
+ Address(FramePointer, DescriptorOffset));
+ masm.loadPtr(Address(FramePointer, ComparatorOffset), temp0);
+ masm.loadJitCodeRaw(temp0, temp1);
+ masm.callJit(temp1);
+ masm.jump(&callDone);
+ }
+ masm.bind(&jitCallSlow);
+ {
+ masm.storePtr(ImmWord(jitCallDescriptor),
+ Address(FramePointer, DescriptorOffset));
+ masm.loadPtr(Address(FramePointer, ComparatorOffset), temp0);
+ masm.loadJitCodeRaw(temp0, temp1);
+ masm.switchToObjectRealm(temp0, temp2);
+
+ // Handle arguments underflow.
+ Label noUnderflow, restoreRealm;
+ masm.loadFunctionArgCount(temp0, temp0);
+ masm.branch32(Assembler::BelowOrEqual, temp0,
+ Imm32(ArraySortData::ComparatorActualArgs), &noUnderflow);
+ {
+ Label rectifier;
+ bindLabelToOffset(&rectifier, argumentsRectifierOffset_);
+ masm.call(&rectifier);
+ masm.jump(&restoreRealm);
+ }
+ masm.bind(&noUnderflow);
+ masm.callJit(temp1);
+
+ masm.bind(&restoreRealm);
+ Address calleeToken(FramePointer,
+ TrampolineNativeFrameLayout::offsetOfCalleeToken());
+ masm.loadFunctionFromCalleeToken(calleeToken, temp0);
+ masm.switchToObjectRealm(temp0, temp1);
+ }
+
+ // Store the comparator's return value.
+ masm.bind(&callDone);
+ masm.storeValue(JSReturnOperand, Address(FramePointer, RvalOffset));
+
+ // Call ArraySortData::sortWithComparator.
+ using Fn2 = ArraySortResult (*)(ArraySortData* data);
+ masm.moveStackPtrTo(temp2);
+ masm.loadJSContext(temp0);
+ pushExitFrame(temp0, temp1);
+ masm.setupAlignedABICall();
+ masm.passABIArg(temp2);
+ masm.callWithABI<Fn2, ArraySortData::sortWithComparator>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Check return value.
+ masm.bind(&checkReturnValue);
+ masm.branch32(Assembler::Equal, ReturnReg,
+ Imm32(int32_t(ArraySortResult::Failure)), masm.failureLabel());
+ masm.freeStack(ExitFrameLayout::SizeWithFooter());
+ masm.branch32(Assembler::Equal, ReturnReg,
+ Imm32(int32_t(ArraySortResult::CallJSSameRealmNoRectifier)),
+ &jitCallFast);
+ masm.branch32(Assembler::Equal, ReturnReg,
+ Imm32(int32_t(ArraySortResult::CallJS)), &jitCallSlow);
+#ifdef DEBUG
+ Label ok;
+ masm.branch32(Assembler::Equal, ReturnReg,
+ Imm32(int32_t(ArraySortResult::Done)), &ok);
+ masm.assumeUnreachable("Unexpected return value");
+ masm.bind(&ok);
+#endif
+
+ masm.loadValue(Address(FramePointer, RvalOffset), JSReturnOperand);
+ masm.moveToStackPtr(FramePointer);
+ masm.pop(FramePointer);
+ masm.ret();
+
+ return offset;
+}
+
+void JitRuntime::generateTrampolineNatives(
+ MacroAssembler& masm, TrampolineNativeJitEntryOffsets& offsets,
+ PerfSpewerRangeRecorder& rangeRecorder) {
+ offsets[TrampolineNative::ArraySort] = generateArraySortTrampoline(masm);
+ rangeRecorder.recordOffset("Trampoline: ArraySort");
+}
+
+bool jit::CallTrampolineNativeJitCode(JSContext* cx, TrampolineNative native,
+ CallArgs& args) {
+ // Use the EnterJit trampoline to enter the native's trampoline code.
+
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return false;
+ }
+
+ MOZ_ASSERT(!args.isConstructing());
+ CalleeToken calleeToken = CalleeToToken(&args.callee().as<JSFunction>(),
+ /* constructing = */ false);
+
+ Value* maxArgv = args.array() - 1; // -1 to include |this|
+ size_t maxArgc = args.length() + 1;
+
+ Rooted<Value> result(cx, Int32Value(args.length()));
+
+ AssertRealmUnchanged aru(cx);
+ ActivationEntryMonitor entryMonitor(cx, calleeToken);
+ JitActivation activation(cx);
+
+ EnterJitCode enter = cx->runtime()->jitRuntime()->enterJit();
+ void* code = *cx->runtime()->jitRuntime()->trampolineNativeJitEntry(native);
+
+ CALL_GENERATED_CODE(enter, code, maxArgc, maxArgv, /* osrFrame = */ nullptr,
+ calleeToken, /* envChain = */ nullptr,
+ /* osrNumStackValues = */ 0, result.address());
+
+ // Ensure the counter was reset to zero after exiting from JIT code.
+ MOZ_ASSERT(!cx->isInUnsafeRegion());
+
+ // Release temporary buffer used for OSR into Ion.
+ cx->runtime()->jitRuntime()->freeIonOsrTempData();
+
+ if (result.isMagic()) {
+ MOZ_ASSERT(result.isMagic(JS_ION_ERROR));
+ return false;
+ }
+
+ args.rval().set(result);
+ return true;
+}
diff --git a/js/src/jit/TrampolineNatives.h b/js/src/jit/TrampolineNatives.h
new file mode 100644
index 0000000000..f71a3b707d
--- /dev/null
+++ b/js/src/jit/TrampolineNatives.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_TrampolineNatives_h
+#define jit_TrampolineNatives_h
+
+#include <stdint.h>
+
+#include "js/TypeDecls.h"
+
+// [SMDOC] Trampoline Natives
+//
+// Trampoline natives are JS builtin functions that use the NATIVE_JIT_ENTRY
+// mechanism. This means they have two implementations: the usual native C++
+// implementation and a generated JIT trampoline that JIT callers can call
+// directly using the JIT ABI calling convention. (This is very similar to how
+// calls from JS to WebAssembly are optimized in the JITs.)
+//
+// The JIT trampoline lets us implement some natives in a more efficient way. In
+// particular, it's much faster to call (other) JS functions with JIT code from
+// a JIT trampoline than from C++ code.
+//
+// Trampoline frames use FrameType::TrampolineNative.
+
+class JSJitInfo;
+
+namespace JS {
+class CallArgs;
+} // namespace JS
+
+// List of all trampoline natives.
+#define TRAMPOLINE_NATIVE_LIST(_) _(ArraySort)
+
+namespace js {
+namespace jit {
+
+enum class TrampolineNative : uint16_t {
+#define ADD_NATIVE(native) native,
+ TRAMPOLINE_NATIVE_LIST(ADD_NATIVE)
+#undef ADD_NATIVE
+ Count
+};
+
+#define ADD_NATIVE(native) extern const JSJitInfo JitInfo_##native;
+TRAMPOLINE_NATIVE_LIST(ADD_NATIVE)
+#undef ADD_NATIVE
+
+void SetTrampolineNativeJitEntry(JSContext* cx, JSFunction* fun,
+ TrampolineNative native);
+
+bool CallTrampolineNativeJitCode(JSContext* cx, TrampolineNative native,
+ JS::CallArgs& args);
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_TrampolineNatives_h */
diff --git a/js/src/jit/VMFunctionList-inl.h b/js/src/jit/VMFunctionList-inl.h
index d87b010df6..99b98f17ed 100644
--- a/js/src/jit/VMFunctionList-inl.h
+++ b/js/src/jit/VMFunctionList-inl.h
@@ -211,6 +211,7 @@ namespace jit {
_(InterpretResume, js::jit::InterpretResume) \
_(InterruptCheck, js::jit::InterruptCheck) \
_(InvokeFunction, js::jit::InvokeFunction) \
+ _(InvokeNativeFunction, js::jit::InvokeNativeFunction) \
_(IonBinaryArithICUpdate, js::jit::IonBinaryArithIC::update) \
_(IonBindNameICUpdate, js::jit::IonBindNameIC::update) \
_(IonCheckPrivateFieldICUpdate, js::jit::IonCheckPrivateFieldIC::update) \
diff --git a/js/src/jit/VMFunctions.cpp b/js/src/jit/VMFunctions.cpp
index ed3f63c88c..3ec85a72c2 100644
--- a/js/src/jit/VMFunctions.cpp
+++ b/js/src/jit/VMFunctions.cpp
@@ -545,6 +545,39 @@ bool InvokeFunction(JSContext* cx, HandleObject obj, bool constructing,
return Call(cx, fval, thisv, args, rval);
}
+bool InvokeNativeFunction(JSContext* cx, bool constructing,
+ bool ignoresReturnValue, uint32_t argc, Value* argv,
+ MutableHandleValue rval) {
+ // Ensure argv array is rooted - we may GC in here.
+ size_t numValues = argc + 2 + constructing;
+ RootedExternalValueArray argvRoot(cx, numValues, argv);
+
+ // Data in the argument vector is arranged for a JIT -> C++ call.
+ CallArgs callArgs = CallArgsFromSp(argc + constructing, argv + numValues,
+ constructing, ignoresReturnValue);
+
+ // This function is only called when the callee is a native function.
+ MOZ_ASSERT(callArgs.callee().as<JSFunction>().isNativeWithoutJitEntry());
+
+ if (constructing) {
+ MOZ_ASSERT(callArgs.thisv().isMagic(JS_IS_CONSTRUCTING));
+
+ if (!ConstructFromStack(cx, callArgs)) {
+ return false;
+ }
+
+ MOZ_ASSERT(callArgs.rval().isObject(),
+ "native constructors don't return primitives");
+ } else {
+ if (!CallFromStack(cx, callArgs)) {
+ return false;
+ }
+ }
+
+ rval.set(callArgs.rval());
+ return true;
+}
+
void* GetContextSensitiveInterpreterStub() {
return TlsContext.get()->runtime()->jitRuntime()->interpreterStub().value;
}
@@ -1111,7 +1144,7 @@ bool NormalSuspend(JSContext* cx, HandleObject obj, BaselineFrame* frame,
bool FinalSuspend(JSContext* cx, HandleObject obj, const jsbytecode* pc) {
MOZ_ASSERT(JSOp(*pc) == JSOp::FinalYieldRval);
- AbstractGeneratorObject::finalSuspend(obj);
+ AbstractGeneratorObject::finalSuspend(cx, obj);
return true;
}
diff --git a/js/src/jit/VMFunctions.h b/js/src/jit/VMFunctions.h
index a68dd8279f..b5ac5d700b 100644
--- a/js/src/jit/VMFunctions.h
+++ b/js/src/jit/VMFunctions.h
@@ -354,6 +354,10 @@ struct LastArg<HeadType, TailTypes...> {
uint32_t argc, Value* argv,
MutableHandleValue rval);
+[[nodiscard]] bool InvokeNativeFunction(JSContext* cx, bool constructing,
+ bool ignoresReturnValue, uint32_t argc,
+ Value* argv, MutableHandleValue rval);
+
bool InvokeFromInterpreterStub(JSContext* cx,
InterpreterStubExitFrameLayout* frame);
void* GetContextSensitiveInterpreterStub();
diff --git a/js/src/jit/WarpCacheIRTranspiler.cpp b/js/src/jit/WarpCacheIRTranspiler.cpp
index 9a99e0f5c3..fdaafd00b3 100644
--- a/js/src/jit/WarpCacheIRTranspiler.cpp
+++ b/js/src/jit/WarpCacheIRTranspiler.cpp
@@ -977,7 +977,7 @@ bool WarpCacheIRTranspiler::emitGuardDynamicSlotValue(ObjOperandId objId,
return true;
}
-bool WarpCacheIRTranspiler::emitLoadScriptedProxyHandler(ValOperandId resultId,
+bool WarpCacheIRTranspiler::emitLoadScriptedProxyHandler(ObjOperandId resultId,
ObjOperandId objId) {
MDefinition* obj = getOperand(objId);
@@ -5216,10 +5216,14 @@ bool WarpCacheIRTranspiler::emitLoadOperandResult(ValOperandId inputId) {
}
bool WarpCacheIRTranspiler::emitLoadWrapperTarget(ObjOperandId objId,
- ObjOperandId resultId) {
+ ObjOperandId resultId,
+ bool fallible) {
MDefinition* obj = getOperand(objId);
- auto* ins = MLoadWrapperTarget::New(alloc(), obj);
+ auto* ins = MLoadWrapperTarget::New(alloc(), obj, fallible);
+ if (fallible) {
+ ins->setGuard();
+ }
add(ins);
return defineOperand(resultId, ins);
diff --git a/js/src/jit/arm/Architecture-arm.h b/js/src/jit/arm/Architecture-arm.h
index fa2ae8e0ed..00edac33da 100644
--- a/js/src/jit/arm/Architecture-arm.h
+++ b/js/src/jit/arm/Architecture-arm.h
@@ -32,7 +32,7 @@ namespace jit {
static const int32_t NUNBOX32_TYPE_OFFSET = 4;
static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
// How far forward/back can a jump go? Provide a generous buffer for thunks.
static const uint32_t JumpImmediateRange = 20 * 1024 * 1024;
diff --git a/js/src/jit/arm64/Architecture-arm64.h b/js/src/jit/arm64/Architecture-arm64.h
index 96bbc63848..7101709f18 100644
--- a/js/src/jit/arm64/Architecture-arm64.h
+++ b/js/src/jit/arm64/Architecture-arm64.h
@@ -551,7 +551,7 @@ static const uint32_t SpillSlotSize =
std::max(sizeof(Registers::RegisterContent),
sizeof(FloatRegisters::RegisterContent));
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
// When our only strategy for far jumps is to encode the offset directly, and
// not insert any jump islands during assembly for even further jumps, then the
diff --git a/js/src/jit/arm64/vixl/Cpu-vixl.cpp b/js/src/jit/arm64/vixl/Cpu-vixl.cpp
index 12244e73e4..b425b286ee 100644
--- a/js/src/jit/arm64/vixl/Cpu-vixl.cpp
+++ b/js/src/jit/arm64/vixl/Cpu-vixl.cpp
@@ -214,7 +214,7 @@ CPUFeatures CPU::InferCPUFeaturesFromOS(
for (size_t i = 0; i < kFeatureBitCount; i++) {
if (auxv & (1UL << i)) features.Combine(kFeatureBits[i]);
}
-#elif defined(XP_MACOSX)
+#elif defined(XP_DARWIN)
// Apple processors have kJSCVT, kDotProduct, and kAtomics features.
features.Combine(CPUFeatures::kJSCVT, CPUFeatures::kDotProduct,
CPUFeatures::kAtomics);
diff --git a/js/src/jit/loong64/Architecture-loong64.h b/js/src/jit/loong64/Architecture-loong64.h
index 48745ee37a..29da43272f 100644
--- a/js/src/jit/loong64/Architecture-loong64.h
+++ b/js/src/jit/loong64/Architecture-loong64.h
@@ -335,7 +335,7 @@ static const uint32_t SpillSlotSize =
std::max(sizeof(Registers::RegisterContent),
sizeof(FloatRegisters::RegisterContent));
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
static const uint32_t SizeOfReturnAddressAfterCall = 0;
// When our only strategy for far jumps is to encode the offset directly, and
diff --git a/js/src/jit/mips32/Architecture-mips32.h b/js/src/jit/mips32/Architecture-mips32.h
index 8e186d2c9c..4ce68032b2 100644
--- a/js/src/jit/mips32/Architecture-mips32.h
+++ b/js/src/jit/mips32/Architecture-mips32.h
@@ -20,7 +20,7 @@
namespace js {
namespace jit {
-static const uint32_t ShadowStackSpace = 4 * sizeof(uintptr_t);
+static constexpr uint32_t ShadowStackSpace = 4 * sizeof(uintptr_t);
// These offsets are specific to nunboxing, and capture offsets into the
// components of a js::Value.
diff --git a/js/src/jit/mips64/Architecture-mips64.h b/js/src/jit/mips64/Architecture-mips64.h
index d3db37ea2c..7bf6054a72 100644
--- a/js/src/jit/mips64/Architecture-mips64.h
+++ b/js/src/jit/mips64/Architecture-mips64.h
@@ -20,7 +20,7 @@ namespace js {
namespace jit {
// Shadow stack space is not required on MIPS64.
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
// MIPS64 have 64 bit floating-point coprocessor. There are 32 double
// precision register which can also be used as single precision registers.
diff --git a/js/src/jit/moz.build b/js/src/jit/moz.build
index c0d2d5f2df..c49b4fcd9f 100644
--- a/js/src/jit/moz.build
+++ b/js/src/jit/moz.build
@@ -87,6 +87,7 @@ UNIFIED_SOURCES += [
"Sink.cpp",
"Snapshots.cpp",
"Trampoline.cpp",
+ "TrampolineNatives.cpp",
"TrialInlining.cpp",
"TypePolicy.cpp",
"ValueNumbering.cpp",
diff --git a/js/src/jit/none/Architecture-none.h b/js/src/jit/none/Architecture-none.h
index 2433234fbf..9218404992 100644
--- a/js/src/jit/none/Architecture-none.h
+++ b/js/src/jit/none/Architecture-none.h
@@ -157,7 +157,7 @@ struct FloatRegister {
inline bool hasUnaliasedDouble() { MOZ_CRASH(); }
inline bool hasMultiAlias() { MOZ_CRASH(); }
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
static const uint32_t JumpImmediateRange = INT32_MAX;
#ifdef JS_NUNBOX32
diff --git a/js/src/jit/riscv64/Architecture-riscv64.h b/js/src/jit/riscv64/Architecture-riscv64.h
index c75bd05ff1..8d02e6e806 100644
--- a/js/src/jit/riscv64/Architecture-riscv64.h
+++ b/js/src/jit/riscv64/Architecture-riscv64.h
@@ -494,7 +494,7 @@ FloatRegister::LiveAsIndexableSet<RegTypeName::Any>(SetType set) {
inline bool hasUnaliasedDouble() { return false; }
inline bool hasMultiAlias() { return false; }
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
static const uint32_t JumpImmediateRange = INT32_MAX;
#ifdef JS_NUNBOX32
diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h
index d8b5693d85..74c11bd91b 100644
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -649,14 +649,14 @@ class LApplyArgsGeneric
LIR_HEADER(ApplyArgsGeneric)
LApplyArgsGeneric(const LAllocation& func, const LAllocation& argc,
- const LBoxAllocation& thisv, const LDefinition& tmpobjreg,
- const LDefinition& tmpcopy)
+ const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
+ const LDefinition& tmpCopy)
: LCallInstructionHelper(classOpcode) {
setOperand(0, func);
setOperand(1, argc);
setBoxOperand(ThisIndex, thisv);
- setTemp(0, tmpobjreg);
- setTemp(1, tmpcopy);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
}
MApplyArgs* mir() const { return mir_->toApplyArgs(); }
@@ -712,14 +712,14 @@ class LApplyArrayGeneric
LIR_HEADER(ApplyArrayGeneric)
LApplyArrayGeneric(const LAllocation& func, const LAllocation& elements,
- const LBoxAllocation& thisv, const LDefinition& tmpobjreg,
- const LDefinition& tmpcopy)
+ const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
+ const LDefinition& tmpCopy)
: LCallInstructionHelper(classOpcode) {
setOperand(0, func);
setOperand(1, elements);
setBoxOperand(ThisIndex, thisv);
- setTemp(0, tmpobjreg);
- setTemp(1, tmpcopy);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
}
MApplyArray* mir() const { return mir_->toApplyArray(); }
@@ -746,13 +746,13 @@ class LConstructArgsGeneric
LConstructArgsGeneric(const LAllocation& func, const LAllocation& argc,
const LAllocation& newTarget,
const LBoxAllocation& thisv,
- const LDefinition& tmpobjreg)
+ const LDefinition& tmpObjReg)
: LCallInstructionHelper(classOpcode) {
setOperand(0, func);
setOperand(1, argc);
setOperand(2, newTarget);
setBoxOperand(ThisIndex, thisv);
- setTemp(0, tmpobjreg);
+ setTemp(0, tmpObjReg);
}
MConstructArgs* mir() const { return mir_->toConstructArgs(); }
@@ -784,13 +784,13 @@ class LConstructArrayGeneric
LConstructArrayGeneric(const LAllocation& func, const LAllocation& elements,
const LAllocation& newTarget,
const LBoxAllocation& thisv,
- const LDefinition& tmpobjreg)
+ const LDefinition& tmpObjReg)
: LCallInstructionHelper(classOpcode) {
setOperand(0, func);
setOperand(1, elements);
setOperand(2, newTarget);
setBoxOperand(ThisIndex, thisv);
- setTemp(0, tmpobjreg);
+ setTemp(0, tmpObjReg);
}
MConstructArray* mir() const { return mir_->toConstructArray(); }
@@ -816,6 +816,164 @@ class LConstructArrayGeneric
const LAllocation* getTempForArgCopy() { return getOperand(2); }
};
+class LApplyArgsNative
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ public:
+ LIR_HEADER(ApplyArgsNative)
+
+ LApplyArgsNative(const LAllocation& func, const LAllocation& argc,
+ const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
+ const LDefinition& tmpCopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, argc);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
+ }
+
+ static constexpr bool isConstructing() { return false; }
+
+ MApplyArgs* mir() const { return mir_->toApplyArgs(); }
+
+ uint32_t numExtraFormals() const { return mir()->numExtraFormals(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getArgc() { return getOperand(1); }
+
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempForArgCopy() { return getTemp(1); }
+};
+
+class LApplyArgsObjNative
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ public:
+ LIR_HEADER(ApplyArgsObjNative)
+
+ LApplyArgsObjNative(const LAllocation& func, const LAllocation& argsObj,
+ const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
+ const LDefinition& tmpCopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, argsObj);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
+ }
+
+ static constexpr bool isConstructing() { return false; }
+
+ MApplyArgsObj* mir() const { return mir_->toApplyArgsObj(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getArgsObj() { return getOperand(1); }
+
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempForArgCopy() { return getTemp(1); }
+
+ // argc is mapped to the same register as argsObj: argc becomes live as
+ // argsObj is dying, all registers are calltemps.
+ const LAllocation* getArgc() { return getOperand(1); }
+};
+
+class LApplyArrayNative
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ public:
+ LIR_HEADER(ApplyArrayNative)
+
+ LApplyArrayNative(const LAllocation& func, const LAllocation& elements,
+ const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
+ const LDefinition& tmpCopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, elements);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
+ }
+
+ static constexpr bool isConstructing() { return false; }
+
+ MApplyArray* mir() const { return mir_->toApplyArray(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getElements() { return getOperand(1); }
+
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempForArgCopy() { return getTemp(1); }
+
+ // argc is mapped to the same register as elements: argc becomes live as
+ // elements is dying, all registers are calltemps.
+ const LAllocation* getArgc() { return getOperand(1); }
+};
+
+class LConstructArgsNative : public LCallInstructionHelper<BOX_PIECES, 3, 2> {
+ public:
+ LIR_HEADER(ConstructArgsNative)
+
+ LConstructArgsNative(const LAllocation& func, const LAllocation& argc,
+ const LAllocation& newTarget,
+ const LDefinition& tmpObjReg, const LDefinition& tmpCopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, argc);
+ setOperand(2, newTarget);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
+ }
+
+ static constexpr bool isConstructing() { return true; }
+
+ MConstructArgs* mir() const { return mir_->toConstructArgs(); }
+
+ uint32_t numExtraFormals() const { return mir()->numExtraFormals(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getArgc() { return getOperand(1); }
+ const LAllocation* getNewTarget() { return getOperand(2); }
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempForArgCopy() { return getTemp(1); }
+};
+
+class LConstructArrayNative : public LCallInstructionHelper<BOX_PIECES, 3, 2> {
+ public:
+ LIR_HEADER(ConstructArrayNative)
+
+ LConstructArrayNative(const LAllocation& func, const LAllocation& elements,
+ const LAllocation& newTarget,
+ const LDefinition& tmpObjReg,
+ const LDefinition& tmpCopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, elements);
+ setOperand(2, newTarget);
+ setTemp(0, tmpObjReg);
+ setTemp(1, tmpCopy);
+ }
+
+ static constexpr bool isConstructing() { return true; }
+
+ MConstructArray* mir() const { return mir_->toConstructArray(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getElements() { return getOperand(1); }
+ const LAllocation* getNewTarget() { return getOperand(2); }
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempForArgCopy() { return getTemp(1); }
+
+ // argc is mapped to the same register as elements: argc becomes live as
+ // elements is dying, all registers are calltemps.
+ const LAllocation* getArgc() { return getOperand(1); }
+};
+
// Takes in either an integer or boolean input and tests it for truthiness.
class LTestIAndBranch : public LControlInstructionHelper<2, 1, 0> {
public:
diff --git a/js/src/jit/wasm32/Architecture-wasm32.h b/js/src/jit/wasm32/Architecture-wasm32.h
index d7726eaa5f..2419591664 100644
--- a/js/src/jit/wasm32/Architecture-wasm32.h
+++ b/js/src/jit/wasm32/Architecture-wasm32.h
@@ -161,7 +161,7 @@ struct FloatRegister {
inline bool hasUnaliasedDouble() { MOZ_CRASH(); }
inline bool hasMultiAlias() { MOZ_CRASH(); }
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
static const uint32_t JumpImmediateRange = INT32_MAX;
#ifdef JS_NUNBOX32
diff --git a/js/src/jit/x86-shared/Architecture-x86-shared.h b/js/src/jit/x86-shared/Architecture-x86-shared.h
index b4701af284..72055efb7d 100644
--- a/js/src/jit/x86-shared/Architecture-x86-shared.h
+++ b/js/src/jit/x86-shared/Architecture-x86-shared.h
@@ -31,9 +31,9 @@ static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
#endif
#if defined(JS_CODEGEN_X64) && defined(_WIN64)
-static const uint32_t ShadowStackSpace = 32;
+static constexpr uint32_t ShadowStackSpace = 32;
#else
-static const uint32_t ShadowStackSpace = 0;
+static constexpr uint32_t ShadowStackSpace = 0;
#endif
static const uint32_t JumpImmediateRange = INT32_MAX;