Diffstat (limited to 'js/src/tests/lib')
-rwxr-xr-x  js/src/tests/lib/jittests.py       48
-rw-r--r--  js/src/tests/lib/manifest.py       19
-rw-r--r--  js/src/tests/lib/results.py        35
-rw-r--r--  js/src/tests/lib/structuredlog.py   5
4 files changed, 76 insertions, 31 deletions
diff --git a/js/src/tests/lib/jittests.py b/js/src/tests/lib/jittests.py
index 7d79ba9f2a..fbd00c0c37 100755
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -399,8 +399,10 @@ class JitTest:
# We may have specified '-a' or '-d' twice: once via --jitflags, once
# via the "|jit-test|" line. Remove dups because they are toggles.
+ # Note: |dict.fromkeys(flags)| is similar to |set(flags)| but it
+ # preserves order.
cmd = prefix + []
- cmd += list(set(self.jitflags))
+ cmd += list(dict.fromkeys(self.jitflags))
# Handle selfhosted XDR file.
if self.selfhosted_xdr_mode != "off":
cmd += [
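As the new comment above notes, dict.fromkeys() de-duplicates while keeping insertion order, unlike set(). A minimal sketch of the difference (the flag values are illustrative):

    # dict keys preserve insertion order (Python 3.7+), so the first
    # occurrence of each toggle flag wins and the relative order is kept.
    flags = ["-a", "--ion-eager", "-a", "-d"]
    print(list(dict.fromkeys(flags)))  # ['-a', '--ion-eager', '-d']
    print(list(set(flags)))            # same flags, but in arbitrary order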
@@ -603,7 +605,12 @@ def print_automation_format(ok, res, slog):
print("INFO (warn-stderr) 2> " + line.strip())
-def print_test_summary(num_tests, failures, complete, doing, options):
+def print_test_summary(num_tests, failures, complete, slow_tests, doing, options):
+ def test_details(res):
+ if options.show_failed:
+ return escape_cmdline(res.cmd)
+ return " ".join(res.test.jitflags + [res.test.relpath_tests])
+
if failures:
if options.write_failures:
try:
@@ -628,21 +635,15 @@ def print_test_summary(num_tests, failures, complete, doing, options):
traceback.print_exc()
sys.stderr.write("---\n")
- def show_test(res):
- if options.show_failed:
- print(" " + escape_cmdline(res.cmd))
- else:
- print(" " + " ".join(res.test.jitflags + [res.test.relpath_tests]))
-
print("FAILURES:")
for res in failures:
if not res.timed_out:
- show_test(res)
+ print(" " + test_details(res))
print("TIMEOUTS:")
for res in failures:
if res.timed_out:
- show_test(res)
+ print(" " + test_details(res))
else:
print(
"PASSED ALL"
@@ -659,6 +660,23 @@ def print_test_summary(num_tests, failures, complete, doing, options):
print("Passed: {:d}".format(num_tests - num_failures))
print("Failed: {:d}".format(num_failures))
+ if num_tests != 0 and options.show_slow:
+ threshold = options.slow_test_threshold
+ fraction_fast = 1 - len(slow_tests) / num_tests
+ print(
+ "{:5.2f}% of tests ran in under {}s".format(fraction_fast * 100, threshold)
+ )
+
+ print("Slowest tests that took longer than {}s:".format(threshold))
+ slow_tests.sort(key=lambda res: res.dt, reverse=True)
+ any = False
+ for i in range(min(len(slow_tests), 20)):
+ res = slow_tests[i]
+ print(" {:6.2f} {}".format(res.dt, test_details(res)))
+ any = True
+ if not any:
+ print("None")
+
return not failures
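For reference, a self-contained sketch of the slow-test summary the new block produces; the SlowResult record and the sample values are hypothetical, and only the percentage and top-20 logic mirror the patch:

    from collections import namedtuple

    # Hypothetical stand-in for the jit-test result objects used above.
    SlowResult = namedtuple("SlowResult", ["dt", "name"])

    def print_slow_summary(slow_tests, num_tests, threshold):
        # Share of tests that finished under the threshold.
        fraction_fast = 1 - len(slow_tests) / num_tests
        print("{:5.2f}% of tests ran in under {}s".format(fraction_fast * 100, threshold))
        print("Slowest tests that took longer than {}s:".format(threshold))
        # Report at most the 20 slowest tests, slowest first.
        for res in sorted(slow_tests, key=lambda res: res.dt, reverse=True)[:20]:
            print("  {:6.2f} {}".format(res.dt, res.name))
        if not slow_tests:
            print("None")

    # Illustrative values only.
    print_slow_summary([SlowResult(12.3, "example/test.js")], num_tests=100, threshold=10)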
@@ -684,11 +702,14 @@ def process_test_results(results, num_tests, pb, options, slog):
complete = False
output_dict = {}
doing = "before starting"
+ slow_tests = []
if num_tests == 0:
pb.finish(True)
complete = True
- return print_test_summary(num_tests, failures, complete, doing, options)
+ return print_test_summary(
+ num_tests, failures, complete, slow_tests, doing, options
+ )
try:
for i, res in enumerate(results):
@@ -742,6 +763,9 @@ def process_test_results(results, num_tests, pb, options, slog):
"SKIP": 0,
},
)
+
+ if res.dt > options.slow_test_threshold:
+ slow_tests.append(res)
complete = True
except KeyboardInterrupt:
print(
@@ -750,7 +774,7 @@ def process_test_results(results, num_tests, pb, options, slog):
)
pb.finish(True)
- return print_test_summary(num_tests, failures, complete, doing, options)
+ return print_test_summary(num_tests, failures, complete, slow_tests, doing, options)
def run_tests(tests, num_tests, prefix, options, remote=False):
diff --git a/js/src/tests/lib/manifest.py b/js/src/tests/lib/manifest.py
index 834bcab088..a7590b1b04 100644
--- a/js/src/tests/lib/manifest.py
+++ b/js/src/tests/lib/manifest.py
@@ -31,22 +31,25 @@ class XULInfo:
self.abi = abi
self.os = os
self.isdebug = isdebug
- self.browserIsRemote = False
def as_js(self):
"""Return JS that when executed sets up variables so that JS expression
predicates on XUL build info evaluate properly."""
return (
- 'var xulRuntime = {{ OS: "{}", XPCOMABI: "{}", shell: true }};'
+ "var winWidget = {};"
+ "var gtkWidget = {};"
+ "var cocoaWidget = {};"
+ "var is64Bit = {};"
+ "var xulRuntime = {{ shell: true }};"
"var release_or_beta = getBuildConfiguration('release_or_beta');"
- "var isDebugBuild={}; var Android={}; "
- "var browserIsRemote={}".format(
- self.os,
- self.abi,
+ "var isDebugBuild={}; var Android={}; ".format(
+ str(self.os == "WINNT").lower(),
+                str(self.os == "Linux").lower(),
+                str(self.os == "Darwin").lower(),
+ str("x86-" not in self.abi).lower(),
str(self.isdebug).lower(),
str(self.os == "Android").lower(),
- str(self.browserIsRemote).lower(),
)
)
@@ -258,7 +261,7 @@ def _parse_one(testcase, terms, xul_tester):
pos += 1
elif parts[pos] == "silentfail":
# silentfails use tons of memory, and Darwin doesn't support ulimit.
- if xul_tester.test("xulRuntime.OS == 'Darwin'", testcase.options):
+ if xul_tester.test("cocoaWidget", testcase.options):
testcase.expect = testcase.enable = False
pos += 1
elif parts[pos].startswith("error:"):
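To make the new condition variables concrete, here is a sketch of the prelude string as_js() now builds for a hypothetical 64-bit Linux debug shell; the build parameters below are invented, only the mapping to the new variables comes from the hunk above:

    # Hypothetical build parameters; only the variable mapping is from the patch.
    os_name, abi = "Linux", "x86_64-gcc3"

    prelude = (
        "var winWidget = {};"
        "var gtkWidget = {};"
        "var cocoaWidget = {};"
        "var is64Bit = {};"
        "var xulRuntime = {{ shell: true }};".format(
            str(os_name == "WINNT").lower(),
            str(os_name == "Linux").lower(),
            str(os_name == "Darwin").lower(),
            str("x86-" not in abi).lower(),
        )
    )
    # prelude == "var winWidget = false;var gtkWidget = true;"
    #            "var cocoaWidget = false;var is64Bit = true;"
    #            "var xulRuntime = { shell: true };"

Defined this way, the |silentfail| handler above can test cocoaWidget directly instead of evaluating xulRuntime.OS == 'Darwin'.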
diff --git a/js/src/tests/lib/results.py b/js/src/tests/lib/results.py
index 9c623b863f..42f8d7c163 100644
--- a/js/src/tests/lib/results.py
+++ b/js/src/tests/lib/results.py
@@ -372,6 +372,9 @@ class ResultsSink:
else:
self.list(completed)
+ if self.n != 0 and self.options.show_slow:
+ self.show_slow_tests()
+
if self.wptreport is not None:
self.wptreport.suite_end()
@@ -427,16 +430,28 @@ class ResultsSink:
else:
print("FAIL" + suffix)
- if self.options.show_slow:
- min_duration = self.options.slow_test_threshold
- print("Slow tests (duration > {}s)".format(min_duration))
- slow_tests = sorted(self.slow_tests, key=lambda x: x.duration, reverse=True)
- any = False
- for test in slow_tests:
- print("{:>5} {}".format(round(test.duration, 2), test.test))
- any = True
- if not any:
- print("None")
+ def show_slow_tests(self):
+ threshold = self.options.slow_test_threshold
+ fraction_fast = 1 - len(self.slow_tests) / self.n
+ self.log_info(
+ "{:5.2f}% of tests ran in under {}s".format(fraction_fast * 100, threshold)
+ )
+
+ self.log_info("Slowest tests that took longer than {}s:".format(threshold))
+ slow_tests = sorted(self.slow_tests, key=lambda x: x.duration, reverse=True)
+ any = False
+ for i in range(min(len(slow_tests), 20)):
+ test = slow_tests[i]
+ self.log_info(" {:6.2f} {}".format(test.duration, test.test))
+ any = True
+ if not any:
+ self.log_info("None")
+
+ def log_info(self, message):
+ if self.options.format == "automation":
+ self.slog.log_info(message)
+ else:
+ print(message)
def all_passed(self):
return "REGRESSIONS" not in self.groups and "TIMEOUTS" not in self.groups
diff --git a/js/src/tests/lib/structuredlog.py b/js/src/tests/lib/structuredlog.py
index 2f2d317d02..b00914cc90 100644
--- a/js/src/tests/lib/structuredlog.py
+++ b/js/src/tests/lib/structuredlog.py
@@ -1,5 +1,5 @@
# produce mozlog-compatible log messages, following the spec at
-# https://mozbase.readthedocs.io/en/latest/mozlog.html
+# https://firefox-source-docs.mozilla.org/mozbase/mozlog.html
import json
import os
@@ -54,3 +54,6 @@ class TestLogger(object):
record["status"] = status
record.update(**details)
self._log_obj(record)
+
+ def log_info(self, message):
+ self._log(action="log", level="INFO", message=message)
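For completeness, a sketch of the kind of mozlog record the new TestLogger.log_info() emits; only the action, level, and message fields are taken from the patch, while the extra metadata that _log()/_log_obj() normally add (time, pid, and so on) is assumed and omitted here:

    import json

    # Assumed shape of the serialized "log" record; real records carry
    # additional fields added by TestLogger's logging helpers.
    record = {"action": "log", "level": "INFO", "message": "example summary line"}
    print(json.dumps(record))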