Diffstat (limited to 'js/src/tests/lib/jittests.py')
-rwxr-xr-x  js/src/tests/lib/jittests.py | 48
1 file changed, 36 insertions(+), 12 deletions(-)
diff --git a/js/src/tests/lib/jittests.py b/js/src/tests/lib/jittests.py
index 7d79ba9f2a..fbd00c0c37 100755
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -399,8 +399,10 @@ class JitTest:
# We may have specified '-a' or '-d' twice: once via --jitflags, once
# via the "|jit-test|" line. Remove dups because they are toggles.
+ # Note: |dict.fromkeys(flags)| is similar to |set(flags)| but it
+ # preserves order.
cmd = prefix + []
- cmd += list(set(self.jitflags))
+ cmd += list(dict.fromkeys(self.jitflags))
# Handle selfhosted XDR file.
if self.selfhosted_xdr_mode != "off":
cmd += [
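
The new comment in this hunk points out that dict.fromkeys() deduplicates like set() while keeping insertion order (guaranteed for dicts since Python 3.7). A standalone sketch, not part of the patch, using made-up flag values:

# Deduplicate JIT flags while keeping the order they were given in.
jitflags = ["--ion-eager", "-a", "--baseline-eager", "-a"]

# set() drops duplicates but loses the original flag order.
unordered = list(set(jitflags))

# dict.fromkeys() also drops duplicates, and dict keys keep insertion order.
ordered = list(dict.fromkeys(jitflags))

assert ordered == ["--ion-eager", "-a", "--baseline-eager"]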
@@ -603,7 +605,12 @@ def print_automation_format(ok, res, slog):
print("INFO (warn-stderr) 2> " + line.strip())
-def print_test_summary(num_tests, failures, complete, doing, options):
+def print_test_summary(num_tests, failures, complete, slow_tests, doing, options):
+ def test_details(res):
+ if options.show_failed:
+ return escape_cmdline(res.cmd)
+ return " ".join(res.test.jitflags + [res.test.relpath_tests])
+
if failures:
if options.write_failures:
try:
@@ -628,21 +635,15 @@ def print_test_summary(num_tests, failures, complete, doing, options):
traceback.print_exc()
sys.stderr.write("---\n")
- def show_test(res):
- if options.show_failed:
- print(" " + escape_cmdline(res.cmd))
- else:
- print(" " + " ".join(res.test.jitflags + [res.test.relpath_tests]))
-
print("FAILURES:")
for res in failures:
if not res.timed_out:
- show_test(res)
+ print(" " + test_details(res))
print("TIMEOUTS:")
for res in failures:
if res.timed_out:
- show_test(res)
+ print(" " + test_details(res))
else:
print(
"PASSED ALL"
@@ -659,6 +660,23 @@ def print_test_summary(num_tests, failures, complete, doing, options):
print("Passed: {:d}".format(num_tests - num_failures))
print("Failed: {:d}".format(num_failures))
+ if num_tests != 0 and options.show_slow:
+ threshold = options.slow_test_threshold
+ fraction_fast = 1 - len(slow_tests) / num_tests
+ print(
+ "{:5.2f}% of tests ran in under {}s".format(fraction_fast * 100, threshold)
+ )
+
+ print("Slowest tests that took longer than {}s:".format(threshold))
+ slow_tests.sort(key=lambda res: res.dt, reverse=True)
+ any = False
+ for i in range(min(len(slow_tests), 20)):
+ res = slow_tests[i]
+ print(" {:6.2f} {}".format(res.dt, test_details(res)))
+ any = True
+ if not any:
+ print("None")
+
return not failures
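
The summary block above reports what fraction of tests finished under options.slow_test_threshold and lists up to 20 of the slowest. A minimal sketch of that arithmetic, assuming res.dt is a test's wall-clock time in seconds (the values and test names here are made up):

from types import SimpleNamespace

num_tests = 200
threshold = 10.0  # stands in for options.slow_test_threshold
slow_tests = [SimpleNamespace(dt=31.2, name="big-array.js"),
              SimpleNamespace(dt=12.7, name="regexp-heavy.js")]

# Share of tests that stayed under the threshold.
fraction_fast = 1 - len(slow_tests) / num_tests
print("{:5.2f}% of tests ran in under {}s".format(fraction_fast * 100, threshold))

# Longest-running tests first, capped at 20 entries.
for res in sorted(slow_tests, key=lambda r: r.dt, reverse=True)[:20]:
    print("  {:6.2f} {}".format(res.dt, res.name))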
@@ -684,11 +702,14 @@ def process_test_results(results, num_tests, pb, options, slog):
complete = False
output_dict = {}
doing = "before starting"
+ slow_tests = []
if num_tests == 0:
pb.finish(True)
complete = True
- return print_test_summary(num_tests, failures, complete, doing, options)
+ return print_test_summary(
+ num_tests, failures, complete, slow_tests, doing, options
+ )
try:
for i, res in enumerate(results):
@@ -742,6 +763,9 @@ def process_test_results(results, num_tests, pb, options, slog):
"SKIP": 0,
},
)
+
+ if res.dt > options.slow_test_threshold:
+ slow_tests.append(res)
complete = True
except KeyboardInterrupt:
print(
@@ -750,7 +774,7 @@ def process_test_results(results, num_tests, pb, options, slog):
)
pb.finish(True)
- return print_test_summary(num_tests, failures, complete, doing, options)
+ return print_test_summary(num_tests, failures, complete, slow_tests, doing, options)
def run_tests(tests, num_tests, prefix, options, remote=False):
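
Finally, process_test_results() threads the new slow_tests list through to print_test_summary(), appending any result whose duration exceeds the threshold as results stream in. A simplified sketch of that flow, with a hypothetical summarize() standing in for print_test_summary():

from types import SimpleNamespace

def summarize(slow_tests):
    # Hypothetical stand-in for print_test_summary().
    print("slow tests: {:d}".format(len(slow_tests)))

def process_test_results(results, threshold):
    slow_tests = []
    for res in results:
        # Remember every result that ran longer than the threshold.
        if res.dt > threshold:
            slow_tests.append(res)
    summarize(slow_tests)

process_test_results(
    [SimpleNamespace(dt=0.4), SimpleNamespace(dt=12.3), SimpleNamespace(dt=3.1)],
    threshold=10.0,
)  # prints "slow tests: 1"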