path: root/js/src/tests/lib
author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /js/src/tests/lib
parent     Initial commit. (diff)
download   firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.tar.xz
           firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.zip
Adding upstream version 115.7.0esr. (upstream/115.7.0esr, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js/src/tests/lib')
-rw-r--r--  js/src/tests/lib/__init__.py             0
-rw-r--r--  js/src/tests/lib/adaptor.py             21
-rwxr-xr-x  js/src/tests/lib/jittests.py           817
-rw-r--r--  js/src/tests/lib/manifest.py           641
-rw-r--r--  js/src/tests/lib/progressbar.py        136
-rw-r--r--  js/src/tests/lib/remote.py             106
-rw-r--r--  js/src/tests/lib/results.py            471
-rw-r--r--  js/src/tests/lib/structuredlog.py       56
-rw-r--r--  js/src/tests/lib/tasks_adb_remote.py   284
-rw-r--r--  js/src/tests/lib/tasks_unix.py         273
-rw-r--r--  js/src/tests/lib/tasks_win.py          177
-rw-r--r--  js/src/tests/lib/tempfile.py            18
-rw-r--r--  js/src/tests/lib/terminal_unix.py       33
-rw-r--r--  js/src/tests/lib/terminal_win.py       114
-rw-r--r--  js/src/tests/lib/tests.py              334
-rw-r--r--  js/src/tests/lib/wptreport.py           85
16 files changed, 3566 insertions, 0 deletions
diff --git a/js/src/tests/lib/__init__.py b/js/src/tests/lib/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/js/src/tests/lib/__init__.py
diff --git a/js/src/tests/lib/adaptor.py b/js/src/tests/lib/adaptor.py
new file mode 100644
index 0000000000..31413d1e60
--- /dev/null
+++ b/js/src/tests/lib/adaptor.py
@@ -0,0 +1,21 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# adaptor.py -- Use generators to mutate the sequence of tests to be executed.
+
+
+def xdr_annotate(tests, options):
+ """Returns a tuple a test which is encoding the self-hosted
+ code and a generator of tests which is decoding the self-hosted
+ code."""
+ selfhosted_is_encoded = False
+ for test in tests:
+ if not test.enable and not options.run_skipped:
+ test.selfhosted_xdr_mode = "off"
+ elif not selfhosted_is_encoded:
+ test.selfhosted_xdr_mode = "encode"
+ selfhosted_is_encoded = True
+ else:
+ test.selfhosted_xdr_mode = "decode"
+ yield test
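
For orientation (not part of the patch), here is a minimal sketch of how this generator annotates a test sequence. The test and options objects are hypothetical stand-ins modelling only the attributes xdr_annotate reads, and the generator body is copied from adaptor.py above so the sketch runs on its own:

from types import SimpleNamespace

def xdr_annotate(tests, options):
    # Same logic as adaptor.py above, repeated so this sketch is self-contained.
    selfhosted_is_encoded = False
    for test in tests:
        if not test.enable and not options.run_skipped:
            test.selfhosted_xdr_mode = "off"
        elif not selfhosted_is_encoded:
            test.selfhosted_xdr_mode = "encode"
            selfhosted_is_encoded = True
        else:
            test.selfhosted_xdr_mode = "decode"
        yield test

# Hypothetical stand-ins for the real test/options objects.
tests = [
    SimpleNamespace(name="skipped.js", enable=False, selfhosted_xdr_mode="off"),
    SimpleNamespace(name="first.js", enable=True, selfhosted_xdr_mode="off"),
    SimpleNamespace(name="second.js", enable=True, selfhosted_xdr_mode="off"),
]
options = SimpleNamespace(run_skipped=False)
print([(t.name, t.selfhosted_xdr_mode) for t in xdr_annotate(tests, options)])
# [('skipped.js', 'off'), ('first.js', 'encode'), ('second.js', 'decode')]

Only the first test that actually runs pays the cost of encoding the self-hosted code; every later shell invocation decodes the cached copy.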
diff --git a/js/src/tests/lib/jittests.py b/js/src/tests/lib/jittests.py
new file mode 100755
index 0000000000..9eaa0bf168
--- /dev/null
+++ b/js/src/tests/lib/jittests.py
@@ -0,0 +1,817 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+# jit_test.py -- Python harness for JavaScript trace tests.
+
+import os
+import re
+import sys
+import traceback
+from collections import namedtuple
+
+if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
+ from .tasks_unix import run_all_tests
+else:
+ from .tasks_win import run_all_tests
+
+from .progressbar import NullProgressBar, ProgressBar
+from .results import escape_cmdline
+from .structuredlog import TestLogger
+from .tempfile import TemporaryDirectory
+
+TESTS_LIB_DIR = os.path.dirname(os.path.abspath(__file__))
+JS_DIR = os.path.dirname(os.path.dirname(TESTS_LIB_DIR))
+TOP_SRC_DIR = os.path.dirname(os.path.dirname(JS_DIR))
+TEST_DIR = os.path.join(JS_DIR, "jit-test", "tests")
+LIB_DIR = os.path.join(JS_DIR, "jit-test", "lib") + os.path.sep
+MODULE_DIR = os.path.join(JS_DIR, "jit-test", "modules") + os.path.sep
+SHELL_XDR = "shell.xdr"
+
+# Backported from Python 3.1 posixpath.py
+
+
+def _relpath(path, start=None):
+ """Return a relative version of a path"""
+
+ if not path:
+ raise ValueError("no path specified")
+
+ if start is None:
+ start = os.curdir
+
+ start_list = os.path.abspath(start).split(os.sep)
+ path_list = os.path.abspath(path).split(os.sep)
+
+ # Work out how much of the filepath is shared by start and path.
+ i = len(os.path.commonprefix([start_list, path_list]))
+
+ rel_list = [os.pardir] * (len(start_list) - i) + path_list[i:]
+ if not rel_list:
+ return os.curdir
+ return os.path.join(*rel_list)
+
+
+# Mapping of Python chars to their javascript string representation.
+QUOTE_MAP = {
+ "\\": "\\\\",
+ "\b": "\\b",
+ "\f": "\\f",
+ "\n": "\\n",
+ "\r": "\\r",
+ "\t": "\\t",
+ "\v": "\\v",
+}
+
+# Quote the string S, javascript style.
+
+
+def js_quote(quote, s):
+ result = quote
+ for c in s:
+ if c == quote:
+ result += "\\" + quote
+ elif c in QUOTE_MAP:
+ result += QUOTE_MAP[c]
+ else:
+ result += c
+ result += quote
+ return result
+
+
+os.path.relpath = _relpath
+
+
+def extend_condition(condition, value):
+ if condition:
+ condition += " || "
+ condition += "({})".format(value)
+ return condition
+
+
+class JitTest:
+
+ VALGRIND_CMD = []
+ paths = (d for d in os.environ["PATH"].split(os.pathsep))
+ valgrinds = (os.path.join(d, "valgrind") for d in paths)
+ if any(os.path.exists(p) for p in valgrinds):
+ VALGRIND_CMD = [
+ "valgrind",
+ "-q",
+ "--smc-check=all-non-file",
+ "--error-exitcode=1",
+ "--gen-suppressions=all",
+ "--show-possibly-lost=no",
+ "--leak-check=full",
+ ]
+ if os.uname()[0] == "Darwin":
+ VALGRIND_CMD.append("--dsymutil=yes")
+
+ del paths
+ del valgrinds
+
+ def __init__(self, path):
+ # Absolute path of the test file.
+ self.path = path
+
+ # Path relative to the top mozilla/ directory.
+ self.relpath_top = os.path.relpath(path, TOP_SRC_DIR)
+
+ # Path relative to mozilla/js/src/jit-test/tests/.
+ self.relpath_tests = os.path.relpath(path, TEST_DIR)
+
+ # jit flags to enable
+ self.jitflags = []
+ # True means the test is slow-running
+ self.slow = False
+ # True means that OOM is not considered a failure
+ self.allow_oom = False
+ # True means CrashAtUnhandlableOOM is not considered a failure
+ self.allow_unhandlable_oom = False
+ # True means that hitting the recursion limits is not considered a failure.
+ self.allow_overrecursed = False
+ # True means run under valgrind
+ self.valgrind = False
+ # True means force Pacific time for the test
+ self.tz_pacific = False
+ # Additional files to include, in addition to prologue.js
+ self.other_lib_includes = []
+ self.other_script_includes = []
+ # List of other configurations to test with.
+ self.test_also = []
+ # List of other configurations to test with all existing variants.
+ self.test_join = []
+ # Errors to expect and consider passing
+ self.expect_error = ""
+ # Exit status to expect from shell
+ self.expect_status = 0
+ # Exit status or error output.
+ self.expect_crash = False
+ self.is_module = False
+ # Reflect.stringify implementation to test
+ self.test_reflect_stringify = None
+ # Use self-hosted XDR instead of parsing the source stored in the binary.
+ self.selfhosted_xdr_path = None
+ self.selfhosted_xdr_mode = "off"
+
+ # Skip-if condition. We don't have a xulrunner, but we can ask the shell
+ # directly.
+ self.skip_if_cond = ""
+ self.skip_variant_if_cond = {}
+
+ # Expected by the test runner. Always true for jit-tests.
+ self.enable = True
+
+ def copy(self):
+ t = JitTest(self.path)
+ t.jitflags = self.jitflags[:]
+ t.slow = self.slow
+ t.allow_oom = self.allow_oom
+ t.allow_unhandlable_oom = self.allow_unhandlable_oom
+ t.allow_overrecursed = self.allow_overrecursed
+ t.valgrind = self.valgrind
+ t.tz_pacific = self.tz_pacific
+ t.other_lib_includes = self.other_lib_includes[:]
+ t.other_script_includes = self.other_script_includes[:]
+ t.test_also = self.test_also
+ t.test_join = self.test_join
+ t.expect_error = self.expect_error
+ t.expect_status = self.expect_status
+ t.expect_crash = self.expect_crash
+ t.test_reflect_stringify = self.test_reflect_stringify
+ t.selfhosted_xdr_path = self.selfhosted_xdr_path
+ t.selfhosted_xdr_mode = self.selfhosted_xdr_mode
+ t.enable = True
+ t.is_module = self.is_module
+ t.skip_if_cond = self.skip_if_cond
+ t.skip_variant_if_cond = self.skip_variant_if_cond
+ return t
+
+ def copy_and_extend_jitflags(self, variant):
+ t = self.copy()
+ t.jitflags.extend(variant)
+ for flags in variant:
+ if flags in self.skip_variant_if_cond:
+ t.skip_if_cond = extend_condition(
+ t.skip_if_cond, self.skip_variant_if_cond[flags]
+ )
+ return t
+
+ def copy_variants(self, variants):
+ # Append variants to be tested in addition to the current set of tests.
+ variants = variants + self.test_also
+
+ # For each existing variant, duplicate it for each list of options in
+ # test_join. This multiplies the number of variants by 2 for each set
+ # of options.
+ for join_opts in self.test_join:
+ variants = variants + [opts + join_opts for opts in variants]
+
+ # For each list of jit flags, make a copy of the test.
+ return [self.copy_and_extend_jitflags(v) for v in variants]
+
+ COOKIE = b"|jit-test|"
+
+ # We would use 500019 (5k19), but quit() only accepts values up to 127, due to fuzzers
+ SKIPPED_EXIT_STATUS = 59
+ Directives = {}
+
+ @classmethod
+ def find_directives(cls, file_name):
+ meta = ""
+ line = open(file_name, "rb").readline()
+ i = line.find(cls.COOKIE)
+ if i != -1:
+ meta = ";" + line[i + len(cls.COOKIE) :].decode(errors="strict").strip("\n")
+ return meta
+
+ @classmethod
+ def from_file(cls, path, options):
+ test = cls(path)
+
+ # If directives.txt exists in the test's directory then it may
+ # contain metainformation that will be catenated with
+ # whatever's in the test file. The form of the directive in
+ # the directive file is the same as in the test file. Only
+ # the first line is considered, just as for the test file.
+
+ dir_meta = ""
+ dir_name = os.path.dirname(path)
+ if dir_name in cls.Directives:
+ dir_meta = cls.Directives[dir_name]
+ else:
+ meta_file_name = os.path.join(dir_name, "directives.txt")
+ if os.path.exists(meta_file_name):
+ dir_meta = cls.find_directives(meta_file_name)
+ cls.Directives[dir_name] = dir_meta
+
+ filename, file_extension = os.path.splitext(path)
+ meta = cls.find_directives(path)
+
+ if meta != "" or dir_meta != "":
+ meta = meta + dir_meta
+ parts = meta.split(";")
+ for part in parts:
+ part = part.strip()
+ if not part:
+ continue
+ name, _, value = part.partition(":")
+ if value:
+ value = value.strip()
+ if name == "error":
+ test.expect_error = value
+ elif name == "exitstatus":
+ try:
+ status = int(value, 0)
+ if status == test.SKIPPED_EXIT_STATUS:
+ print(
+ "warning: jit-tests uses {} as a sentinel"
+ " return value {}".format(
+ test.SKIPPED_EXIT_STATUS, path
+ )
+ )
+ else:
+ test.expect_status = status
+ except ValueError:
+ print(
+ "warning: couldn't parse exit status"
+ " {}".format(value)
+ )
+ elif name == "thread-count":
+ try:
+ test.jitflags.append(
+ "--thread-count={}".format(int(value, 0))
+ )
+ except ValueError:
+ print(
+ "warning: couldn't parse thread-count"
+ " {}".format(value)
+ )
+ elif name == "include":
+ test.other_lib_includes.append(value)
+ elif name == "local-include":
+ test.other_script_includes.append(value)
+ elif name == "skip-if":
+ test.skip_if_cond = extend_condition(test.skip_if_cond, value)
+ elif name == "skip-variant-if":
+ try:
+ [variant, condition] = value.split(",")
+ test.skip_variant_if_cond[variant] = extend_condition(
+ test.skip_if_cond, condition
+ )
+ except ValueError:
+ print("warning: couldn't parse skip-variant-if")
+ else:
+ print(
+ "{}: warning: unrecognized |jit-test| attribute"
+ " {}".format(path, part)
+ )
+ else:
+ if name == "slow":
+ test.slow = True
+ elif name == "allow-oom":
+ test.allow_oom = True
+ elif name == "allow-unhandlable-oom":
+ test.allow_unhandlable_oom = True
+ elif name == "allow-overrecursed":
+ test.allow_overrecursed = True
+ elif name == "valgrind":
+ test.valgrind = options.valgrind
+ elif name == "tz-pacific":
+ test.tz_pacific = True
+ elif name.startswith("test-also="):
+ test.test_also.append(
+ re.split(r"\s+", name[len("test-also=") :])
+ )
+ elif name.startswith("test-join="):
+ test.test_join.append(
+ re.split(r"\s+", name[len("test-join=") :])
+ )
+ elif name == "module":
+ test.is_module = True
+ elif name == "crash":
+ # Crashes are only allowed in self-test, as it is
+ # intended to verify that our testing infrastructure
+ # works, and not meant as a way to accept temporary
+ # failing tests. These tests should either be fixed or
+ # skipped.
+ assert (
+ "self-test" in path
+ ), "{}: has an unexpected crash annotation.".format(path)
+ test.expect_crash = True
+ elif name.startswith("--"):
+ # // |jit-test| --ion-gvn=off; --no-sse4
+ test.jitflags.append(name)
+ else:
+ print(
+ "{}: warning: unrecognized |jit-test| attribute"
+ " {}".format(path, part)
+ )
+
+ if options.valgrind_all:
+ test.valgrind = True
+
+ if options.test_reflect_stringify is not None:
+ test.expect_error = ""
+ test.expect_status = 0
+
+ return test
+
+ def command(self, prefix, libdir, moduledir, tempdir, remote_prefix=None):
+ path = self.path
+ if remote_prefix:
+ path = self.path.replace(TEST_DIR, remote_prefix)
+
+ scriptdir_var = os.path.dirname(path)
+ if not scriptdir_var.endswith("/"):
+ scriptdir_var += "/"
+
+ # Note: The tempdir provided as argument is managed by the caller and
+ # should remain alive as long as the test harness, so that the XDR
+ # content of the self-hosted code stays accessible to all JS shell
+ # instances.
+ self.selfhosted_xdr_path = os.path.join(tempdir, SHELL_XDR)
+
+ # Platforms where subprocess immediately invokes exec do not care
+ # whether we use double or single quotes. On windows and when using
+ # a remote device, however, we have to be careful to use the quote
+ # style that is the opposite of what the exec wrapper uses.
+ if remote_prefix:
+ quotechar = '"'
+ else:
+ quotechar = "'"
+
+ # Don't merge the expressions: We want separate -e arguments to avoid
+ # semicolons in the command line, bug 1351607.
+ exprs = [
+ "const platform={}".format(js_quote(quotechar, sys.platform)),
+ "const libdir={}".format(js_quote(quotechar, libdir)),
+ "const scriptdir={}".format(js_quote(quotechar, scriptdir_var)),
+ ]
+
+ # We may have specified '-a' or '-d' twice: once via --jitflags, once
+ # via the "|jit-test|" line. Remove dups because they are toggles.
+ cmd = prefix + []
+ cmd += list(set(self.jitflags))
+ # Handle selfhosted XDR file.
+ if self.selfhosted_xdr_mode != "off":
+ cmd += [
+ "--selfhosted-xdr-path",
+ self.selfhosted_xdr_path,
+ "--selfhosted-xdr-mode",
+ self.selfhosted_xdr_mode,
+ ]
+ for expr in exprs:
+ cmd += ["-e", expr]
+ for inc in self.other_lib_includes:
+ cmd += ["-f", libdir + inc]
+ for inc in self.other_script_includes:
+ cmd += ["-f", scriptdir_var + inc]
+ if self.skip_if_cond:
+ cmd += [
+ "-e",
+ "if ({}) quit({})".format(self.skip_if_cond, self.SKIPPED_EXIT_STATUS),
+ ]
+ cmd += ["--module-load-path", moduledir]
+ if self.is_module:
+ cmd += ["--module", path]
+ elif self.test_reflect_stringify is None:
+ cmd += ["-f", path]
+ else:
+ cmd += ["--", self.test_reflect_stringify, "--check", path]
+
+ if self.valgrind:
+ cmd = self.VALGRIND_CMD + cmd
+
+ if self.allow_unhandlable_oom or self.expect_crash:
+ cmd += ["--suppress-minidump"]
+
+ return cmd
+
+ # The test runner expects this to be set so that it can be passed to get_command.
+ js_cmd_prefix = None
+
+ def get_command(self, prefix, tempdir):
+ """Shim for the test runner."""
+ return self.command(prefix, LIB_DIR, MODULE_DIR, tempdir)
+
+
+def find_tests(substring=None):
+ ans = []
+ for dirpath, dirnames, filenames in os.walk(TEST_DIR):
+ dirnames.sort()
+ filenames.sort()
+ if dirpath == ".":
+ continue
+
+ for filename in filenames:
+ if not filename.endswith(".js"):
+ continue
+ if filename in ("shell.js", "browser.js"):
+ continue
+ test = os.path.join(dirpath, filename)
+ if substring is None or substring in os.path.relpath(test, TEST_DIR):
+ ans.append(test)
+ return ans
+
+
+def check_output(out, err, rc, timed_out, test, options):
+ # Allow skipping to compose with other expected results
+ if test.skip_if_cond:
+ if rc == test.SKIPPED_EXIT_STATUS:
+ return True
+
+ if timed_out:
+ relpath = os.path.normpath(test.relpath_tests).replace(os.sep, "/")
+ if relpath in options.ignore_timeouts:
+ return True
+ return False
+
+ if test.expect_error:
+ # The shell exits with code 3 on uncaught exceptions.
+ if rc != 3:
+ return False
+
+ return test.expect_error in err
+
+ for line in out.split("\n"):
+ if line.startswith("Trace stats check failed"):
+ return False
+
+ for line in err.split("\n"):
+ if "Assertion failed:" in line:
+ return False
+
+ if test.expect_crash:
+ # Python 3 on Windows interprets process exit codes as unsigned
+ # integers, where Python 2 used to allow signed integers. Account for
+ # each possibility here.
+ if sys.platform == "win32" and rc in (3 - 2 ** 31, 3 + 2 ** 31):
+ return True
+
+ if sys.platform != "win32" and rc == -11:
+ return True
+
+ # When building with ASan enabled, ASan will convert the -11 returned
+ # value to 1. As a work-around we look for the error output which
+ # includes the crash reason.
+ if rc == 1 and ("Hit MOZ_CRASH" in err or "Assertion failure:" in err):
+ return True
+
+ # When running jittests on Android, SEGV results in a return code of
+ # 128 + 11 = 139. Due to a bug in tinybox, we have to check for 138 as
+ # well.
+ if rc == 139 or rc == 138:
+ return True
+
+ # Crashing test should always crash as expected, otherwise this is an
+ # error. The JS shell crash() function can be used to force the test
+ # case to crash in unexpected configurations.
+ return False
+
+ if rc != test.expect_status:
+ # Allow a non-zero exit code if we want to allow OOM, but only if we
+ # actually got OOM.
+ if (
+ test.allow_oom
+ and "out of memory" in err
+ and "Assertion failure" not in err
+ and "MOZ_CRASH" not in err
+ ):
+ return True
+
+ # Allow a non-zero exit code if we want to allow unhandlable OOM, but
+ # only if we actually got unhandlable OOM.
+ if test.allow_unhandlable_oom and "MOZ_CRASH([unhandlable oom]" in err:
+ return True
+
+ # Allow a non-zero exit code if we want to allow too-much-recursion and
+ # the test actually over-recursed.
+ if (
+ test.allow_overrecursed
+ and "too much recursion" in err
+ and "Assertion failure" not in err
+ ):
+ return True
+
+ # Allow a zero exit code if we are running under a sanitizer that
+ # forces the exit status.
+ if test.expect_status != 0 and options.unusable_error_status:
+ return True
+
+ return False
+
+ return True
+
+
+def print_automation_format(ok, res, slog):
+ # Output test failures in a parsable format suitable for automation, eg:
+ # TEST-RESULT | filename.js | Failure description (code N, args "--foobar")
+ #
+ # Example:
+ # TEST-PASS | foo/bar/baz.js | (code 0, args "--ion-eager")
+ # TEST-UNEXPECTED-FAIL | foo/bar/baz.js | TypeError: or something (code -9, args "--no-ion")
+ # INFO exit-status : 3
+ # INFO timed-out : False
+ # INFO stdout > foo
+ # INFO stdout > bar
+ # INFO stdout > baz
+ # INFO stderr 2> TypeError: or something
+ # TEST-UNEXPECTED-FAIL | jit_test.py: Test execution interrupted by user
+ result = "TEST-PASS" if ok else "TEST-UNEXPECTED-FAIL"
+ message = "Success" if ok else res.describe_failure()
+ jitflags = " ".join(res.test.jitflags)
+ print(
+ '{} | {} | {} (code {}, args "{}") [{:.1f} s]'.format(
+ result, res.test.relpath_top, message, res.rc, jitflags, res.dt
+ )
+ )
+
+ details = {
+ "message": message,
+ "extra": {
+ "jitflags": jitflags,
+ },
+ }
+ if res.extra:
+ details["extra"].update(res.extra)
+ slog.test(res.test.relpath_tests, "PASS" if ok else "FAIL", res.dt, **details)
+
+ # For failed tests, print as much information as we have, to aid debugging.
+ if ok:
+ return
+ print("INFO exit-status : {}".format(res.rc))
+ print("INFO timed-out : {}".format(res.timed_out))
+ for line in res.out.splitlines():
+ print("INFO stdout > " + line.strip())
+ for line in res.err.splitlines():
+ print("INFO stderr 2> " + line.strip())
+
+
+def print_test_summary(num_tests, failures, complete, doing, options):
+ if failures:
+ if options.write_failures:
+ try:
+ out = open(options.write_failures, "w")
+ # Don't write duplicate entries when we are doing multiple
+ # failures per job.
+ written = set()
+ for res in failures:
+ if res.test.path not in written:
+ out.write(os.path.relpath(res.test.path, TEST_DIR) + "\n")
+ if options.write_failure_output:
+ out.write(res.out)
+ out.write(res.err)
+ out.write("Exit code: " + str(res.rc) + "\n")
+ written.add(res.test.path)
+ out.close()
+ except IOError:
+ sys.stderr.write(
+ "Exception thrown trying to write failure"
+ " file '{}'\n".format(options.write_failures)
+ )
+ traceback.print_exc()
+ sys.stderr.write("---\n")
+
+ def show_test(res):
+ if options.show_failed:
+ print(" " + escape_cmdline(res.cmd))
+ else:
+ print(" " + " ".join(res.test.jitflags + [res.test.relpath_tests]))
+
+ print("FAILURES:")
+ for res in failures:
+ if not res.timed_out:
+ show_test(res)
+
+ print("TIMEOUTS:")
+ for res in failures:
+ if res.timed_out:
+ show_test(res)
+ else:
+ print(
+ "PASSED ALL"
+ + (
+ ""
+ if complete
+ else " (partial run -- interrupted by user {})".format(doing)
+ )
+ )
+
+ if options.format == "automation":
+ num_failures = len(failures) if failures else 0
+ print("Result summary:")
+ print("Passed: {:d}".format(num_tests - num_failures))
+ print("Failed: {:d}".format(num_failures))
+
+ return not failures
+
+
+def create_progressbar(num_tests, options):
+ if (
+ not options.hide_progress
+ and not options.show_cmd
+ and ProgressBar.conservative_isatty()
+ ):
+ fmt = [
+ {"value": "PASS", "color": "green"},
+ {"value": "FAIL", "color": "red"},
+ {"value": "TIMEOUT", "color": "blue"},
+ {"value": "SKIP", "color": "brightgray"},
+ ]
+ return ProgressBar(num_tests, fmt)
+ return NullProgressBar()
+
+
+def process_test_results(results, num_tests, pb, options, slog):
+ failures = []
+ timeouts = 0
+ complete = False
+ output_dict = {}
+ doing = "before starting"
+
+ if num_tests == 0:
+ pb.finish(True)
+ complete = True
+ return print_test_summary(num_tests, failures, complete, doing, options)
+
+ try:
+ for i, res in enumerate(results):
+ ok = check_output(
+ res.out, res.err, res.rc, res.timed_out, res.test, options
+ )
+
+ if ok:
+ show_output = options.show_output and not options.failed_only
+ else:
+ show_output = options.show_output or not options.no_show_failed
+
+ if show_output:
+ pb.beginline()
+ sys.stdout.write(res.out)
+ sys.stdout.write(res.err)
+ sys.stdout.write("Exit code: {}\n".format(res.rc))
+
+ if res.test.valgrind and not show_output:
+ pb.beginline()
+ sys.stdout.write(res.err)
+
+ if options.check_output:
+ if res.test.path in output_dict.keys():
+ if output_dict[res.test.path] != res.out:
+ pb.message(
+ "FAIL - OUTPUT DIFFERS {}".format(res.test.relpath_tests)
+ )
+ else:
+ output_dict[res.test.path] = res.out
+
+ doing = "after {}".format(res.test.relpath_tests)
+ if not ok:
+ failures.append(res)
+ if res.timed_out:
+ pb.message("TIMEOUT - {}".format(res.test.relpath_tests))
+ timeouts += 1
+ else:
+ pb.message("FAIL - {}".format(res.test.relpath_tests))
+
+ if options.format == "automation":
+ print_automation_format(ok, res, slog)
+
+ n = i + 1
+ pb.update(
+ n,
+ {
+ "PASS": n - len(failures),
+ "FAIL": len(failures),
+ "TIMEOUT": timeouts,
+ "SKIP": 0,
+ },
+ )
+ complete = True
+ except KeyboardInterrupt:
+ print(
+ "TEST-UNEXPECTED-FAIL | jit_test.py"
+ + " : Test execution interrupted by user"
+ )
+
+ pb.finish(True)
+ return print_test_summary(num_tests, failures, complete, doing, options)
+
+
+def run_tests(tests, num_tests, prefix, options, remote=False):
+ slog = None
+ if options.format == "automation":
+ slog = TestLogger("jittests")
+ slog.suite_start()
+
+ if remote:
+ ok = run_tests_remote(tests, num_tests, prefix, options, slog)
+ else:
+ ok = run_tests_local(tests, num_tests, prefix, options, slog)
+
+ if slog:
+ slog.suite_end()
+
+ return ok
+
+
+def run_tests_local(tests, num_tests, prefix, options, slog):
+ # The jstests tasks runner requires the following options. The names are
+ # taken from the jstests options processing code and are frequently
+ # subtly different from the options jit-tests expects, so we wrap them
+ # here as needed.
+ AdaptorOptions = namedtuple(
+ "AdaptorOptions",
+ [
+ "worker_count",
+ "passthrough",
+ "timeout",
+ "output_fp",
+ "hide_progress",
+ "run_skipped",
+ "show_cmd",
+ "use_xdr",
+ ],
+ )
+ shim_options = AdaptorOptions(
+ options.max_jobs,
+ False,
+ options.timeout,
+ sys.stdout,
+ False,
+ True,
+ options.show_cmd,
+ options.use_xdr,
+ )
+
+ # The test runner wants the prefix as a static on the Test class.
+ JitTest.js_cmd_prefix = prefix
+
+ with TemporaryDirectory() as tempdir:
+ pb = create_progressbar(num_tests, options)
+ gen = run_all_tests(tests, prefix, tempdir, pb, shim_options)
+ ok = process_test_results(gen, num_tests, pb, options, slog)
+ return ok
+
+
+def run_tests_remote(tests, num_tests, prefix, options, slog):
+ # Setup device with everything needed to run our tests.
+ from mozdevice import ADBError, ADBTimeoutError
+
+ from .tasks_adb_remote import get_remote_results
+
+ # Run all tests.
+ pb = create_progressbar(num_tests, options)
+ try:
+ gen = get_remote_results(tests, prefix, pb, options)
+ ok = process_test_results(gen, num_tests, pb, options, slog)
+ except (ADBError, ADBTimeoutError):
+ print("TEST-UNEXPECTED-FAIL | jit_test.py" + " : Device error during test")
+ raise
+ return ok
+
+
+if __name__ == "__main__":
+ print("Use ../jit-test/jit_test.py to run these tests.")
diff --git a/js/src/tests/lib/manifest.py b/js/src/tests/lib/manifest.py
new file mode 100644
index 0000000000..26c6821ed9
--- /dev/null
+++ b/js/src/tests/lib/manifest.py
@@ -0,0 +1,641 @@
+# Library for JSTest manifests.
+#
+# This includes classes for representing and parsing JS manifests.
+
+import io
+import os
+import posixpath
+import re
+import sys
+from subprocess import PIPE, Popen
+
+import six
+
+from .remote import init_device
+from .tests import RefTestCase
+
+
+def split_path_into_dirs(path):
+ dirs = [path]
+
+ while True:
+ path, tail = os.path.split(path)
+ if not tail:
+ break
+ dirs.append(path)
+ return dirs
+
+
+class XULInfo:
+ def __init__(self, abi, os, isdebug):
+ self.abi = abi
+ self.os = os
+ self.isdebug = isdebug
+ self.browserIsRemote = False
+
+ def as_js(self):
+ """Return JS that when executed sets up variables so that JS expression
+ predicates on XUL build info evaluate properly."""
+
+ return (
+ 'var xulRuntime = {{ OS: "{}", XPCOMABI: "{}", shell: true }};'
+ "var release_or_beta = getBuildConfiguration().release_or_beta;"
+ "var isDebugBuild={}; var Android={}; "
+ "var browserIsRemote={}".format(
+ self.os,
+ self.abi,
+ str(self.isdebug).lower(),
+ str(self.os == "Android").lower(),
+ str(self.browserIsRemote).lower(),
+ )
+ )
+
+ @classmethod
+ def create(cls, jsdir):
+ """Create a XULInfo based on the current platform's characteristics."""
+
+ # Our strategy is to find the autoconf.mk generated for the build and
+ # read the values from there.
+
+ # Find config/autoconf.mk.
+ dirs = split_path_into_dirs(os.getcwd()) + split_path_into_dirs(jsdir)
+
+ path = None
+ for dir in dirs:
+ _path = posixpath.join(dir, "config", "autoconf.mk")
+ if os.path.isfile(_path):
+ path = _path
+ break
+
+ if path is None:
+ print(
+ "Can't find config/autoconf.mk on a directory containing"
+ " the JS shell (searched from {})".format(jsdir)
+ )
+ sys.exit(1)
+
+ # Read the values.
+ val_re = re.compile(r"(TARGET_XPCOM_ABI|OS_TARGET|MOZ_DEBUG)\s*=\s*(.*)")
+ kw = {"isdebug": False}
+ for line in io.open(path, encoding="utf-8"):
+ m = val_re.match(line)
+ if m:
+ key, val = m.groups()
+ val = val.rstrip()
+ if key == "TARGET_XPCOM_ABI":
+ kw["abi"] = val
+ if key == "OS_TARGET":
+ kw["os"] = val
+ if key == "MOZ_DEBUG":
+ kw["isdebug"] = val == "1"
+ return cls(**kw)
+
+
+class XULInfoTester:
+ def __init__(self, xulinfo, options, js_args):
+ self.js_prologue = xulinfo.as_js()
+ self.js_bin = options.js_shell
+ self.js_args = js_args
+ # options here are the command line options
+ self.options = options
+ # Maps JS expr to evaluation result.
+ self.cache = {}
+
+ if not self.options.remote:
+ return
+ self.device = init_device(options)
+ self.js_bin = posixpath.join(options.remote_test_root, "bin", "js")
+
+ def test(self, cond, options=[]):
+ if self.options.remote:
+ return self._test_remote(cond, options=options)
+ return self._test_local(cond, options=options)
+
+ def _test_remote(self, cond, options=[]):
+ from mozdevice import ADBDevice, ADBProcessError
+
+ ans = self.cache.get(cond, None)
+ if ans is not None:
+ return ans
+
+ env = {
+ "LD_LIBRARY_PATH": posixpath.join(self.options.remote_test_root, "bin"),
+ }
+
+ cmd = (
+ [self.js_bin]
+ + self.js_args
+ + options
+ + [
+ # run in safe configuration, since it is hard to debug
+ # crashes when running code here. In particular, msan will
+ # error out if the jit is active.
+ "--no-baseline",
+ "--no-blinterp",
+ "-e",
+ self.js_prologue,
+ "-e",
+ "print(!!({}))".format(cond),
+ ]
+ )
+ cmd = ADBDevice._escape_command_line(cmd)
+ try:
+ # Allow ADBError or ADBTimeoutError to terminate the test run,
+ # but handle ADBProcessError in order to support the use of
+ # non-zero exit codes in the JavaScript shell tests.
+ out = self.device.shell_output(
+ cmd, env=env, cwd=self.options.remote_test_root, timeout=None
+ )
+ err = ""
+ except ADBProcessError as e:
+ out = ""
+ err = str(e.adb_process.stdout)
+
+ if out == "true":
+ ans = True
+ elif out == "false":
+ ans = False
+ else:
+ raise Exception(
+ "Failed to test XUL condition {!r};"
+ " output was {!r}, stderr was {!r}".format(cond, out, err)
+ )
+ self.cache[cond] = ans
+ return ans
+
+ def _test_local(self, cond, options=[]):
+ """Test a XUL predicate condition against this local info."""
+ ans = self.cache.get(cond, None)
+ if ans is None:
+ cmd = (
+ [self.js_bin]
+ + self.js_args
+ + options
+ + [
+ # run in safe configuration, since it is hard to debug
+ # crashes when running code here. In particular, msan will
+ # error out if the jit is active.
+ "--no-baseline",
+ "--no-blinterp",
+ "-e",
+ self.js_prologue,
+ "-e",
+ "print(!!({}))".format(cond),
+ ]
+ )
+ p = Popen(
+ cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True
+ )
+ out, err = p.communicate()
+ if out in ("true\n", "true\r\n"):
+ ans = True
+ elif out in ("false\n", "false\r\n"):
+ ans = False
+ else:
+ raise Exception(
+ "Failed to test XUL condition {!r};"
+ " output was {!r}, stderr was {!r}".format(cond, out, err)
+ )
+ self.cache[cond] = ans
+ return ans
+
+
+class NullXULInfoTester:
+ """Can be used to parse manifests without a JS shell."""
+
+ def test(self, cond, options=[]):
+ return False
+
+
+def _parse_one(testcase, terms, xul_tester):
+ pos = 0
+ parts = terms.split()
+ while pos < len(parts):
+ if parts[pos] == "fails":
+ testcase.expect = False
+ pos += 1
+ elif parts[pos] == "skip":
+ testcase.expect = testcase.enable = False
+ pos += 1
+ elif parts[pos] == "random":
+ testcase.random = True
+ pos += 1
+ elif parts[pos].startswith("shell-option("):
+ # This directive adds an extra option to pass to the shell.
+ option = parts[pos][len("shell-option(") : -1]
+ testcase.options.append(option)
+ pos += 1
+ elif parts[pos].startswith("fails-if"):
+ cond = parts[pos][len("fails-if(") : -1]
+ if xul_tester.test(cond, testcase.options):
+ testcase.expect = False
+ pos += 1
+ elif parts[pos].startswith("asserts-if"):
+ # This directive means we may flunk some number of
+ # NS_ASSERTIONs in the browser. For the shell, ignore it.
+ pos += 1
+ elif parts[pos].startswith("skip-if"):
+ cond = parts[pos][len("skip-if(") : -1]
+ if xul_tester.test(cond, testcase.options):
+ testcase.expect = testcase.enable = False
+ pos += 1
+ elif parts[pos].startswith("ignore-flag"):
+ flag = parts[pos][len("ignore-flag(") : -1]
+ testcase.ignoredflags.append(flag)
+ pos += 1
+ elif parts[pos].startswith("random-if"):
+ cond = parts[pos][len("random-if(") : -1]
+ if xul_tester.test(cond, testcase.options):
+ testcase.random = True
+ pos += 1
+ elif parts[pos] == "slow":
+ testcase.slow = True
+ pos += 1
+ elif parts[pos].startswith("slow-if"):
+ cond = parts[pos][len("slow-if(") : -1]
+ if xul_tester.test(cond, testcase.options):
+ testcase.slow = True
+ pos += 1
+ elif parts[pos] == "silentfail":
+ # silentfails use tons of memory, and Darwin doesn't support ulimit.
+ if xul_tester.test("xulRuntime.OS == 'Darwin'", testcase.options):
+ testcase.expect = testcase.enable = False
+ pos += 1
+ elif parts[pos].startswith("error:"):
+ # This directive allows specifying an error type.
+ (_, _, errortype) = parts[pos].partition(":")
+ testcase.error = errortype
+ pos += 1
+ elif parts[pos] == "module":
+ # This directive marks the test as module code.
+ testcase.is_module = True
+ pos += 1
+ elif parts[pos] == "test262-raw":
+ testcase.is_test262_raw = True
+ pos += 1
+ elif parts[pos] == "async":
+ # This directive marks the test as async.
+ testcase.is_async = True
+ pos += 1
+ else:
+ print('warning: invalid manifest line element "{}"'.format(parts[pos]))
+ pos += 1
+
+
+def _build_manifest_script_entry(script_name, test):
+ line = []
+ properties = []
+ if test.terms:
+ # Remove jsreftest internal terms.
+ terms = " ".join(
+ [
+ term
+ for term in test.terms.split()
+ if not (
+ term == "module"
+ or term == "async"
+ or term.startswith("error:")
+ or term.startswith("ignore-flag(")
+ or term.startswith("shell-option(")
+ or term == "test262-raw"
+ )
+ ]
+ )
+ if terms:
+ line.append(terms)
+ if test.error:
+ properties.append("error=" + test.error)
+ if test.is_module:
+ properties.append("module")
+ if test.is_async:
+ properties.append("async")
+ if test.is_test262_raw:
+ properties.append("test262_raw")
+ line.append("script")
+ script = script_name
+ if properties:
+ script = ";".join([script] + properties)
+ line.append(script)
+ if test.comment:
+ line.append("#")
+ line.append(test.comment)
+ return " ".join(line)
+
+
+def _map_prefixes_left(test_gen):
+ """
+ Splits tests into a dictionary keyed on the first component of the test
+ path, aggregating tests with a common base path into a list.
+ """
+ byprefix = {}
+ for t in test_gen:
+ left, sep, remainder = t.path.partition(os.sep)
+ if left not in byprefix:
+ byprefix[left] = []
+ if remainder:
+ t.path = remainder
+ byprefix[left].append(t)
+ return byprefix
+
+
+def _emit_manifest_at(location, relative, test_gen, depth):
+ """
+ location - str: absolute path where we want to write the manifest
+ relative - str: relative path from topmost manifest directory to current
+ test_gen - (str): generator of all test paths and directories
+ depth - int: number of dirs we are below the topmost manifest dir
+ """
+ manifests = _map_prefixes_left(test_gen)
+
+ filename = os.path.join(location, "jstests.list")
+ manifest = []
+ numTestFiles = 0
+ for k, test_list in manifests.items():
+ fullpath = os.path.join(location, k)
+ if os.path.isdir(fullpath):
+ manifest.append("include " + k + "/jstests.list")
+ relpath = os.path.join(relative, k)
+ _emit_manifest_at(fullpath, relpath, test_list, depth + 1)
+ else:
+ numTestFiles += 1
+ assert len(test_list) == 1, test_list
+ line = _build_manifest_script_entry(k, test_list[0])
+ manifest.append(line)
+
+ # Always present our manifest in sorted order.
+ manifest.sort()
+
+ # If we have tests, we have to set the url-prefix so reftest can find them.
+ if numTestFiles > 0:
+ manifest = [
+ "url-prefix {}jsreftest.html?test={}/".format("../" * depth, relative)
+ ] + manifest
+
+ fp = io.open(filename, "w", encoding="utf-8", newline="\n")
+ try:
+ fp.write("\n".join(manifest) + "\n")
+ finally:
+ fp.close()
+
+
+def make_manifests(location, test_gen):
+ _emit_manifest_at(location, "", test_gen, 0)
+
+
+def _find_all_js_files(location):
+ for root, dirs, files in os.walk(location):
+ root = root[len(location) + 1 :]
+ for fn in files:
+ if fn.endswith(".js"):
+ yield root, fn
+
+
+# The pattern for test header lines.
+TEST_HEADER_PATTERN = r"""
+# Ignore any space before the tag.
+\s*
+
+# The reftest tag is enclosed in pipes.
+\|(?P<tag>.*?)\|
+
+# Ignore any space before the options.
+\s*
+
+# Accept some options.
+(?P<options>.*?)
+
+# Ignore space before the comments.
+\s*
+
+# Accept an optional comment starting with "--".
+(?:
+ # Unless "--" is directly preceded by "(".
+ (?<!\()
+ --
+
+ # Ignore more space.
+ \s*
+
+ # The actual comment.
+ (?P<comment>.*)
+)?
+"""
+
+
+TEST_HEADER_PATTERN_INLINE = re.compile(
+ r"""
+# Start a single line comment
+//
+"""
+ + TEST_HEADER_PATTERN
+ + r"""
+# Match the end of line.
+$
+""",
+ re.VERBOSE,
+)
+TEST_HEADER_PATTERN_MULTI = re.compile(
+ r"""
+# Start a multi line comment
+/\*
+"""
+ + TEST_HEADER_PATTERN
+ + r"""
+# Match the end of comment.
+\*/
+""",
+ re.VERBOSE,
+)
+
+
+def _append_terms_and_comment(testcase, terms, comment):
+ if testcase.terms is None:
+ testcase.terms = terms
+ else:
+ testcase.terms += " " + terms
+
+ if testcase.comment is None:
+ testcase.comment = comment
+ elif comment:
+ testcase.comment += "; " + comment
+
+
+def _parse_test_header(fullpath, testcase, xul_tester):
+ """
+ This looks a bit weird. The reason is that it needs to be efficient, since
+ it has to be done on every test
+ """
+ if six.PY3:
+ fp = open(fullpath, encoding="utf-8")
+ else:
+ fp = open(fullpath)
+ try:
+ buf = fp.read(512)
+ finally:
+ fp.close()
+
+ # Bail early if we do not start with a single comment.
+ if not buf.startswith("//"):
+ return
+
+ # Extract the token.
+ buf, _, _ = buf.partition("\n")
+ matches = TEST_HEADER_PATTERN_INLINE.match(buf)
+
+ if not matches:
+ matches = TEST_HEADER_PATTERN_MULTI.match(buf)
+ if not matches:
+ return
+
+ testcase.tag = matches.group("tag")
+ _append_terms_and_comment(
+ testcase, matches.group("options"), matches.group("comment")
+ )
+ _parse_one(testcase, matches.group("options"), xul_tester)
+
+
+def _parse_external_manifest(filename, relpath):
+ """
+ Reads an external manifest file for test suites whose individual test cases
+ can't be decorated with reftest comments.
+ filename - str: name of the manifest file
+ relpath - str: relative path of the directory containing the manifest
+ within the test suite
+ """
+ if not os.path.exists(filename):
+ return []
+
+ entries = []
+
+ with io.open(filename, "r", encoding="utf-8") as fp:
+ manifest_re = re.compile(
+ r"^\s*(?P<terms>.*)\s+(?P<type>include|script)\s+(?P<path>\S+)$"
+ )
+ include_re = re.compile(r"^\s*include\s+(?P<path>\S+)$")
+ for line in fp:
+ line, _, comment = line.partition("#")
+ line = line.strip()
+ if not line:
+ continue
+ matches = manifest_re.match(line)
+ if not matches:
+ matches = include_re.match(line)
+ if not matches:
+ print(
+ "warning: unrecognized line in jstests.list:"
+ " {0}".format(line)
+ )
+ continue
+
+ include_file = matches.group("path")
+ include_filename = os.path.join(os.path.dirname(filename), include_file)
+ include_relpath = os.path.join(relpath, os.path.dirname(include_file))
+ include_entries = _parse_external_manifest(
+ include_filename, include_relpath
+ )
+ entries.extend(include_entries)
+ continue
+
+ path = os.path.normpath(os.path.join(relpath, matches.group("path")))
+ if matches.group("type") == "include":
+ # The manifest spec wants a reference to another manifest here,
+ # but we need just the directory. We do need the trailing
+ # separator so we don't accidentally match other paths of which
+ # this one is a prefix.
+ assert path.endswith("jstests.list")
+ path = path[: -len("jstests.list")]
+
+ entries.append(
+ {
+ "path": path,
+ "terms": matches.group("terms"),
+ "comment": comment.strip(),
+ }
+ )
+
+ # if one directory name is a prefix of another, we want the shorter one
+ # first
+ entries.sort(key=lambda x: x["path"])
+ return entries
+
+
+def _apply_external_manifests(filename, testcase, entries, xul_tester):
+ for entry in entries:
+ if filename.startswith(entry["path"]):
+ # The reftest spec would require combining the terms (failure types)
+ # that may already be defined in the test case with the terms
+ # specified in entry; for example, a skip overrides a random, which
+ # overrides a fails. Since we don't necessarily know yet in which
+ # environment the test cases will be run, we'd also have to
+ # consider skip-if, random-if, and fails-if with as-yet unresolved
+ # conditions.
+ # At this point, we use external manifests only for test cases
+ # that can't have their own failure type comments, so we simply
+ # use the terms for the most specific path.
+ _append_terms_and_comment(testcase, entry["terms"], entry["comment"])
+ _parse_one(testcase, entry["terms"], xul_tester)
+
+
+def _is_test_file(path_from_root, basename, filename, path_options):
+ # Any file whose basename matches something in this set is ignored.
+ EXCLUDED = set(
+ (
+ "browser.js",
+ "shell.js",
+ "template.js",
+ "user.js",
+ "js-test-driver-begin.js",
+ "js-test-driver-end.js",
+ )
+ )
+
+ # Skip js files in the root test directory.
+ if not path_from_root:
+ return False
+
+ # Skip files that we know are not tests.
+ if basename in EXCLUDED:
+ return False
+
+ if not path_options.should_run(filename):
+ return False
+
+ return True
+
+
+def count_tests(location, path_options):
+ count = 0
+ for root, basename in _find_all_js_files(location):
+ filename = os.path.join(root, basename)
+ if _is_test_file(root, basename, filename, path_options):
+ count += 1
+ return count
+
+
+def load_reftests(location, path_options, xul_tester):
+ """
+ Locates all tests by walking the filesystem starting at |location|.
+ Uses xul_tester to evaluate any test conditions in the test header.
+ Failure type and comment for a test case can come from
+ - an external manifest entry for the test case,
+ - an external manifest entry for a containing directory,
+ - most commonly: the header of the test case itself.
+ """
+ manifestFile = os.path.join(location, "jstests.list")
+ externalManifestEntries = _parse_external_manifest(manifestFile, "")
+
+ for root, basename in _find_all_js_files(location):
+ # Get the full path and relative location of the file.
+ filename = os.path.join(root, basename)
+ if not _is_test_file(root, basename, filename, path_options):
+ continue
+
+ # Build the full path of the test file.
+ fullpath = os.path.join(location, filename)
+
+ testcase = RefTestCase(location, filename)
+ _apply_external_manifests(
+ filename, testcase, externalManifestEntries, xul_tester
+ )
+ _parse_test_header(fullpath, testcase, xul_tester)
+ yield testcase
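
For reference (not part of the patch), the shape of a jstests.list line accepted by _parse_external_manifest above. The regular expression is the one used in the function, copied here so the sketch is self-contained; the sample line is invented:

import re

manifest_re = re.compile(
    r"^\s*(?P<terms>.*)\s+(?P<type>include|script)\s+(?P<path>\S+)$"
)

raw = "skip-if(!this.hasOwnProperty('Intl')) script Date-toLocaleString.js # needs Intl"
line, _, comment = raw.partition("#")
m = manifest_re.match(line.strip())
print(m.group("terms"))   # skip-if(!this.hasOwnProperty('Intl'))
print(m.group("type"))    # script
print(m.group("path"))    # Date-toLocaleString.js
print(comment.strip())    # needs Intl

The resulting entries are later matched by path prefix in _apply_external_manifests, so a directory-level "include" entry can attach terms to every test underneath it.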
diff --git a/js/src/tests/lib/progressbar.py b/js/src/tests/lib/progressbar.py
new file mode 100644
index 0000000000..a37f74af19
--- /dev/null
+++ b/js/src/tests/lib/progressbar.py
@@ -0,0 +1,136 @@
+# Text progress bar library, like curl or scp.
+
+import math
+import sys
+from datetime import datetime, timedelta
+
+if sys.platform.startswith("win"):
+ from .terminal_win import Terminal
+else:
+ from .terminal_unix import Terminal
+
+
+class NullProgressBar(object):
+ def update(self, current, data):
+ pass
+
+ def poke(self):
+ pass
+
+ def finish(self, complete=True):
+ pass
+
+ def beginline(self):
+ pass
+
+ def message(self, msg):
+ sys.stdout.write(msg + "\n")
+
+ @staticmethod
+ def update_granularity():
+ return timedelta.max
+
+
+class ProgressBar(object):
+ def __init__(self, limit, fmt):
+ assert self.conservative_isatty()
+
+ self.prior = None
+ self.atLineStart = True
+ # [{str:str}] Description of how to lay out each field in the counters map.
+ self.counters_fmt = fmt
+ # int: The value of 'current' equal to 100%.
+ self.limit = limit
+ # int: max digits in limit
+ self.limit_digits = int(math.ceil(math.log10(self.limit)))
+ # datetime: The start time.
+ self.t0 = datetime.now()
+ # datetime: Optional, the last time update() ran.
+ self.last_update_time = None
+
+ # Compute the width of the counters and build the format string.
+ self.counters_width = 1 # [
+ for layout in self.counters_fmt:
+ self.counters_width += self.limit_digits
+ # | (or ']' for the last one)
+ self.counters_width += 1
+
+ self.barlen = 64 - self.counters_width
+
+ @staticmethod
+ def update_granularity():
+ return timedelta(seconds=0.1)
+
+ def update(self, current, data):
+ # Record prior for poke.
+ self.prior = (current, data)
+ self.atLineStart = False
+
+ # Build counters string.
+ sys.stdout.write("\r[")
+ for layout in self.counters_fmt:
+ Terminal.set_color(layout["color"])
+ sys.stdout.write(
+ ("{:" + str(self.limit_digits) + "d}").format(data[layout["value"]])
+ )
+ Terminal.reset_color()
+ if layout != self.counters_fmt[-1]:
+ sys.stdout.write("|")
+ else:
+ sys.stdout.write("] ")
+
+ # Build the bar.
+ pct = int(100.0 * current / self.limit)
+ sys.stdout.write("{:3d}% ".format(pct))
+
+ barlen = int(1.0 * self.barlen * current / self.limit) - 1
+ bar = "=" * barlen + ">" + " " * (self.barlen - barlen - 1)
+ sys.stdout.write(bar + "|")
+
+ # Update the bar.
+ now = datetime.now()
+ dt = now - self.t0
+ dt = dt.seconds + dt.microseconds * 1e-6
+ sys.stdout.write("{:6.1f}s".format(dt))
+ Terminal.clear_right()
+
+ # Force redisplay, since we didn't write a \n.
+ sys.stdout.flush()
+
+ self.last_update_time = now
+
+ def poke(self):
+ if not self.prior:
+ return
+ if datetime.now() - self.last_update_time < self.update_granularity():
+ return
+ self.update(*self.prior)
+
+ def finish(self, complete=True):
+ if not self.prior:
+ sys.stdout.write(
+ "No test run... You can try adding"
+ " --run-slow-tests or --run-skipped to run more tests\n"
+ )
+ return
+ final_count = self.limit if complete else self.prior[0]
+ self.update(final_count, self.prior[1])
+ sys.stdout.write("\n")
+
+ def beginline(self):
+ if not self.atLineStart:
+ sys.stdout.write("\n")
+ self.atLineStart = True
+
+ def message(self, msg):
+ self.beginline()
+ sys.stdout.write(msg)
+ sys.stdout.write("\n")
+
+ @staticmethod
+ def conservative_isatty():
+ """
+ Prefer erring on the side of caution and not using terminal commands if
+ the current output stream may be a file.
+ """
+ return sys.stdout.isatty()
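
A small usage sketch (not part of the patch) of the update() protocol the runners rely on. It assumes the working directory is js/src/tests so that the package-relative imports inside lib/progressbar.py resolve, and it applies the same terminal fallback policy as create_progressbar() in jittests.py:

from lib.progressbar import NullProgressBar, ProgressBar

fmt = [
    {"value": "PASS", "color": "green"},
    {"value": "FAIL", "color": "red"},
    {"value": "TIMEOUT", "color": "blue"},
    {"value": "SKIP", "color": "brightgray"},
]
num_tests = 8
# Only draw the bar when stdout really is a terminal.
if ProgressBar.conservative_isatty():
    pb = ProgressBar(num_tests, fmt)
else:
    pb = NullProgressBar()
for n in range(1, num_tests + 1):
    pb.update(n, {"PASS": n, "FAIL": 0, "TIMEOUT": 0, "SKIP": 0})
pb.message("FAIL - example/path.js")  # messages always start on a fresh line
pb.finish(True)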
diff --git a/js/src/tests/lib/remote.py b/js/src/tests/lib/remote.py
new file mode 100644
index 0000000000..71cedf42ec
--- /dev/null
+++ b/js/src/tests/lib/remote.py
@@ -0,0 +1,106 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+# remote.py -- Python library for Android devices.
+
+import os
+import posixpath
+
+
+def push_libs(options, device, dest_dir):
+ # This saves considerable time in pushing unnecessary libraries
+ # to the device but needs to be updated if the dependencies change.
+ required_libs = [
+ "libnss3.so",
+ "libmozglue.so",
+ "libnspr4.so",
+ "libplc4.so",
+ "libplds4.so",
+ ]
+
+ for file in os.listdir(options.local_lib):
+ if file in required_libs:
+ local_file = os.path.join(options.local_lib, file)
+ remote_file = posixpath.join(dest_dir, file)
+ assert os.path.isfile(local_file)
+ device.push(local_file, remote_file)
+ device.chmod(remote_file)
+
+
+def push_progs(options, device, progs, dest_dir):
+ assert isinstance(progs, list)
+ for local_file in progs:
+ remote_file = posixpath.join(dest_dir, os.path.basename(local_file))
+ assert os.path.isfile(local_file)
+ device.push(local_file, remote_file)
+ device.chmod(remote_file)
+
+
+def init_remote_dir(device, path):
+ device.rm(path, recursive=True, force=True)
+ device.mkdir(path, parents=True)
+
+
+# We only have one device per test run.
+DEVICE = None
+
+
+def init_device(options):
+ # Initialize the device
+ global DEVICE
+
+ assert options.remote and options.js_shell
+
+ if DEVICE is not None:
+ return DEVICE
+
+ from mozdevice import ADBDeviceFactory, ADBError, ADBTimeoutError
+
+ try:
+ if not options.local_lib:
+ # if not specified, use the local directory containing
+ # the js binary to find the necessary libraries.
+ options.local_lib = posixpath.dirname(options.js_shell)
+
+ # Try to find 'adb' off the build environment to automatically use the
+ # .mozbuild version if possible. In test automation, we don't have
+ # mozbuild available so use the default 'adb' that automation provides.
+ try:
+ from mozbuild.base import MozbuildObject
+ from mozrunner.devices.android_device import get_adb_path
+
+ context = MozbuildObject.from_environment()
+ adb_path = get_adb_path(context)
+ except (ImportError):
+ adb_path = "adb"
+
+ DEVICE = ADBDeviceFactory(
+ adb=adb_path,
+ device=options.device_serial,
+ test_root=options.remote_test_root,
+ )
+
+ bin_dir = posixpath.join(options.remote_test_root, "bin")
+ tests_dir = posixpath.join(options.remote_test_root, "tests")
+ temp_dir = posixpath.join(options.remote_test_root, "tmp")
+
+ # Create directory structure on device
+ init_remote_dir(DEVICE, options.remote_test_root)
+ init_remote_dir(DEVICE, tests_dir)
+ init_remote_dir(DEVICE, bin_dir)
+ init_remote_dir(DEVICE, temp_dir)
+
+ # Push js shell and libraries.
+ push_libs(options, DEVICE, bin_dir)
+ push_progs(options, DEVICE, [options.js_shell], bin_dir)
+
+ # update options.js_shell to point to the js binary on the device
+ options.js_shell = os.path.join(bin_dir, "js")
+
+ return DEVICE
+
+ except (ADBError, ADBTimeoutError):
+ print("TEST-UNEXPECTED-FAIL | remote.py : Device initialization failed")
+ raise
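
To make the device setup above easier to follow (not part of the patch), here is a sketch with a fake in-memory device standing in for the mozdevice ADB object; only the methods remote.py calls are modelled, and the paths are made up:

import posixpath

class FakeDevice:
    """Hypothetical stand-in for the mozdevice ADB device object."""

    def rm(self, path, recursive=False, force=False):
        print("rm", "-rf" if recursive and force else "", path)

    def mkdir(self, path, parents=False):
        print("mkdir", "-p" if parents else "", path)

    def push(self, local, remote):
        print("push", local, "->", remote)

    def chmod(self, remote):
        print("chmod", remote)

def init_remote_dir(device, path):
    # Same two calls as remote.py above: wipe the directory, then recreate it.
    device.rm(path, recursive=True, force=True)
    device.mkdir(path, parents=True)

device = FakeDevice()
remote_test_root = "/data/local/tmp/test_root"
bin_dir = posixpath.join(remote_test_root, "bin")
for d in (remote_test_root,
          posixpath.join(remote_test_root, "tests"),
          bin_dir,
          posixpath.join(remote_test_root, "tmp")):
    init_remote_dir(device, d)
# push_progs() then copies the js shell (and push_libs() its shared libraries):
device.push("dist/bin/js", posixpath.join(bin_dir, "js"))
device.chmod(posixpath.join(bin_dir, "js"))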
diff --git a/js/src/tests/lib/results.py b/js/src/tests/lib/results.py
new file mode 100644
index 0000000000..f902060354
--- /dev/null
+++ b/js/src/tests/lib/results.py
@@ -0,0 +1,471 @@
+import json
+import pipes
+import re
+
+from .progressbar import NullProgressBar, ProgressBar
+from .structuredlog import TestLogger
+
+# subprocess.list2cmdline does not properly escape for sh-like shells
+
+
+def escape_cmdline(args):
+ return " ".join([pipes.quote(a) for a in args])
+
+
+class TestOutput:
+ """Output from a test run."""
+
+ def __init__(self, test, cmd, out, err, rc, dt, timed_out, extra=None):
+ self.test = test # Test
+ self.cmd = cmd # str: command line of test
+ self.out = out # str: stdout
+ self.err = err # str: stderr
+ self.rc = rc # int: return code
+ self.dt = dt # float: run time
+ self.timed_out = timed_out # bool: did the test time out
+ self.extra = extra # includes the pid on some platforms
+
+ def describe_failure(self):
+ if self.timed_out:
+ return "Timeout"
+ lines = self.err.splitlines()
+ for line in lines:
+ # Skip the asm.js compilation success message.
+ if "Successfully compiled asm.js code" not in line:
+ return line
+ return "Unknown"
+
+
+class NullTestOutput:
+ """Variant of TestOutput that indicates a test was not run."""
+
+ def __init__(self, test):
+ self.test = test
+ self.cmd = ""
+ self.out = ""
+ self.err = ""
+ self.rc = 0
+ self.dt = 0.0
+ self.timed_out = False
+
+
+class TestResult:
+ PASS = "PASS"
+ FAIL = "FAIL"
+ CRASH = "CRASH"
+
+ """Classified result from a test run."""
+
+ def __init__(self, test, result, results, wpt_results=None):
+ self.test = test
+ self.result = result
+ self.results = results
+ self.wpt_results = wpt_results # Only used for wpt tests.
+
+ @classmethod
+ def from_wpt_output(cls, output):
+ """Parse the output from a web-platform test that uses testharness.js.
+ (The output is written to stdout in js/src/tests/testharnessreport.js.)
+ """
+ from wptrunner.executors.base import testharness_result_converter
+
+ rc = output.rc
+ stdout = output.out.split("\n")
+ if rc != 0:
+ if rc == 3:
+ harness_status = "ERROR"
+ harness_message = "Exit code reported exception"
+ else:
+ harness_status = "CRASH"
+ harness_message = "Exit code reported crash"
+ tests = []
+ else:
+ for (idx, line) in enumerate(stdout):
+ if line.startswith("WPT OUTPUT: "):
+ msg = line[len("WPT OUTPUT: ") :]
+ data = [output.test.wpt.url] + json.loads(msg)
+ harness_status_obj, tests = testharness_result_converter(
+ output.test.wpt, data
+ )
+ harness_status = harness_status_obj.status
+ harness_message = "Reported by harness: %s" % (
+ harness_status_obj.message,
+ )
+ del stdout[idx]
+ break
+ else:
+ harness_status = "ERROR"
+ harness_message = "No harness output found"
+ tests = []
+ stdout.append("Harness status: %s (%s)" % (harness_status, harness_message))
+
+ result = cls.PASS
+ results = []
+ subtests = []
+ expected_harness_status = output.test.wpt.expected()
+ if harness_status != expected_harness_status:
+ if harness_status == "CRASH":
+ result = cls.CRASH
+ else:
+ result = cls.FAIL
+ else:
+ for test in tests:
+ test_output = 'Subtest "%s": ' % (test.name,)
+ expected = output.test.wpt.expected(test.name)
+ if test.status == expected:
+ test_result = (cls.PASS, "")
+ test_output += "as expected: %s" % (test.status,)
+ else:
+ test_result = (cls.FAIL, test.message)
+ result = cls.FAIL
+ test_output += "expected %s, found %s" % (expected, test.status)
+ if test.message:
+ test_output += ' (with message: "%s")' % (test.message,)
+ subtests.append(
+ {
+ "test": output.test.wpt.id,
+ "subtest": test.name,
+ "status": test.status,
+ "expected": expected,
+ }
+ )
+ results.append(test_result)
+ stdout.append(test_output)
+
+ output.out = "\n".join(stdout) + "\n"
+
+ wpt_results = {
+ "name": output.test.wpt.id,
+ "status": harness_status,
+ "expected": expected_harness_status,
+ "subtests": subtests,
+ }
+
+ return cls(output.test, result, results, wpt_results)
+
+ @classmethod
+ def from_output(cls, output):
+ test = output.test
+ result = None # str: overall result, see class-level variables
+ results = [] # (str,str) list: subtest results (pass/fail, message)
+
+ if test.wpt:
+ return cls.from_wpt_output(output)
+
+ out, err, rc = output.out, output.err, output.rc
+
+ failures = 0
+ passes = 0
+
+ expected_rcs = []
+ if test.path.endswith("-n.js"):
+ expected_rcs.append(3)
+
+ for line in out.split("\n"):
+ if line.startswith(" FAILED!"):
+ failures += 1
+ msg = line[len(" FAILED! ") :]
+ results.append((cls.FAIL, msg))
+ elif line.startswith(" PASSED!"):
+ passes += 1
+ msg = line[len(" PASSED! ") :]
+ results.append((cls.PASS, msg))
+ else:
+ m = re.match(
+ "--- NOTE: IN THIS TESTCASE, WE EXPECT EXIT CODE"
+ " ((?:-|\\d)+) ---",
+ line,
+ )
+ if m:
+ expected_rcs.append(int(m.group(1)))
+
+ if test.error is not None:
+ expected_rcs.append(3)
+ if test.error not in err:
+ failures += 1
+ results.append(
+ (cls.FAIL, "Expected uncaught error: {}".format(test.error))
+ )
+
+ if rc and rc not in expected_rcs:
+ if rc == 3:
+ result = cls.FAIL
+ else:
+ result = cls.CRASH
+ else:
+ if (rc or passes > 0) and failures == 0:
+ result = cls.PASS
+ else:
+ result = cls.FAIL
+
+ return cls(test, result, results)
+
+
+class TestDuration:
+ def __init__(self, test, duration):
+ self.test = test
+ self.duration = duration
+
+
+class ResultsSink:
+ def __init__(self, testsuite, options, testcount):
+ self.options = options
+ self.fp = options.output_fp
+ if self.options.format == "automation":
+ self.slog = TestLogger(testsuite)
+ self.slog.suite_start()
+
+ self.wptreport = None
+ if self.options.wptreport:
+ try:
+ from .wptreport import WptreportHandler
+
+ self.wptreport = WptreportHandler(self.options.wptreport)
+ self.wptreport.suite_start()
+ except ImportError:
+ pass
+
+ self.groups = {}
+ self.output_dict = {}
+ self.counts = {"PASS": 0, "FAIL": 0, "TIMEOUT": 0, "SKIP": 0}
+ self.slow_tests = []
+ self.n = 0
+
+ if options.hide_progress:
+ self.pb = NullProgressBar()
+ else:
+ fmt = [
+ {"value": "PASS", "color": "green"},
+ {"value": "FAIL", "color": "red"},
+ {"value": "TIMEOUT", "color": "blue"},
+ {"value": "SKIP", "color": "brightgray"},
+ ]
+ self.pb = ProgressBar(testcount, fmt)
+
+ def push(self, output):
+ if self.options.show_slow and output.dt >= self.options.slow_test_threshold:
+ self.slow_tests.append(TestDuration(output.test, output.dt))
+ if output.timed_out:
+ self.counts["TIMEOUT"] += 1
+ if isinstance(output, NullTestOutput):
+ if self.options.format == "automation":
+ self.print_automation_result(
+ "TEST-KNOWN-FAIL", output.test, time=output.dt, skip=True
+ )
+ self.counts["SKIP"] += 1
+ self.n += 1
+ else:
+ result = TestResult.from_output(output)
+
+ if self.wptreport is not None and result.wpt_results:
+ self.wptreport.test(result.wpt_results, output.dt)
+
+ tup = (result.result, result.test.expect, result.test.random)
+ dev_label = self.LABELS[tup][1]
+
+ if self.options.check_output:
+ if output.test.path in self.output_dict.keys():
+ if self.output_dict[output.test.path] != output:
+ self.counts["FAIL"] += 1
+ self.print_automation_result(
+ "TEST-UNEXPECTED-FAIL",
+ result.test,
+ time=output.dt,
+ message="Same test with different flag producing different output",
+ )
+ else:
+ self.output_dict[output.test.path] = output
+
+ if output.timed_out:
+ dev_label = "TIMEOUTS"
+ self.groups.setdefault(dev_label, []).append(result)
+
+ if dev_label == "REGRESSIONS":
+ show_output = (
+ self.options.show_output or not self.options.no_show_failed
+ )
+ elif dev_label == "TIMEOUTS":
+ show_output = self.options.show_output
+ else:
+ show_output = self.options.show_output and not self.options.failed_only
+
+ if dev_label in ("REGRESSIONS", "TIMEOUTS"):
+ show_cmd = self.options.show_cmd
+ else:
+ show_cmd = self.options.show_cmd and not self.options.failed_only
+
+ if show_output or show_cmd:
+ self.pb.beginline()
+
+ if show_output:
+ print(
+ "## {}: rc = {:d}, run time = {}".format(
+ output.test.path, output.rc, output.dt
+ ),
+ file=self.fp,
+ )
+
+ if show_cmd:
+ print(escape_cmdline(output.cmd), file=self.fp)
+
+ if show_output:
+
+ def write_with_fallback(fp, data):
+ try:
+ fp.write(data)
+ except UnicodeEncodeError as e:
+ # If the data contains something the output encoding
+ # cannot represent, fall back to an escaped
+ # (namereplace) rendering.
+ fp.write(
+ "WARNING: Falling back from exception: {}\n".format(e)
+ )
+ fp.write("WARNING: The following output is escaped, ")
+ fp.write("and may differ from the original output.\n")
+ fp.write(
+ data.encode("ascii", "namereplace").decode("ascii")
+ )
+
+ write_with_fallback(self.fp, output.out)
+ write_with_fallback(self.fp, output.err)
+
+ self.n += 1
+
+ if result.result == TestResult.PASS and not result.test.random:
+ self.counts["PASS"] += 1
+ elif result.test.expect and not result.test.random:
+ self.counts["FAIL"] += 1
+ else:
+ self.counts["SKIP"] += 1
+
+ if self.options.format == "automation":
+ if result.result != TestResult.PASS and len(result.results) > 1:
+ for sub_ok, msg in result.results:
+ tup = (sub_ok, result.test.expect, result.test.random)
+ label = self.LABELS[tup][0]
+ if label == "TEST-UNEXPECTED-PASS":
+ label = "TEST-PASS (EXPECTED RANDOM)"
+ self.print_automation_result(
+ label, result.test, time=output.dt, message=msg
+ )
+ tup = (result.result, result.test.expect, result.test.random)
+ self.print_automation_result(
+ self.LABELS[tup][0],
+ result.test,
+ time=output.dt,
+ extra=getattr(output, "extra", None),
+ )
+ return
+
+ if dev_label:
+
+ def singular(label):
+ return "FIXED" if label == "FIXES" else label[:-1]
+
+ self.pb.message("{} - {}".format(singular(dev_label), output.test.path))
+
+ self.pb.update(self.n, self.counts)
+
+ def finish(self, completed):
+ self.pb.finish(completed)
+ if self.options.format == "automation":
+ self.slog.suite_end()
+ else:
+ self.list(completed)
+
+ if self.wptreport is not None:
+ self.wptreport.suite_end()
+
+ # Conceptually, this maps (test result x test expectation) to text labels.
+ # key is (result, expect, random)
+ # value is (automation label, dev test category)
+ LABELS = {
+ (TestResult.CRASH, False, False): ("TEST-UNEXPECTED-FAIL", "REGRESSIONS"),
+ (TestResult.CRASH, False, True): ("TEST-UNEXPECTED-FAIL", "REGRESSIONS"),
+ (TestResult.CRASH, True, False): ("TEST-UNEXPECTED-FAIL", "REGRESSIONS"),
+ (TestResult.CRASH, True, True): ("TEST-UNEXPECTED-FAIL", "REGRESSIONS"),
+ (TestResult.FAIL, False, False): ("TEST-KNOWN-FAIL", ""),
+ (TestResult.FAIL, False, True): ("TEST-KNOWN-FAIL (EXPECTED RANDOM)", ""),
+ (TestResult.FAIL, True, False): ("TEST-UNEXPECTED-FAIL", "REGRESSIONS"),
+ (TestResult.FAIL, True, True): ("TEST-KNOWN-FAIL (EXPECTED RANDOM)", ""),
+ (TestResult.PASS, False, False): ("TEST-UNEXPECTED-PASS", "FIXES"),
+ (TestResult.PASS, False, True): ("TEST-PASS (EXPECTED RANDOM)", ""),
+ (TestResult.PASS, True, False): ("TEST-PASS", ""),
+ (TestResult.PASS, True, True): ("TEST-PASS (EXPECTED RANDOM)", ""),
+ }
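+ # For example, (TestResult.FAIL, True, False) -- an unexpected failure of a
+ # test that is expected to pass and is not marked random -- maps to the
+ # automation label "TEST-UNEXPECTED-FAIL" and the "REGRESSIONS" dev category.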
+
+ def list(self, completed):
+ for label, results in sorted(self.groups.items()):
+ if label == "":
+ continue
+
+ print(label)
+ for result in results:
+ print(
+ " {}".format(
+ " ".join(
+ result.test.jitflags
+ + result.test.options
+ + [result.test.path]
+ )
+ )
+ )
+
+ if self.options.failure_file:
+ failure_file = open(self.options.failure_file, "w")
+ if not self.all_passed():
+ if "REGRESSIONS" in self.groups:
+ for result in self.groups["REGRESSIONS"]:
+ print(result.test.path, file=failure_file)
+ if "TIMEOUTS" in self.groups:
+ for result in self.groups["TIMEOUTS"]:
+ print(result.test.path, file=failure_file)
+ failure_file.close()
+
+ suffix = "" if completed else " (partial run -- interrupted by user)"
+ if self.all_passed():
+ print("PASS" + suffix)
+ else:
+ print("FAIL" + suffix)
+
+ if self.options.show_slow:
+ min_duration = self.options.slow_test_threshold
+ print("Slow tests (duration > {}s)".format(min_duration))
+ slow_tests = sorted(self.slow_tests, key=lambda x: x.duration, reverse=True)
+ found_slow = False
+ for test in slow_tests:
+ print("{:>5} {}".format(round(test.duration, 2), test.test))
+ found_slow = True
+ if not found_slow:
+ print("None")
+
+ def all_passed(self):
+ return "REGRESSIONS" not in self.groups and "TIMEOUTS" not in self.groups
+
+ def print_automation_result(
+ self, label, test, message=None, skip=False, time=None, extra=None
+ ):
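+ # Emits a status line of the form
+ # LABEL | path/to/test.js | (args: "<shell args + jitflags>") | <message> [1.2 s]
+ # (the message, SKIP and TIMEOUT parts are optional) and mirrors the
+ # result into the structured log.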
+ result = label
+ result += " | " + test.path
+ args = []
+ if self.options.shell_args:
+ args.append(self.options.shell_args)
+ args += test.jitflags
+ result += ' | (args: "{}")'.format(" ".join(args))
+ if message:
+ result += " | " + message
+ if skip:
+ result += " | (SKIP)"
+ if time > self.options.timeout:
+ result += " | (TIMEOUT)"
+ result += " [{:.1f} s]".format(time)
+ print(result)
+
+ details = {"extra": extra.copy() if extra else {}}
+ if self.options.shell_args:
+ details["extra"]["shell_args"] = self.options.shell_args
+ details["extra"]["jitflags"] = test.jitflags
+ if message:
+ details["message"] = message
+ status = "FAIL" if "TEST-UNEXPECTED" in label else "PASS"
+
+ self.slog.test(test.path, status, time or 0, **details)
diff --git a/js/src/tests/lib/structuredlog.py b/js/src/tests/lib/structuredlog.py
new file mode 100644
index 0000000000..2f2d317d02
--- /dev/null
+++ b/js/src/tests/lib/structuredlog.py
@@ -0,0 +1,56 @@
+# produce mozlog-compatible log messages, following the spec at
+# https://mozbase.readthedocs.io/en/latest/mozlog.html
+
+import json
+import os
+from time import time
+
+
+class TestLogger(object):
+ def __init__(self, source, threadname="main"):
+ self.template = {
+ "source": source,
+ "thread": threadname,
+ "pid": os.getpid(),
+ }
+ directory = os.environ.get("MOZ_UPLOAD_DIR", ".")
+ self.fh = open(os.path.join(directory, threadname + "_raw.log"), "a")
+
+ def _record(self, **kwargs):
+ record = self.template.copy()
+ record.update(**kwargs)
+ if "time" not in record:
+ record["time"] = time()
+ return record
+
+ def _log_obj(self, obj):
+ print(json.dumps(obj, sort_keys=True), file=self.fh)
+
+ def _log(self, **kwargs):
+ self._log_obj(self._record(**kwargs))
+
+ def suite_start(self):
+ self._log(action="suite_start", tests=[])
+
+ def suite_end(self):
+ self._log(action="suite_end")
+
+ def test_start(self, testname):
+ self._log(action="test_start", test=testname)
+
+ def test_end(self, testname, status):
+ self._log(action="test_end", test=testname, status=status)
+
+ def test(self, testname, status, duration, **details):
+ record = self._record(
+ action="test_start", test=testname, **details.get("extra", {})
+ )
+ end_time = record["time"]
+ record["time"] -= duration
+ self._log_obj(record)
+
+ record["action"] = "test_end"
+ record["time"] = end_time
+ record["status"] = status
+ record.update(**details)
+ self._log_obj(record)
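+
+
+# Illustrative usage sketch (the names and timing below are made up; the real
+# caller is ResultsSink in results.py, which passes the suite name and the
+# measured durations):
+#
+# logger = TestLogger("jstests")
+# logger.suite_start()
+# logger.test("path/to/test.js", "PASS", 0.25)
+# logger.suite_end()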
diff --git a/js/src/tests/lib/tasks_adb_remote.py b/js/src/tests/lib/tasks_adb_remote.py
new file mode 100644
index 0000000000..2d2739a281
--- /dev/null
+++ b/js/src/tests/lib/tasks_adb_remote.py
@@ -0,0 +1,284 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+import os
+import posixpath
+import sys
+import tempfile
+from datetime import timedelta
+
+from mozdevice import ADBDevice, ADBError, ADBProcessError, ADBTimeoutError
+
+from .adaptor import xdr_annotate
+from .remote import init_device
+from .results import TestOutput, escape_cmdline
+
+TESTS_LIB_DIR = os.path.dirname(os.path.abspath(__file__))
+JS_DIR = os.path.dirname(os.path.dirname(TESTS_LIB_DIR))
+JS_TESTS_DIR = posixpath.join(JS_DIR, "tests")
+TEST_DIR = os.path.join(JS_DIR, "jit-test", "tests")
+
+
+def aggregate_script_stdout(stdout_lines, prefix, tempdir, uniq_tag, tests, options):
+ test = None
+ tStart = None
+ cmd = ""
+ stdout = ""
+
+ # Used to debug this script in case of an assertion failure.
+ meta_history = []
+ last_line = ""
+
+ # Assert that the streamed content is not interrupted.
+ ended = False
+
+ # Check whether the tag is present: if so, this is control output
+ # produced by the test runner; otherwise it is plain stdout content.
+ try:
+ for line in stdout_lines:
+ last_line = line
+ if line.startswith(uniq_tag):
+ meta = line[len(uniq_tag) :].strip()
+ meta_history.append(meta)
+ if meta.startswith("START="):
+ assert test is None
+ params = meta[len("START=") :].split(",")
+ test_idx = int(params[0])
+ test = tests[test_idx]
+ tStart = timedelta(seconds=float(params[1]))
+ cmd = test.command(
+ prefix,
+ posixpath.join(options.remote_test_root, "lib/"),
+ posixpath.join(options.remote_test_root, "modules/"),
+ tempdir,
+ posixpath.join(options.remote_test_root, "tests"),
+ )
+ stdout = ""
+ if options.show_cmd:
+ print(escape_cmdline(cmd))
+ elif meta.startswith("STOP="):
+ assert test is not None
+ params = meta[len("STOP=") :].split(",")
+ exitcode = int(params[0])
+ dt = timedelta(seconds=float(params[1])) - tStart
+ yield TestOutput(
+ test,
+ cmd,
+ stdout,
+ # NOTE: mozdevice fuses stdout and stderr. Thus, we are
+ # using stdout for both stdout and stderr. So far,
+ # doing so has not caused any issues.
+ stdout,
+ exitcode,
+ dt.total_seconds(),
+ dt > timedelta(seconds=int(options.timeout)),
+ )
+ stdout = ""
+ cmd = ""
+ test = None
+ elif meta.startswith("RETRY="):
+ # On timeout, discard the output of the timed-out attempt
+ # and retry, to avoid a random hang on pthread_join.
+ assert test is not None
+ stdout = ""
+ cmd = ""
+ test = None
+ else:
+ assert meta.startswith("THE_END")
+ ended = True
+ else:
+ assert uniq_tag not in line
+ stdout += line
+
+ # This assertion fails if the streamed content is interrupted, either
+ # by unplugging the phone or some adb failures.
+ assert ended
+ except AssertionError as e:
+ sys.stderr.write("Metadata history:\n{}\n".format("\n".join(meta_history)))
+ sys.stderr.write("Last line: {}\n".format(last_line))
+ raise e
+
+
+def setup_device(prefix, options):
+ try:
+ device = init_device(options)
+
+ def replace_lib_file(path, name):
+ localfile = os.path.join(JS_TESTS_DIR, *path)
+ remotefile = posixpath.join(options.remote_test_root, "lib", name)
+ device.push(localfile, remotefile, timeout=10)
+
+ prefix[0] = posixpath.join(options.remote_test_root, "bin", "js")
+ tempdir = posixpath.join(options.remote_test_root, "tmp")
+
+ print("tasks_adb_remote.py : Transfering test files")
+
+ # Push tests & lib directories.
+ device.push(os.path.dirname(TEST_DIR), options.remote_test_root, timeout=600)
+
+ # Substitute lib files which alias non262 files.
+ replace_lib_file(["non262", "shell.js"], "non262.js")
+ replace_lib_file(["non262", "reflect-parse", "Match.js"], "match.js")
+ replace_lib_file(["non262", "Math", "shell.js"], "math.js")
+ device.chmod(options.remote_test_root, recursive=True)
+
+ print("tasks_adb_remote.py : Device initialization completed")
+ return device, tempdir
+ except (ADBError, ADBTimeoutError):
+ print(
+ "TEST-UNEXPECTED-FAIL | tasks_adb_remote.py : "
+ + "Device initialization failed"
+ )
+ raise
+
+
+def script_preamble(tag, prefix, options):
+ timeout = int(options.timeout)
+ retry = int(options.timeout_retry)
+ lib_path = os.path.dirname(prefix[0])
+ return """
+export LD_LIBRARY_PATH={lib_path}
+
+do_test()
+{{
+ local idx=$1; shift;
+ local attempt=$1; shift;
+
+ # Read a timestamp (10 ms resolution, in seconds) using shell builtins
+ # and /proc/uptime.
+ local time;
+ local unused;
+
+ # When printing the tag, we prefix it with a newline, in case the
+ # previous command's output did not end with one.
+ read time unused < /proc/uptime
+ echo '\\n{tag}START='$idx,$time
+ timeout {timeout}s "$@"
+ local rc=$?
+ read time unused < /proc/uptime
+
+ # Retry on timeout, to work around an unlikely pthread_join hang.
+ #
+ # The timeout command sends a SIGTERM signal, which should yield exit
+ # code 143 (=128+15). However, due to a bug in tinybox, it returns 142.
+ if test \( $rc -eq 143 -o $rc -eq 142 \) -a $attempt -lt {retry}; then
+ echo '\\n{tag}RETRY='$rc,$time
+ attempt=$((attempt + 1))
+ do_test $idx $attempt "$@"
+ else
+ echo '\\n{tag}STOP='$rc,$time
+ fi
+}}
+
+do_end()
+{{
+ echo '\\n{tag}THE_END'
+}}
+""".format(
+ tag=tag, lib_path=lib_path, timeout=timeout, retry=retry
+ )
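+
+# For illustration, with a uniq_tag of "@@@TAG@@@" a single successful run of
+# do_test emits a stream such as:
+#
+# @@@TAG@@@START=12,1034.56
+# ...test output...
+# @@@TAG@@@STOP=0,1036.10
+#
+# and do_end closes the batch with "@@@TAG@@@THE_END". These are exactly the
+# markers parsed by aggregate_script_stdout() above.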
+
+
+def setup_script(device, prefix, tempdir, options, uniq_tag, tests):
+ timeout = int(options.timeout)
+ script_timeout = 0
+ try:
+ tmpf = tempfile.NamedTemporaryFile(mode="w", delete=False)
+ tmpf.write(script_preamble(uniq_tag, prefix, options))
+ for i, test in enumerate(tests):
+ # This check is common to all tasks_*.py files; however, jit-test does
+ # not provide the `run_skipped` option, and all tests are always
+ # enabled.
+ assert test.enable # and not options.run_skipped
+ if options.test_reflect_stringify:
+ raise ValueError("can't run Reflect.stringify tests remotely")
+
+ cmd = test.command(
+ prefix,
+ posixpath.join(options.remote_test_root, "lib/"),
+ posixpath.join(options.remote_test_root, "modules/"),
+ tempdir,
+ posixpath.join(options.remote_test_root, "tests"),
+ )
+
+ # Replace with shlex.join once we move to Python 3.8+.
+ cmd = ADBDevice._escape_command_line(cmd)
+
+ env = {}
+ if test.tz_pacific:
+ env["TZ"] = "PST8PDT"
+ envStr = "".join(key + "='" + val + "' " for key, val in env.items())
+
+ tmpf.write("{}do_test {} 0 {};\n".format(envStr, i, cmd))
+ script_timeout += timeout
+ tmpf.write("do_end;\n")
+ tmpf.close()
+ script = posixpath.join(options.remote_test_root, "test_manifest.sh")
+ device.push(tmpf.name, script)
+ device.chmod(script)
+ print("tasks_adb_remote.py : Batch script created")
+ except Exception as e:
+ print("tasks_adb_remote.py : Batch script failed")
+ raise e
+ finally:
+ if tmpf:
+ os.unlink(tmpf.name)
+ return script, script_timeout
+
+
+def start_script(
+ device, prefix, tempdir, script, uniq_tag, script_timeout, tests, options
+):
+ env = {}
+
+ # Allow ADBError or ADBTimeoutError to terminate the test run, but handle
+ # ADBProcessError in order to support the use of non-zero exit codes in the
+ # JavaScript shell tests.
+ #
+ # aggregate_script_stdout() consumes each output line, reconstructs the
+ # output produced by each test, and yields one TestOutput per test.
+ try:
+ adb_process = device.shell(
+ "sh {}".format(script),
+ env=env,
+ cwd=options.remote_test_root,
+ timeout=script_timeout,
+ yield_stdout=True,
+ )
+ for test_output in aggregate_script_stdout(
+ adb_process, prefix, tempdir, uniq_tag, tests, options
+ ):
+ yield test_output
+ except ADBProcessError as e:
+ # After a device error, the device is typically in a
+ # state where all further tests will fail so there is no point in
+ # continuing here.
+ sys.stderr.write("Error running remote tests: {}".format(repr(e)))
+
+
+def get_remote_results(tests, prefix, pb, options):
+ """Create a script which batches the run of all tests, and spawn a thread to
+ reconstruct the TestOutput for each test. This is made to avoid multiple
+ `adb.shell` commands which has a high latency.
+ """
+ device, tempdir = setup_device(prefix, options)
+
+ # Tests are sequentially executed in a batch. The first test executed is in
+ # charge of creating the xdr file for the self-hosted code.
+ if options.use_xdr:
+ tests = xdr_annotate(tests, options)
+
+ # We need tests to be subscriptable to find the test structure matching the
+ # index within the generated script.
+ tests = list(tests)
+
+ # Create a script which spawns each test one after the other, and upload
+ # the script to the device.
+ uniq_tag = "@@@TASKS_ADB_REMOTE@@@"
+ script, script_timeout = setup_script(
+ device, prefix, tempdir, options, uniq_tag, tests
+ )
+
+ for test_output in start_script(
+ device, prefix, tempdir, script, uniq_tag, script_timeout, tests, options
+ ):
+ yield test_output
diff --git a/js/src/tests/lib/tasks_unix.py b/js/src/tests/lib/tasks_unix.py
new file mode 100644
index 0000000000..bf47695a7f
--- /dev/null
+++ b/js/src/tests/lib/tasks_unix.py
@@ -0,0 +1,273 @@
+# A unix-oriented process dispatcher. Uses a single thread with select and
+# waitpid to dispatch tasks. This avoids several deadlocks that are possible
+# with fork/exec + threads + Python.
+
+import errno
+import os
+import select
+import signal
+import sys
+from datetime import datetime, timedelta
+
+from .adaptor import xdr_annotate
+from .progressbar import ProgressBar
+from .results import NullTestOutput, TestOutput, escape_cmdline
+
+
+class Task(object):
+ def __init__(self, test, prefix, tempdir, pid, stdout, stderr):
+ self.test = test
+ self.cmd = test.get_command(prefix, tempdir)
+ self.pid = pid
+ self.stdout = stdout
+ self.stderr = stderr
+ self.start = datetime.now()
+ self.out = []
+ self.err = []
+
+
+def spawn_test(test, prefix, tempdir, passthrough, run_skipped, show_cmd):
+ """Spawn one child, return a task struct."""
+ if not test.enable and not run_skipped:
+ return None
+
+ cmd = test.get_command(prefix, tempdir)
+ if show_cmd:
+ print(escape_cmdline(cmd))
+
+ if not passthrough:
+ (rout, wout) = os.pipe()
+ (rerr, werr) = os.pipe()
+
+ rv = os.fork()
+
+ # Parent.
+ if rv:
+ os.close(wout)
+ os.close(werr)
+ return Task(test, prefix, tempdir, rv, rout, rerr)
+
+ # Child.
+ os.close(rout)
+ os.close(rerr)
+
+ os.dup2(wout, 1)
+ os.dup2(werr, 2)
+
+ os.execvp(cmd[0], cmd)
+
+
+def get_max_wait(tasks, timeout):
+ """
+ Return the maximum time we can wait before any task should time out.
+ """
+
+ # If we have a progress-meter, we need to wake up to update it frequently.
+ wait = ProgressBar.update_granularity()
+
+ # If a timeout is supplied, we need to wake up for the first task to
+ # timeout if that is sooner.
+ if timeout:
+ now = datetime.now()
+ timeout_delta = timedelta(seconds=timeout)
+ for task in tasks:
+ remaining = task.start + timeout_delta - now
+ if remaining < wait:
+ wait = remaining
+
+ # Return the wait time in seconds, clamped so it is never negative.
+ return max(wait.total_seconds(), 0)
+
+
+def flush_input(fd, frags):
+ """
+ Read any pages sitting in the file descriptor 'fd' into the list 'frags'.
+ """
+ rv = os.read(fd, 4096)
+ frags.append(rv)
+ while len(rv) == 4096:
+ # If read() returns a full buffer, it may indicate there was exactly one
+ # buffer's worth of data, or that there is more data to read. Poll the
+ # file descriptor before we read again to ensure that we will not block
+ # indefinitely.
+ readable, _, _ = select.select([fd], [], [], 0)
+ if not readable:
+ return
+
+ rv = os.read(fd, 4096)
+ frags.append(rv)
+
+
+def read_input(tasks, timeout):
+ """
+ Select on input or errors from the given task list for a max of timeout
+ seconds.
+ """
+ rlist = []
+ exlist = []
+ outmap = {} # Fast access to fragment list given fd.
+ for t in tasks:
+ rlist.append(t.stdout)
+ rlist.append(t.stderr)
+ outmap[t.stdout] = t.out
+ outmap[t.stderr] = t.err
+ # This will trigger with a close event when the child dies, allowing
+ # us to respond immediately and not leave cores idle.
+ exlist.append(t.stdout)
+
+ readable = []
+ try:
+ readable, _, _ = select.select(rlist, [], exlist, timeout)
+ except OverflowError:
+ print("timeout value", timeout, file=sys.stderr)
+ raise
+
+ for fd in readable:
+ flush_input(fd, outmap[fd])
+
+
+def remove_task(tasks, pid):
+ """
+ Remove the task with the given pid from 'tasks' (in place) and return it.
+ """
+ index = None
+ for i, t in enumerate(tasks):
+ if t.pid == pid:
+ index = i
+ break
+ else:
+ raise KeyError("No such pid: {}".format(pid))
+
+ out = tasks[index]
+ tasks.pop(index)
+ return out
+
+
+def timed_out(task, timeout):
+ """
+ Return a timedelta with the amount we are overdue, or False if the timeout
+ has not yet been reached (or timeout is falsy, indicating there is no
+ timeout.)
+ """
+ if not timeout:
+ return False
+
+ elapsed = datetime.now() - task.start
+ over = elapsed - timedelta(seconds=timeout)
+ return over if over.total_seconds() > 0 else False
+
+
+def reap_zombies(tasks, timeout):
+ """
+ Search for children of this process that have finished. If they are tasks,
+ then this routine will clean up the child. This method returns a new task
+ list that has had the ended tasks removed, followed by the list of finished
+ tasks.
+ """
+ finished = []
+ while True:
+ try:
+ pid, status = os.waitpid(0, os.WNOHANG)
+ if pid == 0:
+ break
+ except OSError as e:
+ if e.errno == errno.ECHILD:
+ break
+ raise e
+
+ ended = remove_task(tasks, pid)
+ flush_input(ended.stdout, ended.out)
+ flush_input(ended.stderr, ended.err)
+ os.close(ended.stdout)
+ os.close(ended.stderr)
+
+ returncode = os.WEXITSTATUS(status)
+ if os.WIFSIGNALED(status):
+ returncode = -os.WTERMSIG(status)
+
+ finished.append(
+ TestOutput(
+ ended.test,
+ ended.cmd,
+ b"".join(ended.out).decode("utf-8", "replace"),
+ b"".join(ended.err).decode("utf-8", "replace"),
+ returncode,
+ (datetime.now() - ended.start).total_seconds(),
+ timed_out(ended, timeout),
+ {"pid": ended.pid},
+ )
+ )
+ return tasks, finished
+
+
+def kill_undead(tasks, timeout):
+ """
+ Signal all children that are over the given timeout. Use SIGABRT first to
+ generate a stack dump. If a child still has not died after another 30
+ seconds, kill it with SIGKILL.
+ """
+ for task in tasks:
+ over = timed_out(task, timeout)
+ if over:
+ if over.total_seconds() < 30:
+ os.kill(task.pid, signal.SIGABRT)
+ else:
+ os.kill(task.pid, signal.SIGKILL)
+
+
+def run_all_tests(tests, prefix, tempdir, pb, options):
+ # Copy and reverse for fast pop off the end.
+ tests = list(tests)
+ tests.reverse()
+
+ # The set of currently running tests.
+ tasks = []
+
+ # Piggy back on the first test to generate the XDR content needed for all
+ # other tests to run. To avoid read/write races, we temporarily limit the
+ # number of workers.
+ wait_for_encoding = False
+ worker_count = options.worker_count
+ if options.use_xdr and len(tests) > 1:
+ # The next loop pops tests, thus we iterate over the tests in reversed
+ # order.
+ tests = list(xdr_annotate(reversed(tests), options))
+ tests.reverse()
+ wait_for_encoding = True
+ worker_count = 1
+
+ while len(tests) or len(tasks):
+ while len(tests) and len(tasks) < worker_count:
+ test = tests.pop()
+ task = spawn_test(
+ test,
+ prefix,
+ tempdir,
+ options.passthrough,
+ options.run_skipped,
+ options.show_cmd,
+ )
+ if task:
+ tasks.append(task)
+ else:
+ yield NullTestOutput(test)
+
+ timeout = get_max_wait(tasks, options.timeout)
+ read_input(tasks, timeout)
+
+ kill_undead(tasks, options.timeout)
+ tasks, finished = reap_zombies(tasks, options.timeout)
+
+ # Yield the finished tests; once the XDR encoding test has completed,
+ # restore the full worker count.
+ for out in finished:
+ yield out
+ if wait_for_encoding and out.test == test:
+ assert test.selfhosted_xdr_mode == "encode"
+ wait_for_encoding = False
+ worker_count = options.worker_count
+
+ # If we did not finish any tasks, poke the progress bar to show that
+ # the test harness is at least not frozen.
+ if len(finished) == 0:
+ pb.poke()
diff --git a/js/src/tests/lib/tasks_win.py b/js/src/tests/lib/tasks_win.py
new file mode 100644
index 0000000000..3a5b20298f
--- /dev/null
+++ b/js/src/tests/lib/tasks_win.py
@@ -0,0 +1,177 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+import subprocess
+import sys
+from datetime import datetime, timedelta
+from threading import Thread
+
+from six.moves.queue import Empty, Queue
+
+from .adaptor import xdr_annotate
+from .progressbar import ProgressBar
+from .results import NullTestOutput, TestOutput, escape_cmdline
+
+
+class EndMarker:
+ pass
+
+
+class TaskFinishedMarker:
+ pass
+
+
+def _do_work(qTasks, qResults, qWatch, prefix, tempdir, run_skipped, timeout, show_cmd):
+ while True:
+ test = qTasks.get()
+ if test is EndMarker:
+ qWatch.put(EndMarker)
+ qResults.put(EndMarker)
+ return
+
+ if not test.enable and not run_skipped:
+ qResults.put(NullTestOutput(test))
+ continue
+
+ # Spawn the test task.
+ cmd = test.get_command(prefix, tempdir)
+ if show_cmd:
+ print(escape_cmdline(cmd))
+ tStart = datetime.now()
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ # Push the task to the watchdog -- it will kill the task
+ # if it goes over the timeout while we keep its stdout
+ # buffer clear on the "main" worker thread.
+ qWatch.put(proc)
+ out, err = proc.communicate()
+ # We're not setting universal_newlines=True in subprocess.Popen due to
+ # still needing to support Python 3.5, which doesn't have the "encoding"
+ # parameter to the Popen constructor, so we have to decode the output
+ # here.
+ system_encoding = "mbcs" if sys.platform == "win32" else "utf-8"
+ out = out.decode(system_encoding)
+ err = err.decode(system_encoding)
+ qWatch.put(TaskFinishedMarker)
+
+ # Create a result record and forward to result processing.
+ dt = datetime.now() - tStart
+ result = TestOutput(
+ test,
+ cmd,
+ out,
+ err,
+ proc.returncode,
+ dt.total_seconds(),
+ dt > timedelta(seconds=timeout),
+ )
+ qResults.put(result)
+
+
+def _do_watch(qWatch, timeout):
+ while True:
+ proc = qWatch.get(True)
+ if proc == EndMarker:
+ return
+ try:
+ fin = qWatch.get(block=True, timeout=timeout)
+ assert fin is TaskFinishedMarker, "invalid finish marker"
+ except Empty:
+ # Timed out, force-kill the test.
+ try:
+ proc.terminate()
+ except WindowsError as ex:
+ # If the process finishes after we time out but before we
+ # terminate, the terminate call will fail. We can safely
+ # ignore this.
+ if ex.winerror != 5:
+ raise
+ fin = qWatch.get()
+ assert fin is TaskFinishedMarker, "invalid finish marker"
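+
+
+# Protocol between _do_work and _do_watch (one watchdog queue per worker): the
+# worker pushes the live Popen object onto qWatch before communicate() and
+# TaskFinishedMarker once it returns; if no finish marker arrives within the
+# timeout, the watchdog terminates the process. EndMarker shuts the watchdog
+# down.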
+
+
+def run_all_tests(tests, prefix, tempdir, pb, options):
+ """
+ Uses a scatter-gather thread pool to manage child processes.
+ """
+ qTasks, qResults = Queue(), Queue()
+
+ workers = []
+ watchdogs = []
+ for _ in range(options.worker_count):
+ qWatch = Queue()
+ watcher = Thread(target=_do_watch, args=(qWatch, options.timeout))
+ watcher.setDaemon(True)
+ watcher.start()
+ watchdogs.append(watcher)
+ worker = Thread(
+ target=_do_work,
+ args=(
+ qTasks,
+ qResults,
+ qWatch,
+ prefix,
+ tempdir,
+ options.run_skipped,
+ options.timeout,
+ options.show_cmd,
+ ),
+ )
+ worker.setDaemon(True)
+ worker.start()
+ workers.append(worker)
+
+ delay = ProgressBar.update_granularity().total_seconds()
+
+ # Before inserting all the test cases to be checked in parallel, queue
+ # only the XDR encoding test case, which is responsible for recording the
+ # self-hosted code. Once it has completed, proceed by queueing the rest
+ # of the test cases.
+ if options.use_xdr:
+ tests = xdr_annotate(tests, options)
+ # This loop consumes the first elements of the `tests` iterator, until
+ # it reaches the self-hosted encoding test case, and leaves the
+ # remaining tests in the iterator to be scheduled on multiple threads.
+ for test in tests:
+ if test.selfhosted_xdr_mode == "encode":
+ qTasks.put(test)
+ yield qResults.get(block=True)
+ break
+ assert not test.enable and not options.run_skipped
+ yield NullTestOutput(test)
+
+ # Insert all jobs into the queue, followed by the queue-end
+ # marker, one per worker. This will not block on growing the
+ # queue, only on waiting for more items in the generator. The
+ # workers are already started, however, so this will process as
+ # fast as we can produce tests from the filesystem.
+ def _do_push(num_workers, qTasks):
+ for test in tests:
+ qTasks.put(test)
+ for _ in range(num_workers):
+ qTasks.put(EndMarker)
+
+ pusher = Thread(target=_do_push, args=(len(workers), qTasks))
+ pusher.setDaemon(True)
+ pusher.start()
+
+ # Read from the results.
+ ended = 0
+ while ended < len(workers):
+ try:
+ result = qResults.get(block=True, timeout=delay)
+ if result is EndMarker:
+ ended += 1
+ else:
+ yield result
+ except Empty:
+ pb.poke()
+
+ # Cleanup and exit.
+ pusher.join()
+ for worker in workers:
+ worker.join()
+ for watcher in watchdogs:
+ watcher.join()
+ assert qTasks.empty(), "Send queue not drained"
+ assert qResults.empty(), "Result queue not drained"
diff --git a/js/src/tests/lib/tempfile.py b/js/src/tests/lib/tempfile.py
new file mode 100644
index 0000000000..604864dce1
--- /dev/null
+++ b/js/src/tests/lib/tempfile.py
@@ -0,0 +1,18 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+try:
+ # Python 3.2
+ from tempfile import TemporaryDirectory
+except ImportError:
+ import shutil
+ import tempfile
+ from contextlib import contextmanager
+
+ @contextmanager
+ def TemporaryDirectory(*args, **kwds):
+ d = tempfile.mkdtemp(*args, **kwds)
+ try:
+ yield d
+ finally:
+ shutil.rmtree(d)
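+
+
+# Both implementations are used the same way by the harness, e.g. (sketch):
+#
+# with TemporaryDirectory() as tempdir:
+# ... run tests that write scratch files under tempdir ...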
diff --git a/js/src/tests/lib/terminal_unix.py b/js/src/tests/lib/terminal_unix.py
new file mode 100644
index 0000000000..cdf7995776
--- /dev/null
+++ b/js/src/tests/lib/terminal_unix.py
@@ -0,0 +1,33 @@
+import sys
+
+
+class Terminal(object):
+ COLOR = {"red": "31", "green": "32", "blue": "34", "gray": "37"}
+ NORMAL_INTENSITY = "1"
+ BRIGHT_INTENSITY = "2"
+ ESCAPE = "\x1b["
+ RESET = "0"
+ SEPARATOR = ";"
+ COLOR_CODE = "m"
+ CLEAR_RIGHT_CODE = "K"
+
+ @classmethod
+ def set_color(cls, color):
+ """
+ color: str - color definition string
+ """
+ mod = Terminal.NORMAL_INTENSITY
+ if color.startswith("bright"):
+ mod = Terminal.BRIGHT_INTENSITY
+ color = color[len("bright") :]
+ color_code = Terminal.COLOR[color]
+
+ sys.stdout.write(cls.ESCAPE + color_code + cls.SEPARATOR + mod + cls.COLOR_CODE)
+
+ @classmethod
+ def reset_color(cls):
+ sys.stdout.write(cls.ESCAPE + cls.RESET + cls.COLOR_CODE)
+
+ @classmethod
+ def clear_right(cls):
+ sys.stdout.write(cls.ESCAPE + cls.CLEAR_RIGHT_CODE)
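+
+
+# For example, Terminal.set_color("red") writes the ANSI sequence "\x1b[31;1m",
+# Terminal.set_color("brightgray") writes "\x1b[37;2m", and reset_color()
+# writes "\x1b[0m".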
diff --git a/js/src/tests/lib/terminal_win.py b/js/src/tests/lib/terminal_win.py
new file mode 100644
index 0000000000..a9af46b351
--- /dev/null
+++ b/js/src/tests/lib/terminal_win.py
@@ -0,0 +1,114 @@
+"""
+From Andre Burgaud's Blog, from the CTypes Wiki:
+http://www.burgaud.com/bring-colors-to-the-windows-console-with-python/
+
+Colors text in console mode application (win32).
+Uses ctypes and Win32 methods SetConsoleTextAttribute and
+GetConsoleScreenBufferInfo.
+
+$Id: color_console.py 534 2009-05-10 04:00:59Z andre $
+"""
+
+from ctypes import Structure, byref, c_short, c_ushort, windll
+
+SHORT = c_short
+WORD = c_ushort
+
+
+class COORD(Structure):
+ """struct in wincon.h."""
+
+ _fields_ = [("X", SHORT), ("Y", SHORT)]
+
+
+class SMALL_RECT(Structure):
+ """struct in wincon.h."""
+
+ _fields_ = [("Left", SHORT), ("Top", SHORT), ("Right", SHORT), ("Bottom", SHORT)]
+
+
+class CONSOLE_SCREEN_BUFFER_INFO(Structure):
+ """struct in wincon.h."""
+
+ _fields_ = [
+ ("dwSize", COORD),
+ ("dwCursorPosition", COORD),
+ ("wAttributes", WORD),
+ ("srWindow", SMALL_RECT),
+ ("dwMaximumWindowSize", COORD),
+ ]
+
+
+# winbase.h
+STD_INPUT_HANDLE = -10
+STD_OUTPUT_HANDLE = -11
+STD_ERROR_HANDLE = -12
+
+# wincon.h
+FOREGROUND_BLACK = 0x0000
+FOREGROUND_BLUE = 0x0001
+FOREGROUND_GREEN = 0x0002
+FOREGROUND_CYAN = 0x0003
+FOREGROUND_RED = 0x0004
+FOREGROUND_MAGENTA = 0x0005
+FOREGROUND_YELLOW = 0x0006
+FOREGROUND_GREY = 0x0007
+FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
+
+BACKGROUND_BLACK = 0x0000
+BACKGROUND_BLUE = 0x0010
+BACKGROUND_GREEN = 0x0020
+BACKGROUND_CYAN = 0x0030
+BACKGROUND_RED = 0x0040
+BACKGROUND_MAGENTA = 0x0050
+BACKGROUND_YELLOW = 0x0060
+BACKGROUND_GREY = 0x0070
+BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
+
+stdout_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
+SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
+GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
+
+
+def get_text_attr():
+ csbi = CONSOLE_SCREEN_BUFFER_INFO()
+ GetConsoleScreenBufferInfo(stdout_handle, byref(csbi))
+ return csbi.wAttributes
+
+
+DEFAULT_COLORS = get_text_attr()
+
+
+class Terminal(object):
+ COLOR = {
+ "black": 0x0000,
+ "blue": 0x0001,
+ "green": 0x0002,
+ "cyan": 0x0003,
+ "red": 0x0004,
+ "magenta": 0x0005,
+ "yellow": 0x0006,
+ "gray": 0x0007,
+ }
+ BRIGHT_INTENSITY = 0x0008
+ BACKGROUND_SHIFT = 4
+
+ @classmethod
+ def set_color(cls, color):
+ """
+ color: str - color definition string
+ """
+ color_code = 0
+ if color.startswith("bright"):
+ color_code |= cls.BRIGHT_INTENSITY
+ color = color[len("bright") :]
+ color_code |= Terminal.COLOR[color]
+ SetConsoleTextAttribute(stdout_handle, color_code)
+
+ @classmethod
+ def reset_color(cls):
+ SetConsoleTextAttribute(stdout_handle, DEFAULT_COLORS)
+
+ @classmethod
+ def clear_right(cls):
+ pass
diff --git a/js/src/tests/lib/tests.py b/js/src/tests/lib/tests.py
new file mode 100644
index 0000000000..bc86b24d5c
--- /dev/null
+++ b/js/src/tests/lib/tests.py
@@ -0,0 +1,334 @@
+# Library for JSTest tests.
+#
+# This contains classes that represent an individual test, including
+# metadata, and know how to run the tests and determine failures.
+
+import os
+import sys
+from contextlib import contextmanager
+
+# When run on tbpl, we run each test multiple times with the following
+# arguments.
+JITFLAGS = {
+ "all": [
+ [], # no flags, normal baseline and ion
+ [
+ "--ion-eager",
+ "--ion-offthread-compile=off", # implies --baseline-eager
+ "--more-compartments",
+ ],
+ [
+ "--ion-eager",
+ "--ion-offthread-compile=off",
+ "--ion-check-range-analysis",
+ "--ion-extra-checks",
+ "--no-sse3",
+ "--no-threads",
+ ],
+ ["--baseline-eager"],
+ ["--no-blinterp", "--no-baseline", "--no-ion", "--more-compartments"],
+ ["--blinterp-eager"],
+ ],
+ # Like 'all' above but for jstests. This has fewer jit-specific
+ # configurations.
+ "jstests": [
+ [], # no flags, normal baseline and ion
+ [
+ "--ion-eager",
+ "--ion-offthread-compile=off", # implies --baseline-eager
+ "--more-compartments",
+ ],
+ ["--baseline-eager"],
+ ["--no-blinterp", "--no-baseline", "--no-ion", "--more-compartments"],
+ ],
+ # used by jit_test.py
+ "ion": [
+ ["--baseline-eager"],
+ ["--ion-eager", "--ion-offthread-compile=off", "--more-compartments"],
+ ],
+ # Run reduced variants on debug builds, since they take longer to run.
+ "debug": [
+ [], # no flags, normal baseline and ion
+ [
+ "--ion-eager",
+ "--ion-offthread-compile=off", # implies --baseline-eager
+ "--more-compartments",
+ ],
+ ["--baseline-eager"],
+ ],
+ # Cover cases useful for tsan. Note that we test --ion-eager without
+ # --ion-offthread-compile=off here, because it helps catch races.
+ "tsan": [
+ [],
+ [
+ "--ion-eager",
+ "--ion-check-range-analysis",
+ "--ion-extra-checks",
+ "--no-sse3",
+ ],
+ ["--no-blinterp", "--no-baseline", "--no-ion"],
+ ],
+ "baseline": [
+ ["--no-ion"],
+ ],
+ # Interpreter-only, for tools that cannot handle binary code generation.
+ "interp": [
+ [
+ "--no-blinterp",
+ "--no-baseline",
+ "--no-asmjs",
+ "--wasm-compiler=none",
+ "--no-native-regexp",
+ ]
+ ],
+ "none": [[]], # no flags, normal baseline and ion
+}
+
+
+def get_jitflags(variant, **kwargs):
+ if variant not in JITFLAGS:
+ print('Invalid jitflag: "{}"'.format(variant))
+ sys.exit(1)
+ if variant == "none" and "none" in kwargs:
+ return kwargs["none"]
+ return JITFLAGS[variant]
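+
+# For example, get_jitflags("baseline") returns [["--no-ion"]], i.e. a single
+# variant that appends --no-ion to the shell command line; passing none=<flags>
+# only overrides the result for the "none" variant.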
+
+
+def valid_jitflags():
+ return JITFLAGS.keys()
+
+
+def get_environment_overlay(js_shell, gc_zeal):
+ """
+ Build a dict of additional environment variables that must be set to run
+ tests successfully.
+ """
+
+ # When updating this also update |buildBrowserEnv| in layout/tools/reftest/runreftest.py.
+ env = {
+ # Force Pacific time zone to avoid failures in Date tests.
+ "TZ": "PST8PDT",
+ # Force date strings to English.
+ "LC_ALL": "en_US.UTF-8",
+ # Tell the shell to disable crash dialogs on windows.
+ "XRE_NO_WINDOWS_CRASH_DIALOG": "1",
+ }
+
+ # Add the binary's directory to the library search path so that we find the
+ # nspr and icu we built, instead of the platform supplied ones (or none at
+ # all on windows).
+ if sys.platform.startswith("linux"):
+ env["LD_LIBRARY_PATH"] = os.path.dirname(js_shell)
+ elif sys.platform.startswith("darwin"):
+ env["DYLD_LIBRARY_PATH"] = os.path.dirname(js_shell)
+ elif sys.platform.startswith("win"):
+ env["PATH"] = os.path.dirname(js_shell)
+
+ if gc_zeal:
+ env["JS_GC_ZEAL"] = gc_zeal
+
+ return env
+
+
+@contextmanager
+def change_env(env_overlay):
+ # Apply the overlaid environment and record the current state.
+ prior_env = {}
+ for key, val in env_overlay.items():
+ prior_env[key] = os.environ.get(key, None)
+ if "PATH" in key and key in os.environ:
+ os.environ[key] = "{}{}{}".format(val, os.pathsep, os.environ[key])
+ else:
+ os.environ[key] = val
+
+ try:
+ # Execute with the new environment.
+ yield
+
+ finally:
+ # Restore the prior environment.
+ for key, val in prior_env.items():
+ if val is not None:
+ os.environ[key] = val
+ else:
+ del os.environ[key]
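+
+
+# Hedged usage sketch -- the harness drivers combine these two helpers roughly
+# like this:
+#
+# overlay = get_environment_overlay(js_shell, gc_zeal)
+# with change_env(overlay):
+# ... spawn the JS shell; the prior environment is restored on exit ...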
+
+
+def get_cpu_count():
+ """
+ Guess at a reasonable parallelism count to set as the default for the
+ current machine and run.
+ """
+ # Python 2.6+
+ try:
+ import multiprocessing
+
+ return multiprocessing.cpu_count()
+ except (ImportError, NotImplementedError):
+ pass
+
+ # POSIX
+ try:
+ res = int(os.sysconf("SC_NPROCESSORS_ONLN"))
+ if res > 0:
+ return res
+ except (AttributeError, ValueError):
+ pass
+
+ # Windows
+ try:
+ res = int(os.environ["NUMBER_OF_PROCESSORS"])
+ if res > 0:
+ return res
+ except (KeyError, ValueError):
+ pass
+
+ return 1
+
+
+class RefTestCase(object):
+ """A test case consisting of a test and an expected result."""
+
+ def __init__(self, root, path, extra_helper_paths=None, wpt=None):
+ # str: path of the tests root dir
+ self.root = root
+ # str: path of JS file relative to tests root dir
+ self.path = path
+ # [str]: Extra options to pass to the shell
+ self.options = []
+ # [str]: JIT flags to pass to the shell
+ self.jitflags = []
+ # [str]: flags to never pass to the shell for this test
+ self.ignoredflags = []
+ # str or None: path to reflect-stringify.js file to test
+ # instead of actually running tests
+ self.test_reflect_stringify = None
+ # bool: True => test is module code
+ self.is_module = False
+ # bool: True => test is asynchronous and runs additional code after completing the first
+ # turn of the event loop.
+ self.is_async = False
+ # bool: True => run test, False => don't run
+ self.enable = True
+ # str?: Optional error type
+ self.error = None
+ # bool: expected result, True => pass
+ self.expect = True
+ # bool: True => ignore output as 'random'
+ self.random = False
+ # bool: True => test may run slowly
+ self.slow = False
+ # bool: True => test is a test262 testcase with the raw flag, which turns
+ # off running shell.js files inside test262
+ self.is_test262_raw = False
+
+ # Use self-hosted XDR instead of parsing the source stored in the binary.
+ # str?: Path computed when generating the command
+ self.selfhosted_xdr_path = None
+ # str: XDR mode (= "off", "encode", "decode") to use with the
+ # self-hosted code.
+ self.selfhosted_xdr_mode = "off"
+
+ # The terms parsed to produce the above properties.
+ self.terms = None
+
+ # The tag between |...| in the test header.
+ self.tag = None
+
+ # Anything occurring after -- in the test header.
+ self.comment = None
+
+ self.extra_helper_paths = extra_helper_paths or []
+ self.wpt = wpt
+
+ def prefix_command(self):
+ """Return the '-f' options needed to run a test with the given path."""
+ path = self.path
+ prefix = []
+ while path != "":
+ assert path != "/"
+ path = os.path.dirname(path)
+
+ if self.is_test262_raw and path != "":
+ # Skip running shell.js under test262 if the test has raw flag.
+ # Top-level shell.js is still necessary to define reportCompare.
+ continue
+
+ shell_path = os.path.join(self.root, path, "shell.js")
+ if os.path.exists(shell_path):
+ prefix.append(shell_path)
+ prefix.append("-f")
+ prefix.reverse()
+
+ for extra_path in self.extra_helper_paths:
+ prefix.append("-f")
+ prefix.append(extra_path)
+
+ return prefix
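+
+ # For example, for a test at "non262/Math/sign.js" (illustrative path), the
+ # prefix is ["-f", <root>/shell.js, "-f", <root>/non262/shell.js, "-f",
+ # <root>/non262/Math/shell.js] for whichever of those shell.js files exist,
+ # followed by a "-f" entry for each extra helper path.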
+
+ def abs_path(self):
+ return os.path.join(self.root, self.path)
+
+ def get_command(self, prefix, tempdir):
+ cmd = prefix + self.jitflags + self.options + self.prefix_command()
+ # Note: The tempdir provided as argument is managed by the caller and
+ # should remain alive as long as the test harness. Therefore, the XDR
+ # content of the self-hosted code remains accessible to all JS shell
+ # instances.
+ if self.selfhosted_xdr_mode != "off":
+ self.selfhosted_xdr_path = os.path.join(tempdir, "shell.xdr")
+ cmd += [
+ "--selfhosted-xdr-path",
+ self.selfhosted_xdr_path,
+ "--selfhosted-xdr-mode",
+ self.selfhosted_xdr_mode,
+ ]
+ if self.test_reflect_stringify is not None:
+ cmd += [self.test_reflect_stringify, "--check", self.abs_path()]
+ elif self.is_module:
+ cmd += ["--module", self.abs_path()]
+ else:
+ cmd += ["-f", self.abs_path()]
+ for flag in self.ignoredflags:
+ if flag in cmd:
+ cmd.remove(flag)
+ return cmd
+
+ def __str__(self):
+ ans = self.path
+ if not self.enable:
+ ans += ", skip"
+ if self.error is not None:
+ ans += ", error=" + self.error
+ if not self.expect:
+ ans += ", fails"
+ if self.random:
+ ans += ", random"
+ if self.slow:
+ ans += ", slow"
+ if "-d" in self.options:
+ ans += ", debugMode"
+ return ans
+
+ @staticmethod
+ def build_js_cmd_prefix(js_path, js_args, debugger_prefix):
+ parts = []
+ if debugger_prefix:
+ parts += debugger_prefix
+ parts.append(js_path)
+ if js_args:
+ parts += js_args
+ return parts
+
+ def __cmp__(self, other):
+ if self.path == other.path:
+ return 0
+ elif self.path < other.path:
+ return -1
+ return 1
+
+ def __hash__(self):
+ return self.path.__hash__()
+
+ def __repr__(self):
+ return "<lib.tests.RefTestCase %s>" % (self.path,)
diff --git a/js/src/tests/lib/wptreport.py b/js/src/tests/lib/wptreport.py
new file mode 100644
index 0000000000..95fa9b1ed7
--- /dev/null
+++ b/js/src/tests/lib/wptreport.py
@@ -0,0 +1,85 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Integration between the jstests harness and `WptreportFormatter`.
+#
+# `WptreportFormatter` uses the data format specified in
+# <https://firefox-source-docs.mozilla.org/mozbase/mozlog.html>.
+
+from time import time
+
+from wptrunner.formatters.wptreport import WptreportFormatter
+
+
+class WptreportHandler(object):
+ def __init__(self, out):
+ """
+ Initialize the WptreportHandler handler.
+
+ :param str out: path to a file to write output to.
+ """
+ self.out = out
+ self.formatter = WptreportFormatter()
+
+ def suite_start(self):
+ """
+ Produce the "suite_start" message at the present time.
+ """
+ self.formatter.suite_start(
+ {
+ "time": time(),
+ "run_info": {},
+ }
+ )
+
+ def suite_end(self):
+ """
+ Produce the "suite_end" message at the present time and write the
+ results to the file path given in the constructor.
+ """
+ result = self.formatter.suite_end(
+ {
+ "time": time(),
+ }
+ )
+ with open(self.out, "w") as fp:
+ fp.write(result)
+
+ def test(self, result, duration):
+ """
+ Produce the "test_start", "test_status" and "test_end" messages, as
+ appropriate.
+
+ :param dict result: a dictionary with the test results. It should
+ include the following keys:
+ * "name": the ID of the test;
+ * "status": the actual status of the whole test;
+ * "expected": the expected status of the whole test;
+ * "subtests": a list of dicts with keys "test",
+ "subtest", "status" and "expected".
+ :param float duration: the runtime of the test
+ """
+ testname = result["name"]
+
+ end_time = time()
+ start_time = end_time - duration
+
+ self.formatter.test_start(
+ {
+ "test": testname,
+ "time": start_time,
+ }
+ )
+
+ for subtest in result["subtests"]:
+ self.formatter.test_status(subtest)
+
+ self.formatter.test_end(
+ {
+ "test": testname,
+ "time": end_time,
+ "status": result["status"],
+ "expected": result["expected"],
+ }
+ )
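+
+
+# Hedged usage sketch -- this mirrors how ResultsSink drives the handler (the
+# output path comes from the harness's wptreport option):
+#
+# handler = WptreportHandler(options.wptreport)
+# handler.suite_start()
+# handler.test(wpt_results, duration)
+# handler.suite_end()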