summaryrefslogtreecommitdiffstats
path: root/testing/mozbase/moztest
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--testing/mozbase/moztest/moztest/__init__.py7
-rw-r--r--testing/mozbase/moztest/moztest/adapters/__init__.py7
-rw-r--r--testing/mozbase/moztest/moztest/adapters/unit.py217
-rw-r--r--testing/mozbase/moztest/moztest/resolve.py1032
-rw-r--r--testing/mozbase/moztest/moztest/results.py366
-rw-r--r--testing/mozbase/moztest/moztest/selftest/__init__.py0
-rw-r--r--testing/mozbase/moztest/moztest/selftest/fixtures.py116
-rw-r--r--testing/mozbase/moztest/moztest/selftest/output.py52
-rw-r--r--testing/mozbase/moztest/setup.py33
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/apple/a11y.ini1
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/apple/moz.build1
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/banana/moz.build1
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/banana/xpcshell.ini2
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/carrot/moz.build1
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/carrot/xpcshell-one.ini5
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/carrot/xpcshell-shared.ini4
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/carrot/xpcshell-two.ini5
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/dragonfruit/elderberry/xpcshell_updater.ini7
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/dragonfruit/moz.build1
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/dragonfruit/xpcshell.ini4
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/fig/grape/instrumentation.ini2
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/fig/huckleberry/instrumentation.ini2
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/fig/moz.build4
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/juniper/browser.ini1
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/kiwi/browser.ini3
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/moz.build4
-rw-r--r--testing/mozbase/moztest/tests/data/srcdir/wpt_manifest_data.json8
-rw-r--r--testing/mozbase/moztest/tests/manifest.ini6
-rw-r--r--testing/mozbase/moztest/tests/test.py54
-rw-r--r--testing/mozbase/moztest/tests/test_resolve.py577
30 files changed, 2523 insertions, 0 deletions
diff --git a/testing/mozbase/moztest/moztest/__init__.py b/testing/mozbase/moztest/moztest/__init__.py
new file mode 100644
index 0000000000..c2366466cf
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/__init__.py
@@ -0,0 +1,7 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from moztest import adapters
+
+__all__ = ["adapters"]
diff --git a/testing/mozbase/moztest/moztest/adapters/__init__.py b/testing/mozbase/moztest/moztest/adapters/__init__.py
new file mode 100644
index 0000000000..5bd3a52844
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/adapters/__init__.py
@@ -0,0 +1,7 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from moztest.adapters import unit
+
+__all__ = ["unit"]
diff --git a/testing/mozbase/moztest/moztest/adapters/unit.py b/testing/mozbase/moztest/moztest/adapters/unit.py
new file mode 100644
index 0000000000..72c2f30052
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/adapters/unit.py
@@ -0,0 +1,217 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import sys
+import time
+import traceback
+import unittest
+from unittest import TextTestResult
+
+"""Adapter used to output structuredlog messages from unittest
+testsuites"""
+
+
+def get_test_class_name(test):
+ """
+ This method is used to return the full class name from a
+ :class:`unittest.TestCase` instance.
+
+ It is used as a default to define the "class_name" extra value
+ passed in structured loggers. You can override the default by
+ implementing a "get_test_class_name" method on your TestCase subclass.
+ """
+ return "%s.%s" % (test.__class__.__module__, test.__class__.__name__)
+
+
+def get_test_method_name(test):
+ """
+ This method is used to return the full method name from a
+ :class:`unittest.TestCase` instance.
+
+ It is used as a default to define the "method_name" extra value
+ passed in structured loggers. You can override the default by
+ implementing a "get_test_method_name" method on your TestCase subclass.
+ """
+ return test._testMethodName
+
+
+class StructuredTestResult(TextTestResult):
+ def __init__(self, *args, **kwargs):
+ self.logger = kwargs.pop("logger")
+ self.test_list = kwargs.pop("test_list", [])
+ self.result_callbacks = kwargs.pop("result_callbacks", [])
+ self.passed = 0
+ self.testsRun = 0
+ TextTestResult.__init__(self, *args, **kwargs)
+
+ def call_callbacks(self, test, status):
+ debug_info = {}
+ for callback in self.result_callbacks:
+ info = callback(test, status)
+ if info is not None:
+ debug_info.update(info)
+ return debug_info
+
+ def startTestRun(self):
+ # This would be an opportunity to call the logger's suite_start action,
+ # however some users may use multiple suites, and per the structured
+ # logging protocol, this action should only be called once.
+ pass
+
+ def startTest(self, test):
+ self.testsRun += 1
+ self.logger.test_start(test.id())
+
+ def stopTest(self, test):
+ pass
+
+ def stopTestRun(self):
+ # This would be an opportunity to call the logger's suite_end action,
+ # however some users may use multiple suites, and per the structured
+ # logging protocol, this action should only be called once.
+ pass
+
+ def _extract_err_message(self, err):
+ # Format an exception message in the style of unittest's _exc_info_to_string
+ # while maintaining a division between a traceback and a message.
+ exc_ty, val, _ = err
+ exc_msg = "".join(traceback.format_exception_only(exc_ty, val))
+ if self.buffer:
+ output_msg = "\n".join([sys.stdout.getvalue(), sys.stderr.getvalue()])
+ return "".join([exc_msg, output_msg])
+ return exc_msg.rstrip()
+
+ def _extract_stacktrace(self, err, test):
+ # Format an exception stack in the style of unittest's _exc_info_to_string
+ # while maintaining a division between a traceback and a message.
+ # This is mostly borrowed from unittest.result._exc_info_to_string.
+
+ exctype, value, tb = err
+ while tb and self._is_relevant_tb_level(tb):
+ tb = tb.tb_next
+ # Header usually included by print_exception
+ lines = ["Traceback (most recent call last):\n"]
+ if exctype is test.failureException and hasattr(
+ self, "_count_relevant_tb_levels"
+ ):
+ length = self._count_relevant_tb_levels(tb)
+ lines += traceback.format_tb(tb, length)
+ else:
+ lines += traceback.format_tb(tb)
+ return "".join(lines)
+
+ def _get_class_method_name(self, test):
+ if hasattr(test, "get_test_class_name"):
+ class_name = test.get_test_class_name()
+ else:
+ class_name = get_test_class_name(test)
+
+ if hasattr(test, "get_test_method_name"):
+ method_name = test.get_test_method_name()
+ else:
+ method_name = get_test_method_name(test)
+
+ return {"class_name": class_name, "method_name": method_name}
+
+ def addError(self, test, err):
+ self.errors.append((test, self._exc_info_to_string(err, test)))
+ extra = self.call_callbacks(test, "ERROR")
+ extra.update(self._get_class_method_name(test))
+ self.logger.test_end(
+ test.id(),
+ "ERROR",
+ message=self._extract_err_message(err),
+ expected="PASS",
+ stack=self._extract_stacktrace(err, test),
+ extra=extra,
+ )
+
+ def addFailure(self, test, err):
+ extra = self.call_callbacks(test, "FAIL")
+ extra.update(self._get_class_method_name(test))
+ self.logger.test_end(
+ test.id(),
+ "FAIL",
+ message=self._extract_err_message(err),
+ expected="PASS",
+ stack=self._extract_stacktrace(err, test),
+ extra=extra,
+ )
+
+ def addSuccess(self, test):
+ extra = self._get_class_method_name(test)
+ self.logger.test_end(test.id(), "PASS", expected="PASS", extra=extra)
+
+ def addExpectedFailure(self, test, err):
+ extra = self.call_callbacks(test, "FAIL")
+ extra.update(self._get_class_method_name(test))
+ self.logger.test_end(
+ test.id(),
+ "FAIL",
+ message=self._extract_err_message(err),
+ expected="FAIL",
+ stack=self._extract_stacktrace(err, test),
+ extra=extra,
+ )
+
+ def addUnexpectedSuccess(self, test):
+ extra = self.call_callbacks(test, "PASS")
+ extra.update(self._get_class_method_name(test))
+ self.logger.test_end(test.id(), "PASS", expected="FAIL", extra=extra)
+
+ def addSkip(self, test, reason):
+ extra = self.call_callbacks(test, "SKIP")
+ extra.update(self._get_class_method_name(test))
+ self.logger.test_end(
+ test.id(), "SKIP", message=reason, expected="PASS", extra=extra
+ )
+
+
+class StructuredTestRunner(unittest.TextTestRunner):
+
+ resultclass = StructuredTestResult
+
+ def __init__(self, **kwargs):
+ """TestRunner subclass designed for structured logging.
+
+ :param logger: A ``StructuredLogger`` to use for logging the test run.
+ :param test_list: An optional list of tests that will be passed along with
+ the `suite_start` message.
+
+ """
+
+ self.logger = kwargs.pop("logger")
+ self.test_list = kwargs.pop("test_list", [])
+ self.result_callbacks = kwargs.pop("result_callbacks", [])
+ unittest.TextTestRunner.__init__(self, **kwargs)
+
+ def _makeResult(self):
+ return self.resultclass(
+ self.stream,
+ self.descriptions,
+ self.verbosity,
+ logger=self.logger,
+ test_list=self.test_list,
+ )
+
+ def run(self, test):
+ """Run the given test case or test suite."""
+ result = self._makeResult()
+ result.failfast = self.failfast
+ result.buffer = self.buffer
+ startTime = time.time()
+ startTestRun = getattr(result, "startTestRun", None)
+ if startTestRun is not None:
+ startTestRun()
+ try:
+ test(result)
+ finally:
+ stopTestRun = getattr(result, "stopTestRun", None)
+ if stopTestRun is not None:
+ stopTestRun()
+ stopTime = time.time()
+ if hasattr(result, "time_taken"):
+ result.time_taken = stopTime - startTime
+
+ return result
diff --git a/testing/mozbase/moztest/moztest/resolve.py b/testing/mozbase/moztest/moztest/resolve.py
new file mode 100644
index 0000000000..1076d52865
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/resolve.py
@@ -0,0 +1,1032 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import fnmatch
+import os
+import pickle
+import sys
+from abc import ABCMeta, abstractmethod
+from collections import defaultdict
+
+import mozpack.path as mozpath
+import six
+from manifestparser import TestManifest, combine_fields
+from mozbuild.base import MozbuildObject
+from mozbuild.testing import REFTEST_FLAVORS, TEST_MANIFESTS
+from mozbuild.util import OrderedDefaultDict
+from mozpack.files import FileFinder
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+MOCHITEST_CHUNK_BY_DIR = 4
+MOCHITEST_TOTAL_CHUNKS = 5
+
+
+def WebglSuite(name):
+ return {
+ "aliases": (name,),
+ "build_flavor": "mochitest",
+ "mach_command": "mochitest",
+ "kwargs": {"flavor": "plain", "subsuite": name, "test_paths": None},
+ "task_regex": [
+ "mochitest-" + name + "($|.*(-1|[^0-9])$)",
+ "test-verify($|.*(-1|[^0-9])$)",
+ ],
+ }
+
+
+TEST_SUITES = {
+ "cppunittest": {
+ "aliases": ("cpp",),
+ "mach_command": "cppunittest",
+ "kwargs": {"test_files": None},
+ },
+ "crashtest": {
+ "aliases": ("c", "rc"),
+ "build_flavor": "crashtest",
+ "mach_command": "crashtest",
+ "kwargs": {"test_file": None},
+ "task_regex": ["crashtest($|.*(-1|[^0-9])$)", "test-verify($|.*(-1|[^0-9])$)"],
+ },
+ "crashtest-qr": {
+ "aliases": ("c", "rc"),
+ "build_flavor": "crashtest",
+ "mach_command": "crashtest",
+ "kwargs": {"test_file": None},
+ "task_regex": [
+ "crashtest-qr($|.*(-1|[^0-9])$)",
+ "test-verify($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "firefox-ui-functional": {
+ "aliases": ("fxfn",),
+ "mach_command": "firefox-ui-functional",
+ "kwargs": {},
+ },
+ "firefox-ui-update": {
+ "aliases": ("fxup",),
+ "mach_command": "firefox-ui-update",
+ "kwargs": {},
+ },
+ "marionette": {
+ "aliases": ("mn",),
+ "mach_command": "marionette-test",
+ "kwargs": {"tests": None},
+ "task_regex": ["marionette($|.*(-1|[^0-9])$)"],
+ },
+ "mochitest-a11y": {
+ "aliases": ("a11y", "ally"),
+ "build_flavor": "a11y",
+ "mach_command": "mochitest",
+ "kwargs": {
+ "flavor": "a11y",
+ "test_paths": None,
+ "e10s": False,
+ "enable_fission": False,
+ },
+ "task_regex": [
+ "mochitest-a11y($|.*(-1|[^0-9])$)",
+ "test-verify($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "mochitest-browser-chrome": {
+ "aliases": ("bc", "browser"),
+ "build_flavor": "browser-chrome",
+ "mach_command": "mochitest",
+ "kwargs": {"flavor": "browser-chrome", "test_paths": None},
+ "task_regex": [
+ "mochitest-browser-chrome($|.*(-1|[^0-9])$)",
+ "test-verify($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "mochitest-browser-chrome-screenshots": {
+ "aliases": ("ss", "screenshots-chrome"),
+ "build_flavor": "browser-chrome",
+ "mach_command": "mochitest",
+ "kwargs": {
+ "flavor": "browser-chrome",
+ "subsuite": "screenshots",
+ "test_paths": None,
+ },
+ "task_regex": ["browser-screenshots($|.*(-1|[^0-9])$)"],
+ },
+ "mochitest-chrome": {
+ "aliases": ("mc",),
+ "build_flavor": "chrome",
+ "mach_command": "mochitest",
+ "kwargs": {
+ "flavor": "chrome",
+ "test_paths": None,
+ "e10s": False,
+ "enable_fission": False,
+ },
+ "task_regex": [
+ "mochitest-chrome($|.*(-1|[^0-9])$)",
+ "test-verify($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "mochitest-chrome-gpu": {
+ "aliases": ("gpu",),
+ "build_flavor": "chrome",
+ "mach_command": "mochitest",
+ "kwargs": {
+ "flavor": "chrome",
+ "subsuite": "gpu",
+ "test_paths": None,
+ "e10s": False,
+ "enable_fission": False,
+ },
+ "task_regex": [
+ "mochitest-gpu($|.*(-1|[^0-9])$)",
+ "test-verify($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "mochitest-devtools-chrome": {
+ "aliases": ("dt", "devtools"),
+ "build_flavor": "browser-chrome",
+ "mach_command": "mochitest",
+ "kwargs": {
+ "flavor": "browser-chrome",
+ "subsuite": "devtools",
+ "test_paths": None,
+ },
+ "task_regex": [
+ "mochitest-devtools-chrome($|.*(-1|[^0-9])$)",
+ "test-verify($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "mochitest-browser-a11y": {
+ "aliases": ("ba", "browser-a11y"),
+ "build_flavor": "browser-chrome",
+ "mach_command": "mochitest",
+ "kwargs": {
+ "flavor": "browser-chrome",
+ "subsuite": "a11y",
+ "test_paths": None,
+ },
+ "task_regex": [
+ "mochitest-browser-a11y($|.*(-1|[^0-9])$)",
+ "test-verify($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "mochitest-media": {
+ "aliases": ("mpm", "plain-media"),
+ "build_flavor": "mochitest",
+ "mach_command": "mochitest",
+ "kwargs": {"flavor": "plain", "subsuite": "media", "test_paths": None},
+ "task_regex": [
+ "mochitest-media($|.*(-1|[^0-9])$)",
+ "test-verify($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "mochitest-browser-media": {
+ "aliases": ("bmda", "browser-mda"),
+ "build_flavor": "browser-chrome",
+ "mach_command": "mochitest",
+ "kwargs": {
+ "flavor": "browser-chrome",
+ "subsuite": "media-bc",
+ "test_paths": None,
+ },
+ "task_regex": [
+ "mochitest-browser-media($|.*(-1|[^0-9])$)",
+ "test-verify($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "mochitest-plain": {
+ "aliases": (
+ "mp",
+ "plain",
+ ),
+ "build_flavor": "mochitest",
+ "mach_command": "mochitest",
+ "kwargs": {"flavor": "plain", "test_paths": None},
+ "task_regex": [
+ "mochitest-plain($|.*(-1|[^0-9])$)", # noqa
+ "test-verify($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "mochitest-plain-gpu": {
+ "aliases": ("gpu",),
+ "build_flavor": "mochitest",
+ "mach_command": "mochitest",
+ "kwargs": {"flavor": "plain", "subsuite": "gpu", "test_paths": None},
+ "task_regex": [
+ "mochitest-gpu($|.*(-1|[^0-9])$)",
+ "test-verify($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "mochitest-remote": {
+ "aliases": ("remote",),
+ "build_flavor": "browser-chrome",
+ "mach_command": "mochitest",
+ "kwargs": {
+ "flavor": "browser-chrome",
+ "subsuite": "remote",
+ "test_paths": None,
+ },
+ "task_regex": [
+ "mochitest-remote($|.*(-1|[^0-9])$)",
+ "test-verify($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "mochitest-webgl1-core": WebglSuite("webgl1-core"),
+ "mochitest-webgl1-ext": WebglSuite("webgl1-ext"),
+ "mochitest-webgl2-core": WebglSuite("webgl2-core"),
+ "mochitest-webgl2-ext": WebglSuite("webgl2-ext"),
+ "mochitest-webgl2-deqp": WebglSuite("webgl2-deqp"),
+ "mochitest-webgpu": WebglSuite("webgpu"),
+ "puppeteer": {
+ "aliases": ("remote/test/puppeteer",),
+ "mach_command": "puppeteer-test",
+ "kwargs": {"headless": False},
+ },
+ "python": {
+ "build_flavor": "python",
+ "mach_command": "python-test",
+ "kwargs": {"tests": None},
+ },
+ "telemetry-tests-client": {
+ "aliases": ("ttc",),
+ "build_flavor": "telemetry-tests-client",
+ "mach_command": "telemetry-tests-client",
+ "kwargs": {},
+ "task_regex": ["telemetry-tests-client($|.*(-1|[^0-9])$)"],
+ },
+ "reftest": {
+ "aliases": ("rr",),
+ "build_flavor": "reftest",
+ "mach_command": "reftest",
+ "kwargs": {"tests": None},
+ "task_regex": [
+ "(opt|debug)(-geckoview)?-reftest($|.*(-1|[^0-9])$)",
+ "test-verify-gpu($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "reftest-qr": {
+ "aliases": ("rr",),
+ "build_flavor": "reftest",
+ "mach_command": "reftest",
+ "kwargs": {"tests": None},
+ "task_regex": [
+ "(opt|debug)(-geckoview)?-reftest-qr($|.*(-1|[^0-9])$)",
+ "test-verify-gpu($|.*(-1|[^0-9])$)",
+ ],
+ },
+ "robocop": {
+ "mach_command": "robocop",
+ "kwargs": {"test_paths": None},
+ "task_regex": ["robocop($|.*(-1|[^0-9])$)"],
+ },
+ "web-platform-tests": {
+ "aliases": ("wpt",),
+ "mach_command": "web-platform-tests",
+ "build_flavor": "web-platform-tests",
+ "kwargs": {"subsuite": "testharness"},
+ "task_regex": [
+ "web-platform-tests(?!-crashtest|-reftest|-wdspec|-print)"
+ "($|.*(-1|[^0-9])$)",
+ "test-verify-wpt",
+ ],
+ },
+ "web-platform-tests-crashtest": {
+ "aliases": ("wpt",),
+ "mach_command": "web-platform-tests",
+ "build_flavor": "web-platform-tests",
+ "kwargs": {"subsuite": "crashtest"},
+ "task_regex": [
+ "web-platform-tests-crashtest($|.*(-1|[^0-9])$)",
+ "test-verify-wpt",
+ ],
+ },
+ "web-platform-tests-print-reftest": {
+ "aliases": ("wpt",),
+ "mach_command": "web-platform-tests",
+ "kwargs": {"subsuite": "print-reftest"},
+ "task_regex": [
+ "web-platform-tests-print-reftest($|.*(-1|[^0-9])$)",
+ "test-verify-wpt",
+ ],
+ },
+ "web-platform-tests-reftest": {
+ "aliases": ("wpt",),
+ "mach_command": "web-platform-tests",
+ "build_flavor": "web-platform-tests",
+ "kwargs": {"subsuite": "reftest"},
+ "task_regex": [
+ "web-platform-tests-reftest($|.*(-1|[^0-9])$)",
+ "test-verify-wpt",
+ ],
+ },
+ "web-platform-tests-wdspec": {
+ "aliases": ("wpt",),
+ "mach_command": "web-platform-tests",
+ "build_flavor": "web-platform-tests",
+ "kwargs": {"subsuite": "wdspec"},
+ "task_regex": [
+ "web-platform-tests-wdspec($|.*(-1|[^0-9])$)",
+ "test-verify-wpt",
+ ],
+ },
+ "valgrind": {
+ "aliases": ("v",),
+ "mach_command": "valgrind-test",
+ "kwargs": {},
+ },
+ "xpcshell": {
+ "aliases": ("x",),
+ "build_flavor": "xpcshell",
+ "mach_command": "xpcshell-test",
+ "kwargs": {"test_file": "all"},
+ "task_regex": ["xpcshell($|.*(-1|[^0-9])$)", "test-verify($|.*(-1|[^0-9])$)"],
+ },
+ "xpcshell-msix": {
+ "aliases": ("x",),
+ "build_flavor": "xpcshell",
+ "mach_command": "xpcshell-test",
+ "kwargs": {"test_file": "all"},
+ "task_regex": ["xpcshell($|.*(-1|[^0-9])$)", "test-verify($|.*(-1|[^0-9])$)"],
+ },
+}
+"""Definitions of all test suites and the metadata needed to run and process
+them. Each test suite definition can contain the following keys.
+
+Arguments:
+ aliases (tuple): A tuple containing shorthands used to refer to this suite.
+ build_flavor (str): The flavor assigned to this suite by the build system
+ in `mozbuild.testing.TEST_MANIFESTS` (or similar).
+ mach_command (str): Name of the mach command used to run this suite.
+ kwargs (dict): Arguments needed to pass into the mach command.
+ task_regex (list): A list of regexes used to filter task labels that run
+ this suite.
+"""
+
+for i in range(1, MOCHITEST_TOTAL_CHUNKS + 1):
+ TEST_SUITES["mochitest-%d" % i] = {
+ "aliases": ("m%d" % i,),
+ "mach_command": "mochitest",
+ "kwargs": {
+ "flavor": "mochitest",
+ "subsuite": "default",
+ "chunk_by_dir": MOCHITEST_CHUNK_BY_DIR,
+ "total_chunks": MOCHITEST_TOTAL_CHUNKS,
+ "this_chunk": i,
+ "test_paths": None,
+ },
+ }
+
+
+WPT_TYPES = set()
+for suite, data in TEST_SUITES.items():
+ if suite.startswith("web-platform-tests"):
+ WPT_TYPES.add(data["kwargs"]["subsuite"])
+
+
+_test_flavors = {
+ "a11y": "mochitest-a11y",
+ "browser-chrome": "mochitest-browser-chrome",
+ "chrome": "mochitest-chrome",
+ "crashtest": "crashtest",
+ "firefox-ui-functional": "firefox-ui-functional",
+ "firefox-ui-update": "firefox-ui-update",
+ "marionette": "marionette",
+ "mochitest": "mochitest-plain",
+ "puppeteer": "puppeteer",
+ "python": "python",
+ "reftest": "reftest",
+ "telemetry-tests-client": "telemetry-tests-client",
+ "web-platform-tests": "web-platform-tests",
+ "xpcshell": "xpcshell",
+}
+
+_test_subsuites = {
+ ("browser-chrome", "a11y"): "mochitest-browser-a11y",
+ ("browser-chrome", "devtools"): "mochitest-devtools-chrome",
+ ("browser-chrome", "media"): "mochitest-browser-media",
+ ("browser-chrome", "remote"): "mochitest-remote",
+ ("browser-chrome", "screenshots"): "mochitest-browser-chrome-screenshots",
+ ("chrome", "gpu"): "mochitest-chrome-gpu",
+ ("mochitest", "gpu"): "mochitest-plain-gpu",
+ ("mochitest", "media"): "mochitest-media",
+ ("mochitest", "robocop"): "robocop",
+ ("mochitest", "webgl1-core"): "mochitest-webgl1-core",
+ ("mochitest", "webgl1-ext"): "mochitest-webgl1-ext",
+ ("mochitest", "webgl2-core"): "mochitest-webgl2-core",
+ ("mochitest", "webgl2-ext"): "mochitest-webgl2-ext",
+ ("mochitest", "webgl2-deqp"): "mochitest-webgl2-deqp",
+ ("mochitest", "webgpu"): "mochitest-webgpu",
+ ("web-platform-tests", "testharness"): "web-platform-tests",
+ ("web-platform-tests", "crashtest"): "web-platform-tests-crashtest",
+ ("web-platform-tests", "print-reftest"): "web-platform-tests-print-reftest",
+ ("web-platform-tests", "reftest"): "web-platform-tests-reftest",
+ ("web-platform-tests", "wdspec"): "web-platform-tests-wdspec",
+}
+
+
+def get_suite_definition(flavor, subsuite=None, strict=False):
+ """Return a suite definition given a flavor and optional subsuite.
+
+ If strict is True, a subsuite must have its own entry in TEST_SUITES.
+ Otherwise, the entry for 'flavor' will be returned with the 'subsuite'
+ keyword arg set.
+
+ With or without strict mode, an empty dict will be returned if no
+ matching suite definition was found.
+ """
+ if not subsuite:
+ suite_name = _test_flavors.get(flavor)
+ return suite_name, TEST_SUITES.get(suite_name, {}).copy()
+
+ suite_name = _test_subsuites.get((flavor, subsuite))
+ if suite_name or strict:
+ return suite_name, TEST_SUITES.get(suite_name, {}).copy()
+
+ suite_name = _test_flavors.get(flavor)
+ if suite_name not in TEST_SUITES:
+ return suite_name, {}
+
+ suite = TEST_SUITES[suite_name].copy()
+ suite.setdefault("kwargs", {})
+ suite["kwargs"]["subsuite"] = subsuite
+ return suite_name, suite
+
+
+def rewrite_test_base(test, new_base):
+ """Rewrite paths in a test to be under a new base path.
+
+ This is useful for running tests from a separate location from where they
+ were defined.
+ """
+ test["here"] = mozpath.join(new_base, test["dir_relpath"])
+ test["path"] = mozpath.join(new_base, test["file_relpath"])
+ return test
+
+
+@six.add_metaclass(ABCMeta)
+class TestLoader(MozbuildObject):
+ @abstractmethod
+ def __call__(self):
+ """Generate test metadata."""
+
+
+class BuildBackendLoader(TestLoader):
+ def __call__(self):
+ """Loads the test metadata generated by the TestManifest build backend.
+
+ The data is stored in two files:
+
+ - <objdir>/all-tests.pkl
+ - <objdir>/test-defaults.pkl
+
+ The 'all-tests.pkl' file is a mapping of source path to test objects. The
+ 'test-defaults.pkl' file maps manifests to their DEFAULT configuration.
+ These manifest defaults will be merged into the test configuration of the
+ contained tests.
+ """
+ # If installing tests is going to result in re-generating the build
+ # backend, we need to do this here, so that the updated contents of
+ # all-tests.pkl make it to the set of tests to run.
+ if self.backend_out_of_date(
+ mozpath.join(self.topobjdir, "backend.TestManifestBackend")
+ ):
+ print("Test configuration changed. Regenerating backend.")
+ from mozbuild.gen_test_backend import gen_test_backend
+
+ gen_test_backend()
+
+ all_tests = os.path.join(self.topobjdir, "all-tests.pkl")
+ test_defaults = os.path.join(self.topobjdir, "test-defaults.pkl")
+
+ with open(all_tests, "rb") as fh:
+ test_data = pickle.load(fh)
+
+ with open(test_defaults, "rb") as fh:
+ defaults = pickle.load(fh)
+
+ # The keys in defaults use platform-specific path separators.
+ # self.topsrcdir was normalized to use /, revert back to \ if needed.
+ topsrcdir = os.path.normpath(self.topsrcdir)
+
+ for path, tests in six.iteritems(test_data):
+ for metadata in tests:
+ defaults_manifests = [metadata["manifest"]]
+
+ ancestor_manifest = metadata.get("ancestor_manifest")
+ if ancestor_manifest:
+ # The (ancestor manifest, included manifest) tuple
+ # contains the defaults of the included manifest, so
+ # use it instead of [metadata['manifest']].
+ ancestor_manifest = os.path.join(topsrcdir, ancestor_manifest)
+ defaults_manifests[0] = (ancestor_manifest, metadata["manifest"])
+ defaults_manifests.append(ancestor_manifest)
+
+ for manifest in defaults_manifests:
+ manifest_defaults = defaults.get(manifest)
+ if manifest_defaults:
+ metadata = combine_fields(manifest_defaults, metadata)
+
+ yield metadata
+
+
+class TestManifestLoader(TestLoader):
+ def __init__(self, *args, **kwargs):
+ super(TestManifestLoader, self).__init__(*args, **kwargs)
+ self.finder = FileFinder(self.topsrcdir)
+ self.reader = self.mozbuild_reader(config_mode="empty")
+ self.variables = {
+ "{}_MANIFESTS".format(k): v[0] for k, v in six.iteritems(TEST_MANIFESTS)
+ }
+ self.variables.update(
+ {"{}_MANIFESTS".format(f.upper()): f for f in REFTEST_FLAVORS}
+ )
+
+ def _load_manifestparser_manifest(self, mpath):
+ mp = TestManifest(
+ manifests=[mpath],
+ strict=True,
+ rootdir=self.topsrcdir,
+ finder=self.finder,
+ handle_defaults=True,
+ )
+ return (test for test in mp.tests)
+
+ def _load_reftest_manifest(self, mpath):
+ import reftest
+
+ manifest = reftest.ReftestManifest(finder=self.finder)
+ manifest.load(mpath)
+
+ for test in sorted(manifest.tests, key=lambda x: x.get("path")):
+ test["manifest_relpath"] = test["manifest"][len(self.topsrcdir) + 1 :]
+ yield test
+
+ def __call__(self):
+ for path, name, key, value in self.reader.find_variables_from_ast(
+ self.variables
+ ):
+ mpath = os.path.join(self.topsrcdir, os.path.dirname(path), value)
+ flavor = self.variables[name]
+
+ if name.rsplit("_", 1)[0].lower() in REFTEST_FLAVORS:
+ tests = self._load_reftest_manifest(mpath)
+ else:
+ tests = self._load_manifestparser_manifest(mpath)
+
+ for test in tests:
+ path = mozpath.normpath(test["path"])
+ assert mozpath.basedir(path, [self.topsrcdir])
+ relpath = path[len(self.topsrcdir) + 1 :]
+
+ # Add these keys for compatibility with the build backend loader.
+ test["flavor"] = flavor
+ test["file_relpath"] = relpath
+ test["srcdir_relpath"] = relpath
+ test["dir_relpath"] = mozpath.dirname(relpath)
+
+ yield test
+
+
+class TestResolver(MozbuildObject):
+ """Helper to resolve tests from the current environment to test files."""
+
+ test_rewrites = {
+ "a11y": "_tests/testing/mochitest/a11y",
+ "browser-chrome": "_tests/testing/mochitest/browser",
+ "chrome": "_tests/testing/mochitest/chrome",
+ "mochitest": "_tests/testing/mochitest/tests",
+ "xpcshell": "_tests/xpcshell",
+ }
+
+ def __init__(self, *args, **kwargs):
+ loader_cls = kwargs.pop("loader_cls", BuildBackendLoader)
+ super(TestResolver, self).__init__(*args, **kwargs)
+
+ self.load_tests = self._spawn(loader_cls)
+ self._tests = []
+ self._reset_state()
+
+ # These suites aren't registered in moz.build so require special handling.
+ self._puppeteer_loaded = False
+ self._tests_loaded = False
+ self._wpt_loaded = False
+
+ def _reset_state(self):
+ self._tests_by_path = OrderedDefaultDict(list)
+ self._tests_by_flavor = defaultdict(set)
+ self._tests_by_manifest = defaultdict(list)
+ self._test_dirs = set()
+
+ @property
+ def tests(self):
+ if not self._tests_loaded:
+ self._reset_state()
+ for test in self.load_tests():
+ self._tests.append(test)
+ self._tests_loaded = True
+ return self._tests
+
+ @property
+ def tests_by_path(self):
+ if not self._tests_by_path:
+ for test in self.tests:
+ self._tests_by_path[test["file_relpath"]].append(test)
+ return self._tests_by_path
+
+ @property
+ def tests_by_flavor(self):
+ if not self._tests_by_flavor:
+ for test in self.tests:
+ self._tests_by_flavor[test["flavor"]].add(test["file_relpath"])
+ return self._tests_by_flavor
+
+ @property
+ def tests_by_manifest(self):
+ if not self._tests_by_manifest:
+ for test in self.tests:
+ if test["flavor"] == "web-platform-tests":
+ # Use test ids instead of paths for WPT.
+ self._tests_by_manifest[test["manifest"]].append(test["name"])
+ else:
+ relpath = mozpath.relpath(
+ test["path"], mozpath.dirname(test["manifest"])
+ )
+ self._tests_by_manifest[test["manifest_relpath"]].append(relpath)
+ return self._tests_by_manifest
+
+ @property
+ def test_dirs(self):
+ if not self._test_dirs:
+ for test in self.tests:
+ self._test_dirs.add(test["dir_relpath"])
+ return self._test_dirs
+
+ def _resolve(
+ self, paths=None, flavor="", subsuite=None, under_path=None, tags=None
+ ):
+ """Given parameters, resolve them to produce an appropriate list of tests.
+
+ Args:
+ paths (list):
+ By default, set to None. If provided as a list of paths, then
+ this method will attempt to load the appropriate set of tests
+ that live in this path.
+
+ flavor (string):
+ By default, an empty string. If provided as a string, then this
+ method will attempt to load tests that belong to this flavor.
+ Additional filtering also takes the flavor into consideration.
+
+ subsuite (string):
+ By default, set to None. If provided as a string, then this value
+ is used to perform filtering of a candidate set of tests.
+ """
+ if tags:
+ tags = set(tags)
+
+ def fltr(tests):
+ """Filters tests based on several criteria.
+
+ Args:
+ tests (list):
+ List of tests that belong to the same candidate path.
+
+ Returns:
+ test (dict):
+ If the test survived the filtering process, it is returned
+ as a valid test.
+ """
+ for test in tests:
+ if flavor:
+ if flavor == "devtools" and test.get("flavor") != "browser-chrome":
+ continue
+ if flavor != "devtools" and test.get("flavor") != flavor:
+ continue
+
+ if subsuite and test.get("subsuite", "undefined") != subsuite:
+ continue
+
+ if tags and not (tags & set(test.get("tags", "").split())):
+ continue
+
+ if under_path and not test["file_relpath"].startswith(under_path):
+ continue
+
+ # Make a copy so modifications don't change the source.
+ yield dict(test)
+
+ paths = paths or []
+ paths = [mozpath.normpath(p) for p in paths]
+ if not paths:
+ paths = [None]
+
+ if flavor in ("", "puppeteer", None) and (
+ any(self.is_puppeteer_path(p) for p in paths) or paths == [None]
+ ):
+ self.add_puppeteer_manifest_data()
+
+ if flavor in ("", "web-platform-tests", None) and (
+ any(self.is_wpt_path(p) for p in paths) or paths == [None]
+ ):
+ self.add_wpt_manifest_data()
+
+ candidate_paths = set()
+
+ for path in sorted(paths):
+ if path is None:
+ candidate_paths |= set(self.tests_by_path.keys())
+ continue
+
+ if "*" in path:
+ candidate_paths |= {
+ p for p in self.tests_by_path if mozpath.match(p, path)
+ }
+ continue
+
+ # If the path is a directory, or the path is a prefix of a directory
+ # containing tests, pull in all tests in that directory.
+ if path in self.test_dirs or any(
+ p.startswith(path) for p in self.tests_by_path
+ ):
+ candidate_paths |= {p for p in self.tests_by_path if p.startswith(path)}
+ continue
+
+ # If the path is a manifest, add all tests defined in that manifest.
+ if any(path.endswith(e) for e in (".ini", ".list")):
+ key = "manifest" if os.path.isabs(path) else "manifest_relpath"
+ candidate_paths |= {
+ t["file_relpath"]
+ for t in self.tests
+ if mozpath.normpath(t[key]) == path
+ }
+ continue
+
+ # If it's a test file, add just that file.
+ candidate_paths |= {p for p in self.tests_by_path if path in p}
+
+ for p in sorted(candidate_paths):
+ tests = self.tests_by_path[p]
+ for test in fltr(tests):
+ yield test
+
+ def is_puppeteer_path(self, path):
+ if path is None:
+ return True
+ return mozpath.match(path, "remote/test/puppeteer/test/**")
+
+ def add_puppeteer_manifest_data(self):
+ if self._puppeteer_loaded:
+ return
+
+ self._reset_state()
+
+ test_path = os.path.join(self.topsrcdir, "remote", "test", "puppeteer", "test")
+ for root, dirs, paths in os.walk(test_path):
+ for filename in fnmatch.filter(paths, "*.spec.js"):
+ path = os.path.join(root, filename)
+ self._tests.append(
+ {
+ "path": os.path.abspath(path),
+ "flavor": "puppeteer",
+ "here": os.path.dirname(path),
+ "manifest": None,
+ "name": path,
+ "file_relpath": path,
+ "head": "",
+ "support-files": "",
+ "subsuite": "puppeteer",
+ "dir_relpath": os.path.dirname(path),
+ "srcdir_relpath": path,
+ }
+ )
+
+ self._puppeteer_loaded = True
+
+ def is_wpt_path(self, path):
+ """Checks if path forms part of the known web-platform-test paths.
+
+ Args:
+ path (str or None):
+ Path to check against the list of known web-platform-test paths.
+
+ Returns:
+ Boolean value. True if path is part of web-platform-tests path, or
+ path is None. False otherwise.
+ """
+ if path is None:
+ return True
+ if mozpath.match(path, "testing/web-platform/tests/**"):
+ return True
+ if mozpath.match(path, "testing/web-platform/mozilla/tests/**"):
+ return True
+ return False
+
+ def get_wpt_group(self, test, depth=3):
+ """Given a test object set the group (aka manifest) that it belongs to.
+
+ If a custom value for `depth` is provided, it will override the default
+ value of 3 path components.
+
+ Args:
+ test (dict): Test object for the particular suite and subsuite.
+ depth (int, optional): Custom number of path elements.
+
+ Returns:
+ str: The group the given test belongs to.
+ """
+ # This takes into account that for mozilla-specific WPT tests, the path
+ # contains an extra '/_mozilla' prefix that must be accounted for.
+ if test["name"].startswith("/_mozilla"):
+ depth = depth + 1
+
+ # Webdriver tests are nested in "classic" and "bidi" folders. Increase
+ # the depth to avoid grouping all classic or bidi tests in one chunk.
+ if test["name"].startswith(("/webdriver", "/_mozilla/webdriver")):
+ depth = depth + 1
+
+ group = os.path.dirname(test["name"])
+ while group.count("/") > depth:
+ group = os.path.dirname(group)
+ return group
+
+ def add_wpt_manifest_data(self):
+ """Adds manifest data for web-platform-tests into the list of available tests.
+
+ Upon invocation, this method will download from firefox-ci the most recent
+ version of the web-platform-tests manifests.
+
+ Once manifest is downloaded, this method will add details about each test
+ into the list of available tests.
+ """
+ if self._wpt_loaded:
+ return
+
+ self._reset_state()
+
+ wpt_path = os.path.join(self.topsrcdir, "testing", "web-platform")
+ sys.path = [wpt_path] + sys.path
+
+ import logging
+
+ import manifestupdate
+
+ logger = logging.getLogger("manifestupdate")
+ logger.disabled = True
+
+ manifests = manifestupdate.run(
+ self.topsrcdir,
+ self.topobjdir,
+ rebuild=False,
+ download=True,
+ config_path=None,
+ rewrite_config=True,
+ update=True,
+ logger=logger,
+ )
+ if not manifests:
+ print("Loading wpt manifest failed")
+ return
+
+ for manifest, data in six.iteritems(manifests):
+ tests_root = data[
+ "tests_path"
+ ] # full path on disk until web-platform tests directory
+
+ for test_type, path, tests in manifest:
+ full_path = mozpath.join(tests_root, path)
+ src_path = mozpath.relpath(full_path, self.topsrcdir)
+ if test_type not in WPT_TYPES:
+ continue
+
+ full_path = mozpath.join(tests_root, path) # absolute path on disk
+ src_path = mozpath.relpath(full_path, self.topsrcdir)
+
+ for test in tests:
+ testobj = {
+ "head": "",
+ "support-files": "",
+ "path": full_path,
+ "flavor": "web-platform-tests",
+ "subsuite": test_type,
+ "here": mozpath.dirname(path),
+ "name": test.id,
+ "file_relpath": src_path,
+ "srcdir_relpath": src_path,
+ "dir_relpath": mozpath.dirname(src_path),
+ }
+ group = self.get_wpt_group(testobj)
+ testobj["manifest"] = group
+
+ test_root = "tests"
+ if group.startswith("/_mozilla"):
+ test_root = os.path.join("mozilla", "tests")
+ group = group[len("/_mozilla") :]
+
+ group = group.lstrip("/")
+ testobj["manifest_relpath"] = os.path.join(
+ wpt_path, test_root, group
+ )
+ self._tests.append(testobj)
+
+ self._wpt_loaded = True
+
+ def resolve_tests(self, cwd=None, **kwargs):
+ """Resolve tests from an identifier.
+
+ This is a generator of dicts describing each test. All arguments are
+ optional.
+
+ Paths in returned tests are automatically translated to the paths in
+ the _tests directory under the object directory.
+
+ Args:
+ cwd (str):
+ If specified, we will limit our results to tests under this
+ directory. The directory should be defined as an absolute path
+ under topsrcdir or topobjdir.
+
+ paths (list):
+ An iterable of values to use to identify tests to run. If an
+ entry is a known test file, tests associated with that file are
+ returned (there may be multiple configurations for a single
+ file). If an entry is a directory, or a prefix of a directory
+ containing tests, all tests in that directory are returned. If
+ the string appears in a known test file, that test file is
+ considered. If the path contains a wildcard pattern, tests
+ matching that pattern are returned.
+
+ under_path (str):
+ If specified, will be used to filter out tests that aren't in
+ the specified path prefix relative to topsrcdir or the test's
+ installed dir.
+
+ flavor (str):
+ If specified, will be used to filter returned tests to only be
+ the flavor specified. A flavor is something like ``xpcshell``.
+
+ subsuite (str):
+ If specified will be used to filter returned tests to only be
+ in the subsuite specified. To filter only tests that *don't*
+ have any subsuite, pass the string 'undefined'.
+
+ tags (list):
+ If specified, will be used to filter out tests that don't contain
+ a matching tag.
+ """
+ if cwd:
+ norm_cwd = mozpath.normpath(cwd)
+ norm_srcdir = mozpath.normpath(self.topsrcdir)
+ norm_objdir = mozpath.normpath(self.topobjdir)
+
+ reldir = None
+
+ if norm_cwd.startswith(norm_objdir):
+ reldir = norm_cwd[len(norm_objdir) + 1 :]
+ elif norm_cwd.startswith(norm_srcdir):
+ reldir = norm_cwd[len(norm_srcdir) + 1 :]
+
+ kwargs["under_path"] = reldir
+
+ rewrite_base = None
+ for test in self._resolve(**kwargs):
+ rewrite_base = self.test_rewrites.get(test["flavor"], None)
+
+ if rewrite_base:
+ rewrite_base = os.path.join(
+ self.topobjdir, os.path.normpath(rewrite_base)
+ )
+ yield rewrite_test_base(test, rewrite_base)
+ else:
+ yield test
+
+ def resolve_metadata(self, what):
+ """Resolve tests based on the given metadata. If not specified, metadata
+ from outgoing files will be used instead.
+ """
+ # Parse arguments and assemble a test "plan."
+ run_suites = set()
+ run_tests = []
+
+ for entry in what:
+ # If the path matches the name or alias of an entire suite, run
+ # the entire suite.
+ if entry in TEST_SUITES:
+ run_suites.add(entry)
+ continue
+ suitefound = False
+ for suite, v in six.iteritems(TEST_SUITES):
+ if entry.lower() in v.get("aliases", []):
+ run_suites.add(suite)
+ suitefound = True
+ if suitefound:
+ continue
+
+ # Now look for file/directory matches in the TestResolver.
+ relpath = self._wrap_path_argument(entry).relpath()
+ tests = list(self.resolve_tests(paths=[relpath]))
+ run_tests.extend(tests)
+
+ if not tests:
+ print("UNKNOWN TEST: %s" % entry, file=sys.stderr)
+
+ return run_suites, run_tests
diff --git a/testing/mozbase/moztest/moztest/results.py b/testing/mozbase/moztest/moztest/results.py
new file mode 100644
index 0000000000..5193a9db2b
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/results.py
@@ -0,0 +1,366 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import time
+
+import mozinfo
+import six
+
+
class TestContext(object):
    """Stores context data about the test"""

    # Attributes that participate in equality and hashing.
    attrs = [
        "hostname",
        "arch",
        "env",
        "os",
        "os_version",
        "tree",
        "revision",
        "product",
        "logfile",
        "testgroup",
        "harness",
        "buildtype",
    ]

    def __init__(
        self,
        hostname="localhost",
        tree="",
        revision="",
        product="",
        logfile=None,
        arch="",
        operating_system="",
        testgroup="",
        harness="moztest",
        buildtype="",
    ):
        self.hostname = hostname
        # Fall back to the values detected by mozinfo when not given.
        self.arch = arch if arch else mozinfo.processor
        self.os = operating_system if operating_system else mozinfo.os
        self.os_version = mozinfo.version
        # Snapshot of the environment at construction time.
        self.env = dict(os.environ)
        self.tree = tree
        self.revision = revision
        self.product = product
        self.logfile = logfile
        self.testgroup = testgroup
        self.harness = harness
        self.buildtype = buildtype

    def __str__(self):
        return "%s (%s, %s)" % (self.hostname, self.os, self.arch)

    def __repr__(self):
        return "<%s>" % self.__str__()

    def __eq__(self, other):
        if not isinstance(other, TestContext):
            return False
        return all(getattr(self, a) == getattr(other, a) for a in self.attrs)

    def __hash__(self):
        def hashable(attr):
            value = getattr(self, attr)
            # The captured environment dict is not hashable as-is.
            if isinstance(value, dict):
                return frozenset(six.iteritems(value))
            return value

        return hash(frozenset(hashable(a) for a in self.attrs))
+
+
class TestResult(object):
    """Stores test result data.

    A result moves through two states: "expected" (set at construction) and
    "actual" (set by :meth:`finish`), which combine into a computed result
    such as PASS, UNEXPECTED-FAIL or KNOWN-FAIL.
    """

    # Computed results that count as failures.
    FAIL_RESULTS = [
        "UNEXPECTED-PASS",
        "UNEXPECTED-FAIL",
        "ERROR",
    ]
    # Every value that calculate_result() / infer_results() can produce.
    COMPUTED_RESULTS = FAIL_RESULTS + [
        "PASS",
        "KNOWN-FAIL",
        "SKIPPED",
    ]
    # Raw outcomes accepted for expected/actual results.
    POSSIBLE_RESULTS = [
        "PASS",
        "FAIL",
        "SKIP",
        "ERROR",
    ]

    def __init__(
        self, name, test_class="", time_start=None, context=None, result_expected="PASS"
    ):
        """Create a TestResult instance.
        name = name of the test that is running
        test_class = the class that the test belongs to
        time_start = timestamp (seconds since UNIX epoch) of when the test started
                     running; if not provided, defaults to the current time
                     ! Provide 0 if you only have the duration
        context = TestContext instance; can be None
        result_expected = string representing the expected outcome of the test"""

        msg = "Result '%s' not in possible results: %s" % (
            result_expected,
            ", ".join(self.POSSIBLE_RESULTS),
        )
        # assert (not raise) is intentional: callers, including moztest's own
        # selftests, expect AssertionError for invalid arguments.
        # Python 3 only: six.string_types is just (str,), so check str directly.
        assert isinstance(name, str), "name has to be a string"
        assert result_expected in self.POSSIBLE_RESULTS, msg

        self.name = name
        self.test_class = test_class
        self.context = context
        self.time_start = time_start if time_start is not None else time.time()
        self.time_end = None
        self._result_expected = result_expected
        self._result_actual = None
        self.result = None
        self.filename = None
        self.description = None
        self.output = []
        self.reason = None

    @property
    def test_name(self):
        # e.g. test_class "module.Class" + name "meth" -> "module.py module.Class.meth"
        return "%s.py %s.%s" % (
            self.test_class.split(".")[0],
            self.test_class,
            self.name,
        )

    def __str__(self):
        return "%s | %s (%s) | %s" % (
            self.result or "PENDING",
            self.name,
            self.test_class,
            self.reason,
        )

    def __repr__(self):
        return "<%s>" % self.__str__()

    def calculate_result(self, expected, actual):
        """Combine an expected and an actual raw outcome into a computed result."""
        if actual == "ERROR":
            return "ERROR"
        if actual == "SKIP":
            return "SKIPPED"

        if expected == "PASS":
            if actual == "PASS":
                return "PASS"
            if actual == "FAIL":
                return "UNEXPECTED-FAIL"

        if expected == "FAIL":
            if actual == "PASS":
                return "UNEXPECTED-PASS"
            if actual == "FAIL":
                return "KNOWN-FAIL"

        # if actual is skip or error, we return at the beginning, so if we get
        # here it is definitely some kind of error
        return "ERROR"

    def infer_results(self, computed_result):
        """Back-fill expected/actual from an already-computed result."""
        assert computed_result in self.COMPUTED_RESULTS
        if computed_result == "UNEXPECTED-PASS":
            expected = "FAIL"
            actual = "PASS"
        elif computed_result == "UNEXPECTED-FAIL":
            expected = "PASS"
            actual = "FAIL"
        elif computed_result == "KNOWN-FAIL":
            expected = actual = "FAIL"
        elif computed_result == "SKIPPED":
            expected = actual = "SKIP"
        else:
            # PASS and ERROR are ambiguous; leave expected/actual untouched.
            return
        self._result_expected = expected
        self._result_actual = actual

    def finish(self, result, time_end=None, output=None, reason=None):
        """Marks the test as finished, storing its end time and status
        ! Provide the duration as time_end if you only have that."""

        if result in self.POSSIBLE_RESULTS:
            self._result_actual = result
            self.result = self.calculate_result(
                self._result_expected, self._result_actual
            )
        elif result in self.COMPUTED_RESULTS:
            self.infer_results(result)
            self.result = result
        else:
            valid = self.POSSIBLE_RESULTS + self.COMPUTED_RESULTS
            msg = "Result '%s' not valid. Need one of: %s" % (result, ", ".join(valid))
            raise ValueError(msg)

        # use lists instead of multiline strings
        if isinstance(output, str):
            output = output.splitlines()

        self.time_end = time_end if time_end is not None else time.time()
        self.output = output or self.output
        self.reason = reason

    @property
    def finished(self):
        """Boolean saying if the test is finished or not"""
        return self.result is not None

    @property
    def duration(self):
        """Returns the time it took for the test to finish. If the test is
        not finished, returns the elapsed time so far"""
        if self.result is not None:
            return self.time_end - self.time_start
        else:
            # returns the elapsed time
            return time.time() - self.time_start
+
+
class TestResultCollection(list):
    """Container class that stores test results"""

    resultClass = TestResult

    def __init__(self, suite_name, time_taken=0, resultClass=None):
        super(TestResultCollection, self).__init__()
        self.suite_name = suite_name
        self.time_taken = time_taken
        if resultClass is not None:
            self.resultClass = resultClass

    def __str__(self):
        return "%s (%.2fs)\n%s" % (
            self.suite_name,
            self.time_taken,
            list.__str__(self),
        )

    def subset(self, predicate):
        """Return a new collection holding the results matching *predicate*."""
        sub = TestResultCollection(self.suite_name)
        total_duration = 0
        for entry in self.filter(predicate):
            sub.append(entry)
            total_duration += entry.duration
        sub.time_taken = total_duration
        return sub

    @property
    def contexts(self):
        """List of unique contexts for the test results contained"""
        return list({entry.context for entry in self})

    def filter(self, predicate):
        """Returns a generator of TestResults that satisfy a given predicate"""
        return (entry for entry in self if predicate(entry))

    def tests_with_result(self, result):
        """Returns a generator of TestResults with the given result"""
        valid = self.resultClass.COMPUTED_RESULTS
        assert result in valid, "Result '%s' not in possible results: %s" % (
            result,
            ", ".join(valid),
        )
        return self.filter(lambda entry: entry.result == result)

    @property
    def tests(self):
        """Generator of all tests in the collection"""
        return (entry for entry in self)

    def add_result(
        self,
        test,
        result_expected="PASS",
        result_actual="PASS",
        output="",
        context=None,
    ):
        def qualified_class_name(obj):
            cls = obj.__class__
            return "%s.%s" % (cls.__module__, cls.__name__)

        entry = self.resultClass(
            name=str(test).split()[0],
            test_class=qualified_class_name(test),
            time_start=0,
            result_expected=result_expected,
            context=context,
        )
        entry.finish(
            result_actual,
            time_end=0,
            reason=relevant_line(output),
            output=output,
        )
        self.append(entry)

    @property
    def num_failures(self):
        return sum(
            1 for entry in self if entry.result in self.resultClass.FAIL_RESULTS
        )

    def add_unittest_result(self, result, context=None):
        """Adds the python unittest result provided to the collection"""
        # NOTE(review): *context* is accepted but not forwarded to add_result;
        # preserved as-is so behavior is unchanged.
        self.time_taken += getattr(result, "time_taken", 0)

        for test, output in result.errors:
            self.add_result(test, result_actual="ERROR", output=output)

        for test, output in result.failures:
            self.add_result(test, result_actual="FAIL", output=output)

        for test in getattr(result, "unexpectedSuccesses", ()):
            self.add_result(test, result_expected="FAIL", result_actual="PASS")

        for test, output in getattr(result, "skipped", ()):
            self.add_result(
                test, result_expected="SKIP", result_actual="SKIP", output=output
            )

        for test, output in getattr(result, "expectedFailures", ()):
            self.add_result(
                test, result_expected="FAIL", result_actual="FAIL", output=output
            )

        # unittest does not store these by default
        for test in getattr(result, "tests_passed", ()):
            self.add_result(test)

    @classmethod
    def from_unittest_results(cls, context, *results):
        """Creates a TestResultCollection containing the given python
        unittest results"""

        if not results:
            return cls("from unittest")

        # all the TestResult instances share the same context
        context = context or TestContext()

        collection = cls("from %s" % results[0].__class__.__name__)

        for result in results:
            collection.add_unittest_result(result, context)

        return collection
+
+
# used to get exceptions/errors from tracebacks
def relevant_line(s):
    """Return the first line of *s* mentioning an error or exception, or "N/A"."""
    keywords = ("Error:", "Exception:", "error:", "exception:")
    for line in s.splitlines():
        if any(keyword in line for keyword in keywords):
            return line
    return "N/A"
diff --git a/testing/mozbase/moztest/moztest/selftest/__init__.py b/testing/mozbase/moztest/moztest/selftest/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/selftest/__init__.py
diff --git a/testing/mozbase/moztest/moztest/selftest/fixtures.py b/testing/mozbase/moztest/moztest/selftest/fixtures.py
new file mode 100644
index 0000000000..5d21e7aa63
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/selftest/fixtures.py
@@ -0,0 +1,116 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""Pytest fixtures to help set up Firefox and a tests archive
+in test harness selftests.
+"""
+
+import os
+import shutil
+import sys
+
+import mozinstall
+import pytest
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+try:
+ from mozbuild.base import MozbuildObject
+
+ build = MozbuildObject.from_environment(cwd=here)
+except ImportError:
+ build = None
+
+
+HARNESS_ROOT_NOT_FOUND = """
+Could not find test harness root. Either a build or the 'GECKO_INSTALLER_URL'
+environment variable is required.
+""".lstrip()
+
+
def _get_test_harness(suite, install_dir, flavor="plain"):
    """Locate the root directory of a test harness, or return None."""
    candidates = []

    # A local build's harness takes precedence.
    if build:
        candidates.append(os.path.join(build.topobjdir, "_tests", install_dir))

    # Otherwise fall back to an unpacked tests archive.
    if "TEST_HARNESS_ROOT" in os.environ:
        candidates.append(os.path.join(os.environ["TEST_HARNESS_ROOT"], suite))

    for harness_root in candidates:
        if os.path.isdir(harness_root):
            return harness_root

    # Couldn't find a harness root, let caller do error handling.
    return None
+
+
@pytest.fixture(scope="session")
def setup_test_harness(request, flavor="plain"):
    """Fixture for setting up a mozharness-based test harness like
    mochitest or reftest.

    Yields a callable ``inner(files_dir, *args, **kwargs)`` where extra
    arguments are forwarded to ``_get_test_harness``. It returns the path
    the selftest files were linked/copied to, or None when no harness root
    could be found.
    """

    def inner(files_dir, *args, **kwargs):
        # Locate the harness root (local build or TEST_HARNESS_ROOT archive).
        harness_root = _get_test_harness(*args, **kwargs)
        test_root = None
        if harness_root:
            # Make the harness importable for the selftests.
            sys.path.insert(0, harness_root)

            # Link the test files to the test package so updates are automatically
            # picked up. Fallback to copy on Windows.
            if files_dir:
                test_root = os.path.join(harness_root, "tests", "selftests")
                if kwargs.get("flavor") == "browser-chrome":
                    test_root = os.path.join(
                        harness_root, "browser", "tests", "selftests"
                    )
                if not os.path.exists(test_root):
                    # A dangling symlink fails os.path.exists() but still
                    # occupies the name; remove it before recreating.
                    if os.path.lexists(test_root):
                        os.remove(test_root)

                    if hasattr(os, "symlink"):
                        if not os.path.isdir(os.path.dirname(test_root)):
                            os.makedirs(os.path.dirname(test_root))
                        try:
                            os.symlink(files_dir, test_root)
                        except FileExistsError:
                            # another pytest job set up the symlink - no problem
                            pass
                    else:
                        shutil.copytree(files_dir, test_root)
        elif "TEST_HARNESS_ROOT" in os.environ:
            # The mochitest tests will run regardless of whether a build exists or not.
            # In a local environment, they should simply be skipped if setup fails. But
            # in automation, we'll need to make sure an error is propagated up.
            pytest.fail(HARNESS_ROOT_NOT_FOUND)
        else:
            # Tests will be marked skipped by the calls to pytest.importorskip() below.
            # We are purposefully not failing here because running |mach python-test|
            # without a build is a perfectly valid use case.
            pass
        return test_root

    return inner
+
+
def binary():
    """Return a Firefox binary"""
    # A local build wins; any failure (including no build at all) falls through.
    try:
        return build.get_binary_path()
    except Exception:
        pass

    # Next, look for an installed binary under the pytest temp dir.
    app = "firefox"
    install_dir = os.path.join(os.environ["PYTHON_TEST_TMP"], app)
    if os.path.isdir(install_dir):
        try:
            return mozinstall.get_binary(install_dir, app_name=app)
        except Exception:
            pass

    # Finally, honor an explicitly provided binary path.
    if "GECKO_BINARY_PATH" in os.environ:
        return os.environ["GECKO_BINARY_PATH"]
+
+
@pytest.fixture(name="binary", scope="session")
def binary_fixture():
    # Session-scoped wrapper exposing binary() as the "binary" fixture.
    return binary()
diff --git a/testing/mozbase/moztest/moztest/selftest/output.py b/testing/mozbase/moztest/moztest/selftest/output.py
new file mode 100644
index 0000000000..cdc6600f41
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/selftest/output.py
@@ -0,0 +1,52 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""Methods for testing interactions with mozharness."""
+
+import json
+import os
+import sys
+
+from mozbuild.base import MozbuildObject
+from six import string_types
+
+here = os.path.abspath(os.path.dirname(__file__))
+build = MozbuildObject.from_environment(cwd=here)
+
+sys.path.insert(0, os.path.join(build.topsrcdir, "testing", "mozharness"))
+from mozharness.base.errors import BaseErrorList
+from mozharness.base.log import INFO
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.errors import HarnessErrorList
+
+
def get_mozharness_status(suite, lines, status, formatter=None, buf=None):
    """Given list of log lines, determine what the mozharness status would be.

    Args:
        suite (str): mozharness suite category for the parser.
        lines (list): structured-log entries (dicts) to feed the parser.
        status (int): exit status of the harness process.
        formatter: optional formatter to install on the parser.
        buf: optional file-like object to receive the parser's output;
            when omitted, output is discarded via os.devnull.

    Returns:
        The value of ``parser.evaluate_parser(status)``.
    """
    parser = StructuredOutputParser(
        config={"log_level": INFO},
        error_list=BaseErrorList + HarnessErrorList,
        strict=False,
        suite_category=suite,
    )

    if formatter:
        parser.formatter = formatter

    # Processing the log with mozharness will re-print all the output to stdout
    # Since this exact same output has already been printed by the actual test
    # run, temporarily redirect stdout to devnull.
    devnull = None
    if buf is None:
        # We open this handle ourselves, so we must close it (the previous
        # version leaked it).
        buf = devnull = open(os.devnull, "w")
    orig = sys.stdout
    sys.stdout = buf
    try:
        for line in lines:
            parser.parse_single_line(json.dumps(line))
    finally:
        # Always restore stdout, even if parsing raises.
        sys.stdout = orig
        if devnull is not None:
            devnull.close()
    return parser.evaluate_parser(status)
+
+
def filter_action(actions, lines):
    """Return the entries of *lines* whose "action" key is in *actions*.

    *actions* may be a single action name or an iterable of names.
    """
    # Python 3 only: six.string_types is just (str,), so check str directly.
    if isinstance(actions, str):
        actions = (actions,)
    return [line for line in lines if line["action"] in actions]
diff --git a/testing/mozbase/moztest/setup.py b/testing/mozbase/moztest/setup.py
new file mode 100644
index 0000000000..f6749128d7
--- /dev/null
+++ b/testing/mozbase/moztest/setup.py
@@ -0,0 +1,33 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
from setuptools import find_packages, setup

# Package version; bump on release.
PACKAGE_VERSION = "1.1.0"

# dependencies
deps = ["mozinfo"]

# Standard mozbase packaging boilerplate; metadata mirrors the other
# testing/mozbase packages.
setup(
    name="moztest",
    version=PACKAGE_VERSION,
    description="Package for storing and outputting Mozilla test results",
    long_description="see https://firefox-source-docs.mozilla.org/mozbase/index.html",
    classifiers=[
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Development Status :: 5 - Production/Stable",
    ],
    # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
    keywords="mozilla",
    author="Mozilla Automation and Tools team",
    author_email="tools@lists.mozilla.org",
    url="https://wiki.mozilla.org/Auto-tools/Projects/Mozbase",
    license="MPL",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=deps,
)
diff --git a/testing/mozbase/moztest/tests/data/srcdir/apple/a11y.ini b/testing/mozbase/moztest/tests/data/srcdir/apple/a11y.ini
new file mode 100644
index 0000000000..a4411233f1
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/apple/a11y.ini
@@ -0,0 +1 @@
+[test_a11y.html]
diff --git a/testing/mozbase/moztest/tests/data/srcdir/apple/moz.build b/testing/mozbase/moztest/tests/data/srcdir/apple/moz.build
new file mode 100644
index 0000000000..8b13149112
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/apple/moz.build
@@ -0,0 +1 @@
+A11Y_MANIFESTS += ["a11y.ini"]
diff --git a/testing/mozbase/moztest/tests/data/srcdir/banana/moz.build b/testing/mozbase/moztest/tests/data/srcdir/banana/moz.build
new file mode 100644
index 0000000000..110a4ce91e
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/banana/moz.build
@@ -0,0 +1 @@
+XPCSHELL_TESTS_MANIFESTS += ["xpcshell.ini"]
diff --git a/testing/mozbase/moztest/tests/data/srcdir/banana/xpcshell.ini b/testing/mozbase/moztest/tests/data/srcdir/banana/xpcshell.ini
new file mode 100644
index 0000000000..bef13e69bb
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/banana/xpcshell.ini
@@ -0,0 +1,2 @@
+[currant/test_xpcshell_A.js]
+[currant/test_xpcshell_B.js]
diff --git a/testing/mozbase/moztest/tests/data/srcdir/carrot/moz.build b/testing/mozbase/moztest/tests/data/srcdir/carrot/moz.build
new file mode 100644
index 0000000000..64099c2457
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/carrot/moz.build
@@ -0,0 +1 @@
+XPCSHELL_TESTS_MANIFESTS += ["xpcshell-one.ini", "xpcshell-two.ini"]
diff --git a/testing/mozbase/moztest/tests/data/srcdir/carrot/xpcshell-one.ini b/testing/mozbase/moztest/tests/data/srcdir/carrot/xpcshell-one.ini
new file mode 100644
index 0000000000..947cdec88a
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/carrot/xpcshell-one.ini
@@ -0,0 +1,5 @@
+[DEFAULT]
+head = head_one.js
+
+[include:xpcshell-shared.ini]
+stick = one
diff --git a/testing/mozbase/moztest/tests/data/srcdir/carrot/xpcshell-shared.ini b/testing/mozbase/moztest/tests/data/srcdir/carrot/xpcshell-shared.ini
new file mode 100644
index 0000000000..580cdfcc32
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/carrot/xpcshell-shared.ini
@@ -0,0 +1,4 @@
+# This is not in moz.build, so it is not referenced on its own.
+# It is however included by xpcshell-one.ini and xpcshell-two.ini.
+
+[test_included.js]
diff --git a/testing/mozbase/moztest/tests/data/srcdir/carrot/xpcshell-two.ini b/testing/mozbase/moztest/tests/data/srcdir/carrot/xpcshell-two.ini
new file mode 100644
index 0000000000..d619f2ef6e
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/carrot/xpcshell-two.ini
@@ -0,0 +1,5 @@
+[DEFAULT]
+head = head_two.js
+
+[include:xpcshell-shared.ini]
+stick = two
diff --git a/testing/mozbase/moztest/tests/data/srcdir/dragonfruit/elderberry/xpcshell_updater.ini b/testing/mozbase/moztest/tests/data/srcdir/dragonfruit/elderberry/xpcshell_updater.ini
new file mode 100644
index 0000000000..6d48338528
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/dragonfruit/elderberry/xpcshell_updater.ini
@@ -0,0 +1,7 @@
+[DEFAULT]
+support-files =
+ data/**
+ xpcshell_updater.ini
+
+[test_xpcshell_C.js]
+head=head_updates.js head2.js
diff --git a/testing/mozbase/moztest/tests/data/srcdir/dragonfruit/moz.build b/testing/mozbase/moztest/tests/data/srcdir/dragonfruit/moz.build
new file mode 100644
index 0000000000..110a4ce91e
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/dragonfruit/moz.build
@@ -0,0 +1 @@
+XPCSHELL_TESTS_MANIFESTS += ["xpcshell.ini"]
diff --git a/testing/mozbase/moztest/tests/data/srcdir/dragonfruit/xpcshell.ini b/testing/mozbase/moztest/tests/data/srcdir/dragonfruit/xpcshell.ini
new file mode 100644
index 0000000000..e9c43a4f7e
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/dragonfruit/xpcshell.ini
@@ -0,0 +1,4 @@
+[include:elderberry/xpcshell_updater.ini]
+
+[elderberry/test_xpcshell_C.js]
+head=head_update.js
diff --git a/testing/mozbase/moztest/tests/data/srcdir/fig/grape/instrumentation.ini b/testing/mozbase/moztest/tests/data/srcdir/fig/grape/instrumentation.ini
new file mode 100644
index 0000000000..ac567351e0
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/fig/grape/instrumentation.ini
@@ -0,0 +1,2 @@
+[src/TestInstrumentationA.java]
+subsuite=background
diff --git a/testing/mozbase/moztest/tests/data/srcdir/fig/huckleberry/instrumentation.ini b/testing/mozbase/moztest/tests/data/srcdir/fig/huckleberry/instrumentation.ini
new file mode 100644
index 0000000000..043752dd89
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/fig/huckleberry/instrumentation.ini
@@ -0,0 +1,2 @@
+[src/TestInstrumentationB.java]
+subsuite=browser
diff --git a/testing/mozbase/moztest/tests/data/srcdir/fig/moz.build b/testing/mozbase/moztest/tests/data/srcdir/fig/moz.build
new file mode 100644
index 0000000000..34abb7dd8c
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/fig/moz.build
@@ -0,0 +1,4 @@
+ANDROID_INSTRUMENTATION_MANIFESTS += [
+ "grape/instrumentation.ini",
+ "huckleberry/instrumentation.ini",
+]
diff --git a/testing/mozbase/moztest/tests/data/srcdir/juniper/browser.ini b/testing/mozbase/moztest/tests/data/srcdir/juniper/browser.ini
new file mode 100644
index 0000000000..be54fdb225
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/juniper/browser.ini
@@ -0,0 +1 @@
+[browser_chrome.js]
diff --git a/testing/mozbase/moztest/tests/data/srcdir/kiwi/browser.ini b/testing/mozbase/moztest/tests/data/srcdir/kiwi/browser.ini
new file mode 100644
index 0000000000..f2225b828a
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/kiwi/browser.ini
@@ -0,0 +1,3 @@
+[browser_devtools.js]
+subsuite=devtools
+tags=devtools
diff --git a/testing/mozbase/moztest/tests/data/srcdir/moz.build b/testing/mozbase/moztest/tests/data/srcdir/moz.build
new file mode 100644
index 0000000000..2e97b2901c
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/moz.build
@@ -0,0 +1,4 @@
+BROWSER_CHROME_MANIFESTS += [
+ "juniper/browser.ini",
+ "kiwi/browser.ini",
+]
diff --git a/testing/mozbase/moztest/tests/data/srcdir/wpt_manifest_data.json b/testing/mozbase/moztest/tests/data/srcdir/wpt_manifest_data.json
new file mode 100644
index 0000000000..9067b0fad7
--- /dev/null
+++ b/testing/mozbase/moztest/tests/data/srcdir/wpt_manifest_data.json
@@ -0,0 +1,8 @@
+{
+ "loganberry/web-platform/tests": {
+ "testharness": ["html/test_wpt.html"]
+ },
+ "loganberry/web-platform/mozilla/tests": {
+ "testharness": ["html/test_wpt.html"]
+ }
+}
diff --git a/testing/mozbase/moztest/tests/manifest.ini b/testing/mozbase/moztest/tests/manifest.ini
new file mode 100644
index 0000000000..292bb99adb
--- /dev/null
+++ b/testing/mozbase/moztest/tests/manifest.ini
@@ -0,0 +1,6 @@
+[DEFAULT]
+subsuite = mozbase
+
+[test.py]
+[test_resolve.py]
+
diff --git a/testing/mozbase/moztest/tests/test.py b/testing/mozbase/moztest/tests/test.py
new file mode 100644
index 0000000000..b2c2e7ff18
--- /dev/null
+++ b/testing/mozbase/moztest/tests/test.py
@@ -0,0 +1,54 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import math
+import time
+
+import mozunit
+import pytest
+from moztest.results import TestContext, TestResult, TestResultCollection
+
+
def test_results():
    """Invalid result values are rejected by TestResult."""
    # An unknown expected-result string fails the constructor's assertion.
    with pytest.raises(AssertionError):
        TestResult("test", result_expected="hello")
    # An unknown actual-result string is rejected at finish() time.
    result = TestResult("test")
    with pytest.raises(ValueError):
        result.finish(result="good bye")
+
+
def test_time():
    """duration should roughly track wall-clock time when not given explicitly."""
    started = time.time()
    result = TestResult("test")
    time.sleep(1)
    result.finish("PASS")
    elapsed = time.time() - started
    # Allow generous slack; we only care that duration is in the right ballpark.
    assert abs(elapsed - result.duration) < 1
+
+
def test_custom_time():
    """Explicit time_start/time_end override wall-clock measurement."""
    result = TestResult("test", time_start=0)
    result.finish(result="PASS", time_end=1000)
    assert result.duration == 1000
+
+
def test_unique_contexts():
    """Identical contexts are de-duplicated across a result collection."""
    hosts = ["host1", "host2", "host2", "host1"]
    results = [
        TestResult("t%d" % (index + 1), context=TestContext(host))
        for index, host in enumerate(hosts)
    ]

    collection = TestResultCollection("tests")
    collection.extend(results)

    # host1 and host2 each appear twice, but only two distinct contexts remain.
    assert len(collection.contexts) == 2
+
+
# Support running this file directly; mozunit discovers and runs the tests.
if __name__ == "__main__":
    mozunit.main()
diff --git a/testing/mozbase/moztest/tests/test_resolve.py b/testing/mozbase/moztest/tests/test_resolve.py
new file mode 100644
index 0000000000..acf7cbc6fa
--- /dev/null
+++ b/testing/mozbase/moztest/tests/test_resolve.py
@@ -0,0 +1,577 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+# flake8: noqa: E501
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+import json
+import os
+import re
+import shutil
+import tempfile
+from collections import defaultdict
+
+import manifestupdate
+import mozpack.path as mozpath
+import mozunit
+import pytest
+from mozbuild.base import MozbuildObject
+from mozbuild.frontend.reader import BuildReader
+from mozbuild.test.common import MockConfig
+from mozfile import NamedTemporaryFile
+from moztest.resolve import (
+ TEST_SUITES,
+ BuildBackendLoader,
+ TestManifestLoader,
+ TestResolver,
+)
+
+here = os.path.abspath(os.path.dirname(__file__))
+data_path = os.path.join(here, "data")
+
+
@pytest.fixture(scope="module")
def topsrcdir():
    """Return the path to the fake source tree under tests/data/srcdir."""
    return mozpath.join(data_path, "srcdir")
+
+
@pytest.fixture(scope="module")
def create_tests(topsrcdir):
    """Return a factory that builds mock 'all-tests.pkl'-style test dicts.

    The factory accepts test paths — optionally as ``(path, kwargs)`` tuples
    carrying per-test metadata — plus keyword defaults applied to every test,
    and returns a dict mapping each normalized path to a list of test dicts.
    """

    def inner(*paths, **defaults):
        tests = defaultdict(list)
        for path in paths:
            if isinstance(path, tuple):
                path, kwargs = path
            else:
                kwargs = {}

            path = mozpath.normpath(path)
            # Manifest file name defaults to "<flavor>.ini" next to the test.
            manifest_name = kwargs.get("flavor", defaults.get("flavor", "manifest"))
            # NOTE(review): defaults.pop() is evaluated eagerly (it is the
            # fallback argument to kwargs.pop), and it mutates the shared
            # `defaults` dict — so a "manifest" default only survives for the
            # first path.  The fixtures here never pass a "manifest" default,
            # but confirm before reusing this factory elsewhere.
            manifest = kwargs.pop(
                "manifest",
                defaults.pop(
                    "manifest",
                    mozpath.join(mozpath.dirname(path), manifest_name + ".ini"),
                ),
            )

            manifest_abspath = mozpath.join(topsrcdir, manifest)
            relpath = mozpath.relpath(path, mozpath.dirname(manifest))
            # Base shape of an entry in all-tests.pkl; "flavor" is a
            # placeholder that the defaults/kwargs below usually override.
            test = {
                "name": relpath,
                "path": mozpath.join(topsrcdir, path),
                "relpath": relpath,
                "file_relpath": path,
                "flavor": "faketest",
                "dir_relpath": mozpath.dirname(path),
                "here": mozpath.dirname(manifest_abspath),
                "manifest": manifest_abspath,
                "manifest_relpath": manifest,
            }
            test.update(**defaults)
            test.update(**kwargs)

            # Normalize paths to ensure that the fixture matches reality.
            for k in [
                "ancestor_manifest",
                "manifest",
                "manifest_relpath",
                "path",
                "relpath",
            ]:
                p = test.get(k)
                if p:
                    test[k] = p.replace("/", os.path.sep)

            tests[path].append(test)

        # dump tests to stdout for easier debugging on failure
        print("The 'create_tests' fixture returned:")
        print(json.dumps(dict(tests), indent=2, sort_keys=True))
        return tests

    return inner
+
+
@pytest.fixture(scope="module")
def all_tests(create_tests):
    """Mock 'all-tests.pkl' data: every test defined in the fake srcdir.

    Entries must stay in sync with the manifests under tests/data/srcdir;
    carrot/test_included.js and dragonfruit's test_xpcshell_C.js each appear
    twice because they are referenced from two manifests.
    """
    return create_tests(
        *[
            (
                "apple/test_a11y.html",
                {
                    "expected": "pass",
                    "flavor": "a11y",
                },
            ),
            (
                "banana/currant/test_xpcshell_A.js",
                {
                    "firefox-appdir": "browser",
                    "flavor": "xpcshell",
                    "head": "head_global.js head_helpers.js head_http.js",
                },
            ),
            (
                "banana/currant/test_xpcshell_B.js",
                {
                    "firefox-appdir": "browser",
                    "flavor": "xpcshell",
                    "head": "head_global.js head_helpers.js head_http.js",
                },
            ),
            (
                "carrot/test_included.js",
                {
                    "ancestor_manifest": "carrot/xpcshell-one.ini",
                    "manifest": "carrot/xpcshell-shared.ini",
                    "flavor": "xpcshell",
                    "stick": "one",
                },
            ),
            (
                "carrot/test_included.js",
                {
                    "ancestor_manifest": "carrot/xpcshell-two.ini",
                    "manifest": "carrot/xpcshell-shared.ini",
                    "flavor": "xpcshell",
                    "stick": "two",
                },
            ),
            (
                "dragonfruit/elderberry/test_xpcshell_C.js",
                {
                    "flavor": "xpcshell",
                    "generated-files": "head_update.js",
                    "head": "head_update.js",
                    "manifest": "dragonfruit/xpcshell.ini",
                    "reason": "busted",
                    "run-sequentially": "Launches application.",
                    "skip-if": "os == 'android'",
                },
            ),
            (
                "dragonfruit/elderberry/test_xpcshell_C.js",
                {
                    "flavor": "xpcshell",
                    "generated-files": "head_update.js",
                    "head": "head_update.js head2.js",
                    "manifest": "dragonfruit/elderberry/xpcshell_updater.ini",
                    "reason": "don't work",
                    "run-sequentially": "Launches application.",
                    "skip-if": "os == 'android'",
                },
            ),
            (
                "fig/grape/src/TestInstrumentationA.java",
                {
                    "flavor": "instrumentation",
                    "manifest": "fig/grape/instrumentation.ini",
                    "subsuite": "background",
                },
            ),
            (
                "fig/huckleberry/src/TestInstrumentationB.java",
                {
                    "flavor": "instrumentation",
                    "manifest": "fig/huckleberry/instrumentation.ini",
                    "subsuite": "browser",
                },
            ),
            (
                "juniper/browser_chrome.js",
                {
                    "flavor": "browser-chrome",
                    "manifest": "juniper/browser.ini",
                    "skip-if": "e10s # broken",
                },
            ),
            (
                "kiwi/browser_devtools.js",
                {
                    "flavor": "browser-chrome",
                    "manifest": "kiwi/browser.ini",
                    "subsuite": "devtools",
                    "tags": "devtools",
                },
            ),
        ]
    )
+
+
@pytest.fixture(scope="module")
def defaults(topsrcdir):
    """Mock 'test-defaults.pkl' data: manifest-level defaults keyed by path.

    Keys are either a single absolute manifest path, or a tuple of
    (ancestor manifest, included manifest) for defaults contributed by an
    including manifest.
    """

    def to_abspath(relpath):
        # test-defaults.pkl uses absolute paths with platform-specific path separators.
        # Use platform-specific separators if needed to avoid regressing on bug 1644223.
        return os.path.normpath(os.path.join(topsrcdir, relpath))

    return {
        (to_abspath("dragonfruit/elderberry/xpcshell_updater.ini")): {
            "support-files": "\ndata/**\nxpcshell_updater.ini"
        },
        (
            to_abspath("carrot/xpcshell-one.ini"),
            to_abspath("carrot/xpcshell-shared.ini"),
        ): {
            "head": "head_one.js",
        },
        (
            to_abspath("carrot/xpcshell-two.ini"),
            to_abspath("carrot/xpcshell-shared.ini"),
        ): {
            "head": "head_two.js",
        },
    }
+
+
class WPTManifestNamespace(object):
    """Stand-in object for various WPT classes."""

    def __init__(self, *args):
        # Arbitrary payload; args may contain unhashable members (e.g. sets),
        # which is why __hash__ below cannot simply hash the tuple.
        self.args = args

    def __hash__(self):
        # NOTE(review): str(self) falls back to the default object repr, which
        # embeds id(), so equal-but-distinct instances hash differently.  That
        # violates the usual hash/eq contract, but is harmless here because
        # fake_wpt_manifestupdate constructs each dict key exactly once.
        return hash(str(self))

    def __eq__(self, other):
        # Payload comparison only; assumes `other` also has an `args` attribute.
        return self.args == other.args

    def __iter__(self):
        # Yield all args as a single tuple — presumably matching how the
        # resolver unpacks real WPT manifest items; confirm against resolve.py.
        yield tuple(self.args)
+
+
def fake_wpt_manifestupdate(topsrcdir, *args, **kwargs):
    """Stand-in for WPT's ``manifestupdate.run``.

    Reads 'wpt_manifest_data.json' from *topsrcdir* and returns a dict
    mapping WPTManifestNamespace(test_type, test, {item}) keys to
    ``{"tests_path": <absolute tests root>}`` metadata.  *args*/*kwargs*
    are accepted (to match the real signature) but ignored.
    """
    with open(os.path.join(topsrcdir, "wpt_manifest_data.json")) as fh:
        data = json.load(fh)

    items = {}
    for tests_root, test_data in data.items():
        # Fix: the original rebound the `kwargs` parameter here, shadowing the
        # function signature; use a distinct local name instead.
        meta = {"tests_path": os.path.join(topsrcdir, tests_root)}

        for test_type, tests in test_data.items():
            for test in tests:
                obj = WPTManifestNamespace()
                # Tests under a "mozilla" root get the "/_mozilla" id prefix,
                # mirroring the real WPT layout.
                if "mozilla" in tests_root:
                    obj.id = "/_mozilla/" + test
                else:
                    obj.id = "/" + test

                items[WPTManifestNamespace(test_type, test, {obj})] = meta
    return items
+
+
@pytest.fixture(params=[BuildBackendLoader, TestManifestLoader])
def resolver(request, tmpdir, monkeypatch, topsrcdir, all_tests, defaults):
    """Return a TestResolver over the fake srcdir, once per loader backend.

    Parametrized so every test runs against both BuildBackendLoader (reads
    pickled build metadata from the objdir) and TestManifestLoader (reads
    the manifests directly through a BuildReader).
    """
    topobjdir = tmpdir.mkdir("objdir").strpath
    loader_cls = request.param

    if loader_cls == BuildBackendLoader:
        # Drop the mock metadata where the backend loader expects to find it.
        with open(os.path.join(topobjdir, "all-tests.pkl"), "wb") as fh:
            pickle.dump(all_tests, fh)
        with open(os.path.join(topobjdir, "test-defaults.pkl"), "wb") as fh:
            pickle.dump(defaults, fh)

        # The mock data already exists, so prevent BuildBackendLoader from regenerating
        # the build information from the whole gecko tree...
        class BuildBackendLoaderNeverOutOfDate(BuildBackendLoader):
            def backend_out_of_date(self, backend_file):
                return False

        loader_cls = BuildBackendLoaderNeverOutOfDate

    # Patch WPT's manifestupdate.run to return tests based on the contents of
    # 'data/srcdir/wpt_manifest_data.json'.
    monkeypatch.setattr(manifestupdate, "run", fake_wpt_manifestupdate)

    resolver = TestResolver(
        topsrcdir, None, None, topobjdir=topobjdir, loader_cls=loader_cls
    )
    # Presumably marks puppeteer tests as already loaded so the resolver skips
    # that discovery step — confirm against moztest.resolve.
    resolver._puppeteer_loaded = True

    if loader_cls == TestManifestLoader:
        config = MockConfig(topsrcdir)
        resolver.load_tests.reader = BuildReader(config)
    return resolver
+
+
def test_load(resolver):
    """Sanity-check the counts produced by the configured test loader."""
    assert len(resolver.tests_by_path) == 9
    assert len(resolver.tests_by_flavor["mochitest-plain"]) == 0
    assert len(resolver.tests_by_flavor["xpcshell"]) == 4
    assert len(resolver.tests_by_flavor["web-platform-tests"]) == 0
    assert len(resolver.tests_by_manifest) == 9

    # WPT tests are registered separately, on demand.
    resolver.add_wpt_manifest_data()
    assert len(resolver.tests_by_path) == 11
    assert len(resolver.tests_by_flavor["web-platform-tests"]) == 2
    assert len(resolver.tests_by_manifest) == 11
    for wpt_manifest in ("/html", "/_mozilla/html"):
        assert wpt_manifest in resolver.tests_by_manifest
+
+
def test_resolve_all(resolver):
    """Resolving with no filters yields every known test."""
    everything = list(resolver._resolve())
    assert len(everything) == 13
+
+
def test_resolve_filter_flavor(resolver):
    """Filtering by flavor yields only tests of that flavor."""
    xpcshell_tests = list(resolver._resolve(flavor="xpcshell"))
    assert len(xpcshell_tests) == 6
+
+
def test_resolve_by_dir(resolver):
    """A directory path resolves to the tests it contains."""
    found = list(resolver._resolve(paths=["banana/currant"]))
    assert len(found) == 2
+
+
def test_resolve_under_path(resolver):
    """under_path restricts results to tests below the given directory."""
    # With and without a flavor filter, 'banana' contains the same two tests.
    for extra_filters in ({}, {"flavor": "xpcshell"}):
        found = list(resolver._resolve(under_path="banana", **extra_filters))
        assert len(found) == 2
+
+
def test_resolve_multiple_paths(resolver):
    """Several paths may be resolved in a single call."""
    found = list(resolver.resolve_tests(paths=["banana", "dragonfruit"]))
    assert len(found) == 4
+
+
def test_resolve_support_files(resolver):
    """support-files defaults apply only to tests from the defining manifest."""
    expected = "\ndata/**\nxpcshell_updater.ini"
    found = list(resolver.resolve_tests(paths=["dragonfruit"]))
    assert len(found) == 2

    for test in found:
        if test["manifest"].endswith("xpcshell_updater.ini"):
            # Defined in xpcshell_updater.ini's [DEFAULT] section.
            assert test["support-files"] == expected
        else:
            # The sibling manifest must not inherit it.
            assert "support-files" not in test
+
+
def test_resolve_path_prefix(resolver):
    """Tests can be resolved by directory prefix or by manifest path."""
    found = list(resolver._resolve(paths=["juniper"]))
    assert len(found) == 1

    # A manifest path relative to topsrcdir works...
    found = list(resolver._resolve(paths=["apple/a11y.ini"]))
    assert len(found) == 1
    assert found[0]["name"] == "test_a11y.html"

    # ...and so does the same manifest given as an absolute path.
    absolute = os.path.join(resolver.topsrcdir, "apple/a11y.ini")
    found = list(resolver._resolve(paths=[absolute]))
    assert len(found) == 1
    assert found[0]["name"] == "test_a11y.html"
+
+
def test_cwd_children_only(resolver):
    """If cwd is defined, only resolve tests under the specified cwd."""
    # From inside 'banana', asking for 'currant' should pick up every test
    # under 'banana/currant'.
    found = list(
        resolver.resolve_tests(
            paths=["currant"], cwd=os.path.join(resolver.topsrcdir, "banana")
        )
    )
    assert len(found) == 2

    # Resolved tests are rewritten to their objdir locations.
    expected_here = mozpath.join(resolver.topobjdir, "_tests/xpcshell/banana/currant")
    for test in found:
        assert test["here"] == expected_here
+
+
def test_various_cwd(resolver):
    """Test various cwd conditions are all equal."""
    baseline = list(resolver.resolve_tests(paths=["banana"]))
    # Root, topsrcdir and topobjdir should all behave like "no cwd".
    for cwd in ("/", resolver.topsrcdir, resolver.topobjdir):
        resolved = list(resolver.resolve_tests(paths=["banana"], cwd=cwd))
        assert resolved == baseline
+
+
def test_subsuites(resolver):
    """Test filtering by subsuite."""
    found = list(resolver.resolve_tests(paths=["fig"]))
    assert len(found) == 2

    # Each instrumentation subsuite matches exactly one test.
    for subsuite, expected_name in (
        ("browser", "src/TestInstrumentationB.java"),
        ("background", "src/TestInstrumentationA.java"),
    ):
        found = list(resolver.resolve_tests(paths=["fig"], subsuite=subsuite))
        assert len(found) == 1
        assert found[0]["name"] == expected_name

    # The special value "undefined" resolves tests *without* a subsuite.
    found = list(resolver.resolve_tests(flavor="browser-chrome", subsuite="undefined"))
    assert len(found) == 1
    assert found[0]["name"] == "browser_chrome.js"
+
+
def test_wildcard_patterns(resolver):
    """Test matching paths by wildcard."""
    found = list(resolver.resolve_tests(paths=["fig/**"]))
    assert len(found) == 2
    assert all(test["file_relpath"].startswith("fig") for test in found)

    # Multiple patterns combine as a union.
    found = list(resolver.resolve_tests(paths=["**/**.js", "apple/**"]))
    assert len(found) == 9
    for test in found:
        relpath = test["file_relpath"]
        assert relpath.startswith("apple") or relpath.endswith(".js")
+
+
def test_resolve_metadata(resolver):
    """Test finding metadata from outgoing files."""
    # A suite-only query yields the suite but no individual tests.
    suites, tests = resolver.resolve_metadata(["bc"])
    assert suites == {"mochitest-browser-chrome"}
    assert tests == []

    # A mixed query yields both suites and the tests under the path.
    suites, tests = resolver.resolve_metadata(
        ["mochitest-a11y", "/browser", "xpcshell"]
    )
    assert suites == {"mochitest-a11y", "xpcshell"}
    resolved_paths = sorted(test["file_relpath"] for test in tests)
    assert resolved_paths == [
        "juniper/browser_chrome.js",
        "kiwi/browser_devtools.js",
    ]
+
+
def test_ancestor_manifest_defaults(resolver, topsrcdir, defaults):
    """Test that defaults from ancestor manifests are found."""
    found = list(resolver._resolve(paths=["carrot/test_included.js"]))
    assert len(found) == 2

    one_ini = os.path.join("carrot", "xpcshell-one.ini")
    two_ini = os.path.join("carrot", "xpcshell-two.ini")
    shared_ini = os.path.join("carrot", "xpcshell-shared.ini")

    # The same test is listed once per including ("ancestor") manifest;
    # order is not guaranteed, so sort the pair by ancestor manifest.
    test_one, test_two = sorted(found, key=lambda t: t["ancestor_manifest"])

    assert test_one["ancestor_manifest"] == one_ini
    assert test_one["manifest_relpath"] == shared_ini
    assert test_one["head"] == "head_one.js"
    assert test_one["stick"] == "one"

    assert test_two["ancestor_manifest"] == two_ini
    assert test_two["manifest_relpath"] == shared_ini
    assert test_two["head"] == "head_two.js"
    assert test_two["stick"] == "two"
+
+
def test_task_regexes():
    """Test the task_regexes defined in TEST_SUITES."""
    # A sample of real task labels, deliberately including near-miss labels
    # (e.g. -browser vs -browser-chrome, web-platform-test vs -tests) that the
    # regexes must NOT match.
    task_labels = [
        "test-linux64/opt-browser-screenshots-1",
        "test-linux64/opt-browser-screenshots-e10s-1",
        "test-linux64/opt-marionette",
        "test-linux64/opt-mochitest-plain",
        "test-linux64/debug-mochitest-plain-e10s",
        "test-linux64/opt-mochitest-a11y",
        "test-linux64/opt-mochitest-browser",
        "test-linux64/opt-mochitest-browser-chrome",
        "test-linux64/opt-mochitest-browser-chrome-e10s",
        "test-linux64/opt-mochitest-browser-chrome-e10s-11",
        "test-linux64/opt-mochitest-chrome",
        "test-linux64/opt-mochitest-devtools",
        "test-linux64/opt-mochitest-devtools-chrome",
        "test-linux64/opt-mochitest-gpu",
        "test-linux64/opt-mochitest-gpu-e10s",
        "test-linux64/opt-mochitest-media-e10s-1",
        "test-linux64/opt-mochitest-media-e10s-11",
        "test-linux64/opt-mochitest-screenshots-1",
        "test-linux64/opt-reftest",
        "test-linux64/opt-geckoview-reftest",
        "test-linux64/debug-reftest-e10s-1",
        "test-linux64/debug-reftest-e10s-11",
        "test-linux64/opt-robocop",
        "test-linux64/opt-robocop-1",
        "test-linux64/opt-robocop-e10s",
        "test-linux64/opt-robocop-e10s-1",
        "test-linux64/opt-robocop-e10s-11",
        "test-linux64/opt-web-platform-tests-e10s-1",
        "test-linux64/opt-web-platform-tests-reftest-e10s-1",
        "test-linux64/opt-web-platform-tests-wdspec-e10s-1",
        "test-linux64/opt-web-platform-tests-1",
        "test-linux64/opt-web-platform-test-e10s-1",
        "test-linux64/opt-xpcshell",
        "test-linux64/opt-xpcshell-1",
        "test-linux64/opt-xpcshell-2",
    ]

    # Maps suite name -> the exact subset of task_labels its regexes should
    # select.  Note the expected lists are subsets: e.g. "xpcshell" lists -1
    # but not -2, pinning the precise chunk-suffix behavior of each regex.
    test_cases = {
        "mochitest-browser-chrome": [
            "test-linux64/opt-mochitest-browser-chrome",
            "test-linux64/opt-mochitest-browser-chrome-e10s",
        ],
        "mochitest-chrome": [
            "test-linux64/opt-mochitest-chrome",
        ],
        "mochitest-devtools-chrome": [
            "test-linux64/opt-mochitest-devtools-chrome",
        ],
        "mochitest-media": [
            "test-linux64/opt-mochitest-media-e10s-1",
        ],
        "mochitest-plain": [
            "test-linux64/opt-mochitest-plain",
            "test-linux64/debug-mochitest-plain-e10s",
        ],
        "mochitest-plain-gpu": [
            "test-linux64/opt-mochitest-gpu",
            "test-linux64/opt-mochitest-gpu-e10s",
        ],
        "mochitest-browser-chrome-screenshots": [
            "test-linux64/opt-browser-screenshots-1",
            "test-linux64/opt-browser-screenshots-e10s-1",
        ],
        "reftest": [
            "test-linux64/opt-reftest",
            "test-linux64/opt-geckoview-reftest",
            "test-linux64/debug-reftest-e10s-1",
        ],
        "robocop": [
            "test-linux64/opt-robocop",
            "test-linux64/opt-robocop-1",
            "test-linux64/opt-robocop-e10s",
            "test-linux64/opt-robocop-e10s-1",
        ],
        "web-platform-tests": [
            "test-linux64/opt-web-platform-tests-e10s-1",
            "test-linux64/opt-web-platform-tests-1",
        ],
        "web-platform-tests-reftest": [
            "test-linux64/opt-web-platform-tests-reftest-e10s-1",
        ],
        "web-platform-tests-wdspec": [
            "test-linux64/opt-web-platform-tests-wdspec-e10s-1",
        ],
        "xpcshell": [
            "test-linux64/opt-xpcshell",
            "test-linux64/opt-xpcshell-1",
        ],
    }

    regexes = []

    def match_task(task):
        # Closes over `regexes`, which is rebound per suite in the loop below.
        return any(re.search(pattern, task) for pattern in regexes)

    for suite, expected in sorted(test_cases.items()):
        # Print the suite name so a failure identifies the offending regexes.
        print(suite)
        regexes = TEST_SUITES[suite]["task_regex"]
        assert set(filter(match_task, task_labels)) == set(expected)
+
+
# Support running this file directly; mozunit discovers and runs the tests.
if __name__ == "__main__":
    mozunit.main()