summaryrefslogtreecommitdiffstats
path: root/testing/mochitest/tests/python
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 09:22:09 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 09:22:09 +0000
commit43a97878ce14b72f0981164f87f2e35e14151312 (patch)
tree620249daf56c0258faa40cbdcf9cfba06de2a846 /testing/mochitest/tests/python
parentInitial commit. (diff)
downloadfirefox-upstream.tar.xz
firefox-upstream.zip
Adding upstream version 110.0.1.upstream/110.0.1upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'testing/mochitest/tests/python')
-rw-r--r--testing/mochitest/tests/python/conftest.py157
-rw-r--r--testing/mochitest/tests/python/files/browser-args.ini7
-rw-r--r--testing/mochitest/tests/python/files/browser_assertion.js7
-rw-r--r--testing/mochitest/tests/python/files/browser_crash.js7
-rw-r--r--testing/mochitest/tests/python/files/browser_fail.js3
-rw-r--r--testing/mochitest/tests/python/files/browser_leak.js4
-rw-r--r--testing/mochitest/tests/python/files/browser_pass.js3
-rw-r--r--testing/mochitest/tests/python/files/mochitest-args.ini7
-rw-r--r--testing/mochitest/tests/python/files/mochitest-dupemanifest-1.ini1
-rw-r--r--testing/mochitest/tests/python/files/mochitest-dupemanifest-2.ini1
-rw-r--r--testing/mochitest/tests/python/files/test_assertion.html28
-rw-r--r--testing/mochitest/tests/python/files/test_crash.html28
-rw-r--r--testing/mochitest/tests/python/files/test_fail.html24
-rw-r--r--testing/mochitest/tests/python/files/test_leak.html25
-rw-r--r--testing/mochitest/tests/python/files/test_pass.html24
-rw-r--r--testing/mochitest/tests/python/python.ini9
-rw-r--r--testing/mochitest/tests/python/test_build_profile.py82
-rw-r--r--testing/mochitest/tests/python/test_create_directories.py221
-rw-r--r--testing/mochitest/tests/python/test_get_active_tests.py269
-rw-r--r--testing/mochitest/tests/python/test_message_logger.py191
-rw-r--r--testing/mochitest/tests/python/test_mochitest_integration.py314
21 files changed, 1412 insertions, 0 deletions
diff --git a/testing/mochitest/tests/python/conftest.py b/testing/mochitest/tests/python/conftest.py
new file mode 100644
index 0000000000..e418dfb816
--- /dev/null
+++ b/testing/mochitest/tests/python/conftest.py
@@ -0,0 +1,157 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import os
+from argparse import Namespace
+
+import mozinfo
+import pytest
+import six
+from manifestparser import TestManifest, expression
+from moztest.selftest.fixtures import binary_fixture, setup_test_harness # noqa
+
+here = os.path.abspath(os.path.dirname(__file__))
+setup_args = [os.path.join(here, "files"), "mochitest", "testing/mochitest"]
+
+
@pytest.fixture
def create_manifest(tmpdir, build_obj):
    """Return a factory that writes manifest text to disk and parses it.

    The factory takes the manifest contents as a string and an optional
    file name, writes the file under ``tmpdir`` (creating parent dirs),
    and returns a non-strict ``TestManifest`` rooted at ``tmpdir``.
    """

    def inner(string, name="manifest.ini"):
        manifest_file = tmpdir.join(name)
        manifest_file.write(string, ensure=True)
        # pylint --py3k: W1612
        manifest_path = six.text_type(manifest_file)
        return TestManifest(
            manifests=(manifest_path,), strict=False, rootdir=tmpdir.strpath
        )

    return inner
+
+
@pytest.fixture(scope="function")
def parser(request):
    """Build a fresh MochitestArgumentParser for the requesting module.

    The app flavor comes from an optional module-level ``APP`` constant,
    defaulting to "generic"; skips if mochitest_options is unavailable.
    """
    mochitest_options = pytest.importorskip("mochitest_options")
    app_flavor = getattr(request.module, "APP", "generic")
    return mochitest_options.MochitestArgumentParser(app=app_flavor)
+
+
@pytest.fixture(scope="function")
def runtests(setup_test_harness, binary, parser, request):
    """Creates an easy to use entry point into the mochitest harness.

    :returns: A function with the signature `*tests, **opts`. Each test is a file name
        (relative to the `files` dir). At least one is required. The opts are
        used to override the default mochitest options, they are optional.
    """
    # `flavor` / `runFailures` may be parametrized by the requesting test
    # module; default to a plain run with no expected failures.
    flavor = "plain"
    if "flavor" in request.fixturenames:
        flavor = request.getfixturevalue("flavor")

    runFailures = ""
    if "runFailures" in request.fixturenames:
        runFailures = request.getfixturevalue("runFailures")

    setup_test_harness(*setup_args, flavor=flavor)

    # Import lazily: the module only resolves after the harness setup above.
    runtests = pytest.importorskip("runtests")

    mochitest_root = runtests.SCRIPT_DIR
    if flavor == "plain":
        test_root = os.path.join(mochitest_root, "tests", "selftests")
        manifest_name = "mochitest.ini"
    elif flavor == "browser-chrome":
        test_root = os.path.join(mochitest_root, "browser", "tests", "selftests")
        manifest_name = "browser.ini"
    else:
        raise Exception(f"Invalid flavor {flavor}!")

    # pylint --py3k: W1648
    # Raw structured log output is captured in memory so the inner runner
    # can hand parsed log lines back to the caller.
    buf = six.StringIO()
    options = vars(parser.parse_args([]))
    options.update(
        {
            "app": binary,
            "flavor": flavor,
            "runFailures": runFailures,
            "keep_open": False,
            "log_raw": [buf],
        }
    )

    if runFailures == "selftest":
        # Selftest mode treats crashes/timeouts as passes so failure paths
        # can themselves be exercised.
        options["crashAsPass"] = True
        options["timeoutAsPass"] = True
        runtests.mozinfo.update({"selftest": True})

    if not os.path.isdir(runtests.build_obj.bindir):
        # Running from a test package rather than an objdir build: point the
        # harness at the packaged certs/bin directories instead.
        package_root = os.path.dirname(mochitest_root)
        options.update(
            {
                "certPath": os.path.join(package_root, "certs"),
                "utilityPath": os.path.join(package_root, "bin"),
            }
        )
        options["extraProfileFiles"].append(
            os.path.join(package_root, "bin", "plugins")
        )

    # Allow test modules to declare module-wide option overrides.
    options.update(getattr(request.module, "OPTIONS", {}))

    def normalize(test):
        # Minimal test-dict shape the mochitest harness expects.
        return {
            "name": test,
            "relpath": test,
            "path": os.path.join(test_root, test),
            # add a dummy manifest file because mochitest expects it
            "manifest": os.path.join(test_root, manifest_name),
            "manifest_relpath": manifest_name,
            "skip-if": runFailures,
        }

    def inner(*tests, **opts):
        assert len(tests) > 0

        # Inject a TestManifest in the runtests option if one
        # has not been already included by the caller.
        if not isinstance(options["manifestFile"], TestManifest):
            manifest = TestManifest()
            options["manifestFile"] = manifest
            # pylint --py3k: W1636
            manifest.tests.extend(list(map(normalize, tests)))
        options.update(opts)

        result = runtests.run_test_harness(parser, Namespace(**options))
        # Each raw log line is a JSON object; join them into one JSON array.
        out = json.loads("[" + ",".join(buf.getvalue().splitlines()) + "]")
        buf.close()
        return result, out

    return inner
+
+
@pytest.fixture
def build_obj(setup_test_harness):
    """Expose the mochitest build object, skipping if the harness is absent."""
    setup_test_harness(*setup_args)
    options_module = pytest.importorskip("mochitest_options")
    return options_module.build_obj
+
+
@pytest.fixture(autouse=True)
def skip_using_mozinfo(request, setup_test_harness):
    """Gives tests the ability to skip based on values from mozinfo.

    Example:
        @pytest.mark.skip_mozinfo("!e10s || os == 'linux'")
        def test_foo():
            pass
    """
    setup_test_harness(*setup_args)
    runtests = pytest.importorskip("runtests")
    runtests.update_mozinfo()

    marker = request.node.get_closest_marker("skip_mozinfo")
    if marker is None:
        return

    condition = marker.args[0]
    if expression.parse(condition, **mozinfo.info):
        pytest.skip("skipped due to mozinfo match: \n{}".format(condition))
diff --git a/testing/mochitest/tests/python/files/browser-args.ini b/testing/mochitest/tests/python/files/browser-args.ini
new file mode 100644
index 0000000000..1425512c1c
--- /dev/null
+++ b/testing/mochitest/tests/python/files/browser-args.ini
@@ -0,0 +1,7 @@
+[DEFAULT]
+args =
+ --headless
+ --window-size=800,600
+ --new-tab http://example.org
+
+[browser_pass.js]
diff --git a/testing/mochitest/tests/python/files/browser_assertion.js b/testing/mochitest/tests/python/files/browser_assertion.js
new file mode 100644
index 0000000000..243703206e
--- /dev/null
+++ b/testing/mochitest/tests/python/files/browser_assertion.js
@@ -0,0 +1,7 @@
// Fixture for the harness selftests: deliberately fires an NS_ASSERTION
// through nsIDebug2 so the assertion-detection path can be exercised;
// the test's own check still passes.
function test() {
  const Cc = SpecialPowers.Cc;
  const Ci = SpecialPowers.Ci;
  let debug = Cc["@mozilla.org/xpcom/debug;1"].getService(Ci.nsIDebug2);
  debug.assertion("failed assertion check", "false", "test_assertion.js", 15);
  ok(true, "Should pass");
}
diff --git a/testing/mochitest/tests/python/files/browser_crash.js b/testing/mochitest/tests/python/files/browser_crash.js
new file mode 100644
index 0000000000..54e431ed7f
--- /dev/null
+++ b/testing/mochitest/tests/python/files/browser_crash.js
@@ -0,0 +1,7 @@
// Fixture for the harness selftests: aborts the process via nsIDebug2 to
// simulate a crash; the ok() below is not expected to be reached.
function test() {
  const Cc = SpecialPowers.Cc;
  const Ci = SpecialPowers.Ci;
  let debug = Cc["@mozilla.org/xpcom/debug;1"].getService(Ci.nsIDebug2);
  debug.abort("test_crash.js", 5);
  ok(false, "Should pass");
}
diff --git a/testing/mochitest/tests/python/files/browser_fail.js b/testing/mochitest/tests/python/files/browser_fail.js
new file mode 100644
index 0000000000..abcb6dae60
--- /dev/null
+++ b/testing/mochitest/tests/python/files/browser_fail.js
@@ -0,0 +1,3 @@
// Minimal always-failing browser-chrome fixture for the harness selftests.
function test() {
  ok(false, "Test is ok");
}
diff --git a/testing/mochitest/tests/python/files/browser_leak.js b/testing/mochitest/tests/python/files/browser_leak.js
new file mode 100644
index 0000000000..ded8dd8b56
--- /dev/null
+++ b/testing/mochitest/tests/python/files/browser_leak.js
@@ -0,0 +1,4 @@
// Fixture that intentionally leaks so leak-detection reporting can be tested.
function test() {
  SpecialPowers.Cu.intentionallyLeak();
  ok(true, "Test is ok");
}
diff --git a/testing/mochitest/tests/python/files/browser_pass.js b/testing/mochitest/tests/python/files/browser_pass.js
new file mode 100644
index 0000000000..5e5c567f13
--- /dev/null
+++ b/testing/mochitest/tests/python/files/browser_pass.js
@@ -0,0 +1,3 @@
// Minimal always-passing browser-chrome fixture for the harness selftests.
function test() {
  ok(true, "Test is OK");
}
diff --git a/testing/mochitest/tests/python/files/mochitest-args.ini b/testing/mochitest/tests/python/files/mochitest-args.ini
new file mode 100644
index 0000000000..9c3d44d05f
--- /dev/null
+++ b/testing/mochitest/tests/python/files/mochitest-args.ini
@@ -0,0 +1,7 @@
+[DEFAULT]
+args =
+ --headless
+ --window-size=800,600
+ --new-tab http://example.org
+
+[test_pass.html]
diff --git a/testing/mochitest/tests/python/files/mochitest-dupemanifest-1.ini b/testing/mochitest/tests/python/files/mochitest-dupemanifest-1.ini
new file mode 100644
index 0000000000..35d66d765c
--- /dev/null
+++ b/testing/mochitest/tests/python/files/mochitest-dupemanifest-1.ini
@@ -0,0 +1 @@
+[test_pass.html]
diff --git a/testing/mochitest/tests/python/files/mochitest-dupemanifest-2.ini b/testing/mochitest/tests/python/files/mochitest-dupemanifest-2.ini
new file mode 100644
index 0000000000..35d66d765c
--- /dev/null
+++ b/testing/mochitest/tests/python/files/mochitest-dupemanifest-2.ini
@@ -0,0 +1 @@
+[test_pass.html]
diff --git a/testing/mochitest/tests/python/files/test_assertion.html b/testing/mochitest/tests/python/files/test_assertion.html
new file mode 100644
index 0000000000..7740064107
--- /dev/null
+++ b/testing/mochitest/tests/python/files/test_assertion.html
@@ -0,0 +1,28 @@
+<!DOCTYPE HTML>
+<html>
+<!--
+https://bugzilla.mozilla.org/show_bug.cgi?id=1343659
+-->
+<head>
+ <meta charset="utf-8">
+ <title>Test Assertion</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+ <script type="application/javascript">
+ const Cc = SpecialPowers.Cc;
+ const Ci = SpecialPowers.Ci;
+ let debug = Cc["@mozilla.org/xpcom/debug;1"].getService(Ci.nsIDebug2);
+ debug.assertion("failed assertion check", "false", "test_assertion.html", 15);
+ ok(true, "Should pass");
+ </script>
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1343659">Mozilla Bug 1343659</a>
+<p id="display"></p>
+<div id="content" style="display: none">
+
+</div>
+<pre id="test">
+</pre>
+</body>
+</html>
diff --git a/testing/mochitest/tests/python/files/test_crash.html b/testing/mochitest/tests/python/files/test_crash.html
new file mode 100644
index 0000000000..09ea2faf01
--- /dev/null
+++ b/testing/mochitest/tests/python/files/test_crash.html
@@ -0,0 +1,28 @@
+<!DOCTYPE HTML>
+<html>
+<!--
+https://bugzilla.mozilla.org/show_bug.cgi?id=1343659
+-->
+<head>
+ <meta charset="utf-8">
+ <title>Test Crash</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+ <script type="application/javascript">
+ const Cc = SpecialPowers.Cc;
+ const Ci = SpecialPowers.Ci;
+ let debug = Cc["@mozilla.org/xpcom/debug;1"].getService(Ci.nsIDebug2);
+ debug.abort("test_crash.html", 15);
+ ok(true, "Should pass");
+ </script>
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1343659">Mozilla Bug 1343659</a>
+<p id="display"></p>
+<div id="content" style="display: none">
+
+</div>
+<pre id="test">
+</pre>
+</body>
+</html>
diff --git a/testing/mochitest/tests/python/files/test_fail.html b/testing/mochitest/tests/python/files/test_fail.html
new file mode 100644
index 0000000000..3d0555a5a0
--- /dev/null
+++ b/testing/mochitest/tests/python/files/test_fail.html
@@ -0,0 +1,24 @@
+<!DOCTYPE HTML>
+<html>
+<!--
+https://bugzilla.mozilla.org/show_bug.cgi?id=1343659
+-->
+<head>
+ <meta charset="utf-8">
+ <title>Test Fail</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+ <script type="application/javascript">
+ ok(false, "Test is ok");
+ </script>
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1343659">Mozilla Bug 1343659</a>
+<p id="display"></p>
+<div id="content" style="display: none">
+
+</div>
+<pre id="test">
+</pre>
+</body>
+</html>
diff --git a/testing/mochitest/tests/python/files/test_leak.html b/testing/mochitest/tests/python/files/test_leak.html
new file mode 100644
index 0000000000..4609e368de
--- /dev/null
+++ b/testing/mochitest/tests/python/files/test_leak.html
@@ -0,0 +1,25 @@
+<!DOCTYPE HTML>
+<html>
+<!--
+https://bugzilla.mozilla.org/show_bug.cgi?id=1521223
+-->
+<head>
+ <meta charset="utf-8">
+ <title>Test Pass</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+ <script type="application/javascript">
+ SpecialPowers.Cu.intentionallyLeak();
+ ok(true, "Test is ok");
+ </script>
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1521223">Mozilla Bug 1521223</a>
+<p id="display"></p>
+<div id="content" style="display: none">
+
+</div>
+<pre id="test">
+</pre>
+</body>
+</html>
diff --git a/testing/mochitest/tests/python/files/test_pass.html b/testing/mochitest/tests/python/files/test_pass.html
new file mode 100644
index 0000000000..9dacafaaa3
--- /dev/null
+++ b/testing/mochitest/tests/python/files/test_pass.html
@@ -0,0 +1,24 @@
+<!DOCTYPE HTML>
+<html>
+<!--
+https://bugzilla.mozilla.org/show_bug.cgi?id=1343659
+-->
+<head>
+ <meta charset="utf-8">
+ <title>Test Pass</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+ <script type="application/javascript">
+ ok(true, "Test is ok");
+ </script>
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1343659">Mozilla Bug 1343659</a>
+<p id="display"></p>
+<div id="content" style="display: none">
+
+</div>
+<pre id="test">
+</pre>
+</body>
+</html>
diff --git a/testing/mochitest/tests/python/python.ini b/testing/mochitest/tests/python/python.ini
new file mode 100644
index 0000000000..ecdf4f39b6
--- /dev/null
+++ b/testing/mochitest/tests/python/python.ini
@@ -0,0 +1,9 @@
+[DEFAULT]
+subsuite = mochitest
+
+[test_mochitest_integration.py]
+sequential = true
+[test_build_profile.py]
+[test_get_active_tests.py]
+[test_message_logger.py]
+[test_create_directories.py]
diff --git a/testing/mochitest/tests/python/test_build_profile.py b/testing/mochitest/tests/python/test_build_profile.py
new file mode 100644
index 0000000000..438efb4a04
--- /dev/null
+++ b/testing/mochitest/tests/python/test_build_profile.py
@@ -0,0 +1,82 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import os
+from argparse import Namespace
+
+import mozunit
+import pytest
+from conftest import setup_args
+from mozbuild.base import MozbuildObject
+from mozprofile import Profile
+from mozprofile.prefs import Preferences
+from six import string_types
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+
@pytest.fixture
def build_profile(monkeypatch, setup_test_harness, parser):
    """Return a factory that builds a mochitest profile.

    Certificate DB population is stubbed out so no real NSS tooling is
    required. The factory accepts option overrides as keyword arguments
    and returns ``(harness, buildProfile_result)``.
    """
    setup_test_harness(*setup_args)
    runtests = pytest.importorskip("runtests")
    harness = runtests.MochitestDesktop("plain", {"log_tbpl": "-"})
    monkeypatch.setattr(harness, "fillCertificateDB", lambda *args, **kwargs: None)

    defaults = vars(parser.parse_args([]))

    def inner(**overrides):
        merged = dict(defaults)
        merged.update(overrides)
        return harness, harness.buildProfile(Namespace(**merged))

    return inner
+
+
@pytest.fixture
def profile_data_dir():
    """Locate the in-tree base profile data directory (testing/profiles)."""
    topsrcdir = MozbuildObject.from_environment(cwd=here).topsrcdir
    return os.path.join(topsrcdir, "testing", "profiles")
+
+
def test_common_prefs_are_all_set(build_profile, profile_data_dir):
    """Every pref from the base profiles must end up in the built profile.

    We set e10s=False here because MochitestDesktop.buildProfile overwrites
    the value defined in the base profile.
    TODO stop setting browser.tabs.remote.autostart in the base profile
    """
    # The second element (buildProfile's return) is not needed here.
    md, _ = build_profile(e10s=False)

    with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh:
        base_profiles = json.load(fh)["mochitest"]

    # build the expected prefs
    expected_prefs = {}
    for profile in base_profiles:
        for name in Profile.preference_file_names:
            path = os.path.join(profile_data_dir, profile, name)
            if os.path.isfile(path):
                expected_prefs.update(Preferences.read_prefs(path))

    # read the actual prefs
    actual_prefs = {}
    for name in Profile.preference_file_names:
        path = os.path.join(md.profile.profile, name)
        if os.path.isfile(path):
            actual_prefs.update(Preferences.read_prefs(path))

    # keep this in sync with the values in MochitestDesktop.merge_base_profiles
    interpolation = {
        "server": "127.0.0.1:8888",
    }
    for k, v in expected_prefs.items():
        if isinstance(v, string_types):
            # base profile prefs may contain {server}-style placeholders
            v = v.format(**interpolation)

        assert k in actual_prefs
        # The original `assert k and actual_prefs[k] == v` carried a
        # redundant truthiness guard on the (always non-empty) pref name,
        # which obscured the real comparison; assert the value directly.
        assert actual_prefs[k] == v
+
+
if __name__ == "__main__":
    # Support running this file directly, outside of pytest discovery.
    mozunit.main()
diff --git a/testing/mochitest/tests/python/test_create_directories.py b/testing/mochitest/tests/python/test_create_directories.py
new file mode 100644
index 0000000000..ffbed625a5
--- /dev/null
+++ b/testing/mochitest/tests/python/test_create_directories.py
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import unittest.mock as mock
+from argparse import Namespace
+from collections import defaultdict
+from textwrap import dedent
+
+import mozunit
+import pytest
+import six
+from conftest import setup_args
+from manifestparser import TestManifest
+
+
+# Directly running runTests() is likely not working nor a good idea
+# So at least we try to minimize with just:
+# - getActiveTests()
+# - create manifests list
+# - parseAndCreateTestsDirs()
+#
+# Hopefully, breaking the runTests() calls to parseAndCreateTestsDirs() will
+# anyway trigger other tests failures so it would be spotted, and we at least
+# ensure some coverage of handling the manifest content, creation of the
+# directories and cleanup
@pytest.fixture
def prepareRunTests(setup_test_harness, parser):
    """Set up the mochitest harness and return a getActiveTests() runner.

    The returned function accepts option overrides, resolves the active
    tests, and calls parseAndCreateTestsDirs() for each involved manifest
    (the slice of runTests() exercised by this module).
    """
    setup_test_harness(*setup_args)
    runtests = pytest.importorskip("runtests")
    md = runtests.MochitestDesktop("plain", {"log_tbpl": "-"})

    options = vars(parser.parse_args([]))

    def inner(**kwargs):
        opts = options.copy()
        opts.update(kwargs)

        # getActiveTests() resolves paths relative to testRootAbs, so derive
        # it from whichever manifest representation the caller supplied.
        manifest = opts.get("manifestFile")
        if isinstance(manifest, six.string_types):
            md.testRootAbs = os.path.dirname(manifest)
        elif isinstance(manifest, TestManifest):
            md.testRootAbs = manifest.rootdir

        # Reset cached state so repeated calls re-resolve from scratch.
        md._active_tests = None
        md.prefs_by_manifest = defaultdict(set)
        tests = md.getActiveTests(Namespace(**opts))
        manifests = set(t["manifest"] for t in tests)
        for m in sorted(manifests):
            md.parseAndCreateTestsDirs(m)
        return md

    return inner
+
+
@pytest.fixture
def create_manifest(tmpdir, build_obj):
    """Factory fixture: write manifest text under tmpdir and parse it."""

    def inner(string, name="manifest.ini"):
        target = tmpdir.join(name)
        target.write(string, ensure=True)
        # pylint --py3k: W1612
        return TestManifest(
            manifests=(six.text_type(target),), strict=False, rootdir=tmpdir.strpath
        )

    return inner
+
+
def create_manifest_empty(create_manifest):
    """Build option overrides for a manifest with no test-directories."""
    content = dedent(
        """
        [DEFAULT]
        [files/test_pass.html]
        [files/test_fail.html]
        """
    )
    return {"runByManifest": True, "manifestFile": create_manifest(content)}
+
+
def create_manifest_one(create_manifest):
    """Build option overrides for a manifest with one test-directories entry."""
    content = dedent(
        """
        [DEFAULT]
        test-directories =
          .snap_firefox_current_real
        [files/test_pass.html]
        [files/test_fail.html]
        """
    )
    return {"runByManifest": True, "manifestFile": create_manifest(content)}
+
+
def create_manifest_mult(create_manifest):
    """Build option overrides for a manifest with two test-directories entries."""
    content = dedent(
        """
        [DEFAULT]
        test-directories =
          .snap_firefox_current_real
          .snap_firefox_current_real2
        [files/test_pass.html]
        [files/test_fail.html]
        """
    )
    return {"runByManifest": True, "manifestFile": create_manifest(content)}
+
+
def test_no_entry(prepareRunTests, create_manifest):
    """A manifest with no test-directories entries must create nothing."""
    run_options = create_manifest_empty(create_manifest)
    with mock.patch("os.makedirs") as makedirs_mock:
        prepareRunTests(**run_options)
    makedirs_mock.assert_not_called()
+
+
def test_one_entry(prepareRunTests, create_manifest):
    """One test-directories entry: created once, then removed on cleanup."""
    run_options = create_manifest_one(create_manifest)

    with mock.patch("os.makedirs") as makedirs_mock:
        md = prepareRunTests(**run_options)
        makedirs_mock.assert_called_once_with(".snap_firefox_current_real")

    # pidFile is empty so cleanup() does not try to check it
    cleanup_opts = mock.Mock(pidFile="")
    with mock.patch("os.path.exists") as exists_mock, mock.patch(
        "shutil.rmtree"
    ) as rmtree_mock:
        md.cleanup(cleanup_opts, False)
        exists_mock.assert_called_once_with(".snap_firefox_current_real")
        rmtree_mock.assert_called_once_with(".snap_firefox_current_real")
+
+
def test_one_entry_already_exists(prepareRunTests, create_manifest):
    """An already-existing test directory must raise FileExistsError."""
    run_options = create_manifest_one(create_manifest)
    with mock.patch(
        "os.path.exists", return_value=True
    ) as exists_mock, mock.patch("os.makedirs") as makedirs_mock:
        with pytest.raises(FileExistsError):
            prepareRunTests(**run_options)
        exists_mock.assert_called_once_with(".snap_firefox_current_real")
        makedirs_mock.assert_not_called()
+
+
def test_mult_entry(prepareRunTests, create_manifest):
    """Two test-directories entries: both created, both removed on cleanup."""
    options = create_manifest_mult(create_manifest)
    with mock.patch("os.makedirs") as mock_os_makedirs:
        md = prepareRunTests(**options)
        assert mock_os_makedirs.call_count == 2
        mock_os_makedirs.assert_has_calls(
            [
                mock.call(".snap_firefox_current_real"),
                mock.call(".snap_firefox_current_real2"),
            ]
        )

    opts = mock.Mock(pidFile="")  # so cleanup() does not check it
    with mock.patch("os.path.exists") as mock_os_path_exists, mock.patch(
        "shutil.rmtree"
    ) as mock_shutil_rmtree:
        md.cleanup(opts, False)

    assert mock_os_path_exists.call_count == 2
    mock_os_path_exists.assert_has_calls(
        [
            mock.call(".snap_firefox_current_real"),
            # cleanup() truth-tests the patched exists() return value; mock
            # records that as a __bool__ call on the returned child Mock.
            mock.call().__bool__(),
            mock.call(".snap_firefox_current_real2"),
            mock.call().__bool__(),
        ]
    )

    assert mock_os_makedirs.call_count == 2
    mock_shutil_rmtree.assert_has_calls(
        [
            mock.call(".snap_firefox_current_real"),
            mock.call(".snap_firefox_current_real2"),
        ]
    )
+
+
def test_mult_entry_one_already_exists(prepareRunTests, create_manifest):
    """If any of several test-directories already exists, abort with FileExistsError."""
    options = create_manifest_mult(create_manifest)
    # First directory already present: fail before creating anything.
    with mock.patch(
        "os.path.exists", side_effect=[True, False]
    ) as mock_os_path_exists, mock.patch("os.makedirs") as mock_os_makedirs:
        with pytest.raises(FileExistsError):
            _ = prepareRunTests(**options)
        mock_os_path_exists.assert_called_once_with(".snap_firefox_current_real")
        mock_os_makedirs.assert_not_called()

    # Second directory already present: both are checked, none is created.
    with mock.patch(
        "os.path.exists", side_effect=[False, True]
    ) as mock_os_path_exists, mock.patch("os.makedirs") as mock_os_makedirs:
        with pytest.raises(FileExistsError):
            _ = prepareRunTests(**options)
        assert mock_os_path_exists.call_count == 2
        mock_os_path_exists.assert_has_calls(
            [
                mock.call(".snap_firefox_current_real"),
                mock.call(".snap_firefox_current_real2"),
            ]
        )
        mock_os_makedirs.assert_not_called()
+
+
if __name__ == "__main__":
    # Support running this file directly, outside of pytest discovery.
    mozunit.main()
diff --git a/testing/mochitest/tests/python/test_get_active_tests.py b/testing/mochitest/tests/python/test_get_active_tests.py
new file mode 100644
index 0000000000..f91bc64a5f
--- /dev/null
+++ b/testing/mochitest/tests/python/test_get_active_tests.py
@@ -0,0 +1,269 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+from argparse import Namespace
+from collections import defaultdict
+from textwrap import dedent
+
+import mozunit
+import pytest
+import six
+from conftest import setup_args
+from manifestparser import TestManifest
+
+
@pytest.fixture
def get_active_tests(setup_test_harness, parser):
    """Set up the mochitest harness and expose getActiveTests().

    The returned function accepts option overrides and returns the
    ``(harness, active_tests)`` pair for assertions on manifest handling.
    """
    setup_test_harness(*setup_args)
    runtests = pytest.importorskip("runtests")
    md = runtests.MochitestDesktop("plain", {"log_tbpl": "-"})

    options = vars(parser.parse_args([]))

    def inner(**kwargs):
        opts = options.copy()
        opts.update(kwargs)

        # getActiveTests() resolves paths relative to testRootAbs, so derive
        # it from whichever manifest representation the caller supplied.
        manifest = opts.get("manifestFile")
        if isinstance(manifest, six.string_types):
            md.testRootAbs = os.path.dirname(manifest)
        elif isinstance(manifest, TestManifest):
            md.testRootAbs = manifest.rootdir

        # Reset cached state so repeated calls re-resolve from scratch.
        md._active_tests = None
        md.prefs_by_manifest = defaultdict(set)
        return md, md.getActiveTests(Namespace(**opts))

    return inner
+
+
@pytest.fixture
def create_manifest(tmpdir, build_obj):
    """Factory fixture: write manifest text under tmpdir and parse it."""

    def inner(string, name="manifest.ini"):
        target = tmpdir.join(name)
        target.write(string, ensure=True)
        # pylint --py3k: W1612
        return TestManifest(
            manifests=(six.text_type(target),), strict=False, rootdir=tmpdir.strpath
        )

    return inner
+
+
def test_args_validation(get_active_tests, create_manifest):
    """args= must be collected per manifest and rejected where unsupported."""
    # Test args set in a single manifest.
    manifest_relpath = "manifest.ini"
    manifest = create_manifest(
        dedent(
            """
            [DEFAULT]
            args=
              --cheese
              --foo=bar
              --foo1 bar1

            [files/test_pass.html]
            [files/test_fail.html]
            """
        )
    )

    options = {
        "runByManifest": True,
        "manifestFile": manifest,
    }
    md, tests = get_active_tests(**options)

    assert len(tests) == 2
    assert manifest_relpath in md.args_by_manifest

    args = md.args_by_manifest[manifest_relpath]
    assert len(args) == 1
    assert args.pop() == "\n--cheese\n--foo=bar\n--foo1 bar1"

    # Test args set with runByManifest disabled.
    options["runByManifest"] = False
    with pytest.raises(SystemExit):
        get_active_tests(**options)

    # Test args set in non-default section.
    options["runByManifest"] = True
    options["manifestFile"] = create_manifest(
        dedent(
            """
            [files/test_pass.html]
            args=--foo2=bar2
            [files/test_fail.html]
            """
        )
    )
    with pytest.raises(SystemExit):
        get_active_tests(**options)
+
+
def test_args_validation_with_ancestor_manifest(get_active_tests, create_manifest):
    """args from an ancestor manifest must merge into included manifests."""
    # Test args set by an ancestor manifest.
    create_manifest(
        dedent(
            """
            [DEFAULT]
            args=
              --cheese

            [files/test_pass.html]
            [files/test_fail.html]
            """
        ),
        name="subdir/manifest.ini",
    )

    manifest = create_manifest(
        dedent(
            """
            [DEFAULT]
            args =
              --foo=bar

            [include:manifest.ini]
            [test_foo.html]
            """
        ),
        name="subdir/ancestor-manifest.ini",
    )

    options = {
        "runByManifest": True,
        "manifestFile": manifest,
    }

    md, tests = get_active_tests(**options)
    assert len(tests) == 3

    key = os.path.join("subdir", "ancestor-manifest.ini")
    assert key in md.args_by_manifest
    args = md.args_by_manifest[key]
    assert len(args) == 1
    assert args.pop() == "\n--foo=bar"

    # Included manifests are keyed as "<ancestor>:<included>".
    key = "{}:{}".format(
        os.path.join("subdir", "ancestor-manifest.ini"),
        os.path.join("subdir", "manifest.ini"),
    )
    assert key in md.args_by_manifest
    args = md.args_by_manifest[key]
    assert len(args) == 1
    assert args.pop() == "\n--foo=bar \n--cheese"
+
+
def test_prefs_validation(get_active_tests, create_manifest):
    """prefs= must be collected per manifest and rejected where unsupported."""
    # Test prefs set in a single manifest.
    manifest_relpath = "manifest.ini"
    manifest = create_manifest(
        dedent(
            """
            [DEFAULT]
            prefs=
              foo=bar
              browser.dom.foo=baz

            [files/test_pass.html]
            [files/test_fail.html]
            """
        )
    )

    options = {
        "runByManifest": True,
        "manifestFile": manifest,
    }
    md, tests = get_active_tests(**options)

    assert len(tests) == 2
    assert manifest_relpath in md.prefs_by_manifest

    prefs = md.prefs_by_manifest[manifest_relpath]
    assert len(prefs) == 1
    assert prefs.pop() == "\nfoo=bar\nbrowser.dom.foo=baz"

    # Test prefs set with runByManifest disabled.
    options["runByManifest"] = False
    with pytest.raises(SystemExit):
        get_active_tests(**options)

    # Test prefs set in non-default section.
    options["runByManifest"] = True
    options["manifestFile"] = create_manifest(
        dedent(
            """
            [files/test_pass.html]
            prefs=foo=bar
            [files/test_fail.html]
            """
        )
    )
    with pytest.raises(SystemExit):
        get_active_tests(**options)
+
+
def test_prefs_validation_with_ancestor_manifest(get_active_tests, create_manifest):
    """prefs from an ancestor manifest must merge into included manifests."""
    # Test prefs set by an ancestor manifest.
    create_manifest(
        dedent(
            """
            [DEFAULT]
            prefs=
              foo=bar
              browser.dom.foo=baz

            [files/test_pass.html]
            [files/test_fail.html]
            """
        ),
        name="subdir/manifest.ini",
    )

    manifest = create_manifest(
        dedent(
            """
            [DEFAULT]
            prefs =
              browser.dom.foo=fleem
              flower=rose

            [include:manifest.ini]
            [test_foo.html]
            """
        ),
        name="subdir/ancestor-manifest.ini",
    )

    options = {
        "runByManifest": True,
        "manifestFile": manifest,
    }

    md, tests = get_active_tests(**options)
    assert len(tests) == 3

    key = os.path.join("subdir", "ancestor-manifest.ini")
    assert key in md.prefs_by_manifest
    prefs = md.prefs_by_manifest[key]
    assert len(prefs) == 1
    assert prefs.pop() == "\nbrowser.dom.foo=fleem\nflower=rose"

    # Included manifests are keyed as "<ancestor>:<included>".
    key = "{}:{}".format(
        os.path.join("subdir", "ancestor-manifest.ini"),
        os.path.join("subdir", "manifest.ini"),
    )
    assert key in md.prefs_by_manifest
    prefs = md.prefs_by_manifest[key]
    assert len(prefs) == 1
    assert (
        prefs.pop()
        == "\nbrowser.dom.foo=fleem\nflower=rose \nfoo=bar\nbrowser.dom.foo=baz"
    )
+
+
if __name__ == "__main__":
    # Support running this file directly, outside of pytest discovery.
    mozunit.main()
diff --git a/testing/mochitest/tests/python/test_message_logger.py b/testing/mochitest/tests/python/test_message_logger.py
new file mode 100644
index 0000000000..60bf6f9dc9
--- /dev/null
+++ b/testing/mochitest/tests/python/test_message_logger.py
@@ -0,0 +1,191 @@
+# Any copyright is dedicated to the Public Domain.
+# http://creativecommons.org/publicdomain/zero/1.0/
+
+import json
+import time
+import types
+
+import mozunit
+import pytest
+import six
+from conftest import setup_args
+from mozlog.formatters import JSONFormatter
+from mozlog.handlers.base import StreamHandler
+from mozlog.structuredlog import StructuredLogger
+from six import string_types
+
+
@pytest.fixture
def logger():
    """Return a StructuredLogger whose output is captured as JSON lines
    in an in-memory StringIO buffer."""
    structured = StructuredLogger("mochitest_message_logger")
    structured.add_handler(StreamHandler(six.StringIO(), JSONFormatter()))
    return structured
+
+
@pytest.fixture
def get_message_logger(setup_test_harness, logger):
    """Return a factory producing MessageLogger instances wired to `logger`.

    Each instance gains a `fake_message` helper that synthesizes an
    incoming JSON log line for a given action and feeds it through
    MessageLogger.write().
    """
    setup_test_harness(*setup_args)
    runtests = pytest.importorskip("runtests")

    def fake_message(self, action, **extra):
        # Base payload shared by every action.
        payload = {
            "action": action,
            "time": time.time(),
        }
        if action in ("test_start", "test_end", "test_status"):
            payload["test"] = "test_foo.html"

        if action == "test_end":
            payload.update(status="PASS", expected="PASS")
        elif action == "test_status":
            payload.update(subtest="bar", status="PASS")
        elif action == "log":
            payload.update(level="INFO", message="foobar")

        payload.update(**extra)
        return self.write(json.dumps(payload))

    def inner(**kwargs):
        ml = runtests.MessageLogger(logger, **kwargs)
        # Bind the helper so tests can conveniently fake incoming messages.
        ml.fake_message = types.MethodType(fake_message, ml)
        return ml

    return inner
+
+
@pytest.fixture
def get_lines(logger):
    """Return a function that drains and returns the logger's buffered lines."""
    stream = logger.handlers[0].stream

    def drain():
        captured = stream.getvalue().splitlines()
        # Reset the buffer.  On Python 3, truncate() does not move the
        # stream position, and later writes would pad the gap with null
        # bytes, so explicitly seek back to the start as well.
        stream.truncate(0)
        stream.seek(0)
        return captured

    return drain
+
+
@pytest.fixture
def assert_actions(get_lines):
    """Return a helper asserting the drained log lines' actions match
    `expected` (a single action name or a list of them)."""

    def check(expected):
        if isinstance(expected, string_types):
            expected = [expected]

        actions = [json.loads(line)["action"] for line in get_lines()]
        assert actions == expected

    return check
+
+
def test_buffering_on(get_message_logger, assert_actions):
    """With buffering=True, messages are only buffered while inside a test."""
    ml = get_message_logger(buffering=True)

    # no buffering initially (outside of test)
    ml.fake_message("log")
    assert_actions(["log"])

    # inside a test buffering is enabled, only 'test_start' logged
    ml.fake_message("test_start")
    ml.fake_message("test_status")
    ml.fake_message("log")
    assert_actions(["test_start"])

    # buffering turned off manually within a test
    ml.fake_message("buffering_off")
    ml.fake_message("test_status")
    ml.fake_message("log")
    assert_actions(["test_status", "log"])

    # buffering turned back on again
    ml.fake_message("buffering_on")
    ml.fake_message("test_status")
    ml.fake_message("log")
    assert_actions([])

    # test end, it failed! All previously buffered messages are now logged.
    ml.fake_message("test_end", status="FAIL")
    assert_actions(
        [
            "log",  # "Buffered messages logged"
            "test_status",
            "log",
            "test_status",
            "log",
            "log",  # "Buffered messages finished"
            "test_end",
        ]
    )

    # enabling buffering outside of a test has no effect
    ml.fake_message("buffering_on")
    ml.fake_message("log")
    ml.fake_message("test_status")
    assert_actions(["log", "test_status"])
+
+
def test_buffering_off(get_message_logger, assert_actions):
    """With buffering=False, every message is logged immediately,
    regardless of test boundaries or buffering_* actions."""
    ml = get_message_logger(buffering=False)

    ml.fake_message("test_start")
    assert_actions(["test_start"])

    # messages logged no matter what the state
    ml.fake_message("test_status")
    ml.fake_message("buffering_off")
    ml.fake_message("log")
    assert_actions(["test_status", "log"])

    # even after a 'buffering_on' action
    ml.fake_message("buffering_on")
    ml.fake_message("test_status")
    ml.fake_message("log")
    assert_actions(["test_status", "log"])

    # no buffer to empty on test fail
    ml.fake_message("test_end", status="FAIL")
    assert_actions(["test_end"])
+
+
@pytest.mark.parametrize(
    "name,expected",
    (
        ("/tests/test_foo.html", "test_foo.html"),
        ("chrome://mochitests/content/a11y/test_foo.html", "test_foo.html"),
        ("chrome://mochitests/content/browser/test_foo.html", "test_foo.html"),
        ("chrome://mochitests/content/chrome/test_foo.html", "test_foo.html"),
        (
            "https://example.org:443/tests/netwerk/test_foo.html",
            "netwerk/test_foo.html",
        ),
        ("http://mochi.test:8888/tests/test_foo.html", "test_foo.html"),
        ("http://mochi.test:8888/content/dom/browser/test_foo.html", None),
    ),
)
def test_test_names_fixed_to_be_relative(name, expected, get_message_logger, get_lines):
    """Incoming test paths/URLs are rewritten relative to the tests root.

    An expectation of None means the name should pass through unmodified.
    """
    ml = get_message_logger(buffering=False)
    ml.fake_message("test_start", test=name)

    logged = json.loads(get_lines()[0])
    assert logged["test"] == (name if expected is None else expected)
+
+
# Allow invoking this test file directly with Python via mozunit.
if __name__ == "__main__":
    mozunit.main()
diff --git a/testing/mochitest/tests/python/test_mochitest_integration.py b/testing/mochitest/tests/python/test_mochitest_integration.py
new file mode 100644
index 0000000000..e2ae89bdaf
--- /dev/null
+++ b/testing/mochitest/tests/python/test_mochitest_integration.py
@@ -0,0 +1,314 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+from functools import partial
+
+import mozunit
+import pytest
+from conftest import setup_args
+from manifestparser import TestManifest
+from mozharness.base.log import ERROR, INFO, WARNING
+from mozharness.mozilla.automation import TBPL_FAILURE, TBPL_SUCCESS, TBPL_WARNING
+from moztest.selftest.output import filter_action, get_mozharness_status
+
# Absolute path of the directory containing this file.
here = os.path.abspath(os.path.dirname(__file__))
# Pre-bind the harness name so call sites only pass (lines, status).
get_mozharness_status = partial(get_mozharness_status, "mochitest")
+
+
@pytest.fixture
def test_name(request):
    """Return a function mapping a base name to the flavor's test file name.

    Unknown flavors fall through and yield None, matching the original
    behaviour.
    """
    flavor = request.getfixturevalue("flavor")

    def build(name):
        patterns = {
            "plain": f"test_{name}.html",
            "browser-chrome": f"browser_{name}.js",
        }
        return patterns.get(flavor)

    return build
+
+
@pytest.fixture
def test_manifest(setup_test_harness, request):
    """Return a factory building a TestManifest from manifest file names
    resolved against the flavor-specific test root."""
    flavor = request.getfixturevalue("flavor")
    test_root = setup_test_harness(*setup_args, flavor=flavor)
    assert test_root

    def build(manifestFileNames):
        paths = [os.path.join(test_root, name) for name in manifestFileNames]
        return TestManifest(manifests=paths, strict=False, rootdir=test_root)

    return build
+
+
@pytest.mark.parametrize(
    "flavor,manifest",
    [
        ("plain", "mochitest-args.ini"),
        ("browser-chrome", "browser-args.ini"),
    ],
)
def test_output_extra_args(flavor, manifest, runtests, test_manifest, test_name):
    """Extra command line arguments from the manifest reach the application."""
    # Explicitly provide a manifestFile property that includes the
    # manifest file that contains command line arguments.
    extra_opts = {
        "manifestFile": test_manifest([manifest]),
        "runByManifest": True,
    }

    status, lines = runtests(test_name("pass"), **extra_opts)
    assert status == 0

    tbpl_status, log_level, _ = get_mozharness_status(lines, status)
    assert tbpl_status == TBPL_SUCCESS
    assert log_level in (INFO, WARNING)

    # Find the log entry for the application command, which records the
    # command line arguments that were used.
    log_entries = filter_action("log", lines)
    command = next(
        entry["message"]
        for entry in log_entries
        if entry["message"].startswith("Application command")
    )
    assert "--headless --window-size 800,600 --new-tab http://example.org" in command
+
+
@pytest.mark.parametrize("runFailures", ["selftest", ""])
@pytest.mark.parametrize("flavor", ["plain", "browser-chrome"])
def test_output_pass(flavor, runFailures, runtests, test_name):
    """A passing test reports success, or a warning in runFailures mode."""
    extra_opts = {}
    if runFailures:
        extra_opts.update(
            runFailures=runFailures, crashAsPass=True, timeoutAsPass=True
        )

    expected_status = 1 if runFailures else 0
    expected_tbpl = TBPL_WARNING if runFailures else TBPL_SUCCESS
    expected_count = 2 if runFailures else 1

    status, lines = runtests(test_name("pass"), **extra_opts)
    assert status == expected_status

    tbpl_status, log_level, summary = get_mozharness_status(lines, status)
    assert tbpl_status == expected_tbpl
    assert log_level in (INFO, WARNING)

    statuses = filter_action("test_status", lines)
    assert len(statuses) == expected_count
    assert statuses[0]["status"] == "PASS"
+
+
@pytest.mark.parametrize("runFailures", ["selftest", ""])
@pytest.mark.parametrize("flavor", ["plain", "browser-chrome"])
def test_output_fail(flavor, runFailures, runtests, test_name):
    """A failing test warns, unless failures are expected via runFailures."""
    extra_opts = {}
    if runFailures:
        extra_opts.update(
            runFailures=runFailures, crashAsPass=True, timeoutAsPass=True
        )

    status, lines = runtests(test_name("fail"), **extra_opts)
    assert status == (0 if runFailures else 1)

    tbpl_status, log_level, summary = get_mozharness_status(lines, status)
    assert tbpl_status == (TBPL_SUCCESS if runFailures else TBPL_WARNING)
    assert log_level in (INFO, WARNING)

    statuses = filter_action("test_status", lines)
    assert len(statuses) == 1
    assert statuses[0]["status"] == ("PASS" if runFailures else "FAIL")
+
+
@pytest.mark.skip_mozinfo("!crashreporter")
@pytest.mark.parametrize("runFailures", ["selftest", ""])
@pytest.mark.parametrize("flavor", ["plain", "browser-chrome"])
def test_output_crash(flavor, runFailures, runtests, test_name):
    """A crashing test is reported as a failure with crash details,
    unless crashes are treated as passes via crashAsPass."""
    extra_opts = {}
    results = {
        "status": 0 if runFailures else 1,
        "tbpl_status": TBPL_FAILURE,
        "log_level": ERROR,
        "lines": 1,
    }
    if runFailures:
        extra_opts["runFailures"] = runFailures
        extra_opts["crashAsPass"] = True
        extra_opts["timeoutAsPass"] = True
        # bug 1443327 - we do not set MOZ_CRASHREPORTER_SHUTDOWN for browser-chrome
        # the error regexes don't pick this up as a failure
        if flavor == "browser-chrome":
            results["tbpl_status"] = TBPL_SUCCESS
            results["log_level"] = (INFO, WARNING)

    status, lines = runtests(
        test_name("crash"), environment=["MOZ_CRASHREPORTER_SHUTDOWN=1"], **extra_opts
    )
    assert status == results["status"]

    tbpl_status, log_level, summary = get_mozharness_status(lines, status)
    assert tbpl_status == results["tbpl_status"]
    assert log_level in results["log_level"]

    # A crash action carrying a signature and minidump path is only
    # emitted when the crash is treated as a real failure.
    if not runFailures:
        crash = filter_action("crash", lines)
        assert len(crash) == 1
        assert crash[0]["action"] == "crash"
        assert crash[0]["signature"]
        assert crash[0]["minidump_path"]

    lines = filter_action("test_end", lines)
    assert len(lines) == results["lines"]
+
+
@pytest.mark.skip_mozinfo("!asan")
@pytest.mark.parametrize("runFailures", [""])
@pytest.mark.parametrize("flavor", ["plain"])
def test_output_asan(flavor, runFailures, runtests, test_name):
    """On an ASan build a crash test fails with AddressSanitizer output
    in the process output and no crash action reported."""
    status, lines = runtests(
        test_name("crash"), environment=["MOZ_CRASHREPORTER_SHUTDOWN=1"]
    )
    assert status == 245

    tbpl_status, log_level, summary = get_mozharness_status(lines, status)
    assert tbpl_status == TBPL_FAILURE
    assert log_level == ERROR

    # No crash actions are expected in the log.
    assert len(filter_action("crash", lines)) == 0

    process_output = filter_action("process_output", lines)
    assert any("ERROR: AddressSanitizer" in entry["data"] for entry in process_output)
+
+
@pytest.mark.skip_mozinfo("!debug")
@pytest.mark.parametrize("runFailures", [""])
@pytest.mark.parametrize("flavor", ["plain"])
def test_output_assertion(flavor, runFailures, runtests, test_name):
    """A test triggering an assertion (debug builds only) yields an
    assertion_count action and a warning-level run."""
    extra_opts = {}
    results = {
        "status": 0,
        "tbpl_status": TBPL_WARNING,
        "log_level": WARNING,
        "lines": 1,
        "assertions": 1,
    }

    status, lines = runtests(test_name("assertion"), **extra_opts)
    # TODO: mochitest should return non-zero here
    assert status == results["status"]

    tbpl_status, log_level, summary = get_mozharness_status(lines, status)
    assert tbpl_status == results["tbpl_status"]
    assert log_level == results["log_level"]

    test_end = filter_action("test_end", lines)
    assert len(test_end) == results["lines"]
    # TODO: this should be ASSERT, but moving the assertion check before
    # the test_end action caused a bunch of failures.
    assert test_end[0]["status"] == "OK"

    assertions = filter_action("assertion_count", lines)
    assert len(assertions) == results["assertions"]
    assert assertions[0]["count"] == results["assertions"]
+
+
@pytest.mark.skip_mozinfo("!debug")
@pytest.mark.parametrize("runFailures", [""])
@pytest.mark.parametrize("flavor", ["plain"])
def test_output_leak(flavor, runFailures, runtests, test_name):
    """An intentionally leaked object is reported for exactly one process."""
    status, lines = runtests(test_name("leak"))
    # TODO: mochitest should return non-zero here
    assert status == 0

    tbpl_status, log_level, summary = get_mozharness_status(lines, status)
    assert tbpl_status == TBPL_WARNING
    assert log_level == WARNING

    found_leaks = False
    for total in filter_action("mozleak_total", lines):
        if total["bytes"] == 0:
            # No leaks in this process.
            assert len(total["objects"]) == 0
            continue

        assert not found_leaks, "Only one process should have leaked"
        found_leaks = True
        assert total["process"] == "tab"
        assert total["bytes"] == 1
        assert total["objects"] == ["IntentionallyLeakedObject"]

    assert found_leaks, "At least one process should have leaked"
+
+
@pytest.mark.parametrize("flavor", ["plain"])
def test_output_testfile_in_dupe_manifests(flavor, runtests, test_name, test_manifest):
    """A test file listed in two manifests runs once per manifest when
    runByManifest is enabled."""
    results = {
        "status": 0,
        "tbpl_status": TBPL_SUCCESS,
        "log_level": (INFO, WARNING),
        "line_status": "PASS",
        # We expect the test to be executed exactly 2 times,
        # once for each manifest where the test file has been included.
        "lines": 2,
    }

    # Explicitly provide a manifestFile property that includes the
    # two manifest files that share the same test file.
    extra_opts = {
        "manifestFile": test_manifest(
            [
                "mochitest-dupemanifest-1.ini",
                "mochitest-dupemanifest-2.ini",
            ]
        ),
        "runByManifest": True,
    }

    # Execute mochitest, explicitly requesting the test file that is
    # listed in both manifest files.
    status, lines = runtests(test_name("pass"), **extra_opts)
    assert status == results["status"]

    tbpl_status, log_level, summary = get_mozharness_status(lines, status)
    assert tbpl_status == results["tbpl_status"]
    assert log_level in results["log_level"]

    lines = filter_action("test_status", lines)
    assert len(lines) == results["lines"]
    assert lines[0]["status"] == results["line_status"]
+
+
# Allow invoking this test file directly with Python via mozunit.
if __name__ == "__main__":
    mozunit.main()