summaryrefslogtreecommitdiffstats
path: root/testing/marionette/harness/marionette_harness/tests/harness_unit
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
commit26a029d407be480d791972afb5975cf62c9360a6 (patch)
treef435a8308119effd964b339f76abb83a57c29483 /testing/marionette/harness/marionette_harness/tests/harness_unit
parentInitial commit. (diff)
downloadfirefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz
firefox-26a029d407be480d791972afb5975cf62c9360a6.zip
Adding upstream version 124.0.1.upstream/124.0.1
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'testing/marionette/harness/marionette_harness/tests/harness_unit')
-rw-r--r--testing/marionette/harness/marionette_harness/tests/harness_unit/conftest.py99
-rw-r--r--testing/marionette/harness/marionette_harness/tests/harness_unit/python.toml14
-rw-r--r--testing/marionette/harness/marionette_harness/tests/harness_unit/test_httpd.py92
-rw-r--r--testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_arguments.py80
-rw-r--r--testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_harness.py110
-rw-r--r--testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_runner.py541
-rw-r--r--testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_test_result.py55
-rw-r--r--testing/marionette/harness/marionette_harness/tests/harness_unit/test_serve.py69
8 files changed, 1060 insertions, 0 deletions
diff --git a/testing/marionette/harness/marionette_harness/tests/harness_unit/conftest.py b/testing/marionette/harness/marionette_harness/tests/harness_unit/conftest.py
new file mode 100644
index 0000000000..43951b2c04
--- /dev/null
+++ b/testing/marionette/harness/marionette_harness/tests/harness_unit/conftest.py
@@ -0,0 +1,99 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import pytest
+
+from unittest.mock import Mock, MagicMock
+
+from marionette_driver.marionette import Marionette
+
+from marionette_harness.runner.httpd import FixtureServer
+
+
@pytest.fixture(scope="module")
def logger():
    """
    Fake logger to help with mocking out other runner-related classes.

    Module-scoped: all tests in a module share one mock StructuredLogger.
    """
    # Imported lazily so that merely collecting this conftest does not
    # require mozlog to be importable.
    import mozlog

    return Mock(spec=mozlog.structuredlog.StructuredLogger)
+
+
@pytest.fixture
def mach_parsed_kwargs(logger):
    """
    Parsed and verified dictionary used during simplest
    call to mach marionette-test

    Mirrors the keyword arguments mach would pass to
    MarionetteTestRunner(**kwargs); individual tests override
    entries before constructing a runner.
    """
    return {
        "adb_path": None,
        "addons": None,
        "address": None,
        "app": None,
        "app_args": [],
        "avd": None,
        "avd_home": None,
        # Fake paths: the runner is never actually launched in these tests.
        "binary": "/path/to/firefox",
        "browsermob_port": None,
        "browsermob_script": None,
        "device_serial": None,
        "emulator": False,
        "emulator_bin": None,
        "gecko_log": None,
        "jsdebugger": False,
        "log_errorsummary": None,
        "log_html": None,
        "log_mach": None,
        "log_mach_buffer": None,
        "log_mach_level": None,
        "log_mach_verbose": None,
        "log_raw": None,
        "log_raw_level": None,
        "log_tbpl": None,
        "log_tbpl_buffer": None,
        "log_tbpl_compact": None,
        "log_tbpl_level": None,
        "log_unittest": None,
        "log_xunit": None,
        "logger_name": "Marionette-based Tests",
        "prefs": {},
        "prefs_args": None,
        "prefs_files": None,
        "profile": None,
        "pydebugger": None,
        "repeat": None,
        "run_until_failure": None,
        "server_root": None,
        "shuffle": False,
        # Arbitrary fixed seed so shuffle-related behavior is deterministic.
        "shuffle_seed": 2276870381009474531,
        "socket_timeout": 60.0,
        "startup_timeout": 60,
        "symbols_path": None,
        "test_tags": None,
        "tests": ["/path/to/unit-tests.toml"],
        "testvars": None,
        "this_chunk": None,
        "timeout": None,
        "total_chunks": None,
        "verbose": None,
        "workspace": None,
        # The module-scoped fake logger fixture defined above.
        "logger": logger,
    }
+
+
@pytest.fixture
def mock_httpd():
    """Mock httpd instance (a MagicMock speccing FixtureServer).

    The original accepted an unused ``request`` parameter and bound the
    mock to a temporary variable; both have been dropped. Pytest resolves
    fixtures by name, so callers are unaffected.
    """
    return MagicMock(spec=FixtureServer)
+
+
@pytest.fixture
def mock_marionette(request):
    """Mock marionette instance"""
    # spec=dir(...) constrains the mock to Marionette's attribute *names*
    # only (not call signatures). NOTE(review): this instantiates a real
    # Marionette() just to enumerate its attributes — presumably cheap and
    # side-effect free; confirm.
    marionette = MagicMock(spec=dir(Marionette()))
    # When the requesting test is parametrized with a `has_crashed`
    # fixture, mirror that value from check_for_crash().
    if "has_crashed" in request.fixturenames:
        marionette.check_for_crash.return_value = request.getfixturevalue("has_crashed")
    return marionette
diff --git a/testing/marionette/harness/marionette_harness/tests/harness_unit/python.toml b/testing/marionette/harness/marionette_harness/tests/harness_unit/python.toml
new file mode 100644
index 0000000000..7ae7a32440
--- /dev/null
+++ b/testing/marionette/harness/marionette_harness/tests/harness_unit/python.toml
@@ -0,0 +1,14 @@
+[DEFAULT]
+subsuite = "marionette-harness"
+
+["test_httpd.py"]
+
+["test_marionette_arguments.py"]
+
+["test_marionette_harness.py"]
+
+["test_marionette_runner.py"]
+
+["test_marionette_test_result.py"]
+
+["test_serve.py"]
diff --git a/testing/marionette/harness/marionette_harness/tests/harness_unit/test_httpd.py b/testing/marionette/harness/marionette_harness/tests/harness_unit/test_httpd.py
new file mode 100644
index 0000000000..b62e731ff1
--- /dev/null
+++ b/testing/marionette/harness/marionette_harness/tests/harness_unit/test_httpd.py
@@ -0,0 +1,92 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import os
+import types
+
+import six
+from six.moves.urllib_request import urlopen
+
+import mozunit
+import pytest
+
+from wptserve.handlers import json_handler
+
+from marionette_harness.runner import httpd
+
+here = os.path.abspath(os.path.dirname(__file__))
+parent = os.path.dirname(here)
+default_doc_root = os.path.join(os.path.dirname(parent), "www")
+
+
@pytest.fixture
def server():
    """Yield a FixtureServer rooted at the default doc root.

    Teardown always calls stop(), even for tests that never started the
    server (stop() on a never-started server is assumed safe here).
    """
    server = httpd.FixtureServer(default_doc_root)
    yield server
    server.stop()
+
+
def test_ctor():
    """A bogus document root is rejected; the default one is accepted."""
    bogus_root = "foo"
    with pytest.raises(ValueError):
        httpd.FixtureServer(bogus_root)
    # Constructing with a valid doc root must not raise.
    httpd.FixtureServer(default_doc_root)
+
+
def test_start_stop(server):
    # Smoke test: a started server can be stopped without raising.
    server.start()
    server.stop()
+
+
def test_get_url(server):
    """get_url returns an absolute http URL while the server is running
    and raises NotAliveError once it has been stopped."""
    server.start()
    url = server.get_url("/")
    assert isinstance(url, six.string_types)
    assert "http://" in url

    server.stop()
    with pytest.raises(httpd.NotAliveError):
        server.get_url("/")
+
+
def test_doc_root(server):
    # doc_root stays a readable string both while running and after stop.
    server.start()
    assert isinstance(server.doc_root, six.string_types)
    server.stop()
    assert isinstance(server.doc_root, six.string_types)
+
+
def test_router(server):
    # The router is available even before the server is started.
    assert server.router is not None
+
+
def test_routes(server):
    # The route table is available even before the server is started.
    assert server.routes is not None
+
+
def test_is_alive(server):
    """is_alive is falsy before start() and truthy afterwards."""
    # Truthiness asserts instead of `== False` / `== True` comparisons
    # (PEP 8 E712).
    assert not server.is_alive
    server.start()
    assert server.is_alive
+
+
def test_handler(server):
    """A handler registered on the router is served at its route and its
    JSON body round-trips through an HTTP GET."""
    counter = 0

    @json_handler
    def handler(request, response):
        # Closes over `counter`; json_handler serializes the return value.
        return {"count": counter}

    route = ("GET", "/httpd/test_handler", handler)
    server.router.register(*route)
    server.start()

    url = server.get_url("/httpd/test_handler")
    body = urlopen(url).read()
    res = json.loads(body)
    assert res["count"] == counter
+
+
+if __name__ == "__main__":
+ mozunit.main("-p", "no:terminalreporter", "--log-tbpl=-", "--capture", "no")
diff --git a/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_arguments.py b/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_arguments.py
new file mode 100644
index 0000000000..b640741a6f
--- /dev/null
+++ b/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_arguments.py
@@ -0,0 +1,80 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import mozunit
+import pytest
+
+from marionette_harness.runtests import MarionetteArguments, MarionetteTestRunner
+
+
@pytest.mark.parametrize("socket_timeout", ["A", "10", "1B-", "1C2", "44.35"])
def test_parse_arg_socket_timeout(socket_timeout):
    """--socket-timeout must parse as a float or exit with argparse's
    standard usage-error code (2)."""
    parser = MarionetteArguments()
    argv = ["marionette", "--socket-timeout", socket_timeout]

    try:
        expected = float(socket_timeout)
    except ValueError:
        expected = None

    if expected is None:
        # Unparseable values make argparse bail out with exit code 2.
        with pytest.raises(SystemExit) as ex:
            parser.parse_args(args=argv)
        assert ex.value.code == 2
    else:
        args = parser.parse_args(args=argv)
        assert hasattr(args, "socket_timeout")
        assert args.socket_timeout == expected
+
+
@pytest.mark.parametrize(
    "arg_name, arg_dest, arg_value, expected_value",
    [
        ("app-arg", "app_args", "samplevalue", ["samplevalue"]),
        ("symbols-path", "symbols_path", "samplevalue", "samplevalue"),
        ("gecko-log", "gecko_log", "samplevalue", "samplevalue"),
        ("app", "app", "samplevalue", "samplevalue"),
    ],
)
def test_parsing_optional_arguments(
    mach_parsed_kwargs, arg_name, arg_dest, arg_value, expected_value
):
    """An optional CLI argument must appear both in parse_args output and
    in the kwargs the runner builds for the driver class."""
    parser = MarionetteArguments()
    parsed_args = parser.parse_args(["--" + arg_name, arg_value])
    result = vars(parsed_args)
    assert result.get(arg_dest) == expected_value
    # Feed the parsed value through a real runner and check it survives
    # _build_kwargs unchanged.
    mach_parsed_kwargs[arg_dest] = result[arg_dest]
    runner = MarionetteTestRunner(**mach_parsed_kwargs)
    built_kwargs = runner._build_kwargs()
    assert built_kwargs[arg_dest] == expected_value
+
+
@pytest.mark.parametrize(
    "arg_name, arg_dest, arg_value, expected_value",
    [
        ("adb", "adb_path", "samplevalue", "samplevalue"),
        ("avd", "avd", "samplevalue", "samplevalue"),
        ("avd-home", "avd_home", "samplevalue", "samplevalue"),
        ("package", "package_name", "samplevalue", "samplevalue"),
    ],
)
def test_parse_opt_args_emulator(
    mach_parsed_kwargs, arg_name, arg_dest, arg_value, expected_value
):
    """Same round-trip as test_parsing_optional_arguments, but for
    emulator-only arguments (so emulator mode is forced on)."""
    parser = MarionetteArguments()
    parsed_args = parser.parse_args(["--" + arg_name, arg_value])
    result = vars(parsed_args)
    assert result.get(arg_dest) == expected_value
    mach_parsed_kwargs[arg_dest] = result[arg_dest]
    # These options are only consulted when running against an emulator.
    mach_parsed_kwargs["emulator"] = True
    runner = MarionetteTestRunner(**mach_parsed_kwargs)
    built_kwargs = runner._build_kwargs()
    assert built_kwargs[arg_dest] == expected_value
+
+
+if __name__ == "__main__":
+ mozunit.main("-p", "no:terminalreporter", "--log-tbpl=-", "--capture", "no")
diff --git a/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_harness.py b/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_harness.py
new file mode 100644
index 0000000000..b528594381
--- /dev/null
+++ b/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_harness.py
@@ -0,0 +1,110 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import mozunit
+import pytest
+
+from unittest.mock import Mock, patch, sentinel
+
+import marionette_harness.marionette_test as marionette_test
+
+from marionette_harness.runtests import MarionetteTestRunner, MarionetteHarness, cli
+
+
@pytest.fixture
def harness_class(request):
    """
    Mock based on MarionetteHarness whose run method just returns a number of
    failures according to the supplied test parameter
    """
    if "num_fails_crashed" in request.fixturenames:
        num_fails_crashed = request.getfixturevalue("num_fails_crashed")
    else:
        num_fails_crashed = (0, 0)
    harness_cls = Mock(spec=MarionetteHarness)
    harness = harness_cls.return_value
    if num_fails_crashed is None:
        # None signals "the harness itself blows up" rather than failures.
        harness.run.side_effect = Exception
    else:
        # run() reports failures + crashes as a single count.
        harness.run.return_value = sum(num_fails_crashed)
    return harness_cls
+
+
@pytest.fixture
def runner_class(request):
    """
    Mock based on MarionetteTestRunner whose `failed` and `crashed`
    attributes come from the optional `num_fails_crashed` test parameter
    (defaulting to zero of each).
    """
    if "num_fails_crashed" in request.fixturenames:
        failures, crashed = request.getfixturevalue("num_fails_crashed")
    else:
        failures, crashed = 0, 0
    mock_runner_class = Mock(spec=MarionetteTestRunner)
    instance = mock_runner_class.return_value
    instance.failed = failures
    instance.crashed = crashed
    return mock_runner_class
+
+
@pytest.mark.parametrize(
    "num_fails_crashed,exit_code",
    [((0, 0), 0), ((1, 0), 10), ((0, 1), 10), (None, 1)],
)
def test_cli_exit_code(num_fails_crashed, exit_code, harness_class):
    """cli() exits 0 on success, 10 on failures/crashes, 1 on an
    unexpected harness exception (the None parametrization)."""
    with pytest.raises(SystemExit) as err:
        cli(harness_class=harness_class)
    assert err.value.code == exit_code
+
+
@pytest.mark.parametrize("num_fails_crashed", [(0, 0), (1, 0), (1, 1)])
def test_call_harness_with_parsed_args_yields_num_failures(
    mach_parsed_kwargs, runner_class, num_fails_crashed
):
    """When args are supplied up front, run() must not re-parse them and
    must report failed + crashed as one count."""
    with patch(
        "marionette_harness.runtests.MarionetteHarness.parse_args"
    ) as parse_args:
        failed_or_crashed = MarionetteHarness(
            runner_class, args=mach_parsed_kwargs
        ).run()
        parse_args.assert_not_called()
    assert failed_or_crashed == sum(num_fails_crashed)
+
+
def test_call_harness_with_no_args_yields_num_failures(runner_class):
    """Without explicit args, the harness parses them exactly once."""
    with patch(
        "marionette_harness.runtests.MarionetteHarness.parse_args",
        return_value={"tests": []},
    ) as parse_args:
        failed_or_crashed = MarionetteHarness(runner_class).run()
        assert parse_args.call_count == 1
    assert failed_or_crashed == 0
+
+
def test_args_passed_to_runner_class(mach_parsed_kwargs, runner_class):
    """Every kwarg (except the tests list) must be forwarded verbatim to
    the runner class constructor."""
    arg_list = list(mach_parsed_kwargs.keys())
    arg_list.remove("tests")
    # Replace each value with a unique sentinel so forwarding is provable.
    mach_parsed_kwargs.update([(a, getattr(sentinel, a)) for a in arg_list])
    harness = MarionetteHarness(runner_class, args=mach_parsed_kwargs)
    harness.process_args = Mock()
    harness.run()
    for arg in arg_list:
        assert harness._runner_class.call_args[1][arg] is getattr(sentinel, arg)
+
+
def test_harness_sets_up_default_test_handlers(mach_parsed_kwargs):
    """
    If the necessary TestCase is not in test_handlers,
    tests are omitted silently
    """
    harness = MarionetteHarness(args=mach_parsed_kwargs)
    # "tests" is consumed by the harness, not the runner constructor.
    mach_parsed_kwargs.pop("tests")
    runner = harness._runner_class(**mach_parsed_kwargs)
    assert marionette_test.MarionetteTestCase in runner.test_handlers
+
+
+if __name__ == "__main__":
+ mozunit.main("-p", "no:terminalreporter", "--log-tbpl=-", "--capture", "no")
diff --git a/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_runner.py b/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_runner.py
new file mode 100644
index 0000000000..fc1a1c70ee
--- /dev/null
+++ b/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_runner.py
@@ -0,0 +1,541 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+import manifestparser
+import mozinfo
+import mozunit
+import pytest
+
+from unittest.mock import Mock, patch, mock_open, sentinel, DEFAULT
+
+from marionette_harness.runtests import MarionetteTestRunner
+
+
@pytest.fixture
def runner(mach_parsed_kwargs):
    """
    MarionetteTestRunner instance initialized with default options.
    """
    return MarionetteTestRunner(**mach_parsed_kwargs)
+
+
@pytest.fixture
def mock_runner(runner, mock_marionette, monkeypatch):
    """
    MarionetteTestRunner instance with mocked-out
    self.marionette and other properties,
    to enable testing runner.run_tests().
    """
    # The driver class yields the pre-built mock marionette instance.
    runner.driverclass = Mock(return_value=mock_marionette)
    for attr in ["run_test", "_capabilities"]:
        setattr(runner, attr, Mock())
    runner._appName = "fake_app"
    # Stub out mozversion so no real binary is probed for version info.
    monkeypatch.setattr("marionette_harness.runner.base.mozversion", Mock())
    return runner
+
+
@pytest.fixture
def build_kwargs_using(mach_parsed_kwargs):
    """Helper function for test_build_kwargs_* functions"""

    def kwarg_builder(new_items, return_socket=False):
        # new_items may be a dict or a list of pairs; dict.update accepts both.
        mach_parsed_kwargs.update(new_items)
        runner = MarionetteTestRunner(**mach_parsed_kwargs)
        # Patch socket so _build_kwargs' connectivity probe never touches
        # the network; optionally hand the mock back for call assertions.
        with patch("marionette_harness.runner.base.socket") as socket:
            built_kwargs = runner._build_kwargs()
        if return_socket:
            return built_kwargs, socket
        return built_kwargs

    return kwarg_builder
+
+
@pytest.fixture
def expected_driver_args(runner):
    """Helper fixture for tests of _build_kwargs
    with binary/emulator.
    Provides a dictionary of certain arguments
    related to binary/emulator settings
    which we expect to be passed to the
    driverclass constructor. Expected values can
    be updated in tests as needed.
    Provides convenience methods for comparing the
    expected arguments to the argument dictionary
    created by _build_kwargs."""

    class ExpectedDict(dict):
        # Every expected key/value must appear in the actual kwargs.
        def assert_matches(self, actual):
            for k, v in self.items():
                assert actual[k] == v

        # None of the expected keys may appear in the actual kwargs.
        def assert_keys_not_in(self, actual):
            for k in self.keys():
                assert k not in actual

    expected = ExpectedDict(host=None, port=None, bin=None)
    # Seed pass-through attributes with the runner's current values.
    for attr in ["app", "app_args", "profile", "addons", "gecko_log"]:
        expected[attr] = getattr(runner, attr)
    return expected
+
+
class ManifestFixture:
    """Wraps a mocked manifestparser.TestManifest together with counts of
    how many of the supplied test entries are enabled/disabled."""

    def __init__(self, name="mock_manifest", tests=None):
        # Avoid the mutable-default-argument pitfall of the original
        # (a single shared list across all calls).
        if tests is None:
            tests = [{"path": "test_something.py", "expected": "pass"}]
        self.name = name
        self.filepath = "/path/to/fake/manifest.toml"
        self.n_disabled = len([t for t in tests if "disabled" in t])
        self.n_enabled = len(tests) - self.n_disabled
        mock_manifest = Mock(
            spec=manifestparser.TestManifest, active_tests=Mock(return_value=tests)
        )
        self.manifest_class = Mock(return_value=mock_manifest)

    def __repr__(self):
        # The original assigned `self.__repr__ = lambda: ...`, which
        # repr() ignores: special methods are looked up on the type, not
        # the instance. Defining it on the class makes it effective.
        return "<ManifestFixture {}>".format(self.name)
+
+
@pytest.fixture
def manifest():
    # Default manifest: one enabled, passing test.
    return ManifestFixture()
+
+
@pytest.fixture(params=["enabled", "disabled", "enabled_disabled", "empty"])
def manifest_with_tests(request):
    """
    Fixture for the contents of mock_manifest, where a manifest
    can include enabled tests, disabled tests, both, or neither (empty)
    """
    included = []
    if "enabled" in request.param:
        included += [
            ("test_expected_pass.py", "pass"),
            ("test_expected_fail.py", "fail"),
        ]
    if "disabled" in request.param:
        included += [
            ("test_pass_disabled.py", "pass", "skip-if: true"),
            ("test_fail_disabled.py", "fail", "skip-if: true"),
        ]
    # Enabled entries have two fields, disabled ones three; zip truncates
    # the key tuple accordingly, so only disabled tests get "disabled".
    keys = ("path", "expected", "disabled")
    active_tests = [dict(list(zip(keys, values))) for values in included]

    return ManifestFixture(request.param, active_tests)
+
+
def test_args_passed_to_driverclass(mock_runner):
    """Whatever _build_kwargs produces is passed verbatim to driverclass."""
    built_kwargs = {"arg1": "value1", "arg2": "value2"}
    mock_runner._build_kwargs = Mock(return_value=built_kwargs)
    # The fake manifest path does not exist, so run_tests raises IOError;
    # by then driverclass has already been invoked with the kwargs.
    with pytest.raises(IOError):
        mock_runner.run_tests(["fake_tests.toml"])
    assert mock_runner.driverclass.call_args[1] == built_kwargs
+
+
+def test_build_kwargs_basic_args(build_kwargs_using):
+ """Test the functionality of runner._build_kwargs:
+ make sure that basic arguments (those which should
+ always be included, irrespective of the runner's settings)
+ get passed to the call to runner.driverclass"""
+
+ basic_args = [
+ "socket_timeout",
+ "prefs",
+ "startup_timeout",
+ "verbose",
+ "symbols_path",
+ ]
+ args_dict = {a: getattr(sentinel, a) for a in basic_args}
+ # Mock an update method to work with calls to MarionetteTestRunner()
+ args_dict["prefs"].update = Mock(return_value={})
+ built_kwargs = build_kwargs_using([(a, getattr(sentinel, a)) for a in basic_args])
+ for arg in basic_args:
+ assert built_kwargs[arg] is getattr(sentinel, arg)
+
+
+@pytest.mark.parametrize("workspace", ["path/to/workspace", None])
+def test_build_kwargs_with_workspace(build_kwargs_using, workspace):
+ built_kwargs = build_kwargs_using({"workspace": workspace})
+ if workspace:
+ assert built_kwargs["workspace"] == workspace
+ else:
+ assert "workspace" not in built_kwargs
+
+
+@pytest.mark.parametrize("address", ["host:123", None])
+def test_build_kwargs_with_address(build_kwargs_using, address):
+ built_kwargs, socket = build_kwargs_using(
+ {"address": address, "binary": None, "emulator": None}, return_socket=True
+ )
+ assert "connect_to_running_emulator" not in built_kwargs
+ if address is not None:
+ host, port = address.split(":")
+ assert built_kwargs["host"] == host and built_kwargs["port"] == int(port)
+ socket.socket().connect.assert_called_with((host, int(port)))
+ assert socket.socket().close.called
+ else:
+ assert not socket.socket.called
+
+
+@pytest.mark.parametrize("address", ["host:123", None])
+@pytest.mark.parametrize("binary", ["path/to/bin", None])
+def test_build_kwargs_with_binary_or_address(
+ expected_driver_args, build_kwargs_using, binary, address
+):
+ built_kwargs = build_kwargs_using(
+ {"binary": binary, "address": address, "emulator": None}
+ )
+ if binary:
+ expected_driver_args["bin"] = binary
+ if address:
+ host, port = address.split(":")
+ expected_driver_args.update({"host": host, "port": int(port)})
+ else:
+ expected_driver_args.update({"host": "127.0.0.1", "port": 2828})
+ expected_driver_args.assert_matches(built_kwargs)
+ elif address is None:
+ expected_driver_args.assert_keys_not_in(built_kwargs)
+
+
+@pytest.mark.parametrize("address", ["host:123", None])
+@pytest.mark.parametrize("emulator", [True, False, None])
+def test_build_kwargs_with_emulator_or_address(
+ expected_driver_args, build_kwargs_using, emulator, address
+):
+ emulator_props = [
+ (a, getattr(sentinel, a)) for a in ["avd_home", "adb_path", "emulator_bin"]
+ ]
+ built_kwargs = build_kwargs_using(
+ [("emulator", emulator), ("address", address), ("binary", None)]
+ + emulator_props
+ )
+ if emulator:
+ expected_driver_args.update(emulator_props)
+ expected_driver_args["emulator_binary"] = expected_driver_args.pop(
+ "emulator_bin"
+ )
+ expected_driver_args["bin"] = True
+ if address:
+ expected_driver_args["connect_to_running_emulator"] = True
+ host, port = address.split(":")
+ expected_driver_args.update({"host": host, "port": int(port)})
+ else:
+ expected_driver_args.update({"host": "127.0.0.1", "port": 2828})
+ assert "connect_to_running_emulator" not in built_kwargs
+ expected_driver_args.assert_matches(built_kwargs)
+ elif not address:
+ expected_driver_args.assert_keys_not_in(built_kwargs)
+
+
def test_parsing_testvars(mach_parsed_kwargs):
    """Multiple testvars files are deep-merged into one dict."""
    mach_parsed_kwargs.pop("tests")
    testvars_json_loads = [
        {"wifi": {"ssid": "blah", "keyManagement": "WPA-PSK", "psk": "foo"}},
        {"wifi": {"PEAP": "bar"}, "device": {"stuff": "buzz"}},
    ]
    # The "wifi" sub-dicts are merged rather than the second replacing
    # the first.
    expected_dict = {
        "wifi": {
            "ssid": "blah",
            "keyManagement": "WPA-PSK",
            "psk": "foo",
            "PEAP": "bar",
        },
        "device": {"stuff": "buzz"},
    }
    with patch(
        "marionette_harness.runtests.MarionetteTestRunner._load_testvars",
        return_value=testvars_json_loads,
    ) as load:
        runner = MarionetteTestRunner(**mach_parsed_kwargs)
        assert runner.testvars == expected_dict
        assert load.call_count == 1
+
+
def test_load_testvars_throws_expected_errors(mach_parsed_kwargs):
    """_load_testvars raises IOError for a missing file and a formatting
    error for invalid JSON content."""
    mach_parsed_kwargs["testvars"] = ["some_bad_path.json"]
    runner = MarionetteTestRunner(**mach_parsed_kwargs)
    with pytest.raises(IOError) as io_exc:
        runner._load_testvars()
    assert "does not exist" in str(io_exc.value)
    # Pretend the file exists but serve it malformed JSON.
    with patch("os.path.exists", return_value=True):
        with patch(
            "marionette_harness.runner.base.open",
            mock_open(read_data="[not {valid JSON]"),
        ):
            with pytest.raises(Exception) as json_exc:
                runner._load_testvars()
    assert "not properly formatted" in str(json_exc.value)
+
+
+def _check_crash_counts(has_crashed, runner, mock_marionette):
+ if has_crashed:
+ assert mock_marionette.check_for_crash.call_count == 1
+ assert runner.crashed == 1
+ else:
+ assert runner.crashed == 0
+
+
+@pytest.mark.parametrize("has_crashed", [True, False])
+def test_increment_crash_count_in_run_test_set(runner, has_crashed, mock_marionette):
+ fake_tests = [{"filepath": i, "expected": "pass"} for i in "abc"]
+
+ with patch.multiple(runner, run_test=DEFAULT, marionette=mock_marionette):
+ runner.run_test_set(fake_tests)
+ if not has_crashed:
+ assert runner.marionette.check_for_crash.call_count == len(fake_tests)
+ _check_crash_counts(has_crashed, runner, runner.marionette)
+
+
+@pytest.mark.parametrize("has_crashed", [True, False])
+def test_record_crash(runner, has_crashed, mock_marionette):
+ with patch.object(runner, "marionette", mock_marionette):
+ assert runner.record_crash() == has_crashed
+ _check_crash_counts(has_crashed, runner, runner.marionette)
+
+
+def test_add_test_module(runner):
+ tests = ["test_something.py", "testSomething.js", "bad_test.py"]
+ assert len(runner.tests) == 0
+ for test in tests:
+ with patch("os.path.abspath", return_value=test) as abspath:
+ runner.add_test(test)
+ assert abspath.called
+ expected = {"filepath": test, "expected": "pass", "group": "default"}
+ assert expected in runner.tests
+ # add_test doesn't validate module names; 'bad_test.py' gets through
+ assert len(runner.tests) == 3
+
+
+def test_add_test_directory(runner):
+ test_dir = "path/to/tests"
+ dir_contents = [
+ (test_dir, ("subdir",), ("test_a.py", "bad_test_a.py")),
+ (test_dir + "/subdir", (), ("test_b.py", "bad_test_b.py")),
+ ]
+ tests = list(dir_contents[0][2] + dir_contents[1][2])
+ assert len(runner.tests) == 0
+ # Need to use side effect to make isdir return True for test_dir and False for tests
+ with patch("os.path.isdir", side_effect=[True] + [False for t in tests]) as isdir:
+ with patch("os.walk", return_value=dir_contents) as walk:
+ runner.add_test(test_dir)
+ assert isdir.called and walk.called
+ for test in runner.tests:
+ assert os.path.normpath(test_dir) in test["filepath"]
+ assert len(runner.tests) == 2
+
+
+@pytest.mark.parametrize("test_files_exist", [True, False])
+def test_add_test_manifest(
+ mock_runner, manifest_with_tests, monkeypatch, test_files_exist
+):
+ monkeypatch.setattr(
+ "marionette_harness.runner.base.TestManifest",
+ manifest_with_tests.manifest_class,
+ )
+ mock_runner.marionette = mock_runner.driverclass()
+ with patch(
+ "marionette_harness.runner.base.os.path.exists", return_value=test_files_exist
+ ):
+ if test_files_exist or manifest_with_tests.n_enabled == 0:
+ mock_runner.add_test(manifest_with_tests.filepath)
+ assert len(mock_runner.tests) == manifest_with_tests.n_enabled
+ assert (
+ len(mock_runner.manifest_skipped_tests)
+ == manifest_with_tests.n_disabled
+ )
+ for test in mock_runner.tests:
+ assert test["filepath"].endswith(test["expected"] + ".py")
+ else:
+ with pytest.raises(IOError):
+ mock_runner.add_test(manifest_with_tests.filepath)
+
+ assert manifest_with_tests.manifest_class().read.called
+ assert manifest_with_tests.manifest_class().active_tests.called
+
+
+def get_kwargs_passed_to_manifest(mock_runner, manifest, monkeypatch, **kwargs):
+ """Helper function for test_manifest_* tests.
+ Returns the kwargs passed to the call to manifest.active_tests."""
+ monkeypatch.setattr(
+ "marionette_harness.runner.base.TestManifest", manifest.manifest_class
+ )
+ monkeypatch.setitem(mozinfo.info, "mozinfo_key", "mozinfo_val")
+ for attr in kwargs:
+ setattr(mock_runner, attr, kwargs[attr])
+ mock_runner.marionette = mock_runner.driverclass()
+ with patch("marionette_harness.runner.base.os.path.exists", return_value=True):
+ mock_runner.add_test(manifest.filepath)
+ call_args, call_kwargs = manifest.manifest_class().active_tests.call_args
+ return call_kwargs
+
+
+def test_manifest_basic_args(mock_runner, manifest, monkeypatch):
+ kwargs = get_kwargs_passed_to_manifest(mock_runner, manifest, monkeypatch)
+ assert kwargs["exists"] is False
+ assert kwargs["disabled"] is True
+ assert kwargs["appname"] == "fake_app"
+ assert "mozinfo_key" in kwargs and kwargs["mozinfo_key"] == "mozinfo_val"
+
+
+@pytest.mark.parametrize("test_tags", (None, ["tag", "tag2"]))
+def test_manifest_with_test_tags(mock_runner, manifest, monkeypatch, test_tags):
+ kwargs = get_kwargs_passed_to_manifest(
+ mock_runner, manifest, monkeypatch, test_tags=test_tags
+ )
+ if test_tags is None:
+ assert kwargs["filters"] == []
+ else:
+ assert len(kwargs["filters"]) == 1 and kwargs["filters"][0].tags == test_tags
+
+
+def test_cleanup_with_manifest(mock_runner, manifest_with_tests, monkeypatch):
+ monkeypatch.setattr(
+ "marionette_harness.runner.base.TestManifest",
+ manifest_with_tests.manifest_class,
+ )
+ if manifest_with_tests.n_enabled > 0:
+ context = patch(
+ "marionette_harness.runner.base.os.path.exists", return_value=True
+ )
+ else:
+ context = pytest.raises(Exception)
+ with context:
+ mock_runner.run_tests([manifest_with_tests.filepath])
+ assert mock_runner.marionette is None
+ assert mock_runner.fixture_servers == {}
+
+
+def test_reset_test_stats(mock_runner):
+ def reset_successful(runner):
+ stats = [
+ "passed",
+ "failed",
+ "unexpected_successes",
+ "todo",
+ "skipped",
+ "failures",
+ ]
+ return all([((s in vars(runner)) and (not vars(runner)[s])) for s in stats])
+
+ assert reset_successful(mock_runner)
+ mock_runner.passed = 1
+ mock_runner.failed = 1
+ mock_runner.failures.append(["TEST-UNEXPECTED-FAIL"])
+ assert not reset_successful(mock_runner)
+ mock_runner.run_tests(["test_fake_thing.py"])
+ assert reset_successful(mock_runner)
+
+
+def test_initialize_test_run(mock_runner):
+ tests = ["test_fake_thing.py"]
+ mock_runner.reset_test_stats = Mock()
+ mock_runner.run_tests(tests)
+ assert mock_runner.reset_test_stats.called
+ with pytest.raises(AssertionError) as test_exc:
+ mock_runner.run_tests([])
+ assert "len(tests)" in str(test_exc.traceback[-1].statement)
+ with pytest.raises(AssertionError) as hndl_exc:
+ mock_runner.test_handlers = []
+ mock_runner.run_tests(tests)
+ assert "test_handlers" in str(hndl_exc.traceback[-1].statement)
+ assert mock_runner.reset_test_stats.call_count == 1
+
+
+def test_add_tests(mock_runner):
+ assert len(mock_runner.tests) == 0
+ fake_tests = ["test_" + i + ".py" for i in "abc"]
+ mock_runner.run_tests(fake_tests)
+ assert len(mock_runner.tests) == 3
+ for test_name, added_test in zip(fake_tests, mock_runner.tests):
+ assert added_test["filepath"].endswith(test_name)
+
+
+def test_repeat(mock_runner):
+ def update_result(test, expected):
+ mock_runner.failed += 1
+
+ fake_tests = ["test_1.py"]
+ mock_runner.repeat = 4
+ mock_runner.run_test = Mock(side_effect=update_result)
+ mock_runner.run_tests(fake_tests)
+
+ assert mock_runner.failed == 5
+ assert mock_runner.passed == 0
+ assert mock_runner.todo == 0
+
+
+def test_run_until_failure(mock_runner):
+ def update_result(test, expected):
+ mock_runner.failed += 1
+
+ fake_tests = ["test_1.py"]
+ mock_runner.run_until_failure = True
+ mock_runner.repeat = 4
+ mock_runner.run_test = Mock(side_effect=update_result)
+ mock_runner.run_tests(fake_tests)
+
+ assert mock_runner.failed == 1
+ assert mock_runner.passed == 0
+ assert mock_runner.todo == 0
+
+
def test_catch_invalid_test_names(runner):
    """_add_tests rejects files not named test_*.py and lists every
    offending name (and only those) in the error message."""
    good_tests = ["test_ok.py", "test_is_ok.py"]
    bad_tests = [
        "bad_test.py",
        "testbad.py",
        "_test_bad.py",
        "test_bad.notpy",
        "test_bad",
        "test.py",
        "test_.py",
    ]
    with pytest.raises(Exception) as exc:
        runner._add_tests(good_tests + bad_tests)
    msg = str(exc.value)
    assert "Test file names must be of the form" in msg
    for bad_name in bad_tests:
        assert bad_name in msg
    for good_name in good_tests:
        assert good_name not in msg
+
+
@pytest.mark.parametrize("repeat", (None, 0, 42, -1))
def test_option_repeat(mach_parsed_kwargs, repeat):
    """runner.repeat defaults to 0 and otherwise takes the given value
    (including unusual values like -1, which are passed through)."""
    if repeat is not None:
        mach_parsed_kwargs["repeat"] = repeat
    runner = MarionetteTestRunner(**mach_parsed_kwargs)

    if repeat is None:
        assert runner.repeat == 0
    else:
        assert runner.repeat == repeat
+
+
@pytest.mark.parametrize("repeat", (None, 42))
@pytest.mark.parametrize("run_until_failure", (None, True))
def test_option_run_until_failure(mach_parsed_kwargs, repeat, run_until_failure):
    """run_until_failure defaults to False; when enabled without an
    explicit repeat, repeat defaults to 30 instead of 0."""
    if run_until_failure is not None:
        mach_parsed_kwargs["run_until_failure"] = run_until_failure
    if repeat is not None:
        mach_parsed_kwargs["repeat"] = repeat
    runner = MarionetteTestRunner(**mach_parsed_kwargs)

    if run_until_failure is None:
        assert runner.run_until_failure is False
        if repeat is None:
            assert runner.repeat == 0
        else:
            assert runner.repeat == repeat

    else:
        assert runner.run_until_failure == run_until_failure
        if repeat is None:
            # run-until-failure implies a default repeat budget of 30.
            assert runner.repeat == 30
        else:
            assert runner.repeat == repeat
+
+
+if __name__ == "__main__":
+ mozunit.main("-p", "no:terminalreporter", "--log-tbpl=-", "--capture", "no")
diff --git a/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_test_result.py b/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_test_result.py
new file mode 100644
index 0000000000..6269b4135e
--- /dev/null
+++ b/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_test_result.py
@@ -0,0 +1,55 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import mozunit
+import pytest
+
+from marionette_harness import MarionetteTestResult
+
+
+@pytest.fixture
+def empty_marionette_testcase():
+    """Testable MarionetteTestCase class"""
+    from marionette_harness import MarionetteTestCase
+
+    class EmptyTestCase(MarionetteTestCase):
+        # Single no-op test method so the case can be instantiated and run.
+        def test_nothing(self):
+            pass
+
+    return EmptyTestCase
+
+
+@pytest.fixture
+def empty_marionette_test(mock_marionette, empty_marionette_testcase):
+    """Instance of the empty test case wired to the marionette mock."""
+    # NOTE(review): `mock_httpd` is not defined in this module and is not a
+    # parameter of this fixture, so the second lambda would raise NameError
+    # if MarionetteTestCase ever called it. The tests in this file appear
+    # never to invoke it -- confirm, or inject a mock_httpd fixture here.
+    return empty_marionette_testcase(
+        lambda: mock_marionette, lambda: mock_httpd, "test_nothing"
+    )
+
+
+@pytest.mark.parametrize("has_crashed", [True, False])
+def test_crash_is_recorded_as_error(empty_marionette_test, logger, has_crashed):
+    """Number of errors is incremented by stopTest iff has_crashed is true"""
+    # NOTE(review): `has_crashed` is never used in this body; presumably the
+    # mock_marionette fixture (conftest.py, not visible here) reads this
+    # parametrized value to simulate a crash -- verify against conftest.
+    # collect results from the empty test
+    result = MarionetteTestResult(
+        marionette=empty_marionette_test._marionette_weakref(),
+        logger=logger,
+        verbosity=1,
+        stream=None,
+        descriptions=None,
+    )
+    # Before stopTest: one test run, no errors/failures, not stopping.
+    result.startTest(empty_marionette_test)
+    assert len(result.errors) == 0
+    assert len(result.failures) == 0
+    assert result.testsRun == 1
+    assert result.shouldStop is False
+    result.stopTest(empty_marionette_test)
+    # A detected crash must both stop the run and be recorded as an error.
+    assert result.shouldStop == has_crashed
+    if has_crashed:
+        assert len(result.errors) == 1
+    else:
+        assert len(result.errors) == 0
+
+
+if __name__ == "__main__":
+    # Direct invocation: run under mozunit with TBPL log formatting.
+    mozunit.main("-p", "no:terminalreporter", "--log-tbpl=-", "--capture", "no")
diff --git a/testing/marionette/harness/marionette_harness/tests/harness_unit/test_serve.py b/testing/marionette/harness/marionette_harness/tests/harness_unit/test_serve.py
new file mode 100644
index 0000000000..84e1f7ddf4
--- /dev/null
+++ b/testing/marionette/harness/marionette_harness/tests/harness_unit/test_serve.py
@@ -0,0 +1,69 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import types
+
+import six
+
+import mozunit
+import pytest
+
+from marionette_harness.runner import serve
+from marionette_harness.runner.serve import iter_proc, iter_url
+
+
+def teardown_function(func):
+    """Stop and kill any server processes a test left running (pytest hook)."""
+    # NOTE(review): `s.is_alive` is referenced, not called. If it is a bound
+    # method rather than a property on the serve-module process wrapper, this
+    # filter is always truthy -- confirm against marionette_harness.runner.serve.
+    for server in [s for s in iter_proc(serve.servers) if s.is_alive]:
+        server.stop()
+        server.kill()
+
+
+def test_registered_servers():
+    """Servers register in fixed order: plain HTTP first, then HTTPS."""
+    # [(name, factory), ...]
+    assert serve.registered_servers[0][0] == "http"
+    assert serve.registered_servers[1][0] == "https"
+
+
+def test_globals():
+    """Module-level state of serve is initialized at import time."""
+    assert serve.default_doc_root is not None
+    assert serve.registered_servers is not None
+    assert serve.servers is not None
+
+
+def test_start():
+    """serve.start() brings up both registered servers and exposes string URLs."""
+    serve.start()
+    assert len(serve.servers) == 2
+    assert "http" in serve.servers
+    assert "https" in serve.servers
+    for url in iter_url(serve.servers):
+        assert isinstance(url, six.string_types)
+
+
+def test_start_with_custom_root(tmpdir_factory):
+    """serve.start propagates a custom document root to every server."""
+    tdir = tmpdir_factory.mktemp("foo")
+    serve.start(str(tdir))
+    for server in iter_proc(serve.servers):
+        # doc_root was passed in as str(tdir); this comparison relies on
+        # py.path.local comparing equal to its string form -- verify.
+        assert server.doc_root == tdir
+
+
+def test_iter_proc():
+    """iter_proc yields process handles that can be stopped individually."""
+    serve.start()
+    for server in iter_proc(serve.servers):
+        server.stop()
+
+
+def test_iter_url():
+    """iter_url yields one URL string per running server."""
+    serve.start()
+    for url in iter_url(serve.servers):
+        assert isinstance(url, six.string_types)
+
+
+def test_where_is():
+    """where_is resolves a path against the http server by default,
+    or against the server named by the `on` keyword."""
+    serve.start()
+    # servers[name] is a (doc_root, server) tuple; index 1 is the server.
+    assert serve.where_is("/") == serve.servers["http"][1].get_url("/")
+    assert serve.where_is("/", on="https") == serve.servers["https"][1].get_url("/")
+
+
+if __name__ == "__main__":
+    # Direct invocation: run under mozunit with TBPL log formatting.
+    mozunit.main("-p", "no:terminalreporter", "--log-tbpl=-", "--capture", "no")