Diffstat (limited to 'tools/tryselect/test')
-rw-r--r--  tools/tryselect/test/conftest.py | 101
-rw-r--r--  tools/tryselect/test/cram.ini | 5
-rw-r--r--  tools/tryselect/test/python.ini | 19
-rw-r--r--  tools/tryselect/test/setup.sh | 99
-rw-r--r--  tools/tryselect/test/test_again.py | 73
-rw-r--r--  tools/tryselect/test/test_auto.py | 31
-rw-r--r--  tools/tryselect/test/test_auto.t | 61
-rw-r--r--  tools/tryselect/test/test_chooser.py | 77
-rw-r--r--  tools/tryselect/test/test_empty.t | 47
-rw-r--r--  tools/tryselect/test/test_fuzzy.py | 54
-rw-r--r--  tools/tryselect/test/test_fuzzy.t | 201
-rw-r--r--  tools/tryselect/test/test_message.t | 63
-rw-r--r--  tools/tryselect/test/test_mozharness_integration.py | 143
-rw-r--r--  tools/tryselect/test/test_perf.py | 1245
-rw-r--r--  tools/tryselect/test/test_perfcomparators.py | 150
-rw-r--r--  tools/tryselect/test/test_preset.t | 360
-rw-r--r--  tools/tryselect/test/test_presets.py | 58
-rw-r--r--  tools/tryselect/test/test_release.py | 43
-rw-r--r--  tools/tryselect/test/test_scriptworker.py | 39
-rw-r--r--  tools/tryselect/test/test_task_configs.py | 144
-rw-r--r--  tools/tryselect/test/test_tasks.py | 93
21 files changed, 3106 insertions, 0 deletions
diff --git a/tools/tryselect/test/conftest.py b/tools/tryselect/test/conftest.py
new file mode 100644
index 0000000000..7ffffc4b9b
--- /dev/null
+++ b/tools/tryselect/test/conftest.py
@@ -0,0 +1,101 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+from unittest.mock import MagicMock
+
+import pytest
+import yaml
+from moztest.resolve import TestResolver
+from taskgraph.graph import Graph
+from taskgraph.task import Task
+from taskgraph.taskgraph import TaskGraph
+from tryselect import push
+
+
+@pytest.fixture
+def tg(request):
+ if not hasattr(request.module, "TASKS"):
+ pytest.fail(
+ "'tg' fixture used from a module that didn't define the TASKS variable"
+ )
+
+ tasks = request.module.TASKS
+ for task in tasks:
+ task.setdefault("task", {})
+ task["task"].setdefault("tags", {})
+
+ tasks = {t["label"]: Task(**t) for t in tasks}
+ return TaskGraph(tasks, Graph(tasks.keys(), set()))
+
+
+@pytest.fixture
+def patch_resolver(monkeypatch):
+ def inner(suites, tests):
+ def fake_test_metadata(*args, **kwargs):
+ return suites, tests
+
+ monkeypatch.setattr(TestResolver, "resolve_metadata", fake_test_metadata)
+
+ return inner
+
+
+@pytest.fixture(autouse=True)
+def patch_vcs(monkeypatch):
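+    # Replace the real vcs object with a mock that keeps only its repository
+    # path, so tests never push to or otherwise touch the real checkout.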
+ attrs = {
+ "path": push.vcs.path,
+ }
+ mock = MagicMock()
+ mock.configure_mock(**attrs)
+ monkeypatch.setattr(push, "vcs", mock)
+
+
+@pytest.fixture(scope="session")
+def run_mach():
+ import mach_initialize
+ from mach.config import ConfigSettings
+ from tryselect.tasks import build
+
+ mach = mach_initialize.initialize(build.topsrcdir)
+
+ def inner(args):
+ mach.settings = ConfigSettings()
+ return mach.run(args)
+
+ return inner
+
+
+def pytest_generate_tests(metafunc):
+ if all(
+ fixture in metafunc.fixturenames
+ for fixture in ("task_config", "args", "expected")
+ ):
+
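+        # The requesting test module is expected to define TASK_CONFIG_TESTS,
+        # a dict mapping a task config name to (args, expected) pairs, e.g.
+        # (illustrative shape only):
+        #   TASK_CONFIG_TESTS = {"artifact": [(["--artifact"], ...)]}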
+ def load_tests():
+ for task_config, tests in metafunc.module.TASK_CONFIG_TESTS.items():
+ for args, expected in tests:
+ yield (task_config, args, expected)
+
+ tests = list(load_tests())
+ ids = ["{} {}".format(t[0], " ".join(t[1])).strip() for t in tests]
+ metafunc.parametrize("task_config,args,expected", tests, ids=ids)
+
+ elif all(
+ fixture in metafunc.fixturenames for fixture in ("shared_name", "shared_preset")
+ ):
+ preset_path = os.path.join(
+ push.build.topsrcdir, "tools", "tryselect", "try_presets.yml"
+ )
+ with open(preset_path, "r") as fh:
+ presets = list(yaml.safe_load(fh).items())
+
+ ids = [p[0] for p in presets]
+
+ # Mark fuzzy presets on Windows xfail due to fzf not being installed.
+ if os.name == "nt":
+ for i, preset in enumerate(presets):
+ if preset[1]["selector"] == "fuzzy":
+ presets[i] = pytest.param(*preset, marks=pytest.mark.xfail)
+
+ metafunc.parametrize("shared_name,shared_preset", presets, ids=ids)
diff --git a/tools/tryselect/test/cram.ini b/tools/tryselect/test/cram.ini
new file mode 100644
index 0000000000..b1f0d8dfec
--- /dev/null
+++ b/tools/tryselect/test/cram.ini
@@ -0,0 +1,5 @@
+[test_auto.t]
+[test_empty.t]
+[test_fuzzy.t]
+[test_message.t]
+[test_preset.t]
diff --git a/tools/tryselect/test/python.ini b/tools/tryselect/test/python.ini
new file mode 100644
index 0000000000..4b75ea6ab5
--- /dev/null
+++ b/tools/tryselect/test/python.ini
@@ -0,0 +1,19 @@
+[DEFAULT]
+subsuite=try
+
+[test_again.py]
+[test_auto.py]
+[test_chooser.py]
+requirements = tools/tryselect/selectors/chooser/requirements.txt
+[test_fuzzy.py]
+[test_mozharness_integration.py]
+[test_perf.py]
+[test_perfcomparators.py]
+[test_presets.py]
+# Modifies "task_duration_history.json" in .mozbuild. Since other tests depend on this file, this test
+# shouldn't be run in parallel with those other tests.
+sequential = true
+[test_release.py]
+[test_scriptworker.py]
+[test_tasks.py]
+[test_task_configs.py]
diff --git a/tools/tryselect/test/setup.sh b/tools/tryselect/test/setup.sh
new file mode 100644
index 0000000000..d9f3b62a2e
--- /dev/null
+++ b/tools/tryselect/test/setup.sh
@@ -0,0 +1,99 @@
+export topsrcdir=$TESTDIR/../../../
+export MOZBUILD_STATE_PATH=$TMP/mozbuild
+export MACH_TRY_PRESET_PATHS=$MOZBUILD_STATE_PATH/try_presets.yml
+
+# This helps to find fzf when running these tests locally, since normally fzf
+# would be found via MOZBUILD_STATE_PATH pointing to $HOME/.mozbuild
+export PATH="$PATH:$HOME/.mozbuild/fzf/bin"
+
+export MACHRC=$TMP/machrc
+cat > $MACHRC << EOF
+[try]
+default=syntax
+EOF
+
+cmd="$topsrcdir/mach python -c 'from mach.util import get_state_dir; print(get_state_dir(specific_to_topsrcdir=True))'"
+# First run local state dir generation so it doesn't affect test output.
+eval $cmd > /dev/null 2>&1
+# Now run it again to get the actual directory.
+cachedir=$(eval $cmd)/cache/taskgraph
+mkdir -p $cachedir
+
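+# Seed the taskgraph cache with small hand-written target/full task sets so
+# the cram tests below select from a tiny, predictable set of tasks instead
+# of generating a real task graph.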
+cat > $cachedir/target_task_set << EOF
+{
+ "test/foo-opt": {
+ "kind": "test",
+ "label": "test/foo-opt",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ },
+ "test/foo-debug": {
+ "kind": "test",
+ "label": "test/foo-debug",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ },
+ "build-baz": {
+ "kind": "build",
+ "label": "build-baz",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ }
+}
+EOF
+
+cat > $cachedir/full_task_set << EOF
+{
+ "test/foo-opt": {
+ "kind": "test",
+ "label": "test/foo-opt",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ },
+ "test/foo-debug": {
+ "kind": "test",
+ "label": "test/foo-debug",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ },
+ "test/bar-opt": {
+ "kind": "test",
+ "label": "test/bar-opt",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ },
+ "test/bar-debug": {
+ "kind": "test",
+ "label": "test/bar-debug",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ },
+ "build-baz": {
+ "kind": "build",
+ "label": "build-baz",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ }
+}
+EOF
+
+# set mtime to the future so we don't re-generate tasks
+find $cachedir -type f -exec touch -d "next day" {} +
+
+export testargs="--no-push --no-artifact"
diff --git a/tools/tryselect/test/test_again.py b/tools/tryselect/test/test_again.py
new file mode 100644
index 0000000000..0e5d9d6b6d
--- /dev/null
+++ b/tools/tryselect/test/test_again.py
@@ -0,0 +1,73 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+import mozunit
+import pytest
+from six.moves import reload_module as reload
+from tryselect import push
+from tryselect.selectors import again
+
+
+@pytest.fixture(autouse=True)
+def patch_history_path(tmpdir, monkeypatch):
+ monkeypatch.setattr(push, "history_path", tmpdir.join("history.json").strpath)
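+    # Reload the selector so it picks up the patched history path instead of
+    # whatever it captured when it was first imported.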
+ reload(again)
+
+
+def test_try_again(monkeypatch):
+ push.push_to_try(
+ "fuzzy",
+ "Fuzzy message",
+ try_task_config=push.generate_try_task_config(
+ "fuzzy",
+ ["foo", "bar"],
+ {"use-artifact-builds": True},
+ ),
+ )
+
+ assert os.path.isfile(push.history_path)
+ with open(push.history_path, "r") as fh:
+ assert len(fh.readlines()) == 1
+
+ def fake_push_to_try(*args, **kwargs):
+ return args, kwargs
+
+ monkeypatch.setattr(push, "push_to_try", fake_push_to_try)
+ reload(again)
+
+ args, kwargs = again.run()
+
+ assert args[0] == "again"
+ assert args[1] == "Fuzzy message"
+
+ try_task_config = kwargs.pop("try_task_config")
+ assert sorted(try_task_config.get("tasks")) == sorted(["foo", "bar"])
+ assert try_task_config.get("env") == {"TRY_SELECTOR": "fuzzy"}
+ assert try_task_config.get("use-artifact-builds")
+
+ with open(push.history_path, "r") as fh:
+ assert len(fh.readlines()) == 1
+
+
+def test_no_push_does_not_generate_history(tmpdir):
+ assert not os.path.isfile(push.history_path)
+
+ push.push_to_try(
+ "fuzzy",
+ "Fuzzy",
+ try_task_config=push.generate_try_task_config(
+ "fuzzy",
+ ["foo", "bar"],
+ {"use-artifact-builds": True},
+ ),
+ dry_run=True,
+ )
+ assert not os.path.isfile(push.history_path)
+ assert again.run() == 1
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_auto.py b/tools/tryselect/test/test_auto.py
new file mode 100644
index 0000000000..63f0fe6bd7
--- /dev/null
+++ b/tools/tryselect/test/test_auto.py
@@ -0,0 +1,31 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import mozunit
+import pytest
+from tryselect.selectors.auto import AutoParser
+
+
+def test_strategy_validation():
+ parser = AutoParser()
+ args = parser.parse_args(["--strategy", "relevant_tests"])
+ assert args.strategy == "gecko_taskgraph.optimize:tryselect.relevant_tests"
+
+ args = parser.parse_args(
+ ["--strategy", "gecko_taskgraph.optimize:experimental.relevant_tests"]
+ )
+ assert args.strategy == "gecko_taskgraph.optimize:experimental.relevant_tests"
+
+ with pytest.raises(SystemExit):
+ parser.parse_args(["--strategy", "gecko_taskgraph.optimize:tryselect"])
+
+ with pytest.raises(SystemExit):
+ parser.parse_args(["--strategy", "foo"])
+
+ with pytest.raises(SystemExit):
+ parser.parse_args(["--strategy", "foo:bar"])
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_auto.t b/tools/tryselect/test/test_auto.t
new file mode 100644
index 0000000000..c3fe797949
--- /dev/null
+++ b/tools/tryselect/test/test_auto.t
@@ -0,0 +1,61 @@
+
+ $ . $TESTDIR/setup.sh
+ $ cd $topsrcdir
+
+Test auto selector
+
+ $ ./mach try auto $testargs
+ Commit message:
+ Tasks automatically selected.
+
+ Pushed via `mach try auto`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_strategies": "gecko_taskgraph.optimize:tryselect.bugbug_reduced_manifests_config_selection_medium",
+ "optimize_target_tasks": true,
+ "target_tasks_method": "try_auto",
+ "test_manifest_loader": "bugbug",
+ "try_mode": "try_auto",
+ "try_task_config": {}
+ },
+ "version": 2
+ }
+
+
+ $ ./mach try auto $testargs --closed-tree
+ Commit message:
+ Tasks automatically selected. ON A CLOSED TREE
+
+ Pushed via `mach try auto`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_strategies": "gecko_taskgraph.optimize:tryselect.bugbug_reduced_manifests_config_selection_medium",
+ "optimize_target_tasks": true,
+ "target_tasks_method": "try_auto",
+ "test_manifest_loader": "bugbug",
+ "try_mode": "try_auto",
+ "try_task_config": {}
+ },
+ "version": 2
+ }
+
+ $ ./mach try auto $testargs --closed-tree -m "foo {msg} bar"
+ Commit message:
+ foo Tasks automatically selected. bar ON A CLOSED TREE
+
+ Pushed via `mach try auto`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_strategies": "gecko_taskgraph.optimize:tryselect.bugbug_reduced_manifests_config_selection_medium",
+ "optimize_target_tasks": true,
+ "target_tasks_method": "try_auto",
+ "test_manifest_loader": "bugbug",
+ "try_mode": "try_auto",
+ "try_task_config": {}
+ },
+ "version": 2
+ }
+
diff --git a/tools/tryselect/test/test_chooser.py b/tools/tryselect/test/test_chooser.py
new file mode 100644
index 0000000000..885ed41a78
--- /dev/null
+++ b/tools/tryselect/test/test_chooser.py
@@ -0,0 +1,77 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import mozunit
+import pytest
+from tryselect.selectors.chooser.app import create_application
+
+TASKS = [
+ {
+ "kind": "build",
+ "label": "build-windows",
+ "attributes": {
+ "build_platform": "windows",
+ },
+ },
+ {
+ "kind": "test",
+ "label": "test-windows-mochitest-e10s",
+ "attributes": {
+ "unittest_suite": "mochitest-browser-chrome",
+ "mochitest_try_name": "mochitest-browser-chrome",
+ },
+ },
+]
+
+
+@pytest.fixture
+def app(tg):
+ app = create_application(tg)
+ app.config["TESTING"] = True
+
+ ctx = app.app_context()
+ ctx.push()
+ yield app
+ ctx.pop()
+
+
+def test_try_chooser(app):
+ client = app.test_client()
+
+ response = client.get("/")
+ assert response.status_code == 200
+
+ expected_output = [
+ b"""<title>Try Chooser Enhanced</title>""",
+ b"""<input class="filter" type="checkbox" id=windows name="build" value='{"build_platform": ["windows"]}' onchange="console.log('checkbox onchange triggered');apply();">""", # noqa
+ b"""<input class="filter" type="checkbox" id=mochitest-browser-chrome name="test" value='{"unittest_suite": ["mochitest-browser-chrome"]}' onchange="console.log('checkbox onchange triggered');apply();">""", # noqa
+ ]
+
+ for expected in expected_output:
+ assert expected in response.data
+
+ response = client.post("/", data={"action": "Cancel"})
+ assert response.status_code == 200
+ assert b"You may now close this page" in response.data
+ assert app.tasks == []
+
+ response = client.post("/", data={"action": "Push", "selected-tasks": ""})
+ assert response.status_code == 200
+ assert b"You may now close this page" in response.data
+ assert app.tasks == []
+
+ response = client.post(
+ "/",
+ data={
+ "action": "Push",
+ "selected-tasks": "build-windows\ntest-windows-mochitest-e10s",
+ },
+ )
+ assert response.status_code == 200
+ assert b"You may now close this page" in response.data
+ assert set(app.tasks) == set(["build-windows", "test-windows-mochitest-e10s"])
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_empty.t b/tools/tryselect/test/test_empty.t
new file mode 100644
index 0000000000..f6dea366c4
--- /dev/null
+++ b/tools/tryselect/test/test_empty.t
@@ -0,0 +1,47 @@
+ $ . $TESTDIR/setup.sh
+ $ cd $topsrcdir
+
+Test empty selector
+
+ $ ./mach try empty --no-push
+ Commit message:
+ No try selector specified, use "Add New Jobs" to select tasks.
+
+ Pushed via `mach try empty`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "empty"
+ },
+ "tasks": [],
+ "version": 1
+ }
+
+ $ ./mach try empty --no-push --closed-tree
+ Commit message:
+ No try selector specified, use "Add New Jobs" to select tasks. ON A CLOSED TREE
+
+ Pushed via `mach try empty`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "empty"
+ },
+ "tasks": [],
+ "version": 1
+ }
+
+ $ ./mach try empty --no-push --closed-tree -m "foo {msg} bar"
+ Commit message:
+ foo No try selector specified, use "Add New Jobs" to select tasks. bar ON A CLOSED TREE
+
+ Pushed via `mach try empty`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "empty"
+ },
+ "tasks": [],
+ "version": 1
+ }
+
diff --git a/tools/tryselect/test/test_fuzzy.py b/tools/tryselect/test/test_fuzzy.py
new file mode 100644
index 0000000000..330ba99825
--- /dev/null
+++ b/tools/tryselect/test/test_fuzzy.py
@@ -0,0 +1,54 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+import mozunit
+import pytest
+
+
+@pytest.mark.skipif(os.name == "nt", reason="fzf not installed on host")
+def test_query_paths(run_mach, capfd):
+ cmd = [
+ "try",
+ "fuzzy",
+ "--no-push",
+ "-q",
+ "^test-linux '64-qr/debug-mochitest-chrome-1proc-",
+ "caps/tests/mochitest/test_addonMayLoad.html",
+ ]
+ assert run_mach(cmd) == 0
+
+ output = capfd.readouterr().out
+ print(output)
+
+    # If more than one task shows up here, something went wrong with the
+    # path filtering.
+ expected = """
+ "tasks": [
+ "test-linux1804-64-qr/debug-mochitest-chrome-1proc-1"
+ ]""".lstrip()
+
+ assert expected in output
+
+
+@pytest.mark.skipif(os.name == "nt", reason="fzf not installed on host")
+def test_query(run_mach, capfd):
+ cmd = ["try", "fuzzy", "--no-push", "-q", "'source-test-python-taskgraph-tests-py3"]
+ assert run_mach(cmd) == 0
+
+ output = capfd.readouterr().out
+ print(output)
+
+    # Should only ever match one task exactly.
+ expected = """
+ "tasks": [
+ "source-test-python-taskgraph-tests-py3"
+ ]""".lstrip()
+
+ assert expected in output
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_fuzzy.t b/tools/tryselect/test/test_fuzzy.t
new file mode 100644
index 0000000000..5f53c07c97
--- /dev/null
+++ b/tools/tryselect/test/test_fuzzy.t
@@ -0,0 +1,201 @@
+ $ . $TESTDIR/setup.sh
+ $ cd $topsrcdir
+
+Test fuzzy selector
+
+ $ ./mach try fuzzy $testargs -q "'foo"
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+ $ ./mach try fuzzy $testargs -q "'bar"
+ no tasks selected
+ $ ./mach try fuzzy $testargs --full -q "'bar"
+ Commit message:
+ Fuzzy query='bar
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/bar-debug",
+ "test/bar-opt"
+ ],
+ "version": 1
+ }
+
+
+Test multiple selectors
+
+ $ ./mach try fuzzy $testargs --full -q "'foo" -q "'bar"
+ Commit message:
+ Fuzzy query='foo&query='bar
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/bar-debug",
+ "test/bar-opt",
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+
+Test query intersection
+
+ $ ./mach try fuzzy $testargs --and -q "'foo" -q "'opt"
+ Commit message:
+ Fuzzy query='foo&query='opt
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+
+Test intersection with preset containing multiple queries
+
+ $ ./mach try fuzzy --save foo -q "'test" -q "'opt"
+ preset saved, run with: --preset=foo
+
+ $ ./mach try fuzzy $testargs --preset foo -xq "'test"
+ Commit message:
+ Fuzzy query='test&query='opt&query='test
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+ $ ./mach try $testargs --preset foo -xq "'test"
+ Commit message:
+ Fuzzy query='test&query='opt&query='test
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+
+Test exact match
+
+ $ ./mach try fuzzy $testargs --full -q "testfoo | 'testbar"
+ Commit message:
+ Fuzzy query=testfoo | 'testbar
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+ $ ./mach try fuzzy $testargs --full --exact -q "testfoo | 'testbar"
+ Commit message:
+ Fuzzy query=testfoo | 'testbar
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/bar-debug",
+ "test/bar-opt"
+ ],
+ "version": 1
+ }
+
+
+
+Test task config
+
+ $ ./mach try fuzzy --no-push --artifact -q "'foo"
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "disable-pgo": true,
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "use-artifact-builds": true,
+ "version": 1
+ }
+
+ $ ./mach try fuzzy $testargs --env FOO=1 --env BAR=baz -q "'foo"
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "BAR": "baz",
+ "FOO": "1",
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
diff --git a/tools/tryselect/test/test_message.t b/tools/tryselect/test/test_message.t
new file mode 100644
index 0000000000..25a104833c
--- /dev/null
+++ b/tools/tryselect/test/test_message.t
@@ -0,0 +1,63 @@
+ $ . $TESTDIR/setup.sh
+ $ cd $topsrcdir
+
+Test custom commit messages with fuzzy selector
+
+ $ ./mach try fuzzy $testargs -q foo --message "Foobar"
+ Commit message:
+ Foobar
+
+ Fuzzy query=foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+ $ ./mach try fuzzy $testargs -q foo -m "Foobar: {msg}"
+ Commit message:
+ Foobar: Fuzzy query=foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+ $ unset EDITOR
+ $ ./mach try fuzzy $testargs -q foo -m > /dev/null 2>&1
+ [2]
+
+
+Test custom commit messages with syntax selector
+
+ $ ./mach try syntax $testargs -p linux -u mochitests --message "Foobar"
+ Commit message:
+ Foobar
+
+ try: -b do -p linux -u mochitests
+
+ Pushed via `mach try syntax`
+ $ ./mach try syntax $testargs -p linux -u mochitests -m "Foobar: {msg}"
+ Commit message:
+ Foobar: try: -b do -p linux -u mochitests
+
+ Pushed via `mach try syntax`
+ $ unset EDITOR
+ $ ./mach try syntax $testargs -p linux -u mochitests -m > /dev/null 2>&1
+ [2]
diff --git a/tools/tryselect/test/test_mozharness_integration.py b/tools/tryselect/test/test_mozharness_integration.py
new file mode 100644
index 0000000000..984f8de25b
--- /dev/null
+++ b/tools/tryselect/test/test_mozharness_integration.py
@@ -0,0 +1,143 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import imp
+import json
+import os
+
+import mozunit
+import pytest
+from tryselect.tasks import build, resolve_tests_by_suite
+
+MOZHARNESS_SCRIPTS = {
+ "android_emulator_unittest": {
+ "class_name": "AndroidEmulatorTest",
+ "configs": [
+ "android/android_common.py",
+ ],
+ "xfail": [
+ "cppunittest",
+ "crashtest-qr",
+ "gtest",
+ "geckoview-junit",
+ "jittest",
+ "jsreftest",
+ "reftest-qr",
+ ],
+ },
+ "desktop_unittest": {
+ "class_name": "DesktopUnittest",
+ "configs": [
+ "unittests/linux_unittest.py",
+ "unittests/mac_unittest.py",
+ "unittests/win_unittest.py",
+ ],
+ "xfail": [
+ "cppunittest",
+ "gtest",
+ "jittest",
+ "jittest-chunked",
+ "jittest1",
+ "jittest2",
+ "jsreftest",
+ "mochitest-valgrind-plain",
+ "reftest-no-accel",
+ "reftest-snapshot",
+ "xpcshell-msix",
+ ],
+ },
+}
+"""A suite being listed in a script's `xfail` list means it won't work
+properly with MOZHARNESS_TEST_PATHS (the mechanism |mach try fuzzy <path>|
+uses).
+"""
+
+
+def get_mozharness_test_paths(name):
+ scriptdir = os.path.join(build.topsrcdir, "testing", "mozharness", "scripts")
+
+ files = imp.find_module(name, [scriptdir])
+ mod = imp.load_module("scripts.{}".format(name), *files)
+
+ class_name = MOZHARNESS_SCRIPTS[name]["class_name"]
+ cls = getattr(mod, class_name)
+ return cls(require_config_file=False)._get_mozharness_test_paths
+
+
+@pytest.fixture(scope="module")
+def all_suites():
+ from moztest.resolve import _test_flavors, _test_subsuites
+
+ all_suites = []
+ for flavor in _test_flavors:
+ all_suites.append({"flavor": flavor, "srcdir_relpath": "test"})
+
+ for flavor, subsuite in _test_subsuites:
+ all_suites.append(
+ {"flavor": flavor, "subsuite": subsuite, "srcdir_relpath": "test"}
+ )
+
+ return all_suites
+
+
+def generate_suites_from_config(path):
+ configdir = os.path.join(build.topsrcdir, "testing", "mozharness", "configs")
+
+ parent, name = os.path.split(path)
+ name = os.path.splitext(name)[0]
+
+ files = imp.find_module("{}".format(name), [os.path.join(configdir, parent)])
+ mod = imp.load_module("config.{}".format(name), *files)
+ config = mod.config
+
+ for category in sorted(config["suite_definitions"]):
+ key = "all_{}_suites".format(category)
+ if key not in config:
+ yield category,
+ continue
+
+ for suite in sorted(config["all_{}_suites".format(category)]):
+ yield category, suite
+
+
+def generate_suites():
+ for name, script in MOZHARNESS_SCRIPTS.items():
+ seen = set()
+
+ for path in script["configs"]:
+ for suite in generate_suites_from_config(path):
+ if suite in seen:
+ continue
+ seen.add(suite)
+
+ item = (name, suite)
+
+ if suite[-1] in script["xfail"]:
+ item = pytest.param(item, marks=pytest.mark.xfail)
+
+ yield item
+
+
+def idfn(item):
+ name, suite = item
+ return "{}/{}".format(name, suite[-1])
+
+
+@pytest.mark.parametrize("item", generate_suites(), ids=idfn)
+def test_suites(item, patch_resolver, all_suites):
+ """An integration test to make sure the suites returned by
+ `tasks.resolve_tests_by_suite` match up with the names defined in
+ mozharness.
+ """
+ patch_resolver([], all_suites)
+ suites = resolve_tests_by_suite(["test"])
+ os.environ["MOZHARNESS_TEST_PATHS"] = json.dumps(suites)
+
+ name, suite = item
+ func = get_mozharness_test_paths(name)
+ assert func(*suite)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_perf.py b/tools/tryselect/test/test_perf.py
new file mode 100644
index 0000000000..28b8d56acb
--- /dev/null
+++ b/tools/tryselect/test/test_perf.py
@@ -0,0 +1,1245 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import pathlib
+import shutil
+import tempfile
+from unittest import mock
+
+import mozunit
+import pytest
+from tryselect.selectors.perf import (
+ MAX_PERF_TASKS,
+ Apps,
+ InvalidCategoryException,
+ InvalidRegressionDetectorQuery,
+ PerfParser,
+ Platforms,
+ Suites,
+ Variants,
+ run,
+)
+from tryselect.selectors.perfselector.classification import (
+ check_for_live_sites,
+ check_for_profile,
+)
+
+TASKS = [
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-motionmark-animometer",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-wasm-firefox-wasm-godot-optimizing",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-webaudio",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-speedometer",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-wasm-firefox-wasm-misc",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-jetstream2",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-ares6",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-wasm-firefox-wasm-misc-optimizing",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-sunspider",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-matrix-react-bench",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-wasm-firefox-wasm-godot-baseline",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-twitch-animation",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-assorted-dom",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-stylebench",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-wasm-firefox-wasm-misc-baseline",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-motionmark-htmlsuite",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-unity-webgl",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-wasm-firefox-wasm-godot",
+]
+
+# TEST_VARIANTS and TEST_CATEGORIES are used to force a particular set of
+# categories to show up in testing. Otherwise, every time someone adds a
+# category or a variant, we'd need to redo all the category counts. The
+# platforms and apps are not forced because they change infrequently.
+TEST_VARIANTS = {
+ Variants.NO_FISSION.value: {
+ "query": "'nofis",
+ "negation": "!nofis",
+ "platforms": [Platforms.ANDROID.value],
+ "apps": [Apps.FENIX.value, Apps.GECKOVIEW.value],
+ },
+ Variants.BYTECODE_CACHED.value: {
+ "query": "'bytecode",
+ "negation": "!bytecode",
+ "platforms": [Platforms.DESKTOP.value],
+ "apps": [Apps.FIREFOX.value],
+ },
+ Variants.LIVE_SITES.value: {
+ "query": "'live",
+ "negation": "!live",
+ "restriction": check_for_live_sites,
+ "platforms": [Platforms.DESKTOP.value, Platforms.ANDROID.value],
+ "apps": list(PerfParser.apps.keys()),
+ },
+ Variants.PROFILING.value: {
+ "query": "'profil",
+ "negation": "!profil",
+ "restriction": check_for_profile,
+ "platforms": [Platforms.DESKTOP.value, Platforms.ANDROID.value],
+ "apps": [Apps.FIREFOX.value, Apps.GECKOVIEW.value, Apps.FENIX.value],
+ },
+ Variants.SWR.value: {
+ "query": "'swr",
+ "negation": "!swr",
+ "platforms": [Platforms.DESKTOP.value],
+ "apps": [Apps.FIREFOX.value],
+ },
+}
+
+TEST_CATEGORIES = {
+ "Pageload": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'tp6"],
+ },
+ "suites": [Suites.RAPTOR.value],
+ "tasks": [],
+ },
+ "Pageload (essential)": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'tp6 'essential"],
+ },
+ "variant-restrictions": {Suites.RAPTOR.value: [Variants.NO_FISSION.value]},
+ "suites": [Suites.RAPTOR.value],
+ "tasks": [],
+ },
+ "Responsiveness": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'responsive"],
+ },
+ "suites": [Suites.RAPTOR.value],
+ "variant-restrictions": {Suites.RAPTOR.value: []},
+ "tasks": [],
+ },
+ "Benchmarks": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'benchmark"],
+ },
+ "suites": [Suites.RAPTOR.value],
+ "variant-restrictions": {Suites.RAPTOR.value: []},
+ "tasks": [],
+ },
+ "DAMP (Devtools)": {
+ "query": {
+ Suites.TALOS.value: ["'talos 'damp"],
+ },
+ "suites": [Suites.TALOS.value],
+ "tasks": [],
+ },
+ "Talos PerfTests": {
+ "query": {
+ Suites.TALOS.value: ["'talos"],
+ },
+ "suites": [Suites.TALOS.value],
+ "tasks": [],
+ },
+ "Resource Usage": {
+ "query": {
+ Suites.TALOS.value: ["'talos 'xperf | 'tp5"],
+ Suites.RAPTOR.value: ["'power 'osx"],
+ Suites.AWSY.value: ["'awsy"],
+ },
+ "suites": [Suites.TALOS.value, Suites.RAPTOR.value, Suites.AWSY.value],
+ "platform-restrictions": [Platforms.DESKTOP.value],
+ "variant-restrictions": {
+ Suites.RAPTOR.value: [],
+ Suites.TALOS.value: [],
+ },
+ "app-restrictions": {
+ Suites.RAPTOR.value: [Apps.FIREFOX.value],
+ Suites.TALOS.value: [Apps.FIREFOX.value],
+ },
+ "tasks": [],
+ },
+ "Graphics, & Media Playback": {
+ "query": {
+ # XXX This might not be an exhaustive list for talos atm
+ Suites.TALOS.value: ["'talos 'svgr | 'bcv | 'webgl"],
+ Suites.RAPTOR.value: ["'browsertime 'youtube-playback"],
+ },
+ "suites": [Suites.TALOS.value, Suites.RAPTOR.value],
+ "variant-restrictions": {Suites.RAPTOR.value: [Variants.NO_FISSION.value]},
+ "tasks": [],
+ },
+}
+
+
+@pytest.mark.parametrize(
+ "category_options, expected_counts, unique_categories, missing",
+ [
+        # The default should show the premade live category, but no chrome or android.
+        # The benchmark desktop category should be visible in all configurations
+        # except when there are requested apps/variants/platforms.
+ (
+ {},
+ 58,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!live",
+ "!profil",
+ "!chrom",
+ "!safari",
+ "!custom-car",
+ ]
+ },
+ "Pageload macosx": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'osx 'shippable",
+ "!bytecode",
+ "!live",
+ "!profil",
+ "!chrom",
+ "!safari",
+ "!custom-car",
+ ]
+ },
+ "Resource Usage desktop": {
+ "awsy": ["'awsy", "!android 'shippable !-32 !clang"],
+ "raptor": [
+ "'power 'osx",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!live",
+ "!profil",
+ "!chrom",
+ "!safari",
+ "!custom-car",
+ ],
+ "talos": [
+ "'talos 'xperf | 'tp5",
+ "!android 'shippable !-32 !clang",
+ "!profil",
+ "!swr",
+ ],
+ },
+ },
+ [
+ "Responsiveness android-p2 geckoview",
+ "Benchmarks desktop chromium",
+ ],
+ ), # Default settings
+ (
+ {"live_sites": True},
+ 66,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!profil",
+ "!chrom",
+ "!safari",
+ "!custom-car",
+ ]
+ },
+ "Pageload macosx": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'osx 'shippable",
+ "!bytecode",
+ "!profil",
+ "!chrom",
+ "!safari",
+ "!custom-car",
+ ]
+ },
+ "Pageload macosx live-sites": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'osx 'shippable",
+ "'live",
+ "!bytecode",
+ "!profil",
+ "!chrom",
+ "!safari",
+ "!custom-car",
+ ],
+ },
+ },
+ [
+ "Responsiveness android-p2 geckoview",
+ "Benchmarks desktop chromium",
+ "Benchmarks desktop firefox profiling",
+ "Talos desktop live-sites",
+ "Talos desktop profiling+swr",
+ "Benchmarks desktop firefox live-sites+profiling"
+ "Benchmarks desktop firefox live-sites",
+ ],
+ ),
+ (
+ {"live_sites": True, "safari": True},
+ 72,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!profil",
+ "!chrom",
+ "!custom-car",
+ ]
+ },
+ "Pageload macosx safari": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'osx 'shippable",
+ "'safari",
+ "!bytecode",
+ "!profil",
+ ]
+ },
+ "Pageload macosx safari live-sites": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'osx 'shippable",
+ "'safari",
+ "'live",
+ "!bytecode",
+ "!profil",
+ ],
+ },
+ },
+ [
+ "Pageload linux safari",
+ "Pageload desktop safari",
+ ],
+ ),
+ (
+ {"live_sites": True, "chrome": True},
+ 114,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!profil",
+ "!safari",
+ "!custom-car",
+ ]
+ },
+ "Pageload macosx live-sites": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'osx 'shippable",
+ "'live",
+ "!bytecode",
+ "!profil",
+ "!safari",
+ "!custom-car",
+ ],
+ },
+ "Benchmarks desktop chromium": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "'chromium",
+ "!bytecode",
+ "!profil",
+ ],
+ },
+ },
+ [
+ "Responsiveness android-p2 geckoview",
+ "Firefox Pageload linux chrome",
+ "Talos PerfTests desktop swr",
+ ],
+ ),
+ (
+ {"android": True},
+ 88,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!live",
+ "!profil",
+ "!chrom",
+ "!safari",
+ "!custom-car",
+ ],
+ },
+ "Responsiveness android-a51 geckoview": {
+ "raptor": [
+ "'browsertime 'responsive",
+ "'android 'a51 'shippable 'aarch64",
+ "'geckoview",
+ "!nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ },
+ [
+ "Responsiveness android-a51 chrome-m",
+ "Firefox Pageload android",
+ ],
+ ),
+ (
+ {"android": True, "chrome": True},
+ 138,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!live",
+ "!profil",
+ "!safari",
+ "!custom-car",
+ ],
+ },
+ "Responsiveness android-a51 chrome-m": {
+ "raptor": [
+ "'browsertime 'responsive",
+ "'android 'a51 'shippable 'aarch64",
+ "'chrome-m",
+ "!nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ },
+ ["Responsiveness android-p2 chrome-m", "Resource Usage android"],
+ ),
+ (
+ {"android": True, "chrome": True, "profile": True},
+ 176,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!live",
+ "!safari",
+ "!custom-car",
+ ]
+ },
+ "Talos PerfTests desktop profiling": {
+ "talos": [
+ "'talos",
+ "!android 'shippable !-32 !clang",
+ "'profil",
+ "!swr",
+ ]
+ },
+ },
+ [
+ "Resource Usage desktop profiling",
+ "DAMP (Devtools) desktop chrome",
+ "Resource Usage android",
+ "Resource Usage windows chromium",
+ ],
+ ),
+        # Show all available windows tests; no other platform should exist,
+        # including the desktop category
+ (
+ {"requested_platforms": ["windows"]},
+ 14,
+ {
+ "Benchmarks windows firefox": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!-32 'windows 'shippable",
+ "!chrom !geckoview !fenix !safari !custom-car",
+ "!bytecode",
+ "!live",
+ "!profil",
+ ]
+ },
+ },
+ [
+ "Resource Usage desktop",
+ "Benchmarks desktop",
+ "Benchmarks linux firefox bytecode-cached+profiling",
+ ],
+ ),
+ # Can't have fenix on the windows platform
+ (
+ {"requested_platforms": ["windows"], "requested_apps": ["fenix"]},
+ 0,
+ {},
+ ["Benchmarks desktop"],
+ ),
+ # Android flag also needs to be supplied
+ (
+ {"requested_platforms": ["android"], "requested_apps": ["fenix"]},
+ 0,
+ {},
+ ["Benchmarks desktop"],
+ ),
+ # There should be no global categories available, only fenix
+ (
+ {
+ "requested_platforms": ["android"],
+ "requested_apps": ["fenix"],
+ "android": True,
+ },
+ 10,
+ {
+ "Pageload android fenix": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "!nofis",
+ "!live",
+ "!profil",
+ ],
+ }
+ },
+ ["Benchmarks desktop", "Pageload (live) android"],
+ ),
+ # Test with multiple apps
+ (
+ {
+ "requested_platforms": ["android"],
+ "requested_apps": ["fenix", "geckoview"],
+ "android": True,
+ },
+ 15,
+ {
+ "Benchmarks android geckoview": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "'android 'a51 'shippable 'aarch64",
+ "'geckoview",
+ "!nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ "Pageload android fenix": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "!nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ },
+ [
+ "Benchmarks desktop",
+ "Pageload android no-fission",
+ "Pageload android fenix live-sites",
+ ],
+ ),
+ # Variants are inclusive, so we'll see the variant alongside the
+ # base here for fenix
+ (
+ {
+ "requested_variants": ["no-fission"],
+ "requested_apps": ["fenix"],
+ "android": True,
+ },
+ 32,
+ {
+ "Pageload android-a51 fenix": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "!live",
+ "!profil",
+ ],
+ },
+ "Pageload android-a51 fenix no-fission": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "'nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ "Pageload (essential) android fenix no-fission": {
+ "raptor": [
+ "'browsertime 'tp6 'essential",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "'nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ },
+ [
+ "Benchmarks desktop",
+ "Pageload (live) android",
+ "Pageload android-p2 fenix live-sites",
+ ],
+ ),
+ # With multiple variants, we'll see the base variant (with no combinations)
+ # for each of them
+ (
+ {
+ "requested_variants": ["no-fission", "live-sites"],
+ "requested_apps": ["fenix"],
+ "android": True,
+ },
+ 40,
+ {
+ "Pageload android-a51 fenix": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "!profil",
+ ],
+ },
+ "Pageload android-a51 fenix no-fission": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "'nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ "Pageload android-a51 fenix live-sites": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "'live",
+ "!nofis",
+ "!profil",
+ ],
+ },
+ "Pageload (essential) android fenix no-fission": {
+ "raptor": [
+ "'browsertime 'tp6 'essential",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "'nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ "Pageload android fenix no-fission+live-sites": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "'nofis",
+ "'live",
+ "!profil",
+ ],
+ },
+ },
+ [
+ "Benchmarks desktop",
+ "Pageload (live) android",
+ "Pageload android-p2 fenix live-sites",
+ "Pageload (essential) android fenix no-fission+live-sites",
+ ],
+ ),
+ # Make sure that no no-fission tasks are selected when a variant cannot
+ # run on a requested platform
+ (
+ {
+ "requested_variants": ["no-fission"],
+ "requested_platforms": ["windows"],
+ },
+ 14,
+ {
+ "Responsiveness windows firefox": {
+ "raptor": [
+ "'browsertime 'responsive",
+ "!-32 'windows 'shippable",
+ "!chrom !geckoview !fenix !safari !custom-car",
+ "!bytecode",
+ "!live",
+ "!profil",
+ ],
+ },
+ },
+ ["Benchmarks desktop", "Responsiveness windows firefox no-fisson"],
+ ),
+ # We should only see the base and the live-site variants here for windows
+ (
+ {
+ "requested_variants": ["no-fission", "live-sites"],
+ "requested_platforms": ["windows"],
+ "android": True,
+ },
+ 16,
+ {
+ "Responsiveness windows firefox": {
+ "raptor": [
+ "'browsertime 'responsive",
+ "!-32 'windows 'shippable",
+ "!chrom !geckoview !fenix !safari !custom-car",
+ "!bytecode",
+ "!profil",
+ ],
+ },
+ "Pageload windows live-sites": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "!-32 'windows 'shippable",
+ "'live",
+ "!bytecode",
+ "!profil",
+ "!chrom",
+ "!safari",
+ "!custom-car",
+ ],
+ },
+ "Graphics, & Media Playback windows": {
+ "raptor": [
+ "'browsertime 'youtube-playback",
+ "!-32 'windows 'shippable",
+ "!bytecode",
+ "!profil",
+ "!chrom",
+ "!safari",
+ "!custom-car",
+ ],
+ "talos": [
+ "'talos 'svgr | 'bcv | 'webgl",
+ "!-32 'windows 'shippable",
+ "!profil",
+ "!swr",
+ ],
+ },
+ },
+ [
+ "Benchmarks desktop",
+ "Responsiveness windows firefox no-fisson",
+ "Pageload (live) android",
+ "Talos desktop live-sites",
+ "Talos android",
+ "Graphics, & Media Playback windows live-sites",
+ "Graphics, & Media Playback android no-fission",
+ ],
+ ),
+ ],
+)
+def test_category_expansion(
+ category_options, expected_counts, unique_categories, missing
+):
+    # Set the categories and variants to expand
+ PerfParser.categories = TEST_CATEGORIES
+ PerfParser.variants = TEST_VARIANTS
+
+    # Expand the categories, then check that the unique_categories exist and
+    # that the entries listed in missing are absent
+ expanded_cats = PerfParser.get_categories(**category_options)
+
+ assert len(expanded_cats) == expected_counts
+ assert not any([expanded_cats.get(ucat, None) is not None for ucat in missing])
+ assert all(
+ [expanded_cats.get(ucat, None) is not None for ucat in unique_categories.keys()]
+ )
+
+ # Ensure that the queries are as expected
+ for cat_name, cat_query in unique_categories.items():
+ # Don't use get here because these fields should always exist
+ assert cat_query == expanded_cats[cat_name]["queries"]
+
+
+@pytest.mark.parametrize(
+ "options, call_counts, log_ind, expected_log_message",
+ [
+ (
+ {},
+ [9, 2, 2, 10, 2, 1],
+ 2,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=revision&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ (
+ {"query": "'Pageload 'linux 'firefox"},
+ [9, 2, 2, 10, 2, 1],
+ 2,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=revision&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ (
+ {"cached_revision": "cached_base_revision"},
+ [9, 1, 1, 10, 2, 0],
+ 2,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=cached_base_revision&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ (
+ {"dry_run": True},
+ [9, 1, 1, 10, 2, 0],
+ 2,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ (
+ {"show_all": True},
+ [1, 2, 2, 8, 2, 1],
+ 0,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=revision&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ (
+ {"show_all": True, "query": "'shippable !32 speedometer 'firefox"},
+ [1, 2, 2, 8, 2, 1],
+ 0,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=revision&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ (
+ {"single_run": True},
+ [9, 1, 1, 4, 2, 0],
+ 2,
+ (
+ "If you need any help, you can find us in the #perf-help Matrix channel:\n"
+ "https://matrix.to/#/#perf-help:mozilla.org\n"
+ ),
+ ),
+ (
+ {"detect_changes": True},
+ [10, 2, 2, 10, 2, 1],
+ 2,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=revision&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ ],
+)
+@pytest.mark.skipif(os.name == "nt", reason="fzf not installed on host")
+def test_full_run(options, call_counts, log_ind, expected_log_message):
+ with mock.patch("tryselect.selectors.perf.push_to_try") as ptt, mock.patch(
+ "tryselect.selectors.perf.run_fzf"
+ ) as fzf, mock.patch(
+ "tryselect.selectors.perf.get_repository_object", new=mock.MagicMock()
+ ), mock.patch(
+ "tryselect.selectors.perf.LogProcessor.revision",
+ new_callable=mock.PropertyMock,
+ return_value="revision",
+ ) as logger, mock.patch(
+ "tryselect.selectors.perf.PerfParser.check_cached_revision",
+ ) as ccr, mock.patch(
+ "tryselect.selectors.perf.PerfParser.save_revision_treeherder"
+ ) as srt, mock.patch(
+ "tryselect.selectors.perf.print",
+ ) as perf_print:
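+        # run_fzf is expected to return a (query, selected entries) pair, so
+        # queue up enough canned selections to cover every interactive prompt.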
+ fzf.side_effect = [
+ ["", ["Benchmarks linux"]],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", ["Perftest Change Detector"]],
+ ]
+ ccr.return_value = options.get("cached_revision", "")
+
+ run(**options)
+
+ assert fzf.call_count == call_counts[0]
+ assert ptt.call_count == call_counts[1]
+ assert logger.call_count == call_counts[2]
+ assert perf_print.call_count == call_counts[3]
+ assert ccr.call_count == call_counts[4]
+ assert srt.call_count == call_counts[5]
+ assert perf_print.call_args_list[log_ind][0][0] == expected_log_message
+
+
+@pytest.mark.parametrize(
+ "options, call_counts, log_ind, expected_log_message, expected_failure",
+ [
+ (
+ {"detect_changes": True},
+ [10, 0, 0, 2, 1],
+ 1,
+ (
+ "Executing raptor queries: 'browsertime 'benchmark, !clang 'linux "
+ "'shippable, !bytecode, !live, !profil, !chrom, !safari, !custom-car"
+ ),
+ InvalidRegressionDetectorQuery,
+ ),
+ ],
+)
+@pytest.mark.skipif(os.name == "nt", reason="fzf not installed on host")
+def test_change_detection_task_injection_failure(
+ options,
+ call_counts,
+ log_ind,
+ expected_log_message,
+ expected_failure,
+):
+ with mock.patch("tryselect.selectors.perf.push_to_try") as ptt, mock.patch(
+ "tryselect.selectors.perf.run_fzf"
+ ) as fzf, mock.patch(
+ "tryselect.selectors.perf.get_repository_object", new=mock.MagicMock()
+ ), mock.patch(
+ "tryselect.selectors.perf.LogProcessor.revision",
+ new_callable=mock.PropertyMock,
+ return_value="revision",
+ ) as logger, mock.patch(
+ "tryselect.selectors.perf.PerfParser.check_cached_revision"
+ ) as ccr, mock.patch(
+ "tryselect.selectors.perf.print",
+ ) as perf_print:
+ fzf.side_effect = [
+ ["", ["Benchmarks linux"]],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ]
+
+ with pytest.raises(expected_failure):
+ run(**options)
+
+ assert fzf.call_count == call_counts[0]
+ assert ptt.call_count == call_counts[1]
+ assert logger.call_count == call_counts[2]
+ assert perf_print.call_count == call_counts[3]
+ assert ccr.call_count == call_counts[4]
+ assert perf_print.call_args_list[log_ind][0][0] == expected_log_message
+
+
+@pytest.mark.parametrize(
+ "query, should_fail",
+ [
+ (
+ {
+ "query": {
+ # Raptor has all variants available so it
+ # should fail on this category
+ "raptor": ["browsertime 'live 'no-fission"],
+ }
+ },
+ True,
+ ),
+ (
+ {
+ "query": {
+ # Awsy has no variants defined so it shouldn't fail
+ # on a query like this
+ "awsy": ["browsertime 'live 'no-fission"],
+ }
+ },
+ False,
+ ),
+ ],
+)
+def test_category_rules(query, should_fail):
+    # Set the categories and variants to expand
+ PerfParser.categories = {"test-live": query}
+ PerfParser.variants = TEST_VARIANTS
+
+ if should_fail:
+ with pytest.raises(InvalidCategoryException):
+ PerfParser.run_category_checks()
+ else:
+ assert PerfParser.run_category_checks()
+
+    # Reset the categories and variants to expand
+ PerfParser.categories = TEST_CATEGORIES
+ PerfParser.variants = TEST_VARIANTS
+
+
+@pytest.mark.parametrize(
+ "apk_name, apk_content, should_fail, failure_message",
+ [
+ (
+ "real-file",
+ "file-content",
+ False,
+ None,
+ ),
+ ("bad-file", None, True, "Path does not exist:"),
+ ],
+)
+def test_apk_upload(apk_name, apk_content, should_fail, failure_message):
+ with mock.patch("tryselect.selectors.perf.subprocess") as _, mock.patch(
+ "tryselect.selectors.perf.shutil"
+ ) as _:
+ temp_dir = None
+ try:
+ temp_dir = tempfile.mkdtemp()
+ sample_apk = pathlib.Path(temp_dir, apk_name)
+ if apk_content is not None:
+ with sample_apk.open("w") as f:
+ f.write(apk_content)
+
+ if should_fail:
+ with pytest.raises(Exception) as exc_info:
+ PerfParser.setup_apk_upload("browsertime", str(sample_apk))
+ assert failure_message in str(exc_info)
+ else:
+ PerfParser.setup_apk_upload("browsertime", str(sample_apk))
+ finally:
+ if temp_dir is not None:
+ shutil.rmtree(temp_dir)
+
+
+@pytest.mark.parametrize(
+ "args, load_data, return_value, call_counts, exists_cache_file",
+ [
+ (
+ (
+ [],
+ "base_commit",
+ ),
+ {
+ "base_commit": [
+ {
+ "base_revision_treeherder": "2b04563b5",
+ "date": "2023-03-31",
+ "tasks": [],
+ },
+ ],
+ },
+ "2b04563b5",
+ [1, 0],
+ True,
+ ),
+ (
+ (
+ ["task-a"],
+ "subset_base_commit",
+ ),
+ {
+ "subset_base_commit": [
+ {
+ "base_revision_treeherder": "2b04563b5",
+ "date": "2023-03-31",
+ "tasks": ["task-a", "task-b"],
+ },
+ ],
+ },
+ "2b04563b5",
+ [1, 0],
+ True,
+ ),
+ (
+ ([], "not_exist_cached_base_commit"),
+ {
+ "base_commit": [
+ {
+ "base_revision_treeherder": "2b04563b5",
+ "date": "2023-03-31",
+ "tasks": [],
+ },
+ ],
+ },
+ None,
+ [1, 0],
+ True,
+ ),
+ (
+ (
+ ["task-a", "task-b"],
+ "superset_base_commit",
+ ),
+ {
+ "superset_base_commit": [
+ {
+ "base_revision_treeherder": "2b04563b5",
+ "date": "2023-03-31",
+ "tasks": ["task-a"],
+ },
+ ],
+ },
+ None,
+ [1, 0],
+ True,
+ ),
+ (
+ ([], None),
+ {},
+ None,
+ [1, 1],
+ True,
+ ),
+ (
+ ([], None),
+ {},
+ None,
+ [0, 0],
+ False,
+ ),
+ ],
+)
+def test_check_cached_revision(
+ args, load_data, return_value, call_counts, exists_cache_file
+):
+ with mock.patch("tryselect.selectors.perf.json.load") as load, mock.patch(
+ "tryselect.selectors.perf.json.dump"
+ ) as dump, mock.patch(
+ "tryselect.selectors.perf.pathlib.Path.is_file"
+ ) as is_file, mock.patch(
+ "tryselect.selectors.perf.pathlib.Path.open"
+ ):
+ load.return_value = load_data
+ is_file.return_value = exists_cache_file
+ result = PerfParser.check_cached_revision(*args)
+
+ assert load.call_count == call_counts[0]
+ assert dump.call_count == call_counts[1]
+ assert result == return_value
+
+
+@pytest.mark.parametrize(
+ "args, call_counts, exists_cache_file",
+ [
+ (
+ ["base_commit", "base_revision_treeherder"],
+ [0, 1],
+ False,
+ ),
+ (
+ ["base_commit", "base_revision_treeherder"],
+ [1, 1],
+ True,
+ ),
+ ],
+)
+def test_save_revision_treeherder(args, call_counts, exists_cache_file):
+ with mock.patch("tryselect.selectors.perf.json.load") as load, mock.patch(
+ "tryselect.selectors.perf.json.dump"
+ ) as dump, mock.patch(
+ "tryselect.selectors.perf.pathlib.Path.is_file"
+ ) as is_file, mock.patch(
+ "tryselect.selectors.perf.pathlib.Path.open"
+ ):
+ is_file.return_value = exists_cache_file
+ PerfParser.save_revision_treeherder(TASKS, args[0], args[1])
+
+ assert load.call_count == call_counts[0]
+ assert dump.call_count == call_counts[1]
+
+
+@pytest.mark.parametrize(
+ "total_tasks, options, call_counts, expected_log_message, expected_failure",
+ [
+ (
+ MAX_PERF_TASKS + 1,
+ {},
+ [1, 0, 0, 1],
+ (
+ "That's a lot of tests selected (300)!\n"
+ "These tests won't be triggered. If this was unexpected, "
+ "please file a bug in Testing :: Performance."
+ ),
+ True,
+ ),
+ (
+ MAX_PERF_TASKS,
+ {"show_all": True},
+ [9, 0, 0, 8],
+ (
+ "For more information on the performance tests, see our "
+ "PerfDocs here:\nhttps://firefox-source-docs.mozilla.org/testing/perfdocs/"
+ ),
+ False,
+ ),
+ (
+ int((MAX_PERF_TASKS + 2) / 2),
+ {"show_all": True, "try_config": {"rebuild": 2}},
+ [1, 0, 0, 1],
+ (
+ "That's a lot of tests selected (300)!\n"
+ "These tests won't be triggered. If this was unexpected, "
+ "please file a bug in Testing :: Performance."
+ ),
+ True,
+ ),
+ (0, {}, [1, 0, 0, 1], ("No tasks selected"), True),
+ ],
+)
+def test_max_perf_tasks(
+ total_tasks,
+ options,
+ call_counts,
+ expected_log_message,
+ expected_failure,
+):
+    # Set the categories and variants to expand
+ PerfParser.categories = TEST_CATEGORIES
+ PerfParser.variants = TEST_VARIANTS
+
+ with mock.patch("tryselect.selectors.perf.push_to_try") as ptt, mock.patch(
+ "tryselect.selectors.perf.print",
+ ) as perf_print, mock.patch(
+ "tryselect.selectors.perf.LogProcessor.revision",
+ new_callable=mock.PropertyMock,
+ return_value="revision",
+ ), mock.patch(
+ "tryselect.selectors.perf.PerfParser.perf_push_to_try",
+ new_callable=mock.MagicMock,
+ return_value=("revision1", "revision2"),
+ ) as perf_push_to_try_mock, mock.patch(
+ "tryselect.selectors.perf.PerfParser.get_perf_tasks"
+ ) as get_perf_tasks_mock, mock.patch(
+ "tryselect.selectors.perf.PerfParser.get_tasks"
+ ) as get_tasks_mock, mock.patch(
+ "tryselect.selectors.perf.run_fzf"
+ ) as fzf, mock.patch(
+ "tryselect.selectors.perf.fzf_bootstrap", return_value=mock.MagicMock()
+ ):
+ tasks = ["a-task"] * total_tasks
+ get_tasks_mock.return_value = tasks
+ get_perf_tasks_mock.return_value = tasks, [], []
+
+ run(**options)
+
+ assert perf_push_to_try_mock.call_count == 0 if expected_failure else 1
+ assert ptt.call_count == call_counts[1]
+ assert perf_print.call_count == call_counts[3]
+ assert fzf.call_count == 0
+ assert perf_print.call_args_list[-1][0][0] == expected_log_message
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_perfcomparators.py b/tools/tryselect/test/test_perfcomparators.py
new file mode 100644
index 0000000000..51f0bdb287
--- /dev/null
+++ b/tools/tryselect/test/test_perfcomparators.py
@@ -0,0 +1,150 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import tempfile
+from unittest import mock
+
+import mozunit
+import pytest
+from tryselect.selectors.perfselector.perfcomparators import (
+ BadComparatorArgs,
+ BenchmarkComparator,
+ ComparatorNotFound,
+ get_comparator,
+)
+
+
+@pytest.mark.parametrize(
+ "test_link",
+ [
+ "https://github.com/mozilla-mobile/firefox-android/pull/1627",
+ "https://github.com/mozilla-mobile/firefox-android/pull/1876/"
+ "commits/17c7350cc37a4a85cea140a7ce54e9fd037b5365",
+ ],
+)
+def test_benchmark_comparator(test_link):
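+ # When the PR link points at a specific commit, the revision is taken from the
+ # URL itself; otherwise it comes from the mocked GitHub API response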
+ def _verify_extra_args(extra_args):
+ assert len(extra_args) == 3
+ if "commit" in test_link:
+ assert (
+ "benchmark-revision=17c7350cc37a4a85cea140a7ce54e9fd037b5365"
+ in extra_args
+ )
+ else:
+ assert "benchmark-revision=sha-for-link" in extra_args
+ assert "benchmark-repository=url-for-link" in extra_args
+ assert "benchmark-branch=ref-for-link" in extra_args
+
+ comparator = BenchmarkComparator(
+ None, None, None, [f"base-link={test_link}", f"new-link={test_link}"]
+ )
+
+ with mock.patch("requests.get") as mocked_get:
+ magic_get = mock.MagicMock()
+ magic_get.json.return_value = {
+ "head": {
+ "repo": {
+ "html_url": "url-for-link",
+ },
+ "sha": "sha-for-link",
+ "ref": "ref-for-link",
+ }
+ }
+ magic_get.status_code = 200
+ mocked_get.return_value = magic_get
+
+ extra_args = []
+ comparator.setup_base_revision(extra_args)
+ _verify_extra_args(extra_args)
+
+ extra_args = []
+ comparator.setup_new_revision(extra_args)
+ _verify_extra_args(extra_args)
+
+
+def test_benchmark_comparator_no_pr_links():
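+ # With explicit repo/branch/revision args, the comparator should use them
+ # directly and never fall back to the mocked GitHub response values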
+ def _verify_extra_args(extra_args):
+ assert len(extra_args) == 3
+ assert "benchmark-revision=rev" in extra_args
+ assert "benchmark-repository=link" in extra_args
+ assert "benchmark-branch=fake" in extra_args
+
+ comparator = BenchmarkComparator(
+ None,
+ None,
+ None,
+ [
+ "base-repo=link",
+ "base-branch=fake",
+ "base-revision=rev",
+ "new-repo=link",
+ "new-branch=fake",
+ "new-revision=rev",
+ ],
+ )
+
+ with mock.patch("requests.get") as mocked_get:
+ magic_get = mock.MagicMock()
+ magic_get.json.return_value = {
+ "head": {
+ "repo": {
+ "html_url": "url-for-link",
+ },
+ "sha": "sha-for-link",
+ "ref": "ref-for-link",
+ }
+ }
+ magic_get.status_code = 200
+ mocked_get.return_value = magic_get
+
+ extra_args = []
+ comparator.setup_base_revision(extra_args)
+ _verify_extra_args(extra_args)
+
+ extra_args = []
+ comparator.setup_new_revision(extra_args)
+ _verify_extra_args(extra_args)
+
+
+def test_benchmark_comparator_bad_args():
+ comparator = BenchmarkComparator(
+ None,
+ None,
+ None,
+ [
+ "base-bad-args=val",
+ ],
+ )
+
+ with pytest.raises(BadComparatorArgs):
+ comparator.setup_base_revision([])
+
+
+def test_get_comparator_bad_name():
+ with pytest.raises(ComparatorNotFound):
+ get_comparator("BadName")
+
+
+def test_get_comparator_bad_script():
+ with pytest.raises(ComparatorNotFound):
+ with tempfile.NamedTemporaryFile() as tmpf:
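+ # Closing the NamedTemporaryFile deletes it (delete=True by default), so the
+ # path no longer points at a usable comparator script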
+ tmpf.close()
+ get_comparator(tmpf.name)
+
+
+def test_get_comparator_benchmark_name():
+ comparator_klass = get_comparator("BenchmarkComparator")
+ assert comparator_klass.__name__ == "BenchmarkComparator"
+
+
+def test_get_comparator_benchmark_script():
+ # If the get_comparator method works for scripts, it should find the first
+ # class defined in this file, or the first imported class that matches it
+ # (here, the imported BenchmarkComparator)
+ comparator_klass = get_comparator(__file__)
+ assert comparator_klass.__name__ == "BenchmarkComparator"
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_preset.t b/tools/tryselect/test/test_preset.t
new file mode 100644
index 0000000000..d67a5d6341
--- /dev/null
+++ b/tools/tryselect/test/test_preset.t
@@ -0,0 +1,360 @@
+ $ . $TESTDIR/setup.sh
+ $ cd $topsrcdir
+
+Test preset with no subcommand
+
+ $ ./mach try $testargs --save foo -b do -p linux -u mochitests -t none --tag foo
+ preset saved, run with: --preset=foo
+
+ $ ./mach try $testargs --preset foo
+ Commit message:
+ try: -b do -p linux -u mochitests -t none --tag foo
+
+ Pushed via `mach try syntax`
+
+ $ ./mach try syntax $testargs --preset foo
+ Commit message:
+ try: -b do -p linux -u mochitests -t none --tag foo
+
+ Pushed via `mach try syntax`
+
+ $ ./mach try $testargs --list-presets
+ Presets from */mozbuild/try_presets.yml: (glob)
+
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+
+ $ unset EDITOR
+ $ ./mach try $testargs --edit-presets
+ error: must set the $EDITOR environment variable to use --edit-presets
+ $ export EDITOR=cat
+ $ ./mach try $testargs --edit-presets
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+
+Test preset with syntax subcommand
+
+ $ ./mach try syntax $testargs --save bar -b do -p win32 -u none -t all --tag bar
+ preset saved, run with: --preset=bar
+
+ $ ./mach try syntax $testargs --preset bar
+ Commit message:
+ try: -b do -p win32 -u none -t all --tag bar
+
+ Pushed via `mach try syntax`
+
+ $ ./mach try $testargs --preset bar
+ Commit message:
+ try: -b do -p win32 -u none -t all --tag bar
+
+ Pushed via `mach try syntax`
+
+ $ ./mach try syntax $testargs --list-presets
+ Presets from */mozbuild/try_presets.yml: (glob)
+
+ bar:
+ dry_run: true
+ no_artifact: true
+ platforms:
+ - win32
+ selector: syntax
+ tags:
+ - bar
+ talos:
+ - all
+ tests:
+ - none
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+
+ $ ./mach try syntax $testargs --edit-presets
+ bar:
+ dry_run: true
+ no_artifact: true
+ platforms:
+ - win32
+ selector: syntax
+ tags:
+ - bar
+ talos:
+ - all
+ tests:
+ - none
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+
+Test preset with fuzzy subcommand
+
+ $ ./mach try fuzzy $testargs --save baz -q "'foo" --rebuild 5
+ preset saved, run with: --preset=baz
+
+ $ ./mach try fuzzy $testargs --preset baz
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "rebuild": 5,
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+
+ $ ./mach try $testargs --preset baz
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "rebuild": 5,
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+
+Queries can be appended to presets
+
+ $ ./mach try fuzzy $testargs --preset baz -q "'build"
+ Commit message:
+ Fuzzy query='foo&query='build
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "rebuild": 5,
+ "tasks": [
+ "build-baz",
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+
+ $ ./mach try $testargs --preset baz -xq "'opt"
+ Commit message:
+ Fuzzy query='foo&query='opt
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "rebuild": 5,
+ "tasks": [
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+
+ $ ./mach try fuzzy $testargs --list-presets
+ Presets from */mozbuild/try_presets.yml: (glob)
+
+ bar:
+ dry_run: true
+ no_artifact: true
+ platforms:
+ - win32
+ selector: syntax
+ tags:
+ - bar
+ talos:
+ - all
+ tests:
+ - none
+ baz:
+ dry_run: true
+ no_artifact: true
+ query:
+ - "'foo"
+ rebuild: 5
+ selector: fuzzy
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+
+ $ ./mach try fuzzy $testargs --edit-presets
+ bar:
+ dry_run: true
+ no_artifact: true
+ platforms:
+ - win32
+ selector: syntax
+ tags:
+ - bar
+ talos:
+ - all
+ tests:
+ - none
+ baz:
+ dry_run: true
+ no_artifact: true
+ query:
+ - "'foo"
+ rebuild: 5
+ selector: fuzzy
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+
+Test gecko-profile argument handling by adding profiling to a preset.
+
+ $ ./mach try fuzzy $testargs --preset baz --gecko-profile-features=nostacksampling,cpu
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "gecko-profile": true,
+ "gecko-profile-features": "nostacksampling,cpu",
+ "rebuild": 5,
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+Check that the gecko-profile flags can be used from a preset, and check
+dashes vs underscores (presets are saved with underscores to match
+ArgumentParser settings; everything else uses dashes).
+
+ $ ./mach try fuzzy $testargs --save profile -q "'foo" --rebuild 5 --gecko-profile-features=nostacksampling,cpu
+ preset saved, run with: --preset=profile
+
+ $ ./mach try fuzzy $testargs --preset profile
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "gecko-profile": true,
+ "gecko-profile-features": "nostacksampling,cpu",
+ "rebuild": 5,
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "version": 1
+ }
+
+ $ EDITOR=cat ./mach try fuzzy $testargs --edit-preset profile
+ bar:
+ dry_run: true
+ no_artifact: true
+ platforms:
+ - win32
+ selector: syntax
+ tags:
+ - bar
+ talos:
+ - all
+ tests:
+ - none
+ baz:
+ dry_run: true
+ no_artifact: true
+ query:
+ - "'foo"
+ rebuild: 5
+ selector: fuzzy
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+ profile:
+ dry_run: true
+ gecko_profile_features: nostacksampling,cpu
+ no_artifact: true
+ query:
+ - "'foo"
+ rebuild: 5
+ selector: fuzzy
+
+ $ rm $MOZBUILD_STATE_PATH/try_presets.yml
diff --git a/tools/tryselect/test/test_presets.py b/tools/tryselect/test/test_presets.py
new file mode 100644
index 0000000000..89cc810808
--- /dev/null
+++ b/tools/tryselect/test/test_presets.py
@@ -0,0 +1,58 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import mozunit
+import pytest
+
+TASKS = [
+ {
+ "kind": "build",
+ "label": "build-windows",
+ "attributes": {
+ "build_platform": "windows",
+ },
+ },
+ {
+ "kind": "test",
+ "label": "test-windows-mochitest-e10s",
+ "attributes": {
+ "unittest_suite": "mochitest",
+ "unittest_flavor": "browser-chrome",
+ "mochitest_try_name": "mochitest",
+ },
+ },
+]
+
+
+@pytest.fixture(autouse=True)
+def skip_taskgraph_generation(monkeypatch, tg):
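+ # Substitute the small fixture graph so tests never generate the real taskgraph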
+ def fake_generate_tasks(*args, **kwargs):
+ return tg
+
+ from tryselect import tasks
+
+ monkeypatch.setattr(tasks, "generate_tasks", fake_generate_tasks)
+
+
+@pytest.mark.xfail(
+ strict=False, reason="Bug 1635204: test_shared_presets[sample-suites] is flaky"
+)
+def test_shared_presets(run_mach, shared_name, shared_preset):
+ """This test makes sure that we don't break any of the in-tree presets when
+ renaming/removing variables in any of the selectors.
+ """
+ assert "description" in shared_preset
+ assert "selector" in shared_preset
+
+ selector = shared_preset["selector"]
+ if selector == "fuzzy":
+ assert "query" in shared_preset
+ assert isinstance(shared_preset["query"], list)
+
+ # Run the preset and assert there were no exceptions.
+ assert run_mach(["try", "--no-push", "--preset", shared_name]) == 0
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_release.py b/tools/tryselect/test/test_release.py
new file mode 100644
index 0000000000..a1a0d348b2
--- /dev/null
+++ b/tools/tryselect/test/test_release.py
@@ -0,0 +1,43 @@
+# Any copyright is dedicated to the Public Domain.
+# https://creativecommons.org/publicdomain/zero/1.0/
+
+from textwrap import dedent
+
+import mozunit
+
+
+def test_release(run_mach, capfd):
+ cmd = [
+ "try",
+ "release",
+ "--no-push",
+ "--version=97.0",
+ ]
+ assert run_mach(cmd) == 0
+
+ output = capfd.readouterr().out
+ print(output)
+
+ expected = dedent(
+ """
+ Commit message:
+ staging release: 97.0
+
+ Pushed via `mach try release`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": true,
+ "release_type": "release",
+ "target_tasks_method": "staging_release_builds"
+ },
+ "version": 2
+ }
+
+ """
+ ).lstrip()
+ assert expected in output
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_scriptworker.py b/tools/tryselect/test/test_scriptworker.py
new file mode 100644
index 0000000000..e25279ace4
--- /dev/null
+++ b/tools/tryselect/test/test_scriptworker.py
@@ -0,0 +1,39 @@
+# Any copyright is dedicated to the Public Domain.
+# https://creativecommons.org/publicdomain/zero/1.0/
+
+import re
+from textwrap import dedent
+
+import mozunit
+
+
+def test_release(run_mach, capfd):
+ cmd = [
+ "try",
+ "scriptworker",
+ "--no-push",
+ "tree",
+ ]
+ assert run_mach(cmd) == 0
+
+ output = capfd.readouterr().out
+ print(output)
+
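+ # Only the start of the config is matched; app_version and build_number are
+ # not fixed values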
+ expected = re.compile(
+ dedent(
+ r"""
+ Pushed via `mach try scriptworker`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "app_version": "\d+\.\d+",
+ "build_number": \d+,
+ """
+ ).lstrip(),
+ re.MULTILINE,
+ )
+ assert expected.search(output)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_task_configs.py b/tools/tryselect/test/test_task_configs.py
new file mode 100644
index 0000000000..4865a1bfca
--- /dev/null
+++ b/tools/tryselect/test/test_task_configs.py
@@ -0,0 +1,144 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import inspect
+import subprocess
+from argparse import ArgumentParser
+from textwrap import dedent
+
+import mozunit
+import pytest
+from tryselect.task_config import Pernosco, all_task_configs
+
+# task configs have a list of tests of the form (input, expected)
+TASK_CONFIG_TESTS = {
+ "artifact": [
+ (["--no-artifact"], None),
+ (["--artifact"], {"use-artifact-builds": True, "disable-pgo": True}),
+ ],
+ "chemspill-prio": [
+ ([], None),
+ (["--chemspill-prio"], {"chemspill-prio": {}}),
+ ],
+ "env": [
+ ([], None),
+ (["--env", "foo=bar", "--env", "num=10"], {"env": {"foo": "bar", "num": "10"}}),
+ ],
+ "path": [
+ ([], None),
+ (
+ ["dom/indexedDB"],
+ {"env": {"MOZHARNESS_TEST_PATHS": '{"xpcshell": ["dom/indexedDB"]}'}},
+ ),
+ (
+ ["dom/indexedDB", "testing"],
+ {
+ "env": {
+ "MOZHARNESS_TEST_PATHS": '{"xpcshell": ["dom/indexedDB", "testing"]}'
+ }
+ },
+ ),
+ (["invalid/path"], SystemExit),
+ ],
+ "pernosco": [
+ ([], None),
+ ],
+ "rebuild": [
+ ([], None),
+ (["--rebuild", "10"], {"rebuild": 10}),
+ (["--rebuild", "1"], SystemExit),
+ (["--rebuild", "21"], SystemExit),
+ ],
+ "worker-overrides": [
+ ([], None),
+ (
+ ["--worker-override", "alias=worker/pool"],
+ {"worker-overrides": {"alias": "worker/pool"}},
+ ),
+ (
+ [
+ "--worker-override",
+ "alias=worker/pool",
+ "--worker-override",
+ "alias=other/pool",
+ ],
+ SystemExit,
+ ),
+ (
+ ["--worker-suffix", "b-linux=-dev"],
+ {"worker-overrides": {"b-linux": "gecko-1/b-linux-dev"}},
+ ),
+ (
+ [
+ "--worker-override",
+ "b-linux=worker/pool" "--worker-suffix",
+ "b-linux=-dev",
+ ],
+ SystemExit,
+ ),
+ ],
+}
+
+
+@pytest.fixture
+def config_patch_resolver(patch_resolver):
+ def inner(paths):
+ patch_resolver(
+ [], [{"flavor": "xpcshell", "srcdir_relpath": path} for path in paths]
+ )
+
+ return inner
+
+
+def test_task_configs(config_patch_resolver, task_config, args, expected):
+ parser = ArgumentParser()
+
+ cfg = all_task_configs[task_config]()
+ cfg.add_arguments(parser)
+
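+ # 'expected' is either the resulting try_config dict or an exception class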
+ if inspect.isclass(expected) and issubclass(expected, BaseException):
+ with pytest.raises(expected):
+ args = parser.parse_args(args)
+ if task_config == "path":
+ config_patch_resolver(**vars(args))
+
+ cfg.try_config(**vars(args))
+ else:
+ args = parser.parse_args(args)
+ if task_config == "path":
+ config_patch_resolver(**vars(args))
+ assert cfg.try_config(**vars(args)) == expected
+
+
+@pytest.fixture
+def patch_pernosco_email_check(monkeypatch):
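+ # Fake the subprocess output so the Pernosco email/config check sees a
+ # mozilla.com user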
+ def inner(val):
+ def fake_check_output(*args, **kwargs):
+ return val
+
+ monkeypatch.setattr(subprocess, "check_output", fake_check_output)
+
+ return inner
+
+
+def test_pernosco(patch_pernosco_email_check):
+ patch_pernosco_email_check(
+ dedent(
+ """
+ user foobar@mozilla.com
+ hostname hg.mozilla.com
+ """
+ )
+ )
+
+ parser = ArgumentParser()
+
+ cfg = Pernosco()
+ cfg.add_arguments(parser)
+ args = parser.parse_args(["--pernosco"])
+ assert cfg.try_config(**vars(args)) == {"env": {"PERNOSCO": "1"}}
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_tasks.py b/tools/tryselect/test/test_tasks.py
new file mode 100644
index 0000000000..cdf8cf84c1
--- /dev/null
+++ b/tools/tryselect/test/test_tasks.py
@@ -0,0 +1,93 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+import mozunit
+import pytest
+from tryselect.tasks import cache_key, filter_tasks_by_paths, resolve_tests_by_suite
+
+
+def test_filter_tasks_by_paths(patch_resolver):
+ tasks = ["foobar/xpcshell-1", "foobar/mochitest", "foobar/xpcshell"]
+
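+ # With no tests resolved nothing matches; once an xpcshell test resolves, only
+ # the xpcshell tasks remain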
+ patch_resolver(["xpcshell"], {})
+ assert list(filter_tasks_by_paths(tasks, "dummy")) == []
+
+ patch_resolver([], [{"flavor": "xpcshell"}])
+ assert list(filter_tasks_by_paths(tasks, "dummy")) == [
+ "foobar/xpcshell-1",
+ "foobar/xpcshell",
+ ]
+
+
+@pytest.mark.parametrize(
+ "input, tests, expected",
+ (
+ pytest.param(
+ ["xpcshell.js"],
+ [{"flavor": "xpcshell", "srcdir_relpath": "xpcshell.js"}],
+ {"xpcshell": ["xpcshell.js"]},
+ id="single test",
+ ),
+ pytest.param(
+ ["xpcshell.ini"],
+ [
+ {
+ "flavor": "xpcshell",
+ "srcdir_relpath": "xpcshell.js",
+ "manifest_relpath": "xpcshell.ini",
+ },
+ ],
+ {"xpcshell": ["xpcshell.ini"]},
+ id="single manifest",
+ ),
+ pytest.param(
+ ["xpcshell.js", "mochitest.js"],
+ [
+ {"flavor": "xpcshell", "srcdir_relpath": "xpcshell.js"},
+ {"flavor": "mochitest", "srcdir_relpath": "mochitest.js"},
+ ],
+ {
+ "xpcshell": ["xpcshell.js"],
+ "mochitest-plain": ["mochitest.js"],
+ },
+ id="two tests",
+ ),
+ pytest.param(
+ ["test/xpcshell.ini"],
+ [
+ {
+ "flavor": "xpcshell",
+ "srcdir_relpath": "test/xpcshell.js",
+ "manifest_relpath": os.path.join("test", "xpcshell.ini"),
+ },
+ ],
+ {"xpcshell": ["test/xpcshell.ini"]},
+ id="mismatched path separators",
+ ),
+ ),
+)
+def test_resolve_tests_by_suite(patch_resolver, input, tests, expected):
+ patch_resolver([], tests)
+ assert resolve_tests_by_suite(input) == expected
+
+
+@pytest.mark.parametrize(
+ "attr,params,disable_target_task_filter,expected",
+ (
+ ("target_task_set", None, False, "target_task_set"),
+ ("target_task_set", {"project": "autoland"}, False, "target_task_set"),
+ ("target_task_set", {"project": "mozilla-central"}, False, "target_task_set"),
+ ("target_task_set", None, True, "target_task_set-uncommon"),
+ ("full_task_set", {"project": "pine"}, False, "full_task_set-pine"),
+ ("full_task_set", None, True, "full_task_set"),
+ ),
+)
+def test_cache_key(attr, params, disable_target_task_filter, expected):
+ assert cache_key(attr, params, disable_target_task_filter) == expected
+
+
+if __name__ == "__main__":
+ mozunit.main()