Diffstat (limited to 'tools/tryselect/selectors')
-rw-r--r--  tools/tryselect/selectors/__init__.py                         3
-rw-r--r--  tools/tryselect/selectors/again.py                          151
-rw-r--r--  tools/tryselect/selectors/auto.py                           118
-rw-r--r--  tools/tryselect/selectors/chooser/.eslintrc.js               16
-rw-r--r--  tools/tryselect/selectors/chooser/__init__.py               120
-rw-r--r--  tools/tryselect/selectors/chooser/app.py                    176
-rw-r--r--  tools/tryselect/selectors/chooser/static/filter.js          116
-rw-r--r--  tools/tryselect/selectors/chooser/static/select.js           46
-rw-r--r--  tools/tryselect/selectors/chooser/static/style.css          107
-rw-r--r--  tools/tryselect/selectors/chooser/templates/chooser.html     78
-rw-r--r--  tools/tryselect/selectors/chooser/templates/close.html       11
-rw-r--r--  tools/tryselect/selectors/chooser/templates/layout.html      71
-rw-r--r--  tools/tryselect/selectors/compare.py                         66
-rw-r--r--  tools/tryselect/selectors/coverage.py                       452
-rw-r--r--  tools/tryselect/selectors/empty.py                           43
-rw-r--r--  tools/tryselect/selectors/fuzzy.py                          284
-rw-r--r--  tools/tryselect/selectors/perf.py                          1511
-rw-r--r--  tools/tryselect/selectors/perf_preview.py                    62
-rw-r--r--  tools/tryselect/selectors/perfselector/__init__.py            3
-rw-r--r--  tools/tryselect/selectors/perfselector/classification.py    387
-rw-r--r--  tools/tryselect/selectors/perfselector/perfcomparators.py   258
-rw-r--r--  tools/tryselect/selectors/perfselector/utils.py              44
-rw-r--r--  tools/tryselect/selectors/preview.py                        102
-rw-r--r--  tools/tryselect/selectors/release.py                        159
-rw-r--r--  tools/tryselect/selectors/scriptworker.py                   174
-rw-r--r--  tools/tryselect/selectors/syntax.py                         708
26 files changed, 5266 insertions, 0 deletions
diff --git a/tools/tryselect/selectors/__init__.py b/tools/tryselect/selectors/__init__.py
new file mode 100644
index 0000000000..c580d191c1
--- /dev/null
+++ b/tools/tryselect/selectors/__init__.py
@@ -0,0 +1,3 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
diff --git a/tools/tryselect/selectors/again.py b/tools/tryselect/selectors/again.py
new file mode 100644
index 0000000000..434aed7cc1
--- /dev/null
+++ b/tools/tryselect/selectors/again.py
@@ -0,0 +1,151 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import json
+import os
+
+from ..cli import BaseTryParser
+from ..push import history_path, push_to_try
+
+
+class AgainParser(BaseTryParser):
+ name = "again"
+ arguments = [
+ [
+ ["--index"],
+ {
+ "default": 0,
+ "const": "list",
+ "nargs": "?",
+ "help": "Index of entry in the history to re-push, "
+ "where '0' is the most recent (default 0). "
+ "Use --index without a value to display indices.",
+ },
+ ],
+ [
+ ["--list"],
+ {
+ "default": False,
+ "action": "store_true",
+ "dest": "list_configs",
+ "help": "Display history and exit",
+ },
+ ],
+ [
+ ["--list-tasks"],
+ {
+ "default": 0,
+ "action": "count",
+ "dest": "list_tasks",
+ "help": "Like --list, but display selected tasks "
+ "for each history entry, up to 10. Repeat "
+ "to display all selected tasks.",
+ },
+ ],
+ [
+ ["--purge"],
+ {
+ "default": False,
+ "action": "store_true",
+ "help": "Remove all history and exit",
+ },
+ ],
+ ]
+ common_groups = ["push"]
+
+
+def run(
+ index=0, purge=False, list_configs=False, list_tasks=0, message="{msg}", **pushargs
+):
+ if index == "list":
+ list_configs = True
+ else:
+ try:
+ index = int(index)
+ except ValueError:
+ print("error: '--index' must be an integer")
+ return 1
+
+ if purge:
+ os.remove(history_path)
+ return
+
+ if not os.path.isfile(history_path):
+ print("error: history file not found: {}".format(history_path))
+ return 1
+
+ with open(history_path) as fh:
+ history = fh.readlines()
+
+ if list_configs or list_tasks > 0:
+ for i, data in enumerate(history):
+ msg, config = json.loads(data)
+ version = config.get("version", "1")
+ settings = {}
+ if version == 1:
+ tasks = config["tasks"]
+ settings = config
+ elif version == 2:
+ try_config = config.get("parameters", {}).get("try_task_config", {})
+ tasks = try_config.get("tasks")
+ else:
+ tasks = None
+
+ if tasks is not None:
+ # Select only the things that are of interest to display.
+ settings = settings.copy()
+ env = settings.pop("env", {}).copy()
+ env.pop("TRY_SELECTOR", None)
+ for name in ("tasks", "version"):
+ settings.pop(name, None)
+
+ def pluralize(n, noun):
+ return "{n} {noun}{s}".format(
+ n=n, noun=noun, s="" if n == 1 else "s"
+ )
+
+ out = str(i) + ". (" + pluralize(len(tasks), "task")
+ if env:
+ out += ", " + pluralize(len(env), "env var")
+ if settings:
+ out += ", " + pluralize(len(settings), "setting")
+ out += ") " + msg
+ print(out)
+
+ if list_tasks > 0:
+ indent = " " * 4
+ if list_tasks > 1:
+ shown_tasks = tasks
+ else:
+ shown_tasks = tasks[:10]
+ print(indent + ("\n" + indent).join(shown_tasks))
+
+ num_hidden_tasks = len(tasks) - len(shown_tasks)
+ if num_hidden_tasks > 0:
+ print("{}... and {} more".format(indent, num_hidden_tasks))
+
+ if list_tasks and env:
+ for line in ("env: " + json.dumps(env, indent=2)).splitlines():
+ print(" " + line)
+
+ if list_tasks and settings:
+ for line in (
+ "settings: " + json.dumps(settings, indent=2)
+ ).splitlines():
+ print(" " + line)
+ else:
+ print(
+ "{index}. {msg}".format(
+ index=i,
+ msg=msg,
+ )
+ )
+
+ return
+
+ msg, try_task_config = json.loads(history[index])
+ return push_to_try(
+ "again", message.format(msg=msg), try_task_config=try_task_config, **pushargs
+ )
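
For context, each line of the history file parsed above is a JSON array of [message, config]. A minimal sketch of decoding a version-2 entry, assuming only the standard library (the task label and message are hypothetical sample values, not taken from a real history file):

    import json

    # Hypothetical history line as written for a version-2 push.
    line = json.dumps([
        "Fuzzy query=mochitest",
        {"version": 2,
         "parameters": {"try_task_config": {"tasks": ["test-linux64/opt-mochitest-1"]}}},
    ])

    msg, config = json.loads(line)
    tasks = config["parameters"]["try_task_config"]["tasks"]
    print(msg, tasks)  # index 0 would be the most recent push
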
diff --git a/tools/tryselect/selectors/auto.py b/tools/tryselect/selectors/auto.py
new file mode 100644
index 0000000000..e7cc6c508c
--- /dev/null
+++ b/tools/tryselect/selectors/auto.py
@@ -0,0 +1,118 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from taskgraph.util.python_path import find_object
+
+from ..cli import BaseTryParser
+from ..push import push_to_try
+from ..util.dicttools import merge
+
+TRY_AUTO_PARAMETERS = {
+ "optimize_strategies": "gecko_taskgraph.optimize:tryselect.bugbug_reduced_manifests_config_selection_medium", # noqa
+ "optimize_target_tasks": True,
+ "target_tasks_method": "try_auto",
+ "test_manifest_loader": "bugbug",
+ "try_mode": "try_auto",
+ "try_task_config": {},
+}
+
+
+class AutoParser(BaseTryParser):
+ name = "auto"
+ common_groups = ["push"]
+ task_configs = [
+ "artifact",
+ "env",
+ "chemspill-prio",
+ "disable-pgo",
+ "worker-overrides",
+ ]
+ arguments = [
+ [
+ ["--strategy"],
+ {
+ "default": None,
+ "help": "Override the default optimization strategy. Valid values "
+ "are the experimental strategies defined at the bottom of "
+ "`taskcluster/gecko_taskgraph/optimize/__init__.py`.",
+ },
+ ],
+ [
+ ["--tasks-regex"],
+ {
+ "default": [],
+ "action": "append",
+ "help": "Apply a regex filter to the tasks selected. Specifying "
+ "multiple times schedules the union of computed tasks.",
+ },
+ ],
+ [
+ ["--tasks-regex-exclude"],
+ {
+ "default": [],
+ "action": "append",
+ "help": "Apply a regex filter to the tasks selected. Specifying "
+ "multiple times excludes computed tasks matching any regex.",
+ },
+ ],
+ ]
+
+ def validate(self, args):
+ super().validate(args)
+
+ if args.strategy:
+ if ":" not in args.strategy:
+ args.strategy = "gecko_taskgraph.optimize:tryselect.{}".format(
+ args.strategy
+ )
+
+ try:
+ obj = find_object(args.strategy)
+ except (ImportError, AttributeError):
+ self.error("invalid module path '{}'".format(args.strategy))
+
+ if not isinstance(obj, dict):
+ self.error("object at '{}' must be a dict".format(args.strategy))
+
+
+def run(
+ message="{msg}",
+ stage_changes=False,
+ dry_run=False,
+ closed_tree=False,
+ strategy=None,
+ tasks_regex=None,
+ tasks_regex_exclude=None,
+ try_config_params=None,
+ push_to_lando=False,
+ **ignored
+):
+ msg = message.format(msg="Tasks automatically selected.")
+
+ params = TRY_AUTO_PARAMETERS.copy()
+ if try_config_params:
+ params = merge(params, try_config_params)
+
+ if strategy:
+ params["optimize_strategies"] = strategy
+
+ if tasks_regex or tasks_regex_exclude:
+ params.setdefault("try_task_config", {})["tasks-regex"] = {}
+ params["try_task_config"]["tasks-regex"]["include"] = tasks_regex
+ params["try_task_config"]["tasks-regex"]["exclude"] = tasks_regex_exclude
+
+ task_config = {
+ "version": 2,
+ "parameters": params,
+ }
+ return push_to_try(
+ "auto",
+ msg,
+ try_task_config=task_config,
+ stage_changes=stage_changes,
+ dry_run=dry_run,
+ closed_tree=closed_tree,
+ push_to_lando=push_to_lando,
+ )
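
As a rough illustration of the parameter assembly above, this sketch shows the shape of the try_task_config that run() builds for a regex-filtered push (a trimmed subset of TRY_AUTO_PARAMETERS; the regex values are hypothetical):

    # Minimal sketch of what run() assembles, with hypothetical filters.
    params = {
        "target_tasks_method": "try_auto",
        "try_mode": "try_auto",
        "try_task_config": {},
    }
    params["try_task_config"]["tasks-regex"] = {
        "include": ["mochitest"],  # one entry per --tasks-regex flag
        "exclude": ["android"],    # one entry per --tasks-regex-exclude flag
    }
    task_config = {"version": 2, "parameters": params}
    print(task_config)
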
diff --git a/tools/tryselect/selectors/chooser/.eslintrc.js b/tools/tryselect/selectors/chooser/.eslintrc.js
new file mode 100644
index 0000000000..861d6bafc2
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/.eslintrc.js
@@ -0,0 +1,16 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+module.exports = {
+ env: {
+ jquery: true,
+ },
+ globals: {
+ apply: true,
+ applyChunks: true,
+ tasks: true,
+ },
+};
diff --git a/tools/tryselect/selectors/chooser/__init__.py b/tools/tryselect/selectors/chooser/__init__.py
new file mode 100644
index 0000000000..d6a32e08d0
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/__init__.py
@@ -0,0 +1,120 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import multiprocessing
+import os
+import time
+import webbrowser
+from threading import Timer
+
+from gecko_taskgraph.target_tasks import filter_by_uncommon_try_tasks
+
+from tryselect.cli import BaseTryParser
+from tryselect.push import (
+ check_working_directory,
+ generate_try_task_config,
+ push_to_try,
+)
+from tryselect.tasks import generate_tasks
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+
+class ChooserParser(BaseTryParser):
+ name = "chooser"
+ arguments = []
+ common_groups = ["push", "task"]
+ task_configs = [
+ "artifact",
+ "browsertime",
+ "chemspill-prio",
+ "disable-pgo",
+ "env",
+ "existing-tasks",
+ "gecko-profile",
+ "path",
+ "pernosco",
+ "rebuild",
+ "worker-overrides",
+ ]
+
+
+def run(
+ update=False,
+ query=None,
+ try_config_params=None,
+ full=False,
+ parameters=None,
+ save=False,
+ preset=None,
+ mod_presets=False,
+ stage_changes=False,
+ dry_run=False,
+ message="{msg}",
+ closed_tree=False,
+ push_to_lando=False,
+):
+ from .app import create_application
+
+ push = not stage_changes and not dry_run
+ check_working_directory(push)
+
+ tg = generate_tasks(parameters, full)
+
+ # Remove tasks that are not to be shown unless `--full` is specified.
+ if not full:
+ excluded_tasks = [
+ label
+ for label in tg.tasks.keys()
+ if not filter_by_uncommon_try_tasks(label)
+ ]
+ for task in excluded_tasks:
+ tg.tasks.pop(task)
+
+ queue = multiprocessing.Queue()
+
+ if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
+ # we are in the reloader process, don't open the browser or do any try stuff
+ app = create_application(tg, queue)
+ app.run()
+ return
+
+ # give app a second to start before opening the browser
+ url = "http://127.0.0.1:5000"
+ Timer(1, lambda: webbrowser.open(url)).start()
+ print("Starting trychooser on {}".format(url))
+ process = multiprocessing.Process(
+ target=create_and_run_application, args=(tg, queue)
+ )
+ process.start()
+
+ selected = queue.get()
+
+ # Allow the close page to render before terminating the process.
+ time.sleep(1)
+ process.terminate()
+ if not selected:
+ print("no tasks selected")
+ return
+
+ msg = "Try Chooser Enhanced ({} tasks selected)".format(len(selected))
+ return push_to_try(
+ "chooser",
+ message.format(msg=msg),
+ try_task_config=generate_try_task_config(
+ "chooser", selected, params=try_config_params
+ ),
+ stage_changes=stage_changes,
+ dry_run=dry_run,
+ closed_tree=closed_tree,
+ push_to_lando=push_to_lando,
+ )
+
+
+def create_and_run_application(tg, queue: multiprocessing.Queue):
+ from .app import create_application
+
+ app = create_application(tg, queue)
+
+ app.run()
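
The WERKZEUG_RUN_MAIN check above exists because Flask's reloader re-executes the module in a child process; only the original process should open the browser and block on the queue. A stripped-down sketch of the same parent/child handoff, standard library only (the worker is a hypothetical stand-in for the Flask app):

    import multiprocessing

    def worker(queue):
        # Stand-in for the web app: it pushes the user's selection onto
        # the queue when the form is submitted.
        queue.put(["test-linux64/opt-mochitest-1"])

    if __name__ == "__main__":
        queue = multiprocessing.Queue()
        process = multiprocessing.Process(target=worker, args=(queue,))
        process.start()
        selected = queue.get()  # blocks until the web process reports back
        process.terminate()
        print(selected)
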
diff --git a/tools/tryselect/selectors/chooser/app.py b/tools/tryselect/selectors/chooser/app.py
new file mode 100644
index 0000000000..99d63cd37f
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/app.py
@@ -0,0 +1,176 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import multiprocessing
+from abc import ABCMeta, abstractproperty
+from collections import defaultdict
+
+from flask import Flask, render_template, request
+
+SECTIONS = []
+SUPPORTED_KINDS = set()
+
+
+def register_section(cls):
+ assert issubclass(cls, Section)
+ instance = cls()
+ SECTIONS.append(instance)
+ SUPPORTED_KINDS.update(instance.kind.split(","))
+
+
+class Section(object):
+ __metaclass__ = ABCMeta
+
+ @abstractproperty
+ def name(self):
+ pass
+
+ @abstractproperty
+ def kind(self):
+ pass
+
+ @abstractproperty
+ def title(self):
+ pass
+
+ @abstractproperty
+ def attrs(self):
+ pass
+
+ def contains(self, task):
+ return task.kind in self.kind.split(",")
+
+ def get_context(self, tasks):
+ labels = defaultdict(lambda: {"max_chunk": 0, "attrs": defaultdict(list)})
+
+ for task in tasks.values():
+ if not self.contains(task):
+ continue
+
+ task = task.attributes
+ label = labels[self.labelfn(task)]
+ for attr in self.attrs:
+ if attr in task and task[attr] not in label["attrs"][attr]:
+ label["attrs"][attr].append(task[attr])
+
+ if "test_chunk" in task:
+ label["max_chunk"] = max(
+ label["max_chunk"], int(task["test_chunk"])
+ )
+
+ return {
+ "name": self.name,
+ "kind": self.kind,
+ "title": self.title,
+ "labels": labels,
+ }
+
+
+@register_section
+class Platform(Section):
+ name = "platform"
+ kind = "build"
+ title = "Platforms"
+ attrs = ["build_platform"]
+
+ def labelfn(self, task):
+ return task["build_platform"]
+
+ def contains(self, task):
+ if not Section.contains(self, task):
+ return False
+
+ # android-stuff tasks aren't actual platforms
+ return task.task["tags"].get("android-stuff", False) != "true"
+
+
+@register_section
+class Test(Section):
+ name = "test"
+ kind = "test"
+ title = "Test Suites"
+ attrs = ["unittest_suite"]
+
+ def labelfn(self, task):
+ suite = task["unittest_suite"].replace(" ", "-")
+
+ if suite.endswith("-chunked"):
+ suite = suite[: -len("-chunked")]
+
+ return suite
+
+ def contains(self, task):
+ if not Section.contains(self, task):
+ return False
+ return task.attributes["unittest_suite"] not in ("raptor", "talos")
+
+
+@register_section
+class Perf(Section):
+ name = "perf"
+ kind = "test"
+ title = "Performance"
+ attrs = ["unittest_suite", "raptor_try_name", "talos_try_name"]
+
+ def labelfn(self, task):
+ suite = task["unittest_suite"]
+ label = task["{}_try_name".format(suite)]
+
+ if not label.startswith(suite):
+ label = "{}-{}".format(suite, label)
+
+ if label.endswith("-e10s"):
+ label = label[: -len("-e10s")]
+
+ return label
+
+ def contains(self, task):
+ if not Section.contains(self, task):
+ return False
+ return task.attributes["unittest_suite"] in ("raptor", "talos")
+
+
+@register_section
+class Analysis(Section):
+ name = "analysis"
+ kind = "build,static-analysis-autotest,hazard"
+ title = "Analysis"
+ attrs = ["build_platform"]
+
+ def labelfn(self, task):
+ return task["build_platform"]
+
+ def contains(self, task):
+ if not Section.contains(self, task):
+ return False
+ if task.kind == "build":
+ return task.task["tags"].get("android-stuff", False) == "true"
+ return True
+
+
+def create_application(tg, queue: multiprocessing.Queue):
+ tasks = {l: t for l, t in tg.tasks.items() if t.kind in SUPPORTED_KINDS}
+ sections = [s.get_context(tasks) for s in SECTIONS]
+ context = {
+ "tasks": {l: t.attributes for l, t in tasks.items()},
+ "sections": sections,
+ }
+
+ app = Flask(__name__)
+ app.env = "development"
+ app.tasks = []
+
+ @app.route("/", methods=["GET", "POST"])
+ def chooser():
+ if request.method == "GET":
+ return render_template("chooser.html", **context)
+
+ if request.form["action"] == "Push":
+ labels = request.form["selected-tasks"].splitlines()
+ app.tasks.extend(labels)
+
+ queue.put(app.tasks)
+ return render_template("close.html")
+
+ return app
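
Sections are pluggable: any subclass decorated with @register_section becomes a tab, provided it defines name, kind, title, attrs, and labelfn (which receives the task's attributes dict). A sketch of adding a hypothetical section; the kind string and grouping attribute here are assumptions for illustration, not values from this file:

    @register_section
    class Lint(Section):
        name = "lint"
        kind = "source-test"        # hypothetical task kind
        title = "Lint & Source Tests"
        attrs = ["build_platform"]  # assumed attribute to surface per label

        def labelfn(self, task):
            # Group tasks by platform, like the Platform section does.
            return task.get("build_platform", "generic")
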
diff --git a/tools/tryselect/selectors/chooser/static/filter.js b/tools/tryselect/selectors/chooser/static/filter.js
new file mode 100644
index 0000000000..2d8731e61f
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/static/filter.js
@@ -0,0 +1,116 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const selection = $("#selection")[0];
+const count = $("#selection-count")[0];
+const pluralize = (count, noun, suffix = "s") =>
+ `${count} ${noun}${count !== 1 ? suffix : ""}`;
+
+var selected = [];
+
+var updateLabels = () => {
+ $(".tab-pane.active > .filter-label").each(function (index) {
+ let box = $("#" + this.htmlFor)[0];
+ let method = box.checked ? "add" : "remove";
+ $(this)[method + "Class"]("is-checked");
+ });
+};
+
+var apply = () => {
+ let filters = {};
+ let kinds = [];
+
+ $(".filter:checked").each(function (index) {
+ for (let kind of this.name.split(",")) {
+ if (!kinds.includes(kind)) {
+ kinds.push(kind);
+ }
+ }
+
+ // Checkbox element values are generated by Section.get_context() in app.py
+ let attrs = JSON.parse(this.value);
+ for (let attr in attrs) {
+ if (!(attr in filters)) {
+ filters[attr] = [];
+ }
+
+ let values = attrs[attr];
+ filters[attr] = filters[attr].concat(values);
+ }
+ });
+ updateLabels();
+
+ if (
+ !Object.keys(filters).length ||
+ (Object.keys(filters).length == 1 && "build_type" in filters)
+ ) {
+ selection.value = "";
+ count.innerHTML = "0 tasks selected";
+ return;
+ }
+
+ var taskMatches = label => {
+ let task = tasks[label];
+
+ // If no box for the given kind has been checked, this task is
+ // automatically not selected.
+ if (!kinds.includes(task.kind)) {
+ return false;
+ }
+
+ for (let attr in filters) {
+ let values = filters[attr];
+ if (!(attr in task) || values.includes(task[attr])) {
+ continue;
+ }
+ return false;
+ }
+ return true;
+ };
+
+ selected = Object.keys(tasks).filter(taskMatches);
+ applyChunks();
+};
+
+var applyChunks = () => {
+ // For tasks that have a chunk filter applied, we handle that here.
+ let filters = {};
+ $(".filter:text").each(function (index) {
+ let value = $(this).val();
+ if (value === "") {
+ return;
+ }
+
+ let attrs = JSON.parse(this.name);
+ let key = `${attrs.unittest_suite}-${attrs.unittest_flavor}`;
+ if (!(key in filters)) {
+ filters[key] = [];
+ }
+
+    // Parse the chunk strings. These are formatted like printer page ranges, e.g. "1,4-6,9"
+ for (let item of value.split(",")) {
+ if (!item.includes("-")) {
+ filters[key].push(parseInt(item));
+ continue;
+ }
+
+ let [start, end] = item.split("-");
+ for (let i = parseInt(start); i <= parseInt(end); ++i) {
+ filters[key].push(i);
+ }
+ }
+ });
+
+ let chunked = selected.filter(function (label) {
+ let task = tasks[label];
+ let key = task.unittest_suite + "-" + task.unittest_flavor;
+ if (key in filters && !filters[key].includes(parseInt(task.test_chunk))) {
+ return false;
+ }
+ return true;
+ });
+
+ selection.value = chunked.join("\n");
+ count.innerText = pluralize(chunked.length, "task") + " selected";
+};
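
The chunk filter accepts printer-style range strings. Since the rest of this directory is Python, here is an equivalent parser sketched in Python rather than JS, mirroring the loop in applyChunks above:

    def parse_chunks(value):
        """Expand a printer-style range string like "1,4-6,9" into ints."""
        chunks = []
        for item in value.split(","):
            if "-" not in item:
                chunks.append(int(item))
                continue
            start, end = item.split("-")
            chunks.extend(range(int(start), int(end) + 1))
        return chunks

    assert parse_chunks("1,4-6,9") == [1, 4, 5, 6, 9]
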
diff --git a/tools/tryselect/selectors/chooser/static/select.js b/tools/tryselect/selectors/chooser/static/select.js
new file mode 100644
index 0000000000..8a315c0a52
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/static/select.js
@@ -0,0 +1,46 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const labels = $("label.multiselect");
+const boxes = $("label.multiselect input:checkbox");
+var lastChecked = {};
+
+// implements shift+click
+labels.click(function (e) {
+ if (e.target.tagName === "INPUT") {
+ return;
+ }
+
+ let box = $("#" + this.htmlFor)[0];
+ let activeSection = $("div.tab-pane.active")[0].id;
+
+ if (activeSection in lastChecked) {
+ // Bug 559506 - In Firefox shift/ctrl/alt+clicking a label doesn't check the box.
+ let isFirefox = navigator.userAgent.toLowerCase().indexOf("firefox") > -1;
+
+ if (e.shiftKey) {
+ if (isFirefox) {
+ box.checked = !box.checked;
+ }
+
+ let start = boxes.index(box);
+ let end = boxes.index(lastChecked[activeSection]);
+
+ boxes
+ .slice(Math.min(start, end), Math.max(start, end) + 1)
+ .prop("checked", box.checked);
+ apply();
+ }
+ }
+
+ lastChecked[activeSection] = box;
+});
+
+function selectAll(btn) {
+ let checked = !!btn.value;
+ $("div.active label.filter-label").each(function (index) {
+ $(this).find("input:checkbox")[0].checked = checked;
+ });
+ apply();
+}
diff --git a/tools/tryselect/selectors/chooser/static/style.css b/tools/tryselect/selectors/chooser/static/style.css
new file mode 100644
index 0000000000..6b2f96935b
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/static/style.css
@@ -0,0 +1,107 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+body {
+ padding-top: 70px;
+}
+
+/* Tabs */
+
+#tabbar .nav-link {
+ color: #009570;
+ font-size: 18px;
+ padding-bottom: 15px;
+ padding-top: 15px;
+}
+
+#tabbar .nav-link.active {
+ color: #212529;
+}
+
+#tabbar .nav-link:hover {
+ color: #0f5a3a;
+}
+
+/* Sections */
+
+.tab-content button {
+ font-size: 14px;
+ margin-bottom: 5px;
+ margin-top: 10px;
+}
+
+.filter-label {
+ display: block;
+ font-size: 16px;
+ position: relative;
+ padding-left: 15px;
+ padding-right: 15px;
+ padding-top: 10px;
+ padding-bottom: 10px;
+ margin-bottom: 0;
+ user-select: none;
+ vertical-align: middle;
+}
+
+.filter-label span {
+ display: flex;
+ min-height: 34px;
+ align-items: center;
+ justify-content: space-between;
+}
+
+.filter-label input[type="checkbox"] {
+ position: absolute;
+ opacity: 0;
+ height: 0;
+ width: 0;
+}
+
+.filter-label input[type="text"] {
+ width: 50px;
+}
+
+.filter-label:hover {
+ background-color: #91a0b0;
+}
+
+.filter-label.is-checked:hover {
+ background-color: #91a0b0;
+}
+
+.filter-label.is-checked {
+ background-color: #404c59;
+ color: white;
+}
+
+/* Preview pane */
+
+#preview {
+ position: fixed;
+ height: 100vh;
+ margin-left: 66%;
+ width: 100%;
+}
+
+#submit-tasks {
+ display: flex;
+ flex-direction: column;
+ height: 80%;
+}
+
+#buttons {
+ display: flex;
+ justify-content: space-between;
+}
+
+#push {
+ background-color: #00e9b7;
+ margin-left: 5px;
+ width: 100%;
+}
+
+#selection {
+ height: 100%;
+ width: 100%;
+}
diff --git a/tools/tryselect/selectors/chooser/templates/chooser.html b/tools/tryselect/selectors/chooser/templates/chooser.html
new file mode 100644
index 0000000000..4e009d94ac
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/templates/chooser.html
@@ -0,0 +1,78 @@
+<!-- This Source Code Form is subject to the terms of the Mozilla Public
+ - License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ - You can obtain one at http://mozilla.org/MPL/2.0/. -->
+
+{% extends 'layout.html' %}
+{% block content %}
+<div class="container-fluid">
+ <div class="row">
+ <div class="col-8">
+ <div class="form-group form-inline">
+ <span class="col-form-label col-md-2 pt-1">Build Type</span>
+ <div class="form-check form-check-inline">
+ <input id="both" class="filter form-check-input" type="radio" name="buildtype" value='{}' onchange="apply();" checked>
+ <label for="both" class="form-check-label">both</label>
+ </div>
+ {% for type in ["opt", "debug"] %}
+ <div class="form-check form-check-inline">
+ <input id="{{ type }}" class="filter form-check-input" type="radio" name="buildtype" value='{"build_type": "{{ type }}"}' onchange="apply();">
+ <label for={{ type }} class="form-check-label">{{ type }}</label>
+ </div>
+ {% endfor %}
+ </div>
+ <ul class="nav nav-tabs" id="tabbar" role="tablist">
+ {% for section in sections %}
+ <li class="nav-item">
+ {% if loop.first %}
+ <a class="nav-link active" id="{{ section.name }}-tab" data-toggle="tab" href="#{{section.name }}" role="tab" aria-controls="{{ section.name }}" aria-selected="true">{{ section.title }}</a>
+ {% else %}
+ <a class="nav-link" id="{{ section.name }}-tab" data-toggle="tab" href="#{{section.name }}" role="tab" aria-controls="{{ section.name }}" aria-selected="false">{{ section.title }}</a>
+ {% endif %}
+ </li>
+ {% endfor %}
+ </ul>
+ <div class="tab-content">
+ <button type="button" class="btn btn-secondary" value="true" onclick="selectAll(this);">Select All</button>
+ <button type="button" class="btn btn-secondary" onclick="selectAll(this);">Deselect All</button>
+ {% for section in sections %}
+ {% if loop.first %}
+ <div class="tab-pane show active" id="{{ section.name }}" role="tabpanel" aria-labelledby="{{ section.name }}-tab">
+ {% else %}
+ <div class="tab-pane" id="{{ section.name }}" role="tabpanel" aria-labelledby="{{ section.name }}-tab">
+ {% endif %}
+ {% for label, meta in section.labels|dictsort %}
+ <label class="multiselect filter-label" for={{ label }}>
+ <span>
+ {{ label }}
+              <input class="filter" type="checkbox" id={{ label }} name="{{ section.kind }}" value='{{ meta.attrs|tojson|safe }}' onchange="apply();">
+ {% if meta.max_chunk > 1 %}
+ <input class="filter" type="text" pattern="[0-9][0-9,\-]*" placeholder="1-{{ meta.max_chunk }}" name='{{ meta.attrs|tojson|safe }}' oninput="applyChunks();">
+ {% endif %}
+ </span>
+ </label>
+ {% endfor %}
+ </div>
+ {% endfor %}
+ </div>
+ </div>
+ <div class="col-4" id="preview">
+ <form id="submit-tasks" action="" method="POST">
+ <textarea id="selection" name="selected-tasks" wrap="off"></textarea>
+ <span id="selection-count">0 tasks selected</span><br>
+ <span id="buttons">
+ <input id="cancel" class="btn btn-default" type="submit" name="action" value="Cancel">
+ <input id="push" class="btn btn-default" type="submit" name="action" value="Push">
+ </span>
+ </form>
+ </div>
+ </div>
+</div>
+{% endblock %}
+
+{% block scripts %}
+<script>
+ const tasks = {{ tasks|tojson|safe }};
+</script>
+<script src="{{ url_for('static', filename='filter.js') }}"></script>
+<script src="{{ url_for('static', filename='select.js') }}"></script>
+{% endblock %}
diff --git a/tools/tryselect/selectors/chooser/templates/close.html b/tools/tryselect/selectors/chooser/templates/close.html
new file mode 100644
index 0000000000..9dc0a161f3
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/templates/close.html
@@ -0,0 +1,11 @@
+<!-- This Source Code Form is subject to the terms of the Mozilla Public
+ - License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ - You can obtain one at http://mozilla.org/MPL/2.0/. -->
+
+{% extends 'layout.html' %} {% block content %}
+<div class="container-fluid">
+ <div class="alert alert-primary" role="alert">
+ You may now close this page.
+ </div>
+</div>
+{% endblock %}
diff --git a/tools/tryselect/selectors/chooser/templates/layout.html b/tools/tryselect/selectors/chooser/templates/layout.html
new file mode 100644
index 0000000000..8553ae94df
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/templates/layout.html
@@ -0,0 +1,71 @@
+<!-- This Source Code Form is subject to the terms of the Mozilla Public
+ - License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ - You can obtain one at http://mozilla.org/MPL/2.0/. -->
+
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <title>Try Chooser Enhanced</title>
+ <link
+ rel="stylesheet"
+ href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css"
+ />
+ <link
+ rel="stylesheet"
+ href="{{ url_for('static', filename='style.css') }}"
+ />
+ </head>
+ <body>
+ <nav class="navbar navbar-default fixed-top navbar-dark bg-dark">
+ <div class="container-fluid">
+ <span class="navbar-brand mb-0 h1">Try Chooser Enhanced</span>
+ <button
+ class="navbar-toggler"
+ type="button"
+ data-toggle="collapse"
+ data-target="#navbarSupportedContent"
+ aria-controls="navbarSupportedContent"
+ aria-expanded="false"
+ aria-label="Toggle navigation"
+ >
+ <span class="navbar-toggler-icon"></span>
+ </button>
+ <div class="collapse navbar-collapse" id="navbarSupportedContent">
+ <ul class="navbar-nav mr-auto">
+ <li class="nav-item">
+ <a
+ class="nav-link"
+ href="https://firefox-source-docs.mozilla.org/tools/try/index.html"
+ >Documentation</a
+ >
+ </li>
+ <li class="nav-item">
+ <a
+ class="nav-link"
+ href="https://treeherder.mozilla.org/#/jobs?repo=try"
+ >Treeherder</a
+ >
+ </li>
+ </ul>
+ </div>
+ </div>
+ </nav>
+ {% block content %}{% endblock %}
+ <script
+ src="https://code.jquery.com/jquery-3.3.1.slim.min.js"
+ integrity="sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo"
+ crossorigin="anonymous"
+ ></script>
+ <script
+ src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.3/umd/popper.min.js"
+ integrity="sha384-ZMP7rVo3mIykV+2+9J3UJ46jBk0WLaUAdn689aCwoqbBJiSnjAK/l8WvCWPIPm49"
+ crossorigin="anonymous"
+ ></script>
+ <script
+ src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
+ integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
+ crossorigin="anonymous"
+ ></script>
+ {% block scripts %}{% endblock %}
+ </body>
+</html>
diff --git a/tools/tryselect/selectors/compare.py b/tools/tryselect/selectors/compare.py
new file mode 100644
index 0000000000..ac468e0974
--- /dev/null
+++ b/tools/tryselect/selectors/compare.py
@@ -0,0 +1,66 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+from mozbuild.base import MozbuildObject
+from mozversioncontrol import get_repository_object
+
+from tryselect.cli import BaseTryParser
+
+from .again import run as again_run
+from .fuzzy import run as fuzzy_run
+
+here = os.path.abspath(os.path.dirname(__file__))
+build = MozbuildObject.from_environment(cwd=here)
+
+
+class CompareParser(BaseTryParser):
+ name = "compare"
+ arguments = [
+ [
+ ["-cc", "--compare-commit"],
+ {
+ "default": None,
+ "help": "The commit that you want to compare your current revision with",
+ },
+ ],
+ ]
+ common_groups = ["task"]
+ task_configs = [
+ "rebuild",
+ ]
+
+ def get_revisions_to_run(vcs, compare_commit):
+ if compare_commit is None:
+ compare_commit = vcs.base_ref
+ if vcs.branch:
+ current_revision_ref = vcs.branch
+ else:
+ current_revision_ref = vcs.head_ref
+
+ return compare_commit, current_revision_ref
+
+
+def run(compare_commit=None, **kwargs):
+ vcs = get_repository_object(build.topsrcdir)
+ compare_commit, current_revision_ref = CompareParser.get_revisions_to_run(
+ vcs, compare_commit
+ )
+ print("********************************************")
+ print("* 2 commits are created with this command *")
+ print("********************************************")
+
+ try:
+ fuzzy_run(**kwargs)
+ print("********************************************")
+ print("* The base commit can be found above *")
+ print("********************************************")
+ vcs.update(compare_commit)
+ again_run()
+ print("*****************************************")
+ print("* The compare commit can be found above *")
+ print("*****************************************")
+ finally:
+ vcs.update(current_revision_ref)
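
get_revisions_to_run falls back to vcs.base_ref when no comparison commit is given, and prefers a named branch over a bare head as the revision to restore. A tiny self-contained sketch of that fallback, with a hypothetical repository object (the logic is copied from the method above):

    def get_revisions_to_run(vcs, compare_commit):
        # Same fallback order as CompareParser.get_revisions_to_run.
        if compare_commit is None:
            compare_commit = vcs.base_ref
        current = vcs.branch if vcs.branch else vcs.head_ref
        return compare_commit, current

    class FakeVCS:  # hypothetical repository object
        base_ref, branch, head_ref = "central-tip", None, "abcdef123456"

    assert get_revisions_to_run(FakeVCS(), None) == ("central-tip", "abcdef123456")
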
diff --git a/tools/tryselect/selectors/coverage.py b/tools/tryselect/selectors/coverage.py
new file mode 100644
index 0000000000..f396e4618c
--- /dev/null
+++ b/tools/tryselect/selectors/coverage.py
@@ -0,0 +1,452 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import collections
+import datetime
+import hashlib
+import json
+import os
+import shutil
+import sqlite3
+import subprocess
+
+import requests
+import six
+from mach.util import get_state_dir
+from mozbuild.base import MozbuildObject
+from mozpack.files import FileFinder
+from moztest.resolve import TestResolver
+from mozversioncontrol import get_repository_object
+
+from ..cli import BaseTryParser
+from ..push import generate_try_task_config, push_to_try
+from ..tasks import filter_tasks_by_paths, generate_tasks, resolve_tests_by_suite
+
+here = os.path.abspath(os.path.dirname(__file__))
+build = None
+vcs = None
+CHUNK_MAPPING_FILE = None
+CHUNK_MAPPING_TAG_FILE = None
+
+
+def setup_globals():
+ # Avoid incurring expensive computation on import.
+ global build, vcs, CHUNK_MAPPING_TAG_FILE, CHUNK_MAPPING_FILE
+ build = MozbuildObject.from_environment(cwd=here)
+ vcs = get_repository_object(build.topsrcdir)
+
+ root_hash = hashlib.sha256(
+ six.ensure_binary(os.path.abspath(build.topsrcdir))
+ ).hexdigest()
+ cache_dir = os.path.join(get_state_dir(), "cache", root_hash, "chunk_mapping")
+ if not os.path.isdir(cache_dir):
+ os.makedirs(cache_dir)
+ CHUNK_MAPPING_FILE = os.path.join(cache_dir, "chunk_mapping.sqlite")
+ CHUNK_MAPPING_TAG_FILE = os.path.join(cache_dir, "chunk_mapping_tag.json")
+
+
+# Maps from platform names in the chunk_mapping sqlite database to respective
+# substrings in task names.
+PLATFORM_MAP = {
+ "linux": "test-linux64/opt",
+ "windows": "test-windows10-64/opt",
+}
+
+# List of platform/build type combinations that are included in pushes by |mach try coverage|.
+OPT_TASK_PATTERNS = [
+ "macosx64/opt",
+ "windows10-64/opt",
+ "windows7-32/opt",
+ "linux64/opt",
+]
+
+
+class CoverageParser(BaseTryParser):
+ name = "coverage"
+ arguments = []
+ common_groups = ["push", "task"]
+ task_configs = [
+ "artifact",
+ "env",
+ "rebuild",
+ "chemspill-prio",
+ "disable-pgo",
+ "worker-overrides",
+ ]
+
+
+def read_test_manifests():
+ """Uses TestResolver to read all test manifests in the tree.
+
+ Returns a (tests, support_files_map) tuple that describes the tests in the tree:
+ tests - a set of test file paths
+ support_files_map - a dict that maps from each support file to a list with
+ test files that require them it
+ """
+ setup_globals()
+ test_resolver = TestResolver.from_environment(cwd=here)
+ file_finder = FileFinder(build.topsrcdir)
+ support_files_map = collections.defaultdict(list)
+ tests = set()
+
+ for test in test_resolver.resolve_tests(build.topsrcdir):
+ tests.add(test["srcdir_relpath"])
+ if "support-files" not in test:
+ continue
+
+ for support_file_pattern in test["support-files"].split():
+ # Get the pattern relative to topsrcdir.
+ if support_file_pattern.startswith("!/"):
+ support_file_pattern = support_file_pattern[2:]
+ elif support_file_pattern.startswith("/"):
+ support_file_pattern = support_file_pattern[1:]
+ else:
+ support_file_pattern = os.path.normpath(
+ os.path.join(test["dir_relpath"], support_file_pattern)
+ )
+
+ # If it doesn't have a glob, then it's a single file.
+ if "*" not in support_file_pattern:
+ # Simple case: single support file, just add it here.
+ support_files_map[support_file_pattern].append(test["srcdir_relpath"])
+ continue
+
+ for support_file, _ in file_finder.find(support_file_pattern):
+ support_files_map[support_file].append(test["srcdir_relpath"])
+
+ return tests, support_files_map
+
+
+# TODO cache the output of this function
+all_tests, all_support_files = read_test_manifests()
+
+
+def download_coverage_mapping(base_revision):
+ try:
+ with open(CHUNK_MAPPING_TAG_FILE) as f:
+ tags = json.load(f)
+ if tags["target_revision"] == base_revision:
+ return
+ else:
+ print("Base revision changed.")
+ except (OSError, ValueError):
+ print("Chunk mapping file not found.")
+
+ CHUNK_MAPPING_URL_TEMPLATE = "https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/project.relman.code-coverage.production.cron.{}/artifacts/public/chunk_mapping.tar.xz" # noqa
+ JSON_PUSHES_URL_TEMPLATE = "https://hg.mozilla.org/mozilla-central/json-pushes?version=2&tipsonly=1&startdate={}" # noqa
+
+ # Get pushes from at most one month ago.
+ PUSH_HISTORY_DAYS = 30
+ delta = datetime.timedelta(days=PUSH_HISTORY_DAYS)
+ start_time = (datetime.datetime.now() - delta).strftime("%Y-%m-%d")
+ pushes_url = JSON_PUSHES_URL_TEMPLATE.format(start_time)
+ pushes_data = requests.get(pushes_url + "&tochange={}".format(base_revision)).json()
+ if "error" in pushes_data:
+ if "unknown revision" in pushes_data["error"]:
+ print(
+ "unknown revision {}, trying with latest mozilla-central".format(
+ base_revision
+ )
+ )
+ pushes_data = requests.get(pushes_url).json()
+
+ if "error" in pushes_data:
+ raise Exception(pushes_data["error"])
+
+ pushes = pushes_data["pushes"]
+
+ print("Looking for coverage data. This might take a minute or two.")
+ print("Base revision:", base_revision)
+ for push_id in sorted(pushes.keys())[::-1]:
+ rev = pushes[push_id]["changesets"][0]
+ url = CHUNK_MAPPING_URL_TEMPLATE.format(rev)
+ print("push id: {},\trevision: {}".format(push_id, rev))
+
+ r = requests.head(url)
+ if not r.ok:
+ continue
+
+ print("Chunk mapping found, downloading...")
+ r = requests.get(url, stream=True)
+
+ CHUNK_MAPPING_ARCHIVE = os.path.join(build.topsrcdir, "chunk_mapping.tar.xz")
+ with open(CHUNK_MAPPING_ARCHIVE, "wb") as f:
+ r.raw.decode_content = True
+ shutil.copyfileobj(r.raw, f)
+
+ subprocess.check_call(
+ [
+ "tar",
+ "-xJf",
+ CHUNK_MAPPING_ARCHIVE,
+ "-C",
+ os.path.dirname(CHUNK_MAPPING_FILE),
+ ]
+ )
+ os.remove(CHUNK_MAPPING_ARCHIVE)
+ assert os.path.isfile(CHUNK_MAPPING_FILE)
+ with open(CHUNK_MAPPING_TAG_FILE, "w") as f:
+ json.dump(
+ {
+ "target_revision": base_revision,
+ "chunk_mapping_revision": rev,
+ "download_date": start_time,
+ },
+ f,
+ )
+ return
+ raise Exception("Could not find suitable coverage data.")
+
+
+def is_a_test(cursor, path):
+ """Checks the all_tests global and the chunk mapping database to see if a
+ given file is a test file.
+ """
+ if path in all_tests:
+ return True
+
+ cursor.execute("SELECT COUNT(*) from chunk_to_test WHERE path=?", (path,))
+ if cursor.fetchone()[0]:
+ return True
+
+ cursor.execute("SELECT COUNT(*) from file_to_test WHERE test=?", (path,))
+ if cursor.fetchone()[0]:
+ return True
+
+ return False
+
+
+def tests_covering_file(cursor, path):
+ """Returns a set of tests that cover a given source file."""
+ cursor.execute("SELECT test FROM file_to_test WHERE source=?", (path,))
+ return {e[0] for e in cursor.fetchall()}
+
+
+def tests_in_chunk(cursor, platform, chunk):
+ """Returns a set of tests that are contained in a given chunk."""
+ cursor.execute(
+ "SELECT path FROM chunk_to_test WHERE platform=? AND chunk=?", (platform, chunk)
+ )
+ # Because of bug 1480103, some entries in this table contain both a file name and a test name,
+ # separated by a space. With the split, only the file name is kept.
+ return {e[0].split(" ")[0] for e in cursor.fetchall()}
+
+
+def chunks_covering_file(cursor, path):
+ """Returns a set of (platform, chunk) tuples with the chunks that cover a given source file."""
+ cursor.execute("SELECT platform, chunk FROM file_to_chunk WHERE path=?", (path,))
+ return set(cursor.fetchall())
+
+
+def tests_supported_by_file(path):
+ """Returns a set of tests that are using the given file as a support-file."""
+ return set(all_support_files[path])
+
+
+def find_tests(changed_files):
+ """Finds both individual tests and test chunks that should be run to test code changes.
+ Argument: a list of file paths relative to the source checkout.
+
+ Returns: a (test_files, test_chunks) tuple with two sets.
+ test_files - contains tests that should be run to verify changes to changed_files.
+ test_chunks - contains (platform, chunk) tuples with chunks that should be
+    run. These chunks do not support running a subset of the tests (like
+ cppunit or gtest), so the whole chunk must be run.
+ """
+ test_files = set()
+ test_chunks = set()
+ files_no_coverage = set()
+
+ with sqlite3.connect(CHUNK_MAPPING_FILE) as conn:
+ c = conn.cursor()
+ for path in changed_files:
+ # If path is a test, add it to the list and continue.
+ if is_a_test(c, path):
+ test_files.add(path)
+ continue
+
+ # Look at the chunk mapping and add all tests that cover this file.
+ tests = tests_covering_file(c, path)
+ chunks = chunks_covering_file(c, path)
+ # If we found tests covering this, then it's not a support-file, so
+ # save these and continue.
+ if tests or chunks:
+ test_files |= tests
+ test_chunks |= chunks
+ continue
+
+ # Check if the path is a support-file for any test, by querying test manifests.
+ tests = tests_supported_by_file(path)
+ if tests:
+ test_files |= tests
+ continue
+
+ # There is no coverage information for this file.
+ files_no_coverage.add(path)
+
+ files_covered = set(changed_files) - files_no_coverage
+ test_files = {s.replace("\\", "/") for s in test_files}
+
+ _print_found_tests(files_covered, files_no_coverage, test_files, test_chunks)
+
+ remaining_test_chunks = set()
+ # For all test_chunks, try to find the tests contained by them in the
+ # chunk_to_test mapping.
+ for platform, chunk in test_chunks:
+ tests = tests_in_chunk(c, platform, chunk)
+ if tests:
+ for test in tests:
+ test_files.add(test.replace("\\", "/"))
+ else:
+ remaining_test_chunks.add((platform, chunk))
+
+ return test_files, remaining_test_chunks
+
+
+def _print_found_tests(files_covered, files_no_coverage, test_files, test_chunks):
+ """Print a summary of what will be run to the user's terminal."""
+ files_covered = sorted(files_covered)
+ files_no_coverage = sorted(files_no_coverage)
+ test_files = sorted(test_files)
+ test_chunks = sorted(test_chunks)
+
+ if files_covered:
+ print(
+ "Found {} modified source files with test coverage:".format(
+ len(files_covered)
+ )
+ )
+ for covered in files_covered:
+ print("\t", covered)
+
+ if files_no_coverage:
+ print(
+ "Found {} modified source files with no coverage:".format(
+ len(files_no_coverage)
+ )
+ )
+ for f in files_no_coverage:
+ print("\t", f)
+
+ if not files_covered:
+ print("No modified source files are covered by tests.")
+ elif not files_no_coverage:
+ print("All modified source files are covered by tests.")
+
+ if test_files:
+ print("Running {} individual test files.".format(len(test_files)))
+ else:
+ print("Could not find any individual tests to run.")
+
+ if test_chunks:
+ print("Running {} test chunks.".format(len(test_chunks)))
+ for platform, chunk in test_chunks:
+ print("\t", platform, chunk)
+ else:
+ print("Could not find any test chunks to run.")
+
+
+def filter_tasks_by_chunks(tasks, chunks):
+ """Find all tasks that will run the given chunks."""
+ selected_tasks = set()
+ for platform, chunk in chunks:
+ platform = PLATFORM_MAP[platform]
+
+ selected_task = None
+ for task in tasks.keys():
+ if not task.startswith(platform):
+ continue
+
+ if not any(
+ task[len(platform) + 1 :].endswith(c) for c in [chunk, chunk + "-e10s"]
+ ):
+ continue
+
+ assert (
+ selected_task is None
+ ), "Only one task should be selected for a given platform-chunk couple ({} - {}), {} and {} were selected".format( # noqa
+ platform, chunk, selected_task, task
+ )
+ selected_task = task
+
+ if selected_task is None:
+ print("Warning: no task found for chunk", platform, chunk)
+ else:
+ selected_tasks.add(selected_task)
+
+ return list(selected_tasks)
+
+
+def is_opt_task(task):
+ """True if the task runs on a supported platform and build type combination.
+ This is used to remove -ccov/asan/pgo tasks, along with all /debug tasks.
+ """
+ return any(platform in task for platform in OPT_TASK_PATTERNS)
+
+
+def run(
+ try_config_params={},
+ full=False,
+ parameters=None,
+ stage_changes=False,
+ dry_run=False,
+ message="{msg}",
+ closed_tree=False,
+ push_to_lando=False,
+):
+ setup_globals()
+ download_coverage_mapping(vcs.base_ref)
+
+ changed_sources = vcs.get_outgoing_files()
+ test_files, test_chunks = find_tests(changed_sources)
+ if not test_files and not test_chunks:
+ print("ERROR Could not find any tests or chunks to run.")
+ return 1
+
+ tg = generate_tasks(parameters, full)
+ all_tasks = tg.tasks
+
+ tasks_by_chunks = filter_tasks_by_chunks(all_tasks, test_chunks)
+ tasks_by_path = filter_tasks_by_paths(all_tasks, test_files)
+ tasks = filter(is_opt_task, set(tasks_by_path) | set(tasks_by_chunks))
+ tasks = list(tasks)
+
+ if not tasks:
+ print("ERROR Did not find any matching tasks after filtering.")
+ return 1
+ test_count_message = (
+ "{test_count} test file{test_plural} that "
+ + "cover{test_singular} these changes "
+ + "({task_count} task{task_plural} to be scheduled)"
+ ).format(
+ test_count=len(test_files),
+ test_plural="" if len(test_files) == 1 else "s",
+ test_singular="s" if len(test_files) == 1 else "",
+ task_count=len(tasks),
+ task_plural="" if len(tasks) == 1 else "s",
+ )
+ print("Found " + test_count_message)
+
+ # Set the test paths to be run by setting MOZHARNESS_TEST_PATHS.
+ path_env = {
+ "MOZHARNESS_TEST_PATHS": six.ensure_text(
+ json.dumps(resolve_tests_by_suite(test_files))
+ )
+ }
+ try_config_params.setdefault("try_task_config", {}).setdefault("env", {}).update(
+ path_env
+ )
+
+ # Build commit message.
+ msg = "try coverage - " + test_count_message
+ return push_to_try(
+ "coverage",
+ message.format(msg=msg),
+ try_task_config=generate_try_task_config("coverage", tasks, try_config_params),
+ stage_changes=stage_changes,
+ dry_run=dry_run,
+ closed_tree=closed_tree,
+ push_to_lando=push_to_lando,
+ )
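
The queries above assume a chunk_mapping.sqlite containing file_to_test, file_to_chunk, and chunk_to_test tables. A self-contained sketch of the lookup pattern against an in-memory database; the schema is inferred from the SELECT statements in this file, and the row values are hypothetical:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    c = conn.cursor()
    # Schema inferred from the SELECTs in coverage.py.
    c.execute("CREATE TABLE file_to_test (source TEXT, test TEXT)")
    c.execute("INSERT INTO file_to_test VALUES ('dom/foo.cpp', 'dom/tests/test_foo.html')")

    c.execute("SELECT test FROM file_to_test WHERE source=?", ("dom/foo.cpp",))
    tests = {row[0] for row in c.fetchall()}
    print(tests)  # {'dom/tests/test_foo.html'}
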
diff --git a/tools/tryselect/selectors/empty.py b/tools/tryselect/selectors/empty.py
new file mode 100644
index 0000000000..15a48fa5d2
--- /dev/null
+++ b/tools/tryselect/selectors/empty.py
@@ -0,0 +1,43 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from ..cli import BaseTryParser
+from ..push import generate_try_task_config, push_to_try
+
+
+class EmptyParser(BaseTryParser):
+ name = "empty"
+ common_groups = ["push"]
+ task_configs = [
+ "artifact",
+ "browsertime",
+ "chemspill-prio",
+ "disable-pgo",
+ "env",
+ "gecko-profile",
+ "pernosco",
+ "routes",
+ "worker-overrides",
+ ]
+
+
+def run(
+ message="{msg}",
+ try_config_params=None,
+ stage_changes=False,
+ dry_run=False,
+ closed_tree=False,
+ push_to_lando=False,
+):
+ msg = 'No try selector specified, use "Add New Jobs" to select tasks.'
+ return push_to_try(
+ "empty",
+ message.format(msg=msg),
+ try_task_config=generate_try_task_config("empty", [], params=try_config_params),
+ stage_changes=stage_changes,
+ dry_run=dry_run,
+ closed_tree=closed_tree,
+ push_to_lando=push_to_lando,
+ )
diff --git a/tools/tryselect/selectors/fuzzy.py b/tools/tryselect/selectors/fuzzy.py
new file mode 100644
index 0000000000..7a9bccc4b7
--- /dev/null
+++ b/tools/tryselect/selectors/fuzzy.py
@@ -0,0 +1,284 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import os
+import sys
+from pathlib import PurePath
+
+from gecko_taskgraph.target_tasks import filter_by_uncommon_try_tasks
+from mach.util import get_state_dir
+
+from ..cli import BaseTryParser
+from ..push import check_working_directory, generate_try_task_config, push_to_try
+from ..tasks import filter_tasks_by_paths, generate_tasks
+from ..util.fzf import (
+ FZF_NOT_FOUND,
+ PREVIEW_SCRIPT,
+ format_header,
+ fzf_bootstrap,
+ fzf_shortcuts,
+ run_fzf,
+)
+from ..util.manage_estimates import (
+ download_task_history_data,
+ make_trimmed_taskgraph_cache,
+)
+
+
+class FuzzyParser(BaseTryParser):
+ name = "fuzzy"
+ arguments = [
+ [
+ ["-q", "--query"],
+ {
+ "metavar": "STR",
+ "action": "append",
+ "default": [],
+ "help": "Use the given query instead of entering the selection "
+ "interface. Equivalent to typing <query><ctrl-a><enter> "
+ "from the interface. Specifying multiple times schedules "
+ "the union of computed tasks.",
+ },
+ ],
+ [
+ ["-i", "--interactive"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Force running fzf interactively even when using presets or "
+ "queries with -q/--query.",
+ },
+ ],
+ [
+ ["-x", "--and"],
+ {
+ "dest": "intersection",
+ "action": "store_true",
+ "default": False,
+ "help": "When specifying queries on the command line with -q/--query, "
+ "use the intersection of tasks rather than the union. This is "
+ "especially useful for post filtering presets.",
+ },
+ ],
+ [
+ ["-e", "--exact"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Enable exact match mode. Terms will use an exact match "
+ "by default, and terms prefixed with ' will become fuzzy.",
+ },
+ ],
+ [
+ ["-u", "--update"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Update fzf before running.",
+ },
+ ],
+ [
+ ["-s", "--show-estimates"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Show task duration estimates.",
+ },
+ ],
+ [
+ ["--disable-target-task-filter"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Some tasks run on mozilla-central but are filtered out "
+ "of the default list due to resource constraints. This flag "
+ "disables this filtering.",
+ },
+ ],
+ [
+ ["--show-chunk-numbers"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Chunk numbers are hidden to simplify the selection. This flag "
+ "makes them appear again.",
+ },
+ ],
+ ]
+ common_groups = ["push", "task", "preset"]
+ task_configs = [
+ "artifact",
+ "browsertime",
+ "chemspill-prio",
+ "disable-pgo",
+ "env",
+ "existing-tasks",
+ "gecko-profile",
+ "new-test-config",
+ "path",
+ "pernosco",
+ "rebuild",
+ "routes",
+ "worker-overrides",
+ ]
+
+
+def run(
+ update=False,
+ query=None,
+ intersect_query=None,
+ full=False,
+ parameters=None,
+ try_config_params=None,
+ save_query=False,
+ stage_changes=False,
+ dry_run=False,
+ message="{msg}",
+ test_paths=None,
+ exact=False,
+ closed_tree=False,
+ show_estimates=False,
+ disable_target_task_filter=False,
+ push_to_lando=False,
+ show_chunk_numbers=False,
+ new_test_config=False,
+):
+ fzf = fzf_bootstrap(update)
+
+ if not fzf:
+ print(FZF_NOT_FOUND)
+ return 1
+
+ push = not stage_changes and not dry_run
+ check_working_directory(push)
+ tg = generate_tasks(
+ parameters, full=full, disable_target_task_filter=disable_target_task_filter
+ )
+ all_tasks = tg.tasks
+
+    # graph_cache was created by generate_tasks; recreate the path to that file.
+ cache_dir = os.path.join(
+ get_state_dir(specific_to_topsrcdir=True), "cache", "taskgraph"
+ )
+ if full:
+ graph_cache = os.path.join(cache_dir, "full_task_graph")
+ dep_cache = os.path.join(cache_dir, "full_task_dependencies")
+ target_set = os.path.join(cache_dir, "full_task_set")
+ else:
+ graph_cache = os.path.join(cache_dir, "target_task_graph")
+ dep_cache = os.path.join(cache_dir, "target_task_dependencies")
+ target_set = os.path.join(cache_dir, "target_task_set")
+
+ if show_estimates:
+ download_task_history_data(cache_dir=cache_dir)
+ make_trimmed_taskgraph_cache(graph_cache, dep_cache, target_file=target_set)
+
+ if not full and not disable_target_task_filter:
+ all_tasks = {
+ task_name: task
+ for task_name, task in all_tasks.items()
+ if filter_by_uncommon_try_tasks(task_name)
+ }
+
+ if test_paths:
+ all_tasks = filter_tasks_by_paths(all_tasks, test_paths)
+ if not all_tasks:
+ return 1
+
+ key_shortcuts = [k + ":" + v for k, v in fzf_shortcuts.items()]
+ base_cmd = [
+ fzf,
+ "-m",
+ "--bind",
+ ",".join(key_shortcuts),
+ "--header",
+ format_header(),
+ "--preview-window=right:30%",
+ "--print-query",
+ ]
+
+ if show_estimates:
+ base_cmd.extend(
+ [
+ "--preview",
+ '{} {} -g {} -s -c {} -t "{{+f}}"'.format(
+ str(PurePath(sys.executable)), PREVIEW_SCRIPT, dep_cache, cache_dir
+ ),
+ ]
+ )
+ else:
+ base_cmd.extend(
+ [
+ "--preview",
+ '{} {} -t "{{+f}}"'.format(
+ str(PurePath(sys.executable)), PREVIEW_SCRIPT
+ ),
+ ]
+ )
+
+ if exact:
+ base_cmd.append("--exact")
+
+ selected = set()
+ queries = []
+
+ def get_tasks(query_arg=None, candidate_tasks=all_tasks):
+ cmd = base_cmd[:]
+ if query_arg and query_arg != "INTERACTIVE":
+ cmd.extend(["-f", query_arg])
+
+ if not show_chunk_numbers:
+ fzf_tasks = set(task.chunk_pattern for task in candidate_tasks.values())
+ else:
+ fzf_tasks = set(candidate_tasks.keys())
+
+ query_str, tasks = run_fzf(cmd, sorted(fzf_tasks))
+ queries.append(query_str)
+ return set(tasks)
+
+ for q in query or []:
+ selected |= get_tasks(q)
+
+ for q in intersect_query or []:
+ if not selected:
+ selected |= get_tasks(q)
+ else:
+ selected &= get_tasks(
+ q,
+ {
+ task_name: task
+ for task_name, task in all_tasks.items()
+ if task_name in selected or task.chunk_pattern in selected
+ },
+ )
+
+ if not queries:
+ selected = get_tasks()
+
+ if not selected:
+ print("no tasks selected")
+ return
+
+ if save_query:
+ return queries
+
+ # build commit message
+ msg = "Fuzzy"
+ args = ["query={}".format(q) for q in queries]
+ if test_paths:
+ args.append("paths={}".format(":".join(test_paths)))
+ if args:
+ msg = "{} {}".format(msg, "&".join(args))
+ return push_to_try(
+ "fuzzy",
+ message.format(msg=msg),
+ try_task_config=generate_try_task_config(
+ "fuzzy", selected, params=try_config_params
+ ),
+ stage_changes=stage_changes,
+ dry_run=dry_run,
+ closed_tree=closed_tree,
+ push_to_lando=push_to_lando,
+ )
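
When -q/--query is given, fzf runs in its non-interactive filter mode (-f) rather than opening the selection interface. A minimal sketch of that invocation, assuming fzf is on PATH (the task labels are hypothetical):

    import subprocess

    labels = [
        "test-linux64/opt-mochitest-1",
        "test-windows10-64/opt-xpcshell-1",
    ]
    # fzf -f prints every candidate matching the fuzzy query, one per line.
    proc = subprocess.run(
        ["fzf", "-f", "linux mochitest"],
        input="\n".join(labels),
        capture_output=True,
        text=True,
    )
    selected = proc.stdout.splitlines()
    print(selected)  # ['test-linux64/opt-mochitest-1']
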
diff --git a/tools/tryselect/selectors/perf.py b/tools/tryselect/selectors/perf.py
new file mode 100644
index 0000000000..3c59e5949c
--- /dev/null
+++ b/tools/tryselect/selectors/perf.py
@@ -0,0 +1,1511 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import copy
+import itertools
+import json
+import os
+import pathlib
+import shutil
+import subprocess
+from contextlib import redirect_stdout
+from datetime import datetime, timedelta
+
+import requests
+from mach.util import get_state_dir
+from mozbuild.base import MozbuildObject
+from mozversioncontrol import get_repository_object
+
+from ..push import generate_try_task_config, push_to_try
+from ..util.fzf import (
+ FZF_NOT_FOUND,
+ build_base_cmd,
+ fzf_bootstrap,
+ run_fzf,
+ setup_tasks_for_fzf,
+)
+from .compare import CompareParser
+from .perfselector.classification import (
+ Apps,
+ ClassificationProvider,
+ Platforms,
+ Suites,
+ Variants,
+)
+from .perfselector.perfcomparators import get_comparator
+from .perfselector.utils import LogProcessor
+
+here = os.path.abspath(os.path.dirname(__file__))
+build = MozbuildObject.from_environment(cwd=here)
+cache_file = pathlib.Path(get_state_dir(), "try_perf_revision_cache.json")
+PREVIEW_SCRIPT = pathlib.Path(
+ build.topsrcdir, "tools/tryselect/selectors/perf_preview.py"
+)
+
+PERFHERDER_BASE_URL = (
+ "https://treeherder.mozilla.org/perfherder/"
+ "compare?originalProject=try&originalRevision=%s&newProject=try&newRevision=%s"
+)
+PERFCOMPARE_BASE_URL = "https://beta--mozilla-perfcompare.netlify.app/compare-results?baseRev=%s&newRev=%s&baseRepo=try&newRepo=try"
+TREEHERDER_TRY_BASE_URL = "https://treeherder.mozilla.org/jobs?repo=try&revision=%s"
+TREEHERDER_ALERT_TASKS_URL = (
+ "https://treeherder.mozilla.org/api/performance/alertsummary-tasks/?id=%s"
+)
+
+# Prevent users from running more than 600 tasks at once. It's possible, but
+# it's more likely that a query is broken and is selecting far too much.
+MAX_PERF_TASKS = 600
+
+# Name of the base category with no variants applied to it
+BASE_CATEGORY_NAME = "base"
+
+# Add environment variable for firefox-android integration.
+# This will let us find the APK to upload automatically. However,
+# the following option will need to be supplied:
+# --browsertime-upload-apk firefox-android
+# OR --mozperftest-upload-apk firefox-android
+MOZ_FIREFOX_ANDROID_APK_OUTPUT = os.getenv("MOZ_FIREFOX_ANDROID_APK_OUTPUT", None)
+
+
+class InvalidCategoryException(Exception):
+ """Thrown when a category is found to be invalid.
+
+ See the `PerfParser.run_category_checks()` method for more info.
+ """
+
+ pass
+
+
+class APKNotFound(Exception):
+ """Raised when a user-supplied path to an APK is invalid."""
+
+ pass
+
+
+class InvalidRegressionDetectorQuery(Exception):
+ """Thrown when the detector query produces anything other than 1 task."""
+
+ pass
+
+
+class PerfParser(CompareParser):
+ name = "perf"
+ common_groups = ["push", "task"]
+ task_configs = [
+ "artifact",
+ "browsertime",
+ "disable-pgo",
+ "env",
+ "gecko-profile",
+ "path",
+ "rebuild",
+ ]
+
+ provider = ClassificationProvider()
+ platforms = provider.platforms
+ apps = provider.apps
+ variants = provider.variants
+ suites = provider.suites
+ categories = provider.categories
+
+ arguments = [
+ [
+ ["--show-all"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Show all available tasks.",
+ },
+ ],
+ [
+ ["--android"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Show android test categories (disabled by default).",
+ },
+ ],
+ [
+ # Bug 1866047 - Remove once monorepo changes are complete
+ ["--fenix"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Include Fenix in tasks to run (disabled by default). Must "
+ "be used in conjunction with --android. Fenix isn't built on mozilla-central "
+ "so we pull the APK being tested from the firefox-android project. This "
+ "means that the fenix APK being tested in the two pushes is the same, and "
+ "any local changes made won't impact it.",
+ },
+ ],
+ [
+ ["--chrome"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Show tests available for Chrome-based browsers "
+ "(disabled by default).",
+ },
+ ],
+ [
+ ["--custom-car"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Show tests available for Custom Chromium-as-Release (disabled by default). "
+ "Use with --android flag to select Custom CaR android tests (cstm-car-m)",
+ },
+ ],
+ [
+ ["--safari"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Show tests available for Safari (disabled by default).",
+ },
+ ],
+ [
+ ["--live-sites"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Run tasks with live sites (if possible). "
+ "You can also use the `live-sites` variant.",
+ },
+ ],
+ [
+ ["--profile"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Run tasks with profiling (if possible). "
+ "You can also use the `profiling` variant.",
+ },
+ ],
+ [
+ ["--single-run"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Run tasks without a comparison",
+ },
+ ],
+ [
+ ["-q", "--query"],
+ {
+ "type": str,
+ "default": None,
+ "help": "Query to run in either the perf-category selector, "
+ "or the fuzzy selector if --show-all is provided.",
+ },
+ ],
+ [
+ # Bug 1866047 - Remove once monorepo changes are complete
+ ["--browsertime-upload-apk"],
+ {
+ "type": str,
+ "default": None,
+ "help": "Path to an APK to upload. Note that this "
+ "will replace the APK installed in all Android Performance "
+ "tests. If the Activity, Binary Path, or Intents required "
+ "change at all relative to the existing GeckoView, and Fenix "
+ "tasks, then you will need to make fixes in the associated "
+ "taskcluster files (e.g. taskcluster/ci/test/browsertime-mobile.yml). "
+ "Alternatively, set MOZ_FIREFOX_ANDROID_APK_OUTPUT to a path to "
+ "an APK, and then run the command with --browsertime-upload-apk "
+ "firefox-android. This option will only copy the APK for browsertime, see "
+ "--mozperftest-upload-apk to upload APKs for startup tests.",
+ },
+ ],
+ [
+ # Bug 1866047 - Remove once monorepo changes are complete
+ ["--mozperftest-upload-apk"],
+ {
+ "type": str,
+ "default": None,
+ "help": "See --browsertime-upload-apk. This option does the same "
+ "thing except it's for mozperftest tests such as the startup ones. "
+ "Note that those tests only exist through --show-all, as they "
+ "aren't contained in any existing categories.",
+ },
+ ],
+ [
+ ["--detect-changes"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Adds a task that detects performance changes using MWU.",
+ },
+ ],
+ [
+ ["--comparator"],
+ {
+ "type": str,
+ "default": "BasePerfComparator",
+ "help": "Either a path to a file to setup a custom comparison, "
+ "or a builtin name. See the Firefox source docs for mach try perf for "
+ "examples of how to build your own, along with the interface.",
+ },
+ ],
+ [
+ ["--comparator-args"],
+ {
+ "nargs": "*",
+ "type": str,
+ "default": [],
+ "dest": "comparator_args",
+ "help": "Arguments provided to the base, and new revision setup stages "
+ "of the comparator.",
+ "metavar": "ARG=VALUE",
+ },
+ ],
+ [
+ ["--variants"],
+ {
+ "nargs": "*",
+ "type": str,
+ "default": [BASE_CATEGORY_NAME],
+ "dest": "requested_variants",
+ "choices": list(variants.keys()),
+ "help": "Select variants to display in the selector from: "
+ + ", ".join(list(variants.keys())),
+ "metavar": "",
+ },
+ ],
+ [
+ ["--platforms"],
+ {
+ "nargs": "*",
+ "type": str,
+ "default": [],
+ "dest": "requested_platforms",
+ "choices": list(platforms.keys()),
+ "help": "Select specific platforms to target. Android only "
+ "available with --android. Available platforms: "
+ + ", ".join(list(platforms.keys())),
+ "metavar": "",
+ },
+ ],
+ [
+ ["--apps"],
+ {
+ "nargs": "*",
+ "type": str,
+ "default": [],
+ "dest": "requested_apps",
+ "choices": list(apps.keys()),
+ "help": "Select specific applications to target from: "
+ + ", ".join(list(apps.keys())),
+ "metavar": "",
+ },
+ ],
+ [
+ ["--clear-cache"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Deletes the try_perf_revision_cache file",
+ },
+ ],
+ [
+ ["--alert"],
+ {
+ "type": str,
+ "default": None,
+ "help": "Run tests that produced this alert summary.",
+ },
+ ],
+ [
+ ["--extra-args"],
+ {
+ "nargs": "*",
+ "type": str,
+ "default": [],
+ "dest": "extra_args",
+ "help": "Set the extra args "
+ "(e.x, --extra-args verbose post-startup-delay=1)",
+ "metavar": "",
+ },
+ ],
+ [
+ ["--perfcompare-beta"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Use PerfCompare Beta instead of CompareView.",
+ },
+ ],
+ ]
+
+ def get_tasks(base_cmd, queries, query_arg=None, candidate_tasks=None):
+ cmd = base_cmd[:]
+ if query_arg:
+ cmd.extend(["-f", query_arg])
+
+ query_str, tasks = run_fzf(cmd, sorted(candidate_tasks))
+ queries.append(query_str)
+ return set(tasks)
+
+ def get_perf_tasks(base_cmd, all_tg_tasks, perf_categories, query=None):
+ # Convert the categories to tasks
+ selected_tasks = set()
+ queries = []
+
+ selected_categories = PerfParser.get_tasks(
+ base_cmd, queries, query, perf_categories
+ )
+
+ for category, category_info in perf_categories.items():
+ if category not in selected_categories:
+ continue
+ print("Gathering tasks for %s category" % category)
+
+ category_tasks = set()
+ for suite in PerfParser.suites:
+ # Either perform a query to get the tasks (recommended), or
+ # use a hardcoded task list
+ suite_queries = category_info["queries"].get(suite)
+
+ category_suite_tasks = set()
+ if suite_queries:
+ print(
+ "Executing %s queries: %s" % (suite, ", ".join(suite_queries))
+ )
+
+ for perf_query in suite_queries:
+ if not category_suite_tasks:
+ # Get all tasks selected with the first query
+ category_suite_tasks |= PerfParser.get_tasks(
+ base_cmd, queries, perf_query, all_tg_tasks
+ )
+ else:
+ # Keep only those tasks that matched in all previous queries
+ category_suite_tasks &= PerfParser.get_tasks(
+ base_cmd, queries, perf_query, category_suite_tasks
+ )
+
+ if len(category_suite_tasks) == 0:
+ print("Failed to find any tasks for query: %s" % perf_query)
+ break
+
+ if category_suite_tasks:
+ category_tasks |= category_suite_tasks
+
+ if category_info["tasks"]:
+ category_tasks = set(category_info["tasks"]) & all_tg_tasks
+ if category_tasks != set(category_info["tasks"]):
+ print(
+ "Some expected tasks could not be found: %s"
+ % ", ".join(category_info["tasks"] - category_tasks)
+ )
+
+ if not category_tasks:
+ print("Could not find any tasks for category %s" % category)
+ else:
+ # Add the new tasks to the currently selected ones
+ selected_tasks |= category_tasks
+
+ return selected_tasks, selected_categories, queries
+
+ def _check_app(app, target):
+ """Checks if the app exists in the target."""
+ if app.value in target:
+ return True
+ return False
+
+ def _check_platform(platform, target):
+ """Checks if the platform, or it's type exists in the target."""
+ if (
+ platform.value in target
+ or PerfParser.platforms[platform.value]["platform"] in target
+ ):
+ return True
+ return False
+
+ def _build_initial_decision_matrix():
+ # Build first stage of matrix APPS X PLATFORMS
+ initial_decision_matrix = []
+ for platform in Platforms:
+ platform_row = []
+ for app in Apps:
+ if PerfParser._check_platform(
+ platform, PerfParser.apps[app.value]["platforms"]
+ ):
+ # This app can run on this platform
+ platform_row.append(True)
+ else:
+ platform_row.append(False)
+ initial_decision_matrix.append(platform_row)
+ return initial_decision_matrix
+
+ def _build_intermediate_decision_matrix():
+ # Second stage of matrix building applies the 2D matrix found above
+ # to each suite
+ initial_decision_matrix = PerfParser._build_initial_decision_matrix()
+
+ intermediate_decision_matrix = []
+ for suite in Suites:
+ suite_matrix = copy.deepcopy(initial_decision_matrix)
+ suite_info = PerfParser.suites[suite.value]
+
+            # Restrict the platforms for this suite now
+ for platform in Platforms:
+ for app in Apps:
+ runnable = False
+ if PerfParser._check_app(
+ app, suite_info["apps"]
+ ) and PerfParser._check_platform(platform, suite_info["platforms"]):
+ runnable = True
+ suite_matrix[platform][app] = (
+ runnable and suite_matrix[platform][app]
+ )
+
+ intermediate_decision_matrix.append(suite_matrix)
+ return intermediate_decision_matrix
+
+ def _build_variants_matrix():
+ # Third stage is expanding the intermediate matrix
+ # across all the variants (non-expanded). Start with the
+ # intermediate matrix in the list since it provides our
+ # base case with no variants
+ intermediate_decision_matrix = PerfParser._build_intermediate_decision_matrix()
+
+ variants_matrix = []
+ for variant in Variants:
+ variant_matrix = copy.deepcopy(intermediate_decision_matrix)
+
+ for suite in Suites:
+ if variant.value in PerfParser.suites[suite.value]["variants"]:
+                    # Allow the variant through and set its platforms and apps
+                    # based on how the variant defines them -> only restrict,
+                    # don't make allowances here
+ for platform in Platforms:
+ for app in Apps:
+ if not (
+ PerfParser._check_platform(
+ platform,
+ PerfParser.variants[variant.value]["platforms"],
+ )
+ and PerfParser._check_app(
+ app, PerfParser.variants[variant.value]["apps"]
+ )
+ ):
+ variant_matrix[suite][platform][app] = False
+ else:
+ # This variant matrix needs to be completely False
+ variant_matrix[suite] = [
+ [False] * len(platform_row)
+ for platform_row in variant_matrix[suite]
+ ]
+
+ variants_matrix.append(variant_matrix)
+
+ return variants_matrix, intermediate_decision_matrix
+
+ def _build_decision_matrix():
+ """Build the decision matrix.
+
+ This method builds the decision matrix that is used
+ to determine what categories will be shown to the user.
+ This matrix has the following form (as lists):
+ - Variants
+ - Suites
+ - Platforms
+ - Apps
+
+ Each element in the 4D Matrix is either True or False and tells us
+ whether the particular combination is "runnable" according to
+ the given specifications. This does not mean that the combination
+ exists, just that it's fully configured in this selector.
+
+ The ("base",) variant combination found in the matrix has
+ no variants applied to it. At this stage, it's a catch-all for those
+ categories. The query it uses is reduced further in later stages.
+ """
+ # Get the variants matrix (see methods above) and the intermediate decision
+ # matrix to act as the base category
+ (
+ variants_matrix,
+ intermediate_decision_matrix,
+ ) = PerfParser._build_variants_matrix()
+
+ # Get all possible combinations of the variants
+ expanded_variants = [
+ variant_combination
+ for set_size in range(len(Variants) + 1)
+ for variant_combination in itertools.combinations(list(Variants), set_size)
+ ]
+
+ # Final stage combines the intermediate matrix with the
+ # expanded variants and leaves a "base" category which
+ # doesn't have any variant specifications (it catches them all)
+ decision_matrix = {(BASE_CATEGORY_NAME,): intermediate_decision_matrix}
+ for variant_combination in expanded_variants:
+ expanded_variant_matrix = []
+
+ # Perform an AND operation on the combination of variants
+ # to determine where this particular combination can run
+ for suite in Suites:
+ suite_matrix = []
+ suite_variants = PerfParser.suites[suite.value]["variants"]
+
+ # Disable the variant combination if none of them
+ # are found in the suite
+            disable_variant = not any(
+                variant.value in suite_variants for variant in variant_combination
+            )
+
+ for platform in Platforms:
+ if disable_variant:
+ platform_row = [False for _ in Apps]
+ else:
+ platform_row = [
+ all(
+ variants_matrix[variant][suite][platform][app]
+ for variant in variant_combination
+ if variant.value in suite_variants
+ )
+ for app in Apps
+ ]
+ suite_matrix.append(platform_row)
+
+ expanded_variant_matrix.append(suite_matrix)
+ decision_matrix[variant_combination] = expanded_variant_matrix
+
+ return decision_matrix
+
+ def _skip_with_restrictions(value, restrictions, requested=[]):
+ """Determines if we should skip an app, platform, or variant.
+
+        We allow the base category through since it's the variant that should
+        always be displayed, and it won't affect the app or platform
+        selections.
+ """
+ if restrictions is not None and value not in restrictions + [
+ BASE_CATEGORY_NAME
+ ]:
+ return True
+ if requested and value not in requested + [BASE_CATEGORY_NAME]:
+ return True
+ return False
+
+ def build_category_matrix(**kwargs):
+ """Build a decision matrix for all the categories.
+
+ It will have the form:
+ - Category
+ - Variants
+ - ...
+ """
+ requested_variants = kwargs.get("requested_variants", [BASE_CATEGORY_NAME])
+ requested_platforms = kwargs.get("requested_platforms", [])
+ requested_apps = kwargs.get("requested_apps", [])
+
+ # Build the base decision matrix
+ decision_matrix = PerfParser._build_decision_matrix()
+
+ # Here, the variants are further restricted by the category settings
+ # using the `_skip_with_restrictions` method. This part also handles
+ # explicitly requested platforms, apps, and variants.
+ category_decision_matrix = {}
+ for category, category_info in PerfParser.categories.items():
+ category_matrix = copy.deepcopy(decision_matrix)
+
+ for variant_combination, variant_matrix in decision_matrix.items():
+ variant_runnable = True
+ if BASE_CATEGORY_NAME not in variant_combination:
+ # Make sure that all portions of the variant combination
+ # target at least one of the suites in the category
+ tmp_variant_combination = set(
+ [v.value for v in variant_combination]
+ )
+ for suite in Suites:
+ if suite.value not in category_info["suites"]:
+ continue
+ tmp_variant_combination = tmp_variant_combination - set(
+ [
+ variant.value
+ for variant in variant_combination
+ if variant.value
+ in PerfParser.suites[suite.value]["variants"]
+ ]
+ )
+ if tmp_variant_combination:
+ # If it's not empty, then some variants
+ # are non-existent
+ variant_runnable = False
+
+ for suite, platform, app in itertools.product(Suites, Platforms, Apps):
+ runnable = variant_runnable
+
+ # Disable this combination if there are any variant
+ # restrictions for this suite, or if the user didn't request it
+ # (and did request some variants). The same is done below with
+ # the apps, and platforms.
+ if any(
+ PerfParser._skip_with_restrictions(
+ variant.value if not isinstance(variant, str) else variant,
+ category_info.get("variant-restrictions", {}).get(
+ suite.value, None
+ ),
+ requested_variants,
+ )
+ for variant in variant_combination
+ ):
+ runnable = False
+
+ if PerfParser._skip_with_restrictions(
+ platform.value,
+ category_info.get("platform-restrictions", None),
+ requested_platforms,
+ ):
+ runnable = False
+
+ # If the platform is restricted, check if the appropriate
+ # flags were provided (or appropriate conditions hit). We do
+ # the same thing for apps below.
+ if (
+ PerfParser.platforms[platform.value].get("restriction", None)
+ is not None
+ ):
+ runnable = runnable and PerfParser.platforms[platform.value][
+ "restriction"
+ ](**kwargs)
+
+ if PerfParser._skip_with_restrictions(
+ app.value,
+ category_info.get("app-restrictions", {}).get(
+ suite.value, None
+ ),
+ requested_apps,
+ ):
+ runnable = False
+ if PerfParser.apps[app.value].get("restriction", None) is not None:
+ runnable = runnable and PerfParser.apps[app.value][
+ "restriction"
+ ](**kwargs)
+
+ category_matrix[variant_combination][suite][platform][app] = (
+ runnable and variant_matrix[suite][platform][app]
+ )
+
+ category_decision_matrix[category] = category_matrix
+
+ return category_decision_matrix
+
+ def _enable_restriction(restriction, **kwargs):
+ """Used to simplify checking a restriction."""
+ return restriction is not None and restriction(**kwargs)
+
+ def _category_suites(category_info):
+ """Returns all the suite enum entries in this category."""
+ return [suite for suite in Suites if suite.value in category_info["suites"]]
+
+ def _add_variant_queries(
+ category_info, variant_matrix, variant_combination, platform, queries, app=None
+ ):
+ """Used to add the variant queries to various categories."""
+ for variant in variant_combination:
+ for suite in PerfParser._category_suites(category_info):
+ if (app is not None and variant_matrix[suite][platform][app]) or (
+ app is None and any(variant_matrix[suite][platform])
+ ):
+ queries[suite.value].append(
+ PerfParser.variants[variant.value]["query"]
+ )
+
+ def _build_categories(category, category_info, category_matrix):
+ """Builds the categories to display."""
+ categories = {}
+
+ for variant_combination, variant_matrix in category_matrix.items():
+ base_category = BASE_CATEGORY_NAME in variant_combination
+
+ for platform in Platforms:
+ if not any(
+ any(variant_matrix[suite][platform])
+ for suite in PerfParser._category_suites(category_info)
+ ):
+ # There are no apps available on this platform in either
+ # of the requested suites
+ continue
+
+ # This code has the effect of restricting all suites to
+ # a platform. This means categories with mixed suites will
+ # be available even if some suites will no longer run
+ # given this platform constraint. The reasoning for this is that
+ # it's unexpected to receive desktop tests when you explicitly
+ # request android.
+ platform_queries = {
+ suite: (
+ category_info["query"][suite]
+ + [PerfParser.platforms[platform.value]["query"]]
+ )
+ for suite in category_info["suites"]
+ }
+
+ platform_category_name = f"{category} {platform.value}"
+ platform_category_info = {
+ "queries": platform_queries,
+ "tasks": category_info["tasks"],
+ "platform": platform,
+ "app": None,
+ "suites": category_info["suites"],
+ "base-category": base_category,
+ "base-category-name": category,
+ "description": category_info["description"],
+ }
+ for app in Apps:
+ if not any(
+ variant_matrix[suite][platform][app]
+ for suite in PerfParser._category_suites(category_info)
+ ):
+ # This app is not available on the given platform
+ # for any of the suites
+ continue
+
+ # Add the queries for the app for any suites that need it and
+ # the variant queries if needed
+ app_queries = copy.deepcopy(platform_queries)
+ for suite in Suites:
+ if suite.value not in app_queries:
+ continue
+ app_queries[suite.value].append(
+ PerfParser.apps[app.value]["query"]
+ )
+ if not base_category:
+ PerfParser._add_variant_queries(
+ category_info,
+ variant_matrix,
+ variant_combination,
+ platform,
+ app_queries,
+ app=app,
+ )
+
+ app_category_name = f"{platform_category_name} {app.value}"
+ if not base_category:
+ app_category_name = (
+ f"{app_category_name} "
+ f"{'+'.join([v.value for v in variant_combination])}"
+ )
+ categories[app_category_name] = {
+ "queries": app_queries,
+ "tasks": category_info["tasks"],
+ "platform": platform,
+ "app": app,
+ "suites": category_info["suites"],
+ "base-category": base_category,
+ "description": category_info["description"],
+ }
+
+ if not base_category:
+ platform_category_name = (
+ f"{platform_category_name} "
+ f"{'+'.join([v.value for v in variant_combination])}"
+ )
+ PerfParser._add_variant_queries(
+ category_info,
+ variant_matrix,
+ variant_combination,
+ platform,
+ platform_queries,
+ )
+ categories[platform_category_name] = platform_category_info
+
+ return categories
+
+ def _handle_variant_negations(category, category_info, **kwargs):
+ """Handle variant negations.
+
+ The reason why we're negating variants here instead of where we add
+ them to the queries is because we need to iterate over all of the variants
+ but when we add them, we only look at the variants in the combination. It's
+ possible to combine these, but that increases the complexity of the code
+ by quite a bit so it's best to do it separately.
+ """
+ for variant in Variants:
+ if category_info["base-category"] and variant.value in kwargs.get(
+ "requested_variants", [BASE_CATEGORY_NAME]
+ ):
+ # When some particular variant(s) are requested, and we are at a
+ # base category, don't negate it. Otherwise, if the variant
+ # wasn't requested negate it
+ continue
+ if variant.value in category:
+ # If this variant is in the category name, skip negations
+ continue
+ if not PerfParser._check_platform(
+ category_info["platform"],
+ PerfParser.variants[variant.value]["platforms"],
+ ):
+ # Make sure the variant applies to the platform
+ continue
+
+ for suite in category_info["suites"]:
+ if variant.value not in PerfParser.suites[suite]["variants"]:
+ continue
+ category_info["queries"][suite].append(
+ PerfParser.variants[variant.value]["negation"]
+ )
+
+ def _handle_app_negations(category, category_info, **kwargs):
+ """Handle app negations.
+
+ This is where the global chrome/safari negations get added. We use kwargs
+ along with the app restriction method to make this decision.
+ """
+ for app in Apps:
+ if PerfParser.apps[app.value].get("negation", None) is None:
+ continue
+ elif any(
+ PerfParser.apps[app.value]["negation"]
+ in category_info["queries"][suite]
+ for suite in category_info["suites"]
+ ):
+ # Already added the negations
+ continue
+ if category_info.get("app", None) is not None:
+ # We only need to handle this for categories that
+ # don't specify an app
+ continue
+
+ if PerfParser.apps[app.value].get("restriction", None) is None:
+                # If this app has no restriction flag, it means we should select it
+                # as much as possible and not negate it. However, if specific apps
+                # were requested and this app isn't among them, we let the negation
+                # proceed since a `negation` field was provided (checked above).
+ requested_apps = kwargs.get("requested_apps", [])
+ if requested_apps and app.value in requested_apps:
+                    # Apps were requested, and this one is included
+ continue
+ elif not requested_apps:
+ # Apps were not requested, so we should keep this one
+ continue
+
+ if PerfParser._enable_restriction(
+ PerfParser.apps[app.value].get("restriction", None), **kwargs
+ ):
+ continue
+
+ for suite in category_info["suites"]:
+ if app.value not in PerfParser.suites[suite]["apps"]:
+ continue
+ category_info["queries"][suite].append(
+ PerfParser.apps[app.value]["negation"]
+ )
+
+ def _handle_negations(category, category_info, **kwargs):
+ """This method handles negations.
+
+ This method should only include things that should be globally applied
+        to all the queries. The apps are included because, for example, chrome
+        is negated unless --chrome is provided; the variants are negated here too.
+ """
+ PerfParser._handle_variant_negations(category, category_info, **kwargs)
+ PerfParser._handle_app_negations(category, category_info, **kwargs)
+
+ def get_categories(**kwargs):
+ """Get the categories to be displayed.
+
+ The categories are built using the decision matrices from `build_category_matrix`.
+ The methods above provide more detail on how this is done. Here, we use
+ this matrix to determine if we should show a category to a user.
+
+ We also apply the negations for restricted apps/platforms and variants
+ at the end before displaying the categories.
+ """
+ categories = {}
+
+ # Setup the restrictions, and ease-of-use variants requested (if any)
+ for variant in Variants:
+ if PerfParser._enable_restriction(
+ PerfParser.variants[variant.value].get("restriction", None), **kwargs
+ ):
+ kwargs.setdefault("requested_variants", []).append(variant.value)
+
+ category_decision_matrix = PerfParser.build_category_matrix(**kwargs)
+
+ # Now produce the categories by finding all the entries that are True
+ for category, category_matrix in category_decision_matrix.items():
+ categories.update(
+ PerfParser._build_categories(
+ category, PerfParser.categories[category], category_matrix
+ )
+ )
+
+ # Handle the restricted app queries, and variant negations
+ for category, category_info in categories.items():
+ PerfParser._handle_negations(category, category_info, **kwargs)
+
+ return categories
+
+ def inject_change_detector(base_cmd, all_tasks, selected_tasks):
+ query = "'perftest 'mwu 'detect"
+ mwu_task = PerfParser.get_tasks(base_cmd, [], query, all_tasks)
+
+        if len(mwu_task) != 1:
+ raise InvalidRegressionDetectorQuery(
+ f"Expected 1 task from change detector "
+ f"query, but found {len(mwu_task)}"
+ )
+
+ selected_tasks |= set(mwu_task)
+
+ def check_cached_revision(selected_tasks, base_commit=None):
+ """
+        If the base_commit parameter is None, remove any expired cache data.
+ Cache data format:
+ {
+ base_commit[str]: [
+ {
+ "base_revision_treeherder": "2b04563b5",
+ "date": "2023-03-12",
+ "tasks": ["a-task"],
+ },
+ {
+ "base_revision_treeherder": "999998888",
+ "date": "2023-03-12",
+ "tasks": ["b-task"],
+ },
+ ]
+ }
+
+ The list represents different pushes with different task selections.
+
+ TODO: See if we can request additional tests on a given base revision.
+
+ :param selected_tasks list: The list of tasks selected by the user
+ :param base_commit str: The base commit to search
+ :return: The base_revision_treeherder if found, else None
+ """
+ today = datetime.now()
+ expired_date = (today - timedelta(weeks=2)).strftime("%Y-%m-%d")
+ today = today.strftime("%Y-%m-%d")
+
+ if not cache_file.is_file():
+ return
+
+ with cache_file.open("r") as f:
+ cache_data = json.load(f)
+
+ # Remove expired cache data
+ if base_commit is None:
+ for cached_base_commit in list(cache_data):
+ if not isinstance(cache_data[cached_base_commit], list):
+ # TODO: Remove in the future, this is for backwards-compatibility
+ # with the previous cache structure
+ cache_data.pop(cached_base_commit)
+ else:
+ # Go through the pushes, and expire any that are too old
+ new_pushes = []
+ for push in cache_data[cached_base_commit]:
+ if push["date"] > expired_date:
+ new_pushes.append(push)
+ # If no pushes are left after expiration, expire the base commit
+ if new_pushes:
+ cache_data[cached_base_commit] = new_pushes
+ else:
+ cache_data.pop(cached_base_commit)
+ with cache_file.open("w") as f:
+ json.dump(cache_data, f, indent=4)
+
+ cached_base_commit = cache_data.get(base_commit, None)
+ if cached_base_commit:
+ for push in cached_base_commit:
+ if set(selected_tasks) <= set(push["tasks"]):
+ return push["base_revision_treeherder"]
+
+ def save_revision_treeherder(selected_tasks, base_commit, base_revision_treeherder):
+ """
+ Save the base revision of treeherder to the cache.
+ See "check_cached_revision" for more information about the data structure.
+
+ :param selected_tasks list: The list of tasks selected by the user
+ :param base_commit str: The base commit to save
+ :param base_revision_treeherder str: The base revision of treeherder to save
+ :return: None
+ """
+ today = datetime.now().strftime("%Y-%m-%d")
+ new_revision = {
+ "base_revision_treeherder": base_revision_treeherder,
+ "date": today,
+ "tasks": list(selected_tasks),
+ }
+ cache_data = {}
+
+ if cache_file.is_file():
+ with cache_file.open("r") as f:
+ cache_data = json.load(f)
+ cache_data.setdefault(base_commit, []).append(new_revision)
+ else:
+ cache_data[base_commit] = [new_revision]
+
+ with cache_file.open(mode="w") as f:
+ json.dump(cache_data, f, indent=4)
+
+ def found_android_tasks(selected_tasks):
+ """
+ Check if any of the selected tasks are android.
+
+ :param selected_tasks list: List of tasks selected.
+ :return bool: True if android tasks were found, False otherwise.
+ """
+ return any("android" in task for task in selected_tasks)
+
+ def setup_try_config(
+ try_config_params, extra_args, selected_tasks, base_revision_treeherder=None
+ ):
+ """
+ Setup the try config for a push.
+
+ :param try_config_params dict: The current try config to be modified.
+ :param extra_args list: A list of extra options to add to the tasks being run.
+ :param selected_tasks list: List of tasks selected. Used for determining if android
+ tasks are selected to disable artifact mode.
+ :param base_revision_treeherder str: The base revision of treeherder to save
+ :return: None
+ """
+ if try_config_params is None:
+ try_config_params = {}
+
+ try_config = try_config_params.setdefault("try_task_config", {})
+ env = try_config.setdefault("env", {})
+ if extra_args:
+ args = " ".join(extra_args)
+ env["PERF_FLAGS"] = args
+        if base_revision_treeherder:
+            # Pass the base run's Treeherder revision to the new push so that
+            # the new revision can be enhanced with information about the base run
+            env["PERF_BASE_REVISION"] = base_revision_treeherder
+ if PerfParser.found_android_tasks(selected_tasks) and try_config.get(
+ "use-artifact-builds", False
+ ):
+ # XXX: Fix artifact mode on android (no bug)
+ try_config["use-artifact-builds"] = False
+ print("Disabling artifact mode due to android task selection")
+
+ def perf_push_to_try(
+ selected_tasks,
+ selected_categories,
+ queries,
+ try_config_params,
+ dry_run,
+ single_run,
+ extra_args,
+ comparator,
+ comparator_args,
+ alert_summary_id,
+ ):
+ """Perf-specific push to try method.
+
+ This makes use of logic from the CompareParser to do something
+ very similar except with log redirection. We get the comparison
+ revisions, then use the repository object to update between revisions
+ and the LogProcessor for parsing out the revisions that are used
+ to build the Perfherder links.
+ """
+ vcs = get_repository_object(build.topsrcdir)
+ compare_commit, current_revision_ref = PerfParser.get_revisions_to_run(
+ vcs, None
+ )
+
+ # Build commit message, and limit first line to 200 characters
+ selected_categories_msg = ", ".join(selected_categories)
+ if len(selected_categories_msg) > 200:
+ selected_categories_msg = f"{selected_categories_msg[:200]}...\n...{selected_categories_msg[200:]}"
+ msg = "Perf selections={} \nQueries={}".format(
+ selected_categories_msg,
+ json.dumps(queries, indent=4),
+ )
+ if alert_summary_id:
+ msg = f"Perf alert summary id={alert_summary_id}"
+
+ # Get the comparator to run
+ comparator_klass = get_comparator(comparator)
+ comparator_obj = comparator_klass(
+ vcs, compare_commit, current_revision_ref, comparator_args
+ )
+ base_comparator = True
+ if comparator_klass.__name__ != "BasePerfComparator":
+ base_comparator = False
+
+ new_revision_treeherder = ""
+ base_revision_treeherder = ""
+ try:
+ # redirect_stdout allows us to feed each line into
+ # a processor that we can use to catch the revision
+ # while providing real-time output
+ log_processor = LogProcessor()
+
+ # Push the base revision first. This lets the new revision appear
+ # first in the Treeherder view, and it also lets us enhance the new
+ # revision with information about the base run.
+ base_revision_treeherder = None
+ if base_comparator:
+ # Don't cache the base revision when a custom comparison is being performed
+ # since the base revision is now unique and not general to all pushes
+ base_revision_treeherder = PerfParser.check_cached_revision(
+ selected_tasks, compare_commit
+ )
+
+ if not (dry_run or single_run or base_revision_treeherder):
+ # Setup the base revision, and try config. This lets us change the options
+ # we run the tests with through the PERF_FLAGS environment variable.
+ base_extra_args = list(extra_args)
+ base_try_config_params = copy.deepcopy(try_config_params)
+ comparator_obj.setup_base_revision(base_extra_args)
+ PerfParser.setup_try_config(
+ base_try_config_params, base_extra_args, selected_tasks
+ )
+
+ with redirect_stdout(log_processor):
+ # XXX Figure out if we can use the `again` selector in some way
+ # Right now we would need to modify it to be able to do this.
+ # XXX Fix up the again selector for the perf selector (if it makes sense to)
+ push_to_try(
+ "perf-again",
+ "{msg}".format(msg=msg),
+ try_task_config=generate_try_task_config(
+ "fuzzy", selected_tasks, params=base_try_config_params
+ ),
+ stage_changes=False,
+ dry_run=dry_run,
+ closed_tree=False,
+ allow_log_capture=True,
+ )
+
+ base_revision_treeherder = log_processor.revision
+ if base_comparator:
+ PerfParser.save_revision_treeherder(
+ selected_tasks, compare_commit, base_revision_treeherder
+ )
+
+ comparator_obj.teardown_base_revision()
+
+ new_extra_args = list(extra_args)
+ comparator_obj.setup_new_revision(new_extra_args)
+ PerfParser.setup_try_config(
+ try_config_params,
+ new_extra_args,
+ selected_tasks,
+ base_revision_treeherder=base_revision_treeherder,
+ )
+
+ with redirect_stdout(log_processor):
+ push_to_try(
+ "perf",
+ "{msg}".format(msg=msg),
+ # XXX Figure out if changing `fuzzy` to `perf` will break something
+ try_task_config=generate_try_task_config(
+ "fuzzy", selected_tasks, params=try_config_params
+ ),
+ stage_changes=False,
+ dry_run=dry_run,
+ closed_tree=False,
+ allow_log_capture=True,
+ )
+
+ new_revision_treeherder = log_processor.revision
+ comparator_obj.teardown_new_revision()
+
+ finally:
+ comparator_obj.teardown()
+
+ return base_revision_treeherder, new_revision_treeherder
+
+ def run(
+ update=False,
+ show_all=False,
+ parameters=None,
+ try_config_params=None,
+ dry_run=False,
+ single_run=False,
+ query=None,
+ detect_changes=False,
+ rebuild=1,
+ clear_cache=False,
+ **kwargs,
+ ):
+ # Setup fzf
+ fzf = fzf_bootstrap(update)
+
+ if not fzf:
+ print(FZF_NOT_FOUND)
+ return 1
+
+ if clear_cache:
+ print(f"Removing cached {cache_file} file")
+ cache_file.unlink(missing_ok=True)
+
+ all_tasks, dep_cache, cache_dir = setup_tasks_for_fzf(
+ not dry_run,
+ parameters,
+ full=True,
+ disable_target_task_filter=False,
+ )
+ base_cmd = build_base_cmd(
+ fzf,
+ dep_cache,
+ cache_dir,
+ show_estimates=False,
+ preview_script=PREVIEW_SCRIPT,
+ )
+
+ # Perform the selection, then push to try and return the revisions
+ queries = []
+ selected_categories = []
+ alert_summary_id = kwargs.get("alert")
+ if alert_summary_id:
+ alert_tasks = requests.get(
+ TREEHERDER_ALERT_TASKS_URL % alert_summary_id,
+ headers={"User-Agent": "mozilla-central"},
+ )
+ if alert_tasks.status_code != 200:
+ print(
+ "\nFailed to obtain tasks from alert due to:\n"
+ f"Alert ID: {alert_summary_id}\n"
+ f"Status Code: {alert_tasks.status_code}\n"
+ f"Response Message: {alert_tasks.json()}\n"
+ )
+ alert_tasks.raise_for_status()
+        alert_tasks = {task for task in alert_tasks.json()["tasks"] if task}
+ selected_tasks = alert_tasks & set(all_tasks)
+ if not selected_tasks:
+ raise Exception("Alert ID has no task to run.")
+ elif len(selected_tasks) != len(alert_tasks):
+ print(
+ "\nAll the tasks of the Alert Summary couldn't be found in the taskgraph.\n"
+ f"Not exist tasks: {alert_tasks - set(all_tasks)}\n"
+ )
+ elif not show_all:
+ # Expand the categories first
+ categories = PerfParser.get_categories(**kwargs)
+ PerfParser.build_category_description(base_cmd, categories)
+
+ selected_tasks, selected_categories, queries = PerfParser.get_perf_tasks(
+ base_cmd, all_tasks, categories, query=query
+ )
+ else:
+ selected_tasks = PerfParser.get_tasks(base_cmd, queries, query, all_tasks)
+
+ if len(selected_tasks) == 0:
+ print("No tasks selected")
+ return None
+
+ total_task_count = len(selected_tasks) * rebuild
+ if total_task_count > MAX_PERF_TASKS:
+ print(
+ "\n\n----------------------------------------------------------------------------------------------\n"
+ f"You have selected {total_task_count} total test runs! (selected tasks({len(selected_tasks)}) * rebuild"
+ f" count({rebuild}) \nThese tests won't be triggered as the current maximum for a single ./mach try "
+ f"perf run is {MAX_PERF_TASKS}. \nIf this was unexpected, please file a bug in Testing :: Performance."
+ "\n----------------------------------------------------------------------------------------------\n\n"
+ )
+ return None
+
+ if detect_changes:
+ PerfParser.inject_change_detector(base_cmd, all_tasks, selected_tasks)
+
+ return PerfParser.perf_push_to_try(
+ selected_tasks,
+ selected_categories,
+ queries,
+ try_config_params,
+ dry_run,
+ single_run,
+ kwargs.get("extra_args", []),
+ kwargs.get("comparator", "BasePerfComparator"),
+ kwargs.get("comparator_args", []),
+ alert_summary_id,
+ )
+
+ def run_category_checks():
+ # XXX: Add a jsonschema check for the category definition
+ # Make sure the queries don't specify variants in them
+ variant_queries = {
+ suite: [
+ PerfParser.variants[variant]["query"]
+ for variant in suite_info.get(
+ "variants", list(PerfParser.variants.keys())
+ )
+ ]
+ + [
+ PerfParser.variants[variant]["negation"]
+ for variant in suite_info.get(
+ "variants", list(PerfParser.variants.keys())
+ )
+ ]
+ for suite, suite_info in PerfParser.suites.items()
+ }
+
+ for category, category_info in PerfParser.categories.items():
+ for suite, query in category_info["query"].items():
+ if len(variant_queries[suite]) == 0:
+ # This suite has no variants
+ continue
+ if any(any(v in q for q in query) for v in variant_queries[suite]):
+ raise InvalidCategoryException(
+ f"The '{category}' category suite query for '{suite}' "
+ f"uses a variant in it's query '{query}'."
+ "If you don't want a particular variant use the "
+ "`variant-restrictions` field in the category."
+ )
+
+ return True
+
+ def setup_apk_upload(framework, apk_upload_path):
+ """Setup the APK for uploading to test on try.
+
+ There are two ways of performing the upload:
+ (1) Passing a path to an APK with:
+ --browsertime-upload-apk <PATH/FILE.APK>
+ --mozperftest-upload-apk <PATH/FILE.APK>
+ (2) Setting MOZ_FIREFOX_ANDROID_APK_OUTPUT to a path that will
+ always point to an APK (<PATH/FILE.APK>) that we can upload.
+
+ The file is always copied to testing/raptor/raptor/user_upload.apk to
+ integrate with minimal changes for simpler cases when using raptor-browsertime.
+
+ For mozperftest, the APK is always uploaded here for the same reasons:
+ python/mozperftest/mozperftest/user_upload.apk
+ """
+ frameworks_to_locations = {
+ "browsertime": pathlib.Path(
+ build.topsrcdir, "testing", "raptor", "raptor", "user_upload.apk"
+ ),
+ "mozperftest": pathlib.Path(
+ build.topsrcdir,
+ "python",
+ "mozperftest",
+ "mozperftest",
+ "user_upload.apk",
+ ),
+ }
+
+ print("Setting up custom APK upload")
+ if apk_upload_path in ("firefox-android"):
+ apk_upload_path = MOZ_FIREFOX_ANDROID_APK_OUTPUT
+ if apk_upload_path is None:
+ raise APKNotFound(
+ "MOZ_FIREFOX_ANDROID_APK_OUTPUT is not defined. It should "
+ "point to an APK to upload."
+ )
+ apk_upload_path = pathlib.Path(apk_upload_path)
+ if not apk_upload_path.exists() or apk_upload_path.is_dir():
+ raise APKNotFound(
+ "MOZ_FIREFOX_ANDROID_APK_OUTPUT needs to point to an APK."
+ )
+ else:
+ apk_upload_path = pathlib.Path(apk_upload_path)
+ if not apk_upload_path.exists():
+ raise APKNotFound(f"Path does not exist: {str(apk_upload_path)}")
+
+ print("\nCopying file in-tree for upload...")
+ shutil.copyfile(
+ str(apk_upload_path),
+ frameworks_to_locations[framework],
+ )
+
+ hg_cmd = ["hg", "add", str(frameworks_to_locations[framework])]
+ print(
+ f"\nRunning the following hg command (RAM warnings are expected):\n"
+ f" {hg_cmd}"
+ )
+ subprocess.check_output(hg_cmd)
+ print(
+ "\nAPK is setup for uploading. Please commit the changes, "
+ "and re-run this command. \nEnsure you supply the --android, "
+ "and select the correct tasks (fenix, geckoview) or use "
+ "--show-all for mozperftest task selection. \nFor Fenix, ensure "
+ "you also provide the --fenix flag."
+ )
+
+ def build_category_description(base_cmd, categories):
+ descriptions = {}
+
+ for category in categories:
+ if categories[category].get("description"):
+ descriptions[category] = categories[category].get("description")
+
+ description_file = pathlib.Path(
+ get_state_dir(), "try_perf_categories_info.json"
+ )
+ with description_file.open("w") as f:
+ json.dump(descriptions, f, indent=4)
+
+ preview_option = base_cmd.index("--preview") + 1
+ base_cmd[preview_option] = (
+ base_cmd[preview_option] + f' -d "{description_file}" -l "{{}}"'
+ )
+
+ for idx, cmd in enumerate(base_cmd):
+ if "--preview-window" in cmd:
+ base_cmd[idx] += ":wrap"
+
+
+def get_compare_url(revisions, perfcompare_beta=False):
+ """Setup the comparison link."""
+ if perfcompare_beta:
+ return PERFCOMPARE_BASE_URL % revisions
+ return PERFHERDER_BASE_URL % revisions
+
+
+def run(**kwargs):
+ if (
+ kwargs.get("browsertime_upload_apk") is not None
+ or kwargs.get("mozperftest_upload_apk") is not None
+ ):
+ framework = "browsertime"
+ upload_apk = kwargs.get("browsertime_upload_apk")
+ if upload_apk is None:
+ framework = "mozperftest"
+ upload_apk = kwargs.get("mozperftest_upload_apk")
+
+ PerfParser.setup_apk_upload(framework, upload_apk)
+ return
+
+ # Make sure the categories are following
+ # the rules we've setup
+ PerfParser.run_category_checks()
+ PerfParser.check_cached_revision([])
+
+ revisions = PerfParser.run(
+ profile=kwargs.get("try_config_params", {})
+ .get("try_task_config", {})
+ .get("gecko-profile", False),
+ rebuild=kwargs.get("try_config_params", {})
+ .get("try_task_config", {})
+ .get("rebuild", 1),
+ **kwargs,
+ )
+
+ if revisions is None:
+ return
+
+ # Provide link to perfherder for comparisons now
+ if not kwargs.get("single_run", False):
+ perfcompare_url = get_compare_url(
+ revisions, perfcompare_beta=kwargs.get("perfcompare_beta", False)
+ )
+ original_try_url = TREEHERDER_TRY_BASE_URL % revisions[0]
+ local_change_try_url = TREEHERDER_TRY_BASE_URL % revisions[1]
+ print(
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison here "
+ "once the tests are complete (ensure you select the right "
+ "framework): %s\n" % perfcompare_url
+ )
+ print("\n*******************************************************")
+ print("* 2 commits/try-runs are created... *")
+ print("*******************************************************")
+ print(f"Base revision's try run: {original_try_url}")
+ print(f"Local revision's try run: {local_change_try_url}\n")
+ print(
+ "If you need any help, you can find us in the #perf-help Matrix channel:\n"
+ "https://matrix.to/#/#perf-help:mozilla.org\n"
+ )
+ print(
+ "For more information on the performance tests, see our PerfDocs here:\n"
+ "https://firefox-source-docs.mozilla.org/testing/perfdocs/"
+ )
diff --git a/tools/tryselect/selectors/perf_preview.py b/tools/tryselect/selectors/perf_preview.py
new file mode 100644
index 0000000000..55219d3300
--- /dev/null
+++ b/tools/tryselect/selectors/perf_preview.py
@@ -0,0 +1,62 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""This script is intended to be called through fzf as a preview formatter."""
+
+
+import argparse
+import json
+import os
+import pathlib
+import sys
+
+here = os.path.abspath(os.path.dirname(__file__))
+sys.path.insert(0, os.path.join(os.path.dirname(here), "util"))
+
+
+def process_args():
+ """Process preview arguments."""
+ argparser = argparse.ArgumentParser()
+ argparser.add_argument(
+ "-t",
+ "--tasklist",
+ type=str,
+ default=None,
+ help="Path to temporary file containing the selected tasks",
+ )
+ argparser.add_argument(
+ "-d",
+ "--description",
+ type=str,
+ default=None,
+ help="Path to description file containing the item description",
+ )
+ argparser.add_argument(
+ "-l",
+ "--line",
+ type=str,
+ default=None,
+ help="Current line that the user is pointing",
+ )
+ return argparser.parse_args()
+
+
+def plain_display(taskfile, description, line):
+ """Original preview window display."""
+    with open(taskfile) as f:
+        tasklist = [task.strip() for task in f]
+    print("\n".join(sorted(tasklist)))
+
+ if description is None or line is None:
+ return
+ line = line.replace("'", "")
+ with pathlib.Path(description).open("r") as f:
+ description_dict = json.load(f)
+ if line in description_dict:
+ print(f"\n* Desc:\n{description_dict[line]}")
+
+
+if __name__ == "__main__":
+ args = process_args()
+ plain_display(args.tasklist, args.description, args.line)
diff --git a/tools/tryselect/selectors/perfselector/__init__.py b/tools/tryselect/selectors/perfselector/__init__.py
new file mode 100644
index 0000000000..c580d191c1
--- /dev/null
+++ b/tools/tryselect/selectors/perfselector/__init__.py
@@ -0,0 +1,3 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
diff --git a/tools/tryselect/selectors/perfselector/classification.py b/tools/tryselect/selectors/perfselector/classification.py
new file mode 100644
index 0000000000..cabf2a323e
--- /dev/null
+++ b/tools/tryselect/selectors/perfselector/classification.py
@@ -0,0 +1,387 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import enum
+
+
+class ClassificationEnum(enum.Enum):
+ """This class provides the ability to use Enums as array indices."""
+
+ @property
+ def value(self):
+ return self._value_["value"]
+
+ def __index__(self):
+ return self._value_["index"]
+
+ def __int__(self):
+ return self._value_["index"]
+
+
+class Platforms(ClassificationEnum):
+ ANDROID_A51 = {"value": "android-a51", "index": 0}
+ ANDROID = {"value": "android", "index": 1}
+ WINDOWS = {"value": "windows", "index": 2}
+ LINUX = {"value": "linux", "index": 3}
+ MACOSX = {"value": "macosx", "index": 4}
+ DESKTOP = {"value": "desktop", "index": 5}
+
+
+class Apps(ClassificationEnum):
+ FIREFOX = {"value": "firefox", "index": 0}
+ CHROME = {"value": "chrome", "index": 1}
+ CHROMIUM = {"value": "chromium", "index": 2}
+ GECKOVIEW = {"value": "geckoview", "index": 3}
+ FENIX = {"value": "fenix", "index": 4}
+ CHROME_M = {"value": "chrome-m", "index": 5}
+ SAFARI = {"value": "safari", "index": 6}
+ CHROMIUM_RELEASE = {"value": "custom-car", "index": 7}
+ CHROMIUM_RELEASE_M = {"value": "cstm-car-m", "index": 8}
+
+
+class Suites(ClassificationEnum):
+ RAPTOR = {"value": "raptor", "index": 0}
+ TALOS = {"value": "talos", "index": 1}
+ AWSY = {"value": "awsy", "index": 2}
+
+
+class Variants(ClassificationEnum):
+ FISSION = {"value": "fission", "index": 0}
+ BYTECODE_CACHED = {"value": "bytecode-cached", "index": 1}
+ LIVE_SITES = {"value": "live-sites", "index": 2}
+ PROFILING = {"value": "profiling", "index": 3}
+ SWR = {"value": "swr", "index": 4}
+
+
+"""
+The following methods and constants are used for restricting
+certain platforms and applications such as chrome, safari, and
+android tests. These all require a flag such as --android to
+enable (see build_category_matrix for more info).
+"""
+
+
+def check_for_android(android=False, **kwargs):
+ return android
+
+
+def check_for_fenix(fenix=False, **kwargs):
+ return fenix or ("fenix" in kwargs.get("requested_apps", []))
+
+
+def check_for_chrome(chrome=False, **kwargs):
+ return chrome
+
+
+def check_for_custom_car(custom_car=False, **kwargs):
+ return custom_car
+
+
+def check_for_safari(safari=False, **kwargs):
+ return safari
+
+
+def check_for_live_sites(live_sites=False, **kwargs):
+ return live_sites
+
+
+def check_for_profile(profile=False, **kwargs):
+ return profile
+
+
+class ClassificationProvider:
+ @property
+ def platforms(self):
+ return {
+ Platforms.ANDROID_A51.value: {
+ "query": "'android 'a51 'shippable 'aarch64",
+ "restriction": check_for_android,
+ "platform": Platforms.ANDROID.value,
+ },
+ Platforms.ANDROID.value: {
+ # The android, and android-a51 queries are expected to be the same,
+ # we don't want to run the tests on other mobile platforms.
+ "query": "'android 'a51 'shippable 'aarch64",
+ "restriction": check_for_android,
+ "platform": Platforms.ANDROID.value,
+ },
+ Platforms.WINDOWS.value: {
+ "query": "!-32 'windows 'shippable",
+ "platform": Platforms.DESKTOP.value,
+ },
+ Platforms.LINUX.value: {
+ "query": "!clang 'linux 'shippable",
+ "platform": Platforms.DESKTOP.value,
+ },
+ Platforms.MACOSX.value: {
+ "query": "'osx 'shippable",
+ "platform": Platforms.DESKTOP.value,
+ },
+ Platforms.DESKTOP.value: {
+ "query": "!android 'shippable !-32 !clang",
+ "platform": Platforms.DESKTOP.value,
+ },
+ }
+
+ @property
+ def apps(self):
+ return {
+ Apps.FIREFOX.value: {
+ "query": "!chrom !geckoview !fenix !safari !m-car",
+ "platforms": [Platforms.DESKTOP.value],
+ },
+ Apps.CHROME.value: {
+ "query": "'chrome",
+ "negation": "!chrom",
+ "restriction": check_for_chrome,
+ "platforms": [Platforms.DESKTOP.value],
+ },
+ Apps.CHROMIUM.value: {
+ "query": "'chromium",
+ "negation": "!chrom",
+ "restriction": check_for_chrome,
+ "platforms": [Platforms.DESKTOP.value],
+ },
+ Apps.GECKOVIEW.value: {
+ "query": "'geckoview",
+ "negation": "!geckoview",
+ "platforms": [Platforms.ANDROID.value],
+ },
+ Apps.FENIX.value: {
+ "query": "'fenix",
+ "negation": "!fenix",
+ "restriction": check_for_fenix,
+ "platforms": [Platforms.ANDROID.value],
+ },
+ Apps.CHROME_M.value: {
+ "query": "'chrome-m",
+ "negation": "!chrom",
+ "restriction": check_for_chrome,
+ "platforms": [Platforms.ANDROID.value],
+ },
+ Apps.SAFARI.value: {
+ "query": "'safari",
+ "negation": "!safari",
+ "restriction": check_for_safari,
+ "platforms": [Platforms.MACOSX.value],
+ },
+ Apps.CHROMIUM_RELEASE.value: {
+ "query": "'m-car",
+ "negation": "!m-car",
+ "restriction": check_for_custom_car,
+ "platforms": [
+ Platforms.LINUX.value,
+ Platforms.WINDOWS.value,
+ Platforms.MACOSX.value,
+ ],
+ },
+ Apps.CHROMIUM_RELEASE_M.value: {
+ "query": "'m-car",
+ "negation": "!m-car",
+ "restriction": check_for_custom_car,
+ "platforms": [Platforms.ANDROID.value],
+ },
+ }
+
+ @property
+ def variants(self):
+ return {
+ Variants.FISSION.value: {
+ "query": "!nofis",
+ "negation": "'nofis",
+ "platforms": [Platforms.ANDROID.value],
+ "apps": [Apps.FENIX.value, Apps.GECKOVIEW.value],
+ },
+ Variants.BYTECODE_CACHED.value: {
+ "query": "'bytecode",
+ "negation": "!bytecode",
+ "platforms": [Platforms.DESKTOP.value],
+ "apps": [Apps.FIREFOX.value],
+ },
+ Variants.LIVE_SITES.value: {
+ "query": "'live",
+ "negation": "!live",
+ "restriction": check_for_live_sites,
+ "platforms": [Platforms.DESKTOP.value, Platforms.ANDROID.value],
+ "apps": [ # XXX No live CaR tests
+ Apps.FIREFOX.value,
+ Apps.CHROME.value,
+ Apps.CHROMIUM.value,
+ Apps.FENIX.value,
+ Apps.GECKOVIEW.value,
+ Apps.SAFARI.value,
+ ],
+ },
+ Variants.PROFILING.value: {
+ "query": "'profil",
+ "negation": "!profil",
+ "restriction": check_for_profile,
+ "platforms": [Platforms.DESKTOP.value, Platforms.ANDROID.value],
+ "apps": [Apps.FIREFOX.value, Apps.GECKOVIEW.value, Apps.FENIX.value],
+ },
+ Variants.SWR.value: {
+ "query": "'swr",
+ "negation": "!swr",
+ "platforms": [Platforms.DESKTOP.value],
+ "apps": [Apps.FIREFOX.value],
+ },
+ }
+
+ @property
+ def suites(self):
+ return {
+ Suites.RAPTOR.value: {
+ "apps": list(self.apps.keys()),
+ "platforms": list(self.platforms.keys()),
+ "variants": [
+ Variants.FISSION.value,
+ Variants.LIVE_SITES.value,
+ Variants.PROFILING.value,
+ Variants.BYTECODE_CACHED.value,
+ ],
+ },
+ Suites.TALOS.value: {
+ "apps": [Apps.FIREFOX.value],
+ "platforms": [Platforms.DESKTOP.value],
+ "variants": [
+ Variants.PROFILING.value,
+ Variants.SWR.value,
+ ],
+ },
+ Suites.AWSY.value: {
+ "apps": [Apps.FIREFOX.value],
+ "platforms": [Platforms.DESKTOP.value],
+ "variants": [],
+ },
+ }
+
+ """
+ Here you can find the base categories that are defined for the perf
+ selector. The following fields are available:
+ * query: Set the queries to use for each suite you need.
+ * suites: The suites that are needed for this category.
+ * tasks: A hard-coded list of tasks to select.
+ * platforms: The platforms that it can run on.
+ * app-restrictions: A list of apps that the category can run.
+ * variant-restrictions: A list of variants available for each suite.
+
+ Note that setting the App/Variant-Restriction fields should be used to
+ restrict the available apps and variants, not expand them.
+ """
+
+ @property
+ def categories(self):
+ return {
+ "Pageload": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'tp6 !tp6-bench"],
+ },
+ "suites": [Suites.RAPTOR.value],
+ "tasks": [],
+ "description": "A group of tests that measures various important pageload metrics. More information "
+ "can about what is exactly measured can found here:"
+ " https://firefox-source-docs.mozilla.org/testing/perfdocs/raptor.html#desktop",
+ },
+ "Speedometer 3": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'speedometer3"],
+ },
+ "variant-restrictions": {Suites.RAPTOR.value: [Variants.FISSION.value]},
+ "suites": [Suites.RAPTOR.value],
+ "app-restrictions": {},
+ "tasks": [],
+ "description": "A group of Speedometer3 tests on various platforms and architectures, speedometer3 is"
+ "currently the best benchmark we have for a baseline on real-world web performance",
+ },
+ "Responsiveness": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'responsive"],
+ },
+ "suites": [Suites.RAPTOR.value],
+ "variant-restrictions": {Suites.RAPTOR.value: []},
+ "app-restrictions": {
+ Suites.RAPTOR.value: [
+ Apps.FIREFOX.value,
+ Apps.CHROME.value,
+ Apps.CHROMIUM.value,
+ Apps.FENIX.value,
+ Apps.GECKOVIEW.value,
+ ],
+ },
+ "tasks": [],
+ "description": "A group of tests that ensure that the interactive part of the browser stays fast and"
+ "responsive",
+ },
+ "Benchmarks": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'benchmark !tp6-bench"],
+ },
+ "suites": [Suites.RAPTOR.value],
+ "variant-restrictions": {Suites.RAPTOR.value: []},
+ "tasks": [],
+ "description": "A group of tests that benchmark how the browser performs in various categories. "
+ "More information about what exact benchmarks we run can be found here: "
+ "https://firefox-source-docs.mozilla.org/testing/perfdocs/raptor.html#benchmarks",
+ },
+ "DAMP (Devtools)": {
+ "query": {
+ Suites.TALOS.value: ["'talos 'damp"],
+ },
+ "suites": [Suites.TALOS.value],
+ "tasks": [],
+ "description": "The DAMP tests are a group of tests that measure the performance of the browsers "
+ "devtools under certain conditiones. More information on the DAMP tests can be found"
+ " here: https://firefox-source-docs.mozilla.org/devtools/tests/performance-tests"
+ "-damp.html#what-does-it-do",
+ },
+ "Talos PerfTests": {
+ "query": {
+ Suites.TALOS.value: ["'talos"],
+ },
+ "suites": [Suites.TALOS.value],
+ "tasks": [],
+ "description": "This selects all of the talos performance tests. More information can be found here: "
+ "https://firefox-source-docs.mozilla.org/testing/perfdocs/talos.html#test-types",
+ },
+ "Resource Usage": {
+ "query": {
+ Suites.TALOS.value: ["'talos 'xperf | 'tp5"],
+ Suites.RAPTOR.value: ["'power 'osx"],
+ Suites.AWSY.value: ["'awsy"],
+ },
+ "suites": [Suites.TALOS.value, Suites.RAPTOR.value, Suites.AWSY.value],
+ "platform-restrictions": [Platforms.DESKTOP.value],
+ "variant-restrictions": {
+ Suites.RAPTOR.value: [],
+ Suites.TALOS.value: [],
+ },
+ "app-restrictions": {
+ Suites.RAPTOR.value: [Apps.FIREFOX.value],
+ Suites.TALOS.value: [Apps.FIREFOX.value],
+ },
+ "tasks": [],
+ "description": "A group of tests that monitor resource usage of various metrics like power, CPU, and"
+ "memory",
+ },
+ "Graphics, & Media Playback": {
+ "query": {
+ # XXX This might not be an exhaustive list for talos atm
+ Suites.TALOS.value: ["'talos 'svgr | 'bcv | 'webgl"],
+ Suites.RAPTOR.value: ["'browsertime 'youtube-playback"],
+ },
+ "suites": [Suites.TALOS.value, Suites.RAPTOR.value],
+ "variant-restrictions": {Suites.RAPTOR.value: [Variants.FISSION.value]},
+ "app-restrictions": {
+ Suites.RAPTOR.value: [
+ Apps.FIREFOX.value,
+ Apps.CHROME.value,
+ Apps.CHROMIUM.value,
+ Apps.FENIX.value,
+ Apps.GECKOVIEW.value,
+ ],
+ },
+ "tasks": [],
+ "description": "A group of tests that monitor key graphics and media metrics to keep the browser fast",
+ },
+ }
diff --git a/tools/tryselect/selectors/perfselector/perfcomparators.py b/tools/tryselect/selectors/perfselector/perfcomparators.py
new file mode 100644
index 0000000000..fce35fe562
--- /dev/null
+++ b/tools/tryselect/selectors/perfselector/perfcomparators.py
@@ -0,0 +1,258 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import importlib
+import inspect
+import pathlib
+
+BUILTIN_COMPARATORS = {}
+
+
+class ComparatorNotFound(Exception):
+ """Raised when we can't find the specified comparator.
+
+ Triggered when either the comparator name is incorrect for a builtin one,
+ or when a path to a specified comparator cannot be found.
+ """
+
+ pass
+
+
+class GithubRequestFailure(Exception):
+ """Raised when we hit a failure during PR link parsing."""
+
+ pass
+
+
+class BadComparatorArgs(Exception):
+ """Raised when the args given to the comparator are incorrect."""
+
+ pass
+
+
+def comparator(comparator_klass):
+ BUILTIN_COMPARATORS[comparator_klass.__name__] = comparator_klass
+ return comparator_klass
+
+
+@comparator
+class BasePerfComparator:
+ def __init__(self, vcs, compare_commit, current_revision_ref, comparator_args):
+ """Initialize the standard/default settings for Comparators.
+
+ :param vcs object: Used for updating the local repo.
+ :param compare_commit str: The base revision found for the local repo.
+ :param current_revision_ref str: The current revision of the local repo.
+ :param comparator_args list: List of comparator args in the format NAME=VALUE.
+ """
+ self.vcs = vcs
+ self.compare_commit = compare_commit
+ self.current_revision_ref = current_revision_ref
+ self.comparator_args = comparator_args
+
+ # Used to ensure that the local repo gets cleaned up appropriately on failures
+ self._updated = False
+
+ def setup_base_revision(self, extra_args):
+ """Setup the base try run/revision.
+
+ In this case, we update the repo to the base revision and
+ push that to try. The extra_args can be used to set additional
+ arguments for Raptor (not available for other harnesses).
+
+ :param extra_args list: A list of extra arguments to pass to the try tasks.
+ """
+ self.vcs.update(self.compare_commit)
+ self._updated = True
+
+ def teardown_base_revision(self):
+ """Teardown the setup for the base revision."""
+ if self._updated:
+ self.vcs.update(self.current_revision_ref)
+ self._updated = False
+
+ def setup_new_revision(self, extra_args):
+ """Setup the new try run/revision.
+
+ Note that the extra_args are reset between the base and new revision runs.
+
+ :param extra_args list: A list of extra arguments to pass to the try tasks.
+ """
+ pass
+
+ def teardown_new_revision(self):
+ """Teardown the new run/revision setup."""
+ pass
+
+ def teardown(self):
+ """Teardown for failures.
+
+ This method can be used for ensuring that the repo is cleaned up
+ when a failure is hit at any point in the process of doing the
+ new/base revision setups, or the pushes to try.
+ """
+ self.teardown_base_revision()
+
+
+def get_github_pull_request_info(link):
+ """Returns information about a PR link.
+
+ This method accepts a Github link in either of these formats:
+ https://github.com/mozilla-mobile/firefox-android/pull/1627,
+ https://github.com/mozilla-mobile/firefox-android/pull/1876/commits/17c7350cc37a4a85cea140a7ce54e9fd037b5365 # noqa
+
+ and returns the Github repo link, revision, and branch of the PR.
+ """
+ from urllib.parse import urlparse
+
+ import requests
+
+ # Parse the url, and get all the necessary info
+ parsed_url = urlparse(link)
+ if "/pull/" not in parsed_url.path:
+ raise GithubRequestFailure(
+ f"Link for Github PR is invalid (missing /pull/): {link}"
+ )
+
+ path_parts = parsed_url.path.strip("/").split("/")
+ owner, repo = path_parts[0], path_parts[1]
+ pr_number = path_parts[-1]
+
+ # Get the commit being targeted in the PR
+ pr_commit = None
+ if "/commits/" in parsed_url.path:
+ pr_commit = path_parts[-1]
+ pr_number = path_parts[-3]
+
+ # Make the request to get the PR info, raising an
+ # exception if the response code is not 200
+ api_url = f"https://api.github.com/repos/{owner}/{repo}/pulls/{pr_number}"
+ response = requests.get(api_url)
+ if response.status_code == 200:
+ link_info = response.json()
+ return (
+ link_info["head"]["repo"]["html_url"],
+ pr_commit if pr_commit else link_info["head"]["sha"],
+ link_info["head"]["ref"],
+ )
+
+ raise GithubRequestFailure(
+ f"The following url returned a non-200 status code: {api_url}"
+ )
+
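+# For example (illustrative values only), a PR link resolves to the fork
+# URL, the head revision, and the branch name:
+#
+#   repo, revision, branch = get_github_pull_request_info(
+#       "https://github.com/mozilla-mobile/firefox-android/pull/1627"
+#   )
+#   # repo     -> the html_url of the PR head repository
+#   # revision -> the sha of the PR head commit
+#   # branch   -> the ref name of the PR head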
+
+@comparator
+class BenchmarkComparator(BasePerfComparator):
+ def _get_benchmark_info(self, arg_prefix):
+ # Get the flag from the comparator args
+ benchmark_info = {"repo": None, "branch": None, "revision": None, "link": None}
+ for arg in self.comparator_args:
+ if arg.startswith(arg_prefix):
+ _, settings = arg.split(arg_prefix)
+ setting, val = settings.split("=")
+ if setting not in benchmark_info:
+ raise BadComparatorArgs(
+ f"Unknown argument provided `{setting}`. Only the following "
+ f"are available (prefixed with `{arg_prefix}`): "
+ f"{list(benchmark_info.keys())}"
+ )
+ benchmark_info[setting] = val
+
+ # Parse the link for any required information
+ if benchmark_info.get("link", None) is not None:
+ (
+ benchmark_info["repo"],
+ benchmark_info["revision"],
+ benchmark_info["branch"],
+ ) = get_github_pull_request_info(benchmark_info["link"])
+
+ return benchmark_info
+
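+ # For example, comparator args such as (hypothetical values)
+ # ["base-repo=https://github.com/org/repo", "base-revision=abc123"]
+ # produce {"repo": "https://github.com/org/repo", "branch": None,
+ # "revision": "abc123", "link": None} for arg_prefix="base-".
+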
+ def _setup_benchmark_args(self, extra_args, benchmark_info):
+ # Setup the arguments for Raptor
+ extra_args.append(f"benchmark-repository={benchmark_info['repo']}")
+ extra_args.append(f"benchmark-revision={benchmark_info['revision']}")
+
+ if benchmark_info.get("branch", None):
+ extra_args.append(f"benchmark-branch={benchmark_info['branch']}")
+
+ def setup_base_revision(self, extra_args):
+ """Sets up the options for a base benchmark revision run.
+
+ Checks for a `base-link` in the command and adds the
+ appropriate arguments to the extra_args, which will be
+ added to the PERF_FLAGS environment variable.
+
+ If that isn't provided, then you must provide the repo, branch,
+ and revision directly through these (branch is optional):
+
+ base-repo=https://github.com/mozilla-mobile/firefox-android
+ base-branch=main
+ base-revision=17c7350cc37a4a85cea140a7ce54e9fd037b5365
+
+ Otherwise, a BadComparatorArgs exception is raised, since
+ this comparator requires benchmark information.
+
+ TODO: Get the information automatically from a commit link. Github
+ API doesn't provide the branch name from a link like that.
+ """
+ base_info = self._get_benchmark_info("base-")
+
+ # If no options were provided, raise an error since this comparator needs them
+ if not any(v is not None for v in base_info.values()):
+ raise BadComparatorArgs(
+ f"Could not find the correct base-revision arguments in: {self.comparator_args}"
+ )
+
+ self._setup_benchmark_args(extra_args, base_info)
+
+ def setup_new_revision(self, extra_args):
+ """Sets up the options for a new benchmark revision run.
+
+ Same as `setup_base_revision`, except it uses
+ `new-` as the prefix instead of `base-`.
+ """
+ new_info = self._get_benchmark_info("new-")
+
+ # If no options were provided, raise an error since this comparator needs them
+ if not any(v is not None for v in new_info.values()):
+ raise BadComparatorArgs(
+ f"Could not find the correct new-revision arguments in: {self.comparator_args}"
+ )
+
+ self._setup_benchmark_args(extra_args, new_info)
+
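+# A minimal sketch (not one of the builtins) of a custom comparator that
+# could live in its own file and be passed to get_comparator() by path; the
+# class name and the extra argument it forwards are hypothetical:
+#
+#   class MyComparator(BasePerfComparator):
+#       def setup_new_revision(self, extra_args):
+#           # Forward a hypothetical flag to the new-revision try tasks
+#           extra_args.append("my-flag=1")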
+
+def get_comparator(comparator):
+ if comparator in BUILTIN_COMPARATORS:
+ return BUILTIN_COMPARATORS[comparator]
+
+ file = pathlib.Path(comparator)
+ if not file.exists():
+ raise ComparatorNotFound(
+ f"Expected either a path to a file containing a comparator, or a "
+ f"builtin comparator from this list: {BUILTIN_COMPARATORS.keys()}"
+ )
+
+ # Importing a source file directly
+ spec = importlib.util.spec_from_file_location(name=file.name, location=comparator)
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+
+ members = inspect.getmembers(
+ module,
+ lambda c: inspect.isclass(c)
+ and issubclass(c, BasePerfComparator)
+ and c != BasePerfComparator,
+ )
+
+ if not members:
+ raise ComparatorNotFound(
+ f"The path {comparator} was found but it was not a valid comparator. "
+ f"Ensure it is a subclass of BasePerfComparator and optionally contains the "
+ f"following methods: "
+ f"{', '.join(inspect.getmembers(BasePerfComparator, predicate=inspect.ismethod))}"
+ )
+
+ return members[0][-1]
diff --git a/tools/tryselect/selectors/perfselector/utils.py b/tools/tryselect/selectors/perfselector/utils.py
new file mode 100644
index 0000000000..105d003091
--- /dev/null
+++ b/tools/tryselect/selectors/perfselector/utils.py
@@ -0,0 +1,44 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+import sys
+
+REVISION_MATCHER = re.compile(r"remote:.*/try/rev/([\w]*)[ \t]*$")
+
+
+class LogProcessor:
+ def __init__(self):
+ self.buf = ""
+ self.stdout = sys.__stdout__
+ self._revision = None
+
+ @property
+ def revision(self):
+ return self._revision
+
+ def write(self, buf):
+ while buf:
+ try:
+ newline_index = buf.index("\n")
+ except ValueError:
+ # No newline, wait for next call
+ self.buf += buf
+ break
+
+ # Get data up to next newline and combine with previously buffered data
+ data = self.buf + buf[: newline_index + 1]
+ buf = buf[newline_index + 1 :]
+
+ # Reset buffer then output line
+ self.buf = ""
+ if data.strip() == "":
+ continue
+ self.stdout.write(data.strip("\n") + "\n")
+
+ # Check if a temporary commit was created
+ match = REVISION_MATCHER.match(data)
+ if match:
+ # Last line found is the revision we want
+ self._revision = match.group(1)
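+
+
+# Example (illustrative): feeding push output through the processor captures
+# the try revision from a line such as "remote: .../try/rev/<hash>"; the
+# hash below is hypothetical.
+#
+#   processor = LogProcessor()
+#   processor.write("remote: recorded at https://hg.example.org/try/rev/abc123\n")
+#   assert processor.revision == "abc123"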
diff --git a/tools/tryselect/selectors/preview.py b/tools/tryselect/selectors/preview.py
new file mode 100644
index 0000000000..1d232af9e0
--- /dev/null
+++ b/tools/tryselect/selectors/preview.py
@@ -0,0 +1,102 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""This script is intended to be called through fzf as a preview formatter."""
+
+
+import argparse
+import os
+import sys
+
+here = os.path.abspath(os.path.dirname(__file__))
+sys.path.insert(0, os.path.join(os.path.dirname(here), "util"))
+from estimates import duration_summary
+
+
+def process_args():
+ """Process preview arguments."""
+ argparser = argparse.ArgumentParser()
+ argparser.add_argument(
+ "-s",
+ "--show-estimates",
+ action="store_true",
+ help="Show task duration estimates (default: False)",
+ )
+ argparser.add_argument(
+ "-g",
+ "--graph-cache",
+ type=str,
+ default=None,
+ help="Filename of task graph dependencies",
+ )
+ argparser.add_argument(
+ "-c",
+ "--cache_dir",
+ type=str,
+ default=None,
+ help="Path to cache directory containing task durations",
+ )
+ argparser.add_argument(
+ "-t",
+ "--tasklist",
+ type=str,
+ default=None,
+ help="Path to temporary file containing the selected tasks",
+ )
+ return argparser.parse_args()
+
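+# A typical invocation, e.g. from fzf's --preview option (paths hypothetical):
+#   preview.py -s -g /tmp/graph.json -c /path/to/cache -t /tmp/selected-tasks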
+
+def plain_display(taskfile):
+ """Original preview window display."""
+ with open(taskfile) as f:
+ tasklist = [line.strip() for line in f]
+ print("\n".join(sorted(tasklist)))
+
+
+def duration_display(graph_cache_file, taskfile, cache_dir):
+ """Preview window display with task durations + metadata."""
+ with open(taskfile) as f:
+ tasklist = [line.strip() for line in f]
+
+ durations = duration_summary(graph_cache_file, tasklist, cache_dir)
+ output = ""
+ max_columns = int(os.environ["FZF_PREVIEW_COLUMNS"])
+
+ output += "\nSelected tasks take {}\n".format(durations["selected_duration"])
+ output += "+{} dependencies, total {}\n".format(
+ durations["dependency_count"],
+ durations["selected_duration"] + durations["dependency_duration"],
+ )
+
+ if durations.get("percentile"):
+ output += "This is in the top {}% of requests\n".format(
+ 100 - durations["percentile"]
+ )
+
+ output += "Estimated finish in {} at {}".format(
+ durations["wall_duration_seconds"], durations["eta_datetime"].strftime("%H:%M")
+ )
+
+ duration_width = 5 # show at most five digits of the duration
+ output += "{:>{width}}\n".format("Duration", width=max_columns)
+ for task in tasklist:
+ duration = durations["task_durations"].get(task, 0.0)
+ output += "{:{align}{width}} {:{nalign}{nwidth}}s\n".format(
+ task,
+ duration,
+ align="<",
+ width=max_columns - (duration_width + 2), # 2: space and 's'
+ nalign=">",
+ nwidth=duration_width,
+ )
+
+ print(output)
+
+
+if __name__ == "__main__":
+ args = process_args()
+ if args.show_estimates and args.cache_dir and os.path.isdir(args.cache_dir):
+ duration_display(args.graph_cache, args.tasklist, args.cache_dir)
+ else:
+ plain_display(args.tasklist)
diff --git a/tools/tryselect/selectors/release.py b/tools/tryselect/selectors/release.py
new file mode 100644
index 0000000000..994bbe644d
--- /dev/null
+++ b/tools/tryselect/selectors/release.py
@@ -0,0 +1,159 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import os
+
+import attr
+import yaml
+from mozilla_version.gecko import FirefoxVersion
+
+from ..cli import BaseTryParser
+from ..push import push_to_try, vcs
+
+TARGET_TASKS = {
+ "staging": "staging_release_builds",
+ "release-sim": "release_simulation",
+}
+
+
+def read_file(path):
+ with open(path) as fh:
+ return fh.read()
+
+
+class ReleaseParser(BaseTryParser):
+ name = "release"
+ arguments = [
+ [
+ ["-v", "--version"],
+ {
+ "metavar": "STR",
+ "required": True,
+ "action": "store",
+ "type": FirefoxVersion.parse,
+ "help": "The version number to use for the staging release.",
+ },
+ ],
+ [
+ ["--migration"],
+ {
+ "metavar": "STR",
+ "action": "append",
+ "dest": "migrations",
+ "choices": [
+ "central-to-beta",
+ "beta-to-release",
+ "early-to-late-beta",
+ "release-to-esr",
+ ],
+ "help": "Migration to run for the release (can be specified multiple times).",
+ },
+ ],
+ [
+ ["--no-limit-locales"],
+ {
+ "action": "store_false",
+ "dest": "limit_locales",
+ "help": "Don't build a limited number of locales in the staging release.",
+ },
+ ],
+ [
+ ["--tasks"],
+ {
+ "choices": TARGET_TASKS.keys(),
+ "default": "staging",
+ "help": "Which tasks to run on-push.",
+ },
+ ],
+ ]
+ common_groups = ["push"]
+ task_configs = ["disable-pgo", "worker-overrides"]
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.set_defaults(migrations=[])
+
+
+def run(
+ version,
+ migrations,
+ limit_locales,
+ tasks,
+ try_config_params=None,
+ stage_changes=False,
+ dry_run=False,
+ message="{msg}",
+ closed_tree=False,
+ push_to_lando=False,
+):
+ app_version = attr.evolve(version, beta_number=None, is_esr=False)
+
+ files_to_change = {
+ "browser/config/version.txt": "{}\n".format(app_version),
+ "browser/config/version_display.txt": "{}\n".format(version),
+ "config/milestone.txt": "{}\n".format(app_version),
+ }
+ with open("browser/config/version.txt") as f:
+ current_version = FirefoxVersion.parse(f.read())
+ format_options = {
+ "current_major_version": current_version.major_number,
+ "next_major_version": version.major_number,
+ "current_weave_version": current_version.major_number + 2,
+ "next_weave_version": version.major_number + 2,
+ }
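+ # For example (hypothetical versions): migrating from 128 to 129 gives
+ # current_weave_version=130 and next_weave_version=131.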
+
+ if "beta-to-release" in migrations and "early-to-late-beta" not in migrations:
+ migrations.append("early-to-late-beta")
+
+ release_type = version.version_type.name.lower()
+ if release_type not in ("beta", "release", "esr"):
+ raise Exception(
+ "Can't do staging release for version: {} type: {}".format(
+ version, version.version_type
+ )
+ )
+ elif release_type == "esr":
+ release_type += str(version.major_number)
+ task_config = {"version": 2, "parameters": try_config_params or {}}
+ task_config["parameters"].update(
+ {
+ "target_tasks_method": TARGET_TASKS[tasks],
+ "optimize_target_tasks": True,
+ "release_type": release_type,
+ }
+ )
+
+ with open(os.path.join(vcs.path, "taskcluster/ci/config.yml")) as f:
+ migration_configs = yaml.safe_load(f)
+ for migration in migrations:
+ migration_config = migration_configs["merge-automation"]["behaviors"][migration]
+ for path, from_, to in migration_config["replacements"]:
+ if path in files_to_change:
+ contents = files_to_change[path]
+ else:
+ contents = read_file(path)
+ from_ = from_.format(**format_options)
+ to = to.format(**format_options)
+ files_to_change[path] = contents.replace(from_, to)
+
+ if limit_locales:
+ files_to_change["browser/locales/l10n-changesets.json"] = read_file(
+ os.path.join(vcs.path, "browser/locales/l10n-onchange-changesets.json")
+ )
+ files_to_change["browser/locales/shipped-locales"] = "en-US\n" + read_file(
+ os.path.join(vcs.path, "browser/locales/onchange-locales")
+ )
+
+ msg = "staging release: {}".format(version)
+ return push_to_try(
+ "release",
+ message.format(msg=msg),
+ stage_changes=stage_changes,
+ dry_run=dry_run,
+ closed_tree=closed_tree,
+ try_task_config=task_config,
+ files_to_change=files_to_change,
+ push_to_lando=push_to_lando,
+ )
diff --git a/tools/tryselect/selectors/scriptworker.py b/tools/tryselect/selectors/scriptworker.py
new file mode 100644
index 0000000000..08020390c2
--- /dev/null
+++ b/tools/tryselect/selectors/scriptworker.py
@@ -0,0 +1,174 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import sys
+
+import requests
+from gecko_taskgraph.util.taskgraph import find_existing_tasks
+from taskgraph.parameters import Parameters
+from taskgraph.util.taskcluster import find_task_id, get_artifact, get_session
+
+from ..cli import BaseTryParser
+from ..push import push_to_try
+
+TASK_TYPES = {
+ "linux-signing": [
+ "build-signing-linux-shippable/opt",
+ "build-signing-linux64-shippable/opt",
+ "build-signing-win64-shippable/opt",
+ "build-signing-win32-shippable/opt",
+ "repackage-signing-win64-shippable/opt",
+ "repackage-signing-win32-shippable/opt",
+ "repackage-signing-msi-win32-shippable/opt",
+ "repackage-signing-msi-win64-shippable/opt",
+ "mar-signing-linux64-shippable/opt",
+ ],
+ "linux-signing-partial": ["partials-signing-linux64-shippable/opt"],
+ "mac-signing": ["build-signing-macosx64-shippable/opt"],
+ "beetmover-candidates": ["beetmover-repackage-linux64-shippable/opt"],
+ "bouncer-submit": ["release-bouncer-sub-firefox"],
+ "balrog-submit": [
+ "release-balrog-submit-toplevel-firefox",
+ "balrog-linux64-shippable/opt",
+ ],
+ "tree": ["release-early-tagging-firefox", "release-version-bump-firefox"],
+}
+
+RELEASE_TO_BRANCH = {
+ "beta": "releases/mozilla-beta",
+ "release": "releases/mozilla-release",
+}
+
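+# For example, `mach try scriptworker list` prints the task types above, and
+# a hypothetical `mach try scriptworker mac-signing --release-type release`
+# re-runs mac signing tasks against the latest shipped release graph.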
+
+class ScriptworkerParser(BaseTryParser):
+ name = "scriptworker"
+ arguments = [
+ [
+ ["task_type"],
+ {
+ "choices": ["list"] + list(TASK_TYPES.keys()),
+ "metavar": "TASK-TYPE",
+ "help": "Scriptworker task types to run. (Use `list` to show possibilities)",
+ },
+ ],
+ [
+ ["--release-type"],
+ {
+ "choices": ["nightly"] + list(RELEASE_TO_BRANCH.keys()),
+ "default": "beta",
+ "help": "Release type to run",
+ },
+ ],
+ ]
+
+ common_groups = ["push"]
+ task_configs = ["worker-overrides", "routes"]
+
+
+def get_releases(branch):
+ response = requests.get(
+ "https://shipitapi-public.services.mozilla.com/releases",
+ params={"product": "firefox", "branch": branch, "status": "shipped"},
+ headers={"Accept": "application/json"},
+ )
+ response.raise_for_status()
+ return response.json()
+
+
+def get_release_graph(release):
+ for phase in release["phases"]:
+ if phase["name"] in ("ship_firefox",):
+ return phase["actionTaskId"]
+ raise Exception("No ship phase.")
+
+
+def get_nightly_graph():
+ return find_task_id(
+ "gecko.v2.mozilla-central.latest.taskgraph.decision-nightly-desktop"
+ )
+
+
+def print_available_task_types():
+ print("Available task types:")
+ for task_type, tasks in TASK_TYPES.items():
+ print(" " * 4 + "{}:".format(task_type))
+ for task in tasks:
+ print(" " * 8 + "- {}".format(task))
+
+
+def get_hg_file(parameters, path):
+ session = get_session()
+ response = session.get(parameters.file_url(path))
+ response.raise_for_status()
+ return response.content
+
+
+def run(
+ task_type,
+ release_type,
+ try_config_params=None,
+ stage_changes=False,
+ dry_run=False,
+ message="{msg}",
+ closed_tree=False,
+ push_to_lando=False,
+):
+ if task_type == "list":
+ print_available_task_types()
+ sys.exit(0)
+
+ if release_type == "nightly":
+ previous_graph = get_nightly_graph()
+ else:
+ release = get_releases(RELEASE_TO_BRANCH[release_type])[-1]
+ previous_graph = get_release_graph(release)
+ existing_tasks = find_existing_tasks([previous_graph])
+
+ previous_parameters = Parameters(
+ strict=False, **get_artifact(previous_graph, "public/parameters.yml")
+ )
+
+ # Copy the L10n configuration from the commit that the release we are
+ # using was based on. This *should* ensure that the chunking of L10n
+ # tasks is the same between graphs.
+ files_to_change = {
+ path: get_hg_file(previous_parameters, path)
+ for path in [
+ "browser/locales/l10n-changesets.json",
+ "browser/locales/shipped-locales",
+ ]
+ }
+
+ task_config = {"version": 2, "parameters": try_config_params or {}}
+ task_config["parameters"]["optimize_target_tasks"] = True
+ task_config["parameters"]["existing_tasks"] = existing_tasks
+ for param in (
+ "app_version",
+ "build_number",
+ "next_version",
+ "release_history",
+ "release_product",
+ "release_type",
+ "version",
+ ):
+ task_config["parameters"][param] = previous_parameters[param]
+
+ try_config = task_config["parameters"].setdefault("try_task_config", {})
+ try_config["tasks"] = TASK_TYPES[task_type]
+ for label in try_config["tasks"]:
+ if label in existing_tasks:
+ del existing_tasks[label]
+
+ msg = "scriptworker tests: {}".format(task_type)
+ return push_to_try(
+ "scriptworker",
+ message.format(msg=msg),
+ stage_changes=stage_changes,
+ dry_run=dry_run,
+ closed_tree=closed_tree,
+ try_task_config=task_config,
+ files_to_change=files_to_change,
+ push_to_lando=push_to_lando,
+ )
diff --git a/tools/tryselect/selectors/syntax.py b/tools/tryselect/selectors/syntax.py
new file mode 100644
index 0000000000..29b80f519a
--- /dev/null
+++ b/tools/tryselect/selectors/syntax.py
@@ -0,0 +1,708 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import os
+import re
+import sys
+from collections import defaultdict
+
+import mozpack.path as mozpath
+from moztest.resolve import TestResolver
+
+from ..cli import BaseTryParser
+from ..push import build, push_to_try
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+
+class SyntaxParser(BaseTryParser):
+ name = "syntax"
+ arguments = [
+ [
+ ["paths"],
+ {
+ "nargs": "*",
+ "default": [],
+ "help": "Paths to search for tests to run on try.",
+ },
+ ],
+ [
+ ["-b", "--build"],
+ {
+ "dest": "builds",
+ "default": "do",
+ "help": "Build types to run (d for debug, o for optimized).",
+ },
+ ],
+ [
+ ["-p", "--platform"],
+ {
+ "dest": "platforms",
+ "action": "append",
+ "help": "Platforms to run (required if not found in the environment as "
+ "AUTOTRY_PLATFORM_HINT).",
+ },
+ ],
+ [
+ ["-u", "--unittests"],
+ {
+ "dest": "tests",
+ "action": "append",
+ "help": "Test suites to run in their entirety.",
+ },
+ ],
+ [
+ ["-t", "--talos"],
+ {
+ "action": "append",
+ "help": "Talos suites to run.",
+ },
+ ],
+ [
+ ["-j", "--jobs"],
+ {
+ "action": "append",
+ "help": "Job tasks to run.",
+ },
+ ],
+ [
+ ["--tag"],
+ {
+ "dest": "tags",
+ "action": "append",
+ "help": "Restrict tests to the given tag (may be specified multiple times).",
+ },
+ ],
+ [
+ ["--and"],
+ {
+ "action": "store_true",
+ "dest": "intersection",
+ "help": "When -u and paths are supplied run only the intersection of the "
+ "tests specified by the two arguments.",
+ },
+ ],
+ [
+ ["--no-artifact"],
+ {
+ "action": "store_true",
+ "help": "Disable artifact builds even if --enable-artifact-builds is set "
+ "in the mozconfig.",
+ },
+ ],
+ [
+ ["-v", "--verbose"],
+ {
+ "dest": "verbose",
+ "action": "store_true",
+ "default": False,
+ "help": "Print detailed information about the resulting test selection "
+ "and commands performed.",
+ },
+ ],
+ ]
+
+ # Arguments we will accept on the command line and pass through to try
+ # syntax with no further intervention. The set is taken from
+ # http://trychooser.pub.build.mozilla.org with a few additions.
+ #
+ # Note that the meaning of store_false and store_true arguments is
+ # not preserved here, as we're only using these to echo the literal
+ # arguments to another consumer. Specifying either store_false or
+ # store_true here will have an equivalent effect.
+ pass_through_arguments = {
+ "--rebuild": {
+ "action": "store",
+ "dest": "rebuild",
+ "help": "Re-trigger all test jobs (up to 20 times)",
+ },
+ "--rebuild-talos": {
+ "action": "store",
+ "dest": "rebuild_talos",
+ "help": "Re-trigger all talos jobs",
+ },
+ "--interactive": {
+ "action": "store_true",
+ "dest": "interactive",
+ "help": "Allow ssh-like access to running test containers",
+ },
+ "--no-retry": {
+ "action": "store_true",
+ "dest": "no_retry",
+ "help": "Do not retrigger failed tests",
+ },
+ "--setenv": {
+ "action": "append",
+ "dest": "setenv",
+ "help": "Set the corresponding variable in the test environment for "
+ "applicable harnesses.",
+ },
+ "-f": {
+ "action": "store_true",
+ "dest": "failure_emails",
+ "help": "Request failure emails only",
+ },
+ "--failure-emails": {
+ "action": "store_true",
+ "dest": "failure_emails",
+ "help": "Request failure emails only",
+ },
+ "-e": {
+ "action": "store_true",
+ "dest": "all_emails",
+ "help": "Request all emails",
+ },
+ "--all-emails": {
+ "action": "store_true",
+ "dest": "all_emails",
+ "help": "Request all emails",
+ },
+ "--artifact": {
+ "action": "store_true",
+ "dest": "artifact",
+ "help": "Force artifact builds where possible.",
+ },
+ "--upload-xdbs": {
+ "action": "store_true",
+ "dest": "upload_xdbs",
+ "help": "Upload XDB compilation db files generated by hazard build",
+ },
+ }
+ task_configs = []
+
+ def __init__(self, *args, **kwargs):
+ BaseTryParser.__init__(self, *args, **kwargs)
+
+ group = self.add_argument_group("pass-through arguments")
+ for arg, opts in self.pass_through_arguments.items():
+ group.add_argument(arg, **opts)
+
+
+class TryArgumentTokenizer:
+ symbols = [
+ ("separator", ","),
+ ("list_start", r"\["),
+ ("list_end", r"\]"),
+ ("item", r"([^,\[\]\s][^,\[\]]+)"),
+ ("space", r"\s+"),
+ ]
+ token_re = re.compile("|".join("(?P<%s>%s)" % item for item in symbols))
+
+ def tokenize(self, data):
+ for match in self.token_re.finditer(data):
+ symbol = match.lastgroup
+ data = match.group(symbol)
+ if symbol == "space":
+ pass
+ else:
+ yield symbol, data
+
+
+class TryArgumentParser:
+ """Simple three-state parser for handling expressions
+ of the from "foo[sub item, another], bar,baz". This takes
+ input from the TryArgumentTokenizer and runs through a small
+ state machine, returning a dictionary of {top-level-item:[sub_items]}
+ i.e. the above would result in
+ {"foo":["sub item", "another"], "bar": [], "baz": []}
+ In the case of invalid input a ValueError is raised."""
+
+ EOF = object()
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.tokens = None
+ self.current_item = None
+ self.data = {}
+ self.token = None
+ self.state = None
+
+ def parse(self, tokens):
+ self.reset()
+ self.tokens = tokens
+ self.consume()
+ self.state = self.item_state
+ while self.token[0] != self.EOF:
+ self.state()
+ return self.data
+
+ def consume(self):
+ try:
+ self.token = next(self.tokens)
+ except StopIteration:
+ self.token = (self.EOF, None)
+
+ def expect(self, *types):
+ if self.token[0] not in types:
+ raise ValueError(
+ "Error parsing try string, unexpected %s" % (self.token[0])
+ )
+
+ def item_state(self):
+ self.expect("item")
+ value = self.token[1].strip()
+ if value not in self.data:
+ self.data[value] = []
+ self.current_item = value
+ self.consume()
+ if self.token[0] == "separator":
+ self.consume()
+ elif self.token[0] == "list_start":
+ self.consume()
+ self.state = self.subitem_state
+ elif self.token[0] == self.EOF:
+ pass
+ else:
+ raise ValueError
+
+ def subitem_state(self):
+ self.expect("item")
+ value = self.token[1].strip()
+ self.data[self.current_item].append(value)
+ self.consume()
+ if self.token[0] == "separator":
+ self.consume()
+ elif self.token[0] == "list_end":
+ self.consume()
+ self.state = self.after_list_end_state
+ else:
+ raise ValueError
+
+ def after_list_end_state(self):
+ self.expect("separator")
+ self.consume()
+ self.state = self.item_state
+
+
+def parse_arg(arg):
+ tokenizer = TryArgumentTokenizer()
+ parser = TryArgumentParser()
+ return parser.parse(tokenizer.tokenize(arg))
+
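+# For example:
+#
+#   parse_arg("mochitest-1[linux64, windows10], xpcshell")
+#   # -> {"mochitest-1": ["linux64", "windows10"], "xpcshell": []}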
+
+class AutoTry:
+ # Maps from flavors to the job names needed to run that flavor
+ flavor_jobs = {
+ "mochitest": ["mochitest-1", "mochitest-e10s-1"],
+ "xpcshell": ["xpcshell"],
+ "chrome": ["mochitest-o"],
+ "browser-a11y": ["mochitest-ba"],
+ "browser-media": ["mochitest-bmda"],
+ "browser-chrome": [
+ "mochitest-browser-chrome-1",
+ "mochitest-e10s-browser-chrome-1",
+ "mochitest-browser-chrome-e10s-1",
+ ],
+ "devtools-chrome": [
+ "mochitest-devtools-chrome-1",
+ "mochitest-e10s-devtools-chrome-1",
+ "mochitest-devtools-chrome-e10s-1",
+ ],
+ "crashtest": ["crashtest", "crashtest-e10s"],
+ "reftest": ["reftest", "reftest-e10s"],
+ "remote": ["mochitest-remote"],
+ "web-platform-tests": ["web-platform-tests-1"],
+ }
+
+ flavor_suites = {
+ "mochitest": "mochitests",
+ "xpcshell": "xpcshell",
+ "chrome": "mochitest-o",
+ "browser-chrome": "mochitest-bc",
+ "browser-a11y": "mochitest-ba",
+ "browser-media": "mochitest-bmda",
+ "devtools-chrome": "mochitest-dt",
+ "crashtest": "crashtest",
+ "reftest": "reftest",
+ "web-platform-tests": "web-platform-tests",
+ }
+
+ compiled_suites = [
+ "cppunit",
+ "gtest",
+ "jittest",
+ ]
+
+ common_suites = [
+ "cppunit",
+ "crashtest",
+ "firefox-ui-functional",
+ "geckoview",
+ "geckoview-junit",
+ "gtest",
+ "jittest",
+ "jsreftest",
+ "marionette",
+ "marionette-e10s",
+ "mochitests",
+ "reftest",
+ "robocop",
+ "web-platform-tests",
+ "xpcshell",
+ ]
+
+ def __init__(self):
+ self.topsrcdir = build.topsrcdir
+ self._resolver = None
+
+ @property
+ def resolver(self):
+ if self._resolver is None:
+ self._resolver = TestResolver.from_environment(cwd=here)
+ return self._resolver
+
+ @classmethod
+ def split_try_string(cls, data):
+ return re.findall(r"(?:\[.*?\]|\S)+", data)
+
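+ # For example (illustrative):
+ #   split_try_string("-u mochitest[linux64, win] -b d")
+ #   # -> ["-u", "mochitest[linux64, win]", "-b", "d"]
+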
+ def paths_by_flavor(self, paths=None, tags=None):
+ paths_by_flavor = defaultdict(set)
+
+ if not (paths or tags):
+ return dict(paths_by_flavor)
+
+ tests = list(self.resolver.resolve_tests(paths=paths, tags=tags))
+
+ for t in tests:
+ if t["flavor"] in self.flavor_suites:
+ flavor = t["flavor"]
+ if "subsuite" in t and t["subsuite"] == "devtools":
+ flavor = "devtools-chrome"
+
+ if "subsuite" in t and t["subsuite"] == "a11y":
+ flavor = "browser-a11y"
+
+ if "subsuite" in t and t["subsuite"] == "media-bc":
+ flavor = "browser-media"
+
+ if flavor in ["crashtest", "reftest"]:
+ manifest_relpath = os.path.relpath(t["manifest"], self.topsrcdir)
+ paths_by_flavor[flavor].add(os.path.dirname(manifest_relpath))
+ elif "dir_relpath" in t:
+ paths_by_flavor[flavor].add(t["dir_relpath"])
+ else:
+ file_relpath = os.path.relpath(t["path"], self.topsrcdir)
+ dir_relpath = os.path.dirname(file_relpath)
+ paths_by_flavor[flavor].add(dir_relpath)
+
+ for flavor, path_set in paths_by_flavor.items():
+ paths_by_flavor[flavor] = self.deduplicate_prefixes(path_set, paths)
+
+ return dict(paths_by_flavor)
+
+ def deduplicate_prefixes(self, path_set, input_paths):
+ # Removes paths redundant to test selection in the given path set.
+ # If a path was passed on the commandline that is the prefix of a
+ # path in our set, we only need to include the specified prefix to
+ # run the intended tests (every test in "layout/base" will run if
+ # "layout" is passed to the reftest harness).
+ removals = set()
+ additions = set()
+
+ for path in path_set:
+ full_path = path
+ while path:
+ path, _ = os.path.split(path)
+ if path in input_paths:
+ removals.add(full_path)
+ additions.add(path)
+
+ return additions | (path_set - removals)
+
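+ # For example (illustrative paths), with input_paths=["layout"]:
+ #   deduplicate_prefixes({"layout/base/tests"}, ["layout"]) -> {"layout"}
+ # since the command-line prefix already selects those tests.
+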
+ def remove_duplicates(self, paths_by_flavor, tests):
+ rv = {}
+ for item in paths_by_flavor:
+ if self.flavor_suites[item] not in tests:
+ rv[item] = paths_by_flavor[item].copy()
+ return rv
+
+ def calc_try_syntax(
+ self,
+ platforms,
+ tests,
+ talos,
+ jobs,
+ builds,
+ paths_by_flavor,
+ tags,
+ extras,
+ intersection,
+ ):
+ parts = ["try:"]
+
+ if platforms:
+ parts.extend(["-b", builds, "-p", ",".join(platforms)])
+
+ suites = tests if not intersection else {}
+ paths = set()
+ for flavor, flavor_tests in paths_by_flavor.items():
+ suite = self.flavor_suites[flavor]
+ if suite not in suites and (not intersection or suite in tests):
+ for job_name in self.flavor_jobs[flavor]:
+ for test in flavor_tests:
+ paths.add("{}:{}".format(flavor, test))
+ suites[job_name] = tests.get(suite, [])
+
+ # intersection implies tests are expected
+ if intersection and not suites:
+ raise ValueError("No tests found matching filters")
+
+ if extras.get("artifact") and any([p.endswith("-nightly") for p in platforms]):
+ print(
+ 'You asked for |--artifact| but "-nightly" platforms don\'t have artifacts. '
+ "Running without |--artifact| instead."
+ )
+ del extras["artifact"]
+
+ if extras.get("artifact"):
+ rejected = []
+ for suite in suites.keys():
+ if any([suite.startswith(c) for c in self.compiled_suites]):
+ rejected.append(suite)
+ if rejected:
+ raise ValueError(
+ "You can't run {} with "
+ "--artifact option.".format(", ".join(rejected))
+ )
+
+ if extras.get("artifact") and "all" in suites.keys():
+ non_compiled_suites = set(self.common_suites) - set(self.compiled_suites)
+ message = (
+ "You asked for |-u all| with |--artifact| but compiled-code tests ({tests})"
+ " can't run against an artifact build. Running (-u {non_compiled_suites}) "
+ "instead."
+ )
+ string_format = {
+ "tests": ",".join(self.compiled_suites),
+ "non_compiled_suites": ",".join(non_compiled_suites),
+ }
+ print(message.format(**string_format))
+ del suites["all"]
+ suites.update({suite_name: None for suite_name in non_compiled_suites})
+
+ if suites:
+ parts.append("-u")
+ parts.append(
+ ",".join(
+ "{}{}".format(k, "[%s]" % ",".join(v) if v else "")
+ for k, v in sorted(suites.items())
+ )
+ )
+
+ if talos:
+ parts.append("-t")
+ parts.append(
+ ",".join(
+ "{}{}".format(k, "[%s]" % ",".join(v) if v else "")
+ for k, v in sorted(talos.items())
+ )
+ )
+
+ if jobs:
+ parts.append("-j")
+ parts.append(",".join(jobs))
+
+ if tags:
+ parts.append(" ".join("--tag %s" % t for t in tags))
+
+ if paths:
+ parts.append("--try-test-paths %s" % " ".join(sorted(paths)))
+
+ args_by_dest = {
+ v["dest"]: k for k, v in SyntaxParser.pass_through_arguments.items()
+ }
+ for dest, value in extras.items():
+ assert dest in args_by_dest
+ arg = args_by_dest[dest]
+ action = SyntaxParser.pass_through_arguments[arg]["action"]
+ if action == "store":
+ parts.append(arg)
+ parts.append(value)
+ if action == "append":
+ for e in value:
+ parts.append(arg)
+ parts.append(e)
+ if action in ("store_true", "store_false"):
+ parts.append(arg)
+
+ return " ".join(parts)
+
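+ # For instance, a hypothetical debug linux64 push selecting only the
+ # xpcshell suite would yield: "try: -b d -p linux64 -u xpcshell"
+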
+ def normalise_list(self, items, allow_subitems=False):
+ rv = defaultdict(list)
+ for item in items:
+ parsed = parse_arg(item)
+ for key, values in parsed.items():
+ rv[key].extend(values)
+
+ if not allow_subitems:
+ if not all(item == [] for item in rv.values()):
+ raise ValueError("Unexpected subitems in argument")
+ return rv.keys()
+ else:
+ return rv
+
+ def validate_args(self, **kwargs):
+ tests_selected = kwargs["tests"] or kwargs["paths"] or kwargs["tags"]
+ if kwargs["platforms"] is None and (kwargs["jobs"] is None or tests_selected):
+ if "AUTOTRY_PLATFORM_HINT" in os.environ:
+ kwargs["platforms"] = [os.environ["AUTOTRY_PLATFORM_HINT"]]
+ elif tests_selected:
+ print("Must specify platform when selecting tests.")
+ sys.exit(1)
+ else:
+ print(
+ "Either platforms or jobs must be specified as an argument to autotry."
+ )
+ sys.exit(1)
+
+ try:
+ platforms = (
+ self.normalise_list(kwargs["platforms"]) if kwargs["platforms"] else {}
+ )
+ except ValueError as e:
+ print("Error parsing -p argument:\n%s" % e)
+ sys.exit(1)
+
+ try:
+ tests = (
+ self.normalise_list(kwargs["tests"], allow_subitems=True)
+ if kwargs["tests"]
+ else {}
+ )
+ except ValueError as e:
+ print("Error parsing -u argument ({}):\n{}".format(kwargs["tests"], e))
+ sys.exit(1)
+
+ try:
+ talos = (
+ self.normalise_list(kwargs["talos"], allow_subitems=True)
+ if kwargs["talos"]
+ else []
+ )
+ except ValueError as e:
+ print("Error parsing -t argument:\n%s" % e)
+ sys.exit(1)
+
+ try:
+ jobs = self.normalise_list(kwargs["jobs"]) if kwargs["jobs"] else {}
+ except ValueError as e:
+ print("Error parsing -j argument:\n%s" % e)
+ sys.exit(1)
+
+ paths = []
+ for p in kwargs["paths"]:
+ p = mozpath.normpath(os.path.abspath(p))
+ if not (os.path.isdir(p) and p.startswith(self.topsrcdir)):
+ print(
+ 'Specified path "%s" is not a directory under the srcdir,'
+ " unable to specify tests outside of the srcdir" % p
+ )
+ sys.exit(1)
+ if len(p) <= len(self.topsrcdir):
+ print(
+ 'Specified path "%s" is at the top of the srcdir and would'
+ " select all tests." % p
+ )
+ sys.exit(1)
+ paths.append(os.path.relpath(p, self.topsrcdir))
+
+ try:
+ tags = self.normalise_list(kwargs["tags"]) if kwargs["tags"] else []
+ except ValueError as e:
+ print("Error parsing --tags argument:\n%s" % e)
+ sys.exit(1)
+
+ extra_values = {k["dest"] for k in SyntaxParser.pass_through_arguments.values()}
+ extra_args = {k: v for k, v in kwargs.items() if k in extra_values and v}
+
+ return kwargs["builds"], platforms, tests, talos, jobs, paths, tags, extra_args
+
+ def run(self, **kwargs):
+ if not any(kwargs[item] for item in ("paths", "tests", "tags")):
+ kwargs["paths"] = set()
+ kwargs["tags"] = set()
+
+ builds, platforms, tests, talos, jobs, paths, tags, extra = self.validate_args(
+ **kwargs
+ )
+
+ if paths or tags:
+ paths = [
+ os.path.relpath(os.path.normpath(os.path.abspath(item)), self.topsrcdir)
+ for item in paths
+ ]
+ paths_by_flavor = self.paths_by_flavor(paths=paths, tags=tags)
+
+ if not paths_by_flavor and not tests:
+ print(
+ "No tests were found when attempting to resolve paths:\n\n\t%s"
+ % paths
+ )
+ sys.exit(1)
+
+ if not kwargs["intersection"]:
+ paths_by_flavor = self.remove_duplicates(paths_by_flavor, tests)
+ else:
+ paths_by_flavor = {}
+
+ # No point in dealing with artifacts if we aren't running any builds
+ local_artifact_build = False
+ if platforms:
+ local_artifact_build = kwargs.get("local_artifact_build", False)
+
+ # Add --artifact if --enable-artifact-builds is set ...
+ if local_artifact_build:
+ extra["artifact"] = True
+ # ... unless --no-artifact is explicitly given.
+ if kwargs["no_artifact"]:
+ if "artifact" in extra:
+ del extra["artifact"]
+
+ try:
+ msg = self.calc_try_syntax(
+ platforms,
+ tests,
+ talos,
+ jobs,
+ builds,
+ paths_by_flavor,
+ tags,
+ extra,
+ kwargs["intersection"],
+ )
+ except ValueError as e:
+ print(e)
+ sys.exit(1)
+
+ if local_artifact_build and not kwargs["no_artifact"]:
+ print(
+ "mozconfig has --enable-artifact-builds; including "
+ "--artifact flag in try syntax (use --no-artifact "
+ "to override)"
+ )
+
+ if kwargs["verbose"] and paths_by_flavor:
+ print("The following tests will be selected: ")
+ for flavor, paths in paths_by_flavor.items():
+ print("{}: {}".format(flavor, ",".join(paths)))
+
+ if kwargs["verbose"]:
+ print("The following try syntax was calculated:\n%s" % msg)
+
+ push_to_try(
+ "syntax",
+ kwargs["message"].format(msg=msg),
+ stage_changes=kwargs["stage_changes"],
+ dry_run=kwargs["dry_run"],
+ closed_tree=kwargs["closed_tree"],
+ push_to_lando=kwargs["push_to_lando"],
+ )
+
+
+def run(**kwargs):
+ at = AutoTry()
+ return at.run(**kwargs)