summaryrefslogtreecommitdiffstats
path: root/tools/tryselect
diff options
context:
space:
mode:
Diffstat (limited to 'tools/tryselect')
-rw-r--r--tools/tryselect/__init__.py3
-rw-r--r--tools/tryselect/cli.py175
-rw-r--r--tools/tryselect/docs/configuration.rst30
-rw-r--r--tools/tryselect/docs/img/add-new-jobs.pngbin0 -> 33561 bytes
-rw-r--r--tools/tryselect/docs/img/phab-treeherder-link.pngbin0 -> 35922 bytes
-rw-r--r--tools/tryselect/docs/index.rst92
-rw-r--r--tools/tryselect/docs/presets.rst85
-rw-r--r--tools/tryselect/docs/selectors/again.rst36
-rw-r--r--tools/tryselect/docs/selectors/auto.rst24
-rw-r--r--tools/tryselect/docs/selectors/chooser.rst32
-rw-r--r--tools/tryselect/docs/selectors/compare.rst17
-rw-r--r--tools/tryselect/docs/selectors/empty.rst21
-rw-r--r--tools/tryselect/docs/selectors/fuzzy.rst371
-rw-r--r--tools/tryselect/docs/selectors/fzf.pngbin0 -> 44467 bytes
-rw-r--r--tools/tryselect/docs/selectors/index.rst44
-rw-r--r--tools/tryselect/docs/selectors/release.rst31
-rw-r--r--tools/tryselect/docs/selectors/scriptworker.rst31
-rw-r--r--tools/tryselect/docs/selectors/syntax.rst41
-rw-r--r--tools/tryselect/docs/tasks.rst152
-rw-r--r--tools/tryselect/lando.py452
-rw-r--r--tools/tryselect/mach_commands.py514
-rw-r--r--tools/tryselect/preset.py107
-rw-r--r--tools/tryselect/push.py257
-rw-r--r--tools/tryselect/selectors/__init__.py3
-rw-r--r--tools/tryselect/selectors/again.py151
-rw-r--r--tools/tryselect/selectors/auto.py118
-rw-r--r--tools/tryselect/selectors/chooser/.eslintrc.js16
-rw-r--r--tools/tryselect/selectors/chooser/__init__.py120
-rw-r--r--tools/tryselect/selectors/chooser/app.py176
-rw-r--r--tools/tryselect/selectors/chooser/static/filter.js116
-rw-r--r--tools/tryselect/selectors/chooser/static/select.js46
-rw-r--r--tools/tryselect/selectors/chooser/static/style.css107
-rw-r--r--tools/tryselect/selectors/chooser/templates/chooser.html78
-rw-r--r--tools/tryselect/selectors/chooser/templates/close.html11
-rw-r--r--tools/tryselect/selectors/chooser/templates/layout.html71
-rw-r--r--tools/tryselect/selectors/compare.py66
-rw-r--r--tools/tryselect/selectors/coverage.py452
-rw-r--r--tools/tryselect/selectors/empty.py43
-rw-r--r--tools/tryselect/selectors/fuzzy.py284
-rw-r--r--tools/tryselect/selectors/perf.py1511
-rw-r--r--tools/tryselect/selectors/perf_preview.py62
-rw-r--r--tools/tryselect/selectors/perfselector/__init__.py3
-rw-r--r--tools/tryselect/selectors/perfselector/classification.py387
-rw-r--r--tools/tryselect/selectors/perfselector/perfcomparators.py258
-rw-r--r--tools/tryselect/selectors/perfselector/utils.py44
-rw-r--r--tools/tryselect/selectors/preview.py102
-rw-r--r--tools/tryselect/selectors/release.py159
-rw-r--r--tools/tryselect/selectors/scriptworker.py174
-rw-r--r--tools/tryselect/selectors/syntax.py708
-rw-r--r--tools/tryselect/task_config.py642
-rw-r--r--tools/tryselect/tasks.py209
-rw-r--r--tools/tryselect/test/conftest.py106
-rw-r--r--tools/tryselect/test/cram.toml5
-rw-r--r--tools/tryselect/test/python.toml31
-rw-r--r--tools/tryselect/test/setup.sh101
-rw-r--r--tools/tryselect/test/test_again.py73
-rw-r--r--tools/tryselect/test/test_auto.py31
-rw-r--r--tools/tryselect/test/test_auto.t61
-rw-r--r--tools/tryselect/test/test_chooser.py84
-rw-r--r--tools/tryselect/test/test_empty.t62
-rw-r--r--tools/tryselect/test/test_fuzzy.py125
-rw-r--r--tools/tryselect/test/test_fuzzy.t252
-rw-r--r--tools/tryselect/test/test_message.t73
-rw-r--r--tools/tryselect/test/test_mozharness_integration.py145
-rw-r--r--tools/tryselect/test/test_perf.py1425
-rw-r--r--tools/tryselect/test/test_perfcomparators.py150
-rw-r--r--tools/tryselect/test/test_preset.t390
-rw-r--r--tools/tryselect/test/test_presets.py58
-rw-r--r--tools/tryselect/test/test_push.py54
-rw-r--r--tools/tryselect/test/test_release.py43
-rw-r--r--tools/tryselect/test/test_scriptworker.py39
-rw-r--r--tools/tryselect/test/test_task_configs.py257
-rw-r--r--tools/tryselect/test/test_tasks.py93
-rw-r--r--tools/tryselect/try_presets.yml298
-rw-r--r--tools/tryselect/util/__init__.py3
-rw-r--r--tools/tryselect/util/dicttools.py50
-rw-r--r--tools/tryselect/util/estimates.py124
-rw-r--r--tools/tryselect/util/fzf.py424
-rw-r--r--tools/tryselect/util/manage_estimates.py132
-rw-r--r--tools/tryselect/util/ssh.py24
-rw-r--r--tools/tryselect/watchman.json15
81 files changed, 13360 insertions, 0 deletions
diff --git a/tools/tryselect/__init__.py b/tools/tryselect/__init__.py
new file mode 100644
index 0000000000..c580d191c1
--- /dev/null
+++ b/tools/tryselect/__init__.py
@@ -0,0 +1,3 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
diff --git a/tools/tryselect/cli.py b/tools/tryselect/cli.py
new file mode 100644
index 0000000000..11d1867e47
--- /dev/null
+++ b/tools/tryselect/cli.py
@@ -0,0 +1,175 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import os
+import subprocess
+import tempfile
+from argparse import ArgumentParser
+
+from .task_config import all_task_configs
+
+COMMON_ARGUMENT_GROUPS = {
+ "push": [
+ [
+ ["-m", "--message"],
+ {
+ "const": "editor",
+ "default": "{msg}",
+ "nargs": "?",
+ "help": "Use the specified commit message, or create it in your "
+ "$EDITOR if blank. Defaults to computed message.",
+ },
+ ],
+ [
+ ["--closed-tree"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Push despite a closed try tree",
+ },
+ ],
+ [
+ ["--push-to-lando"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Submit changes for Lando to push to try.",
+ },
+ ],
+ ],
+ "preset": [
+ [
+ ["--save"],
+ {
+ "default": None,
+ "help": "Save selection for future use with --preset.",
+ },
+ ],
+ [
+ ["--preset"],
+ {
+ "default": None,
+ "help": "Load a saved selection.",
+ },
+ ],
+ [
+ ["--list-presets"],
+ {
+ "action": "store_const",
+ "dest": "preset_action",
+ "const": "list",
+ "default": None,
+ "help": "List available preset selections.",
+ },
+ ],
+ [
+ ["--edit-presets"],
+ {
+ "action": "store_const",
+ "dest": "preset_action",
+ "const": "edit",
+ "default": None,
+ "help": "Edit the preset file.",
+ },
+ ],
+ ],
+ "task": [
+ [
+ ["--full"],
+ {
+ "action": "store_true",
+ "default": False,
+ "help": "Use the full set of tasks as input to fzf (instead of "
+ "target tasks).",
+ },
+ ],
+ [
+ ["-p", "--parameters"],
+ {
+ "default": None,
+ "help": "Use the given parameters.yml to generate tasks, "
+ "defaults to a default set of parameters",
+ },
+ ],
+ ],
+}
+
+NO_PUSH_ARGUMENT_GROUP = [
+ [
+ ["--stage-changes"],
+ {
+ "dest": "stage_changes",
+ "action": "store_true",
+ "help": "Locally stage changes created by this command but do not "
+ "push to try.",
+ },
+ ],
+ [
+ ["--no-push"],
+ {
+ "dest": "dry_run",
+ "action": "store_true",
+ "help": "Do not push to try as a result of running this command (if "
+ "specified this command will only print calculated try "
+ "syntax and selection info and not change files).",
+ },
+ ],
+]
+
+
+class BaseTryParser(ArgumentParser):
+ name = "try"
+ common_groups = ["push", "preset"]
+ arguments = []
+ task_configs = []
+
+ def __init__(self, *args, **kwargs):
+ ArgumentParser.__init__(self, *args, **kwargs)
+
+ group = self.add_argument_group("{} arguments".format(self.name))
+ for cli, kwargs in self.arguments:
+ group.add_argument(*cli, **kwargs)
+
+ for name in self.common_groups:
+ group = self.add_argument_group("{} arguments".format(name))
+ arguments = COMMON_ARGUMENT_GROUPS[name]
+
+ # Preset arguments are all mutually exclusive.
+ if name == "preset":
+ group = group.add_mutually_exclusive_group()
+
+ for cli, kwargs in arguments:
+ group.add_argument(*cli, **kwargs)
+
+ if name == "push":
+ group_no_push = group.add_mutually_exclusive_group()
+ arguments = NO_PUSH_ARGUMENT_GROUP
+ for cli, kwargs in arguments:
+ group_no_push.add_argument(*cli, **kwargs)
+
+ group = self.add_argument_group("task configuration arguments")
+ self.task_configs = {c: all_task_configs[c]() for c in self.task_configs}
+ for cfg in self.task_configs.values():
+ cfg.add_arguments(group)
+
+ def validate(self, args):
+ if hasattr(args, "message"):
+ if args.message == "editor":
+ if "EDITOR" not in os.environ:
+ self.error(
+ "must set the $EDITOR environment variable to use blank --message"
+ )
+
+ with tempfile.NamedTemporaryFile(mode="r") as fh:
+ subprocess.call([os.environ["EDITOR"], fh.name])
+ args.message = fh.read().strip()
+
+ if "{msg}" not in args.message:
+ args.message = "{}\n\n{}".format(args.message, "{msg}")
+
+ def parse_known_args(self, *args, **kwargs):
+ args, remainder = ArgumentParser.parse_known_args(self, *args, **kwargs)
+ self.validate(args)
+ return args, remainder
diff --git a/tools/tryselect/docs/configuration.rst b/tools/tryselect/docs/configuration.rst
new file mode 100644
index 0000000000..6743d5f385
--- /dev/null
+++ b/tools/tryselect/docs/configuration.rst
@@ -0,0 +1,30 @@
+Configuring Try
+===============
+
+
+Getting Level 1 Commit Access
+-----------------------------
+
+In order to push to try, `Level 1 Commit Access`_ is required. Please see `Becoming a Mozilla
+Committer`_ for more information on how to obtain this.
+
+
+Configuring Version Control
+---------------------------
+
+After you have level 1 access, you'll need to do a little bit of setup before you can push. Both
+``hg`` and ``git`` are supported, move on to the appropriate section below.
+
+
+Configuring Try with Mercurial / Git
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The recommended way to push to try is via the ``mach try`` command. This requires the
+``push-to-try`` extension which can be installed by running:
+
+.. code-block:: shell
+
+ $ mach vcs-setup
+
+.. _Level 1 Commit Access: https://www.mozilla.org/en-US/about/governance/policies/commit/access-policy/
+.. _Becoming a Mozilla Committer: https://www.mozilla.org/en-US/about/governance/policies/commit/
diff --git a/tools/tryselect/docs/img/add-new-jobs.png b/tools/tryselect/docs/img/add-new-jobs.png
new file mode 100644
index 0000000000..513565cb14
--- /dev/null
+++ b/tools/tryselect/docs/img/add-new-jobs.png
Binary files differ
diff --git a/tools/tryselect/docs/img/phab-treeherder-link.png b/tools/tryselect/docs/img/phab-treeherder-link.png
new file mode 100644
index 0000000000..52b58b6231
--- /dev/null
+++ b/tools/tryselect/docs/img/phab-treeherder-link.png
Binary files differ
diff --git a/tools/tryselect/docs/index.rst b/tools/tryselect/docs/index.rst
new file mode 100644
index 0000000000..8978a72eaf
--- /dev/null
+++ b/tools/tryselect/docs/index.rst
@@ -0,0 +1,92 @@
+Pushing to Try
+==============
+
+"Pushing to Try" allows developers to build and test their changes on Mozilla's automation servers
+without requiring their code to be reviewed and landed.
+
+First, :doc:`ensure that you can push to Try <configuration>`.
+Try knows how to run tasks that are defined in-tree,
+such as ``build-linux64/opt`` (build Firefox for Linux). To manually select some tasks for
+Try to process, run the following command:
+
+.. code-block:: shell
+
+ ./mach try fuzzy
+
+After submitting your requested tasks, you'll be given a link to your "push" in Treeherder.
+It may take a few minutes for your push to appear in Treeherder! Be patient, and it will automatically
+update when Try begins processing your work.
+
+Another very useful Try command is ``./mach try auto``, which will automatically select the tasks
+that are most likely to be affected by your changes.
+See the :doc:`selectors page <selectors/index>` to view all the other ways to select which tasks to push.
+
+It is possible to set environment variables, notably :doc:`MOZ_LOG </xpcom/logging>`, when pushing to
+try:
+
+.. code-block:: shell
+
+ ./mach try fuzzy --env MOZ_LOG=cubeb:4
+
+Resolving "<Try build> is damaged and can't be opened" error
+------------------------------------------------------------
+
+To run a try build on macOS, you need to get around Apple's restrictions on downloaded applications.
+
+These restrictions differ based on your hardware: Apple Silicon machines (M1 etc.) are much stricter.
+
+For Apple Silicon machines, you will need to download the target.dmg artifact from the
+"repackage-macosx64-shippable/opt" job.
+This is a universal build (i.e. it contains both x86_64 and arm64 code), and it is signed but not notarized.
+You can trigger this job using ``./mach try fuzzy --full``.
+
+On Intel Macs, you can run unsigned builds, once you get around the quarantining (see below),
+so you can just download the "target.dmg" from a regular opt build.
+
+Regardless of hardware, you need to make sure that there is no quarantining attribute on
+the downloaded dmg file before you attempt to run it:
+Apple automatically quarantines apps that are downloaded with a browser from an untrusted
+location. This "quarantine status" can be cleared by doing ``xattr -c <Try build>`` after
+downloading. You can avoid this "quarantine status" by downloading the build from the command
+line instead, such as by using ``curl``:
+
+.. code-block:: shell
+
+ curl -L <artifact-url> -o <file-name>
+
+.. _attach-job-review:
+
+Adding Try jobs to a Phabricator patch
+--------------------------------------
+
+For every patch submitted for review in Phabricator, a new Try run is automatically created.
+A link called ``Treeherder Jobs`` can be found in the ``Diff Detail`` section of the review in
+Phabricator.
+
+.. image:: img/phab-treeherder-link.png
+
+This run is created for static analysis, linting and other tasks. Attaching new jobs to the run is
+easy and doesn't require more actions from the developer.
+Click on the down-arrow to access the actions menu, select the relevant jobs
+and click on ``Trigger X new jobs`` (located on the top of the job).
+
+.. image:: img/add-new-jobs.png
+
+Table of Contents
+-----------------
+
+.. toctree::
+ :maxdepth: 2
+
+ configuration
+ selectors/index
+ presets
+ tasks
+
+
+Indices and tables
+------------------
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/tools/tryselect/docs/presets.rst b/tools/tryselect/docs/presets.rst
new file mode 100644
index 0000000000..a3368cf8b5
--- /dev/null
+++ b/tools/tryselect/docs/presets.rst
@@ -0,0 +1,85 @@
+Presets
+=======
+
+Some selectors, such as ``fuzzy`` and ``syntax``, allow saving and loading presets from a file. This is a
+good way to reuse a selection, either at a later date or by sharing with others. Look for a
+'preset' section in ``mach <selector> --help`` to determine whether the selector supports this
+functionality.
+
+Using Presets
+-------------
+
+To save a preset, run:
+
+.. code-block:: shell
+
+ $ mach try <selector> --save <name> <args>
+
+For example, to save a preset that selects all Windows mochitests:
+
+.. code-block:: shell
+
+ $ mach try fuzzy --save all-windows-mochitests --query "'win 'mochitest"
+ preset saved, run with: --preset=all-windows-mochitests
+
+Then run that saved preset like this:
+
+.. code-block:: shell
+
+ $ mach try --preset all-windows-mochitests
+
+To see a list of all available presets run:
+
+.. code-block:: shell
+
+ $ mach try --list-presets
+
+
+Editing and Sharing Presets
+---------------------------
+
+Presets can be defined in one of two places, in your home directory or in a file checked into
+mozilla-central.
+
+Local Presets
+~~~~~~~~~~~~~
+
+These are defined in your ``$MOZBUILD_STATE_DIR``, typically ``~/.mozbuild/try_presets.yml``.
+Presets defined here are your own personal collection of presets. You can modify them by running:
+
+.. code-block:: shell
+
+ $ ./mach try --edit-presets
+
+
+Shared Presets
+~~~~~~~~~~~~~~
+
+You can also check presets into mozilla-central in `tools/tryselect/try_presets.yml`_. These presets
+will be available to all users of ``mach try``, so please be mindful when editing this file. Make
+sure the name of the preset is scoped appropriately (i.e. doesn't contain any team- or module-specific
+terminology). It is good practice to prefix the preset name with the name of the team or module that
+will get the most use out of it.
+
+
+Preset Format
+~~~~~~~~~~~~~
+
+Presets are simple key/value objects, with the name as the key and a metadata object as the value.
+For example, the preset saved above would look something like this in ``try_presets.yml``:
+
+.. code-block:: yaml
+
+ all-windows-mochitests:
+ selector: fuzzy
+ description: >-
+ Runs all windows mochitests.
+ query:
+ - "'win 'mochitest"
+
+The ``selector`` key (required) allows ``mach try`` to determine which subcommand to dispatch to.
+The ``description`` key (optional in user presets but required for shared presets) is a human
+readable string describing what the preset selects and when to use it. All other values in the
+preset are forwarded to the specified selector as is.
+
+.. _tools/tryselect/try_presets.yml: https://searchfox.org/mozilla-central/source/tools/tryselect/try_presets.yml
diff --git a/tools/tryselect/docs/selectors/again.rst b/tools/tryselect/docs/selectors/again.rst
new file mode 100644
index 0000000000..da592b7a34
--- /dev/null
+++ b/tools/tryselect/docs/selectors/again.rst
@@ -0,0 +1,36 @@
+Again Selector
+==============
+
+When you push to try, the computed ``try_task_config.json`` is saved in a
+history file under ``~/.mozbuild/srcdirs/<srcdir hash>/history`` (note: the
+``syntax`` selector does not use ``try_task_config.json`` yet so does not save
+any history). You can then use the ``again`` selector to re-push any of your
+previously generated task configs.
+
+In the simple case, you can re-run your last try push with:
+
+.. code-block:: shell
+
+ $ mach try again
+
+If you want to re-push a task config a little further down the history stack,
+first you need to figure out its index with:
+
+.. code-block:: shell
+
+ $ mach try again --list
+
+Then run:
+
+.. code-block:: shell
+
+ $ mach try again --index <index>
+
+Note that index ``0`` is always the most recent ``try_task_config.json`` in the
+history. You can clear your history with:
+
+.. code-block:: shell
+
+ $ mach try again --purge
+
+Only the 10 most recent pushes will be saved in your history.
diff --git a/tools/tryselect/docs/selectors/auto.rst b/tools/tryselect/docs/selectors/auto.rst
new file mode 100644
index 0000000000..c5f6d4df5e
--- /dev/null
+++ b/tools/tryselect/docs/selectors/auto.rst
@@ -0,0 +1,24 @@
+Auto Selector
+=============
+
+This selector automatically determines the most efficient set of tests and
+tasks to run against your push. It accomplishes this via a combination of
+machine learning and manual heuristics. The tasks selected here should match
+pretty closely to what would be scheduled if your patch were pushed to
+autoland.
+
+It is the officially recommended selector to use when you are unsure of which
+tasks should run on your push.
+
+To use:
+
+.. code-block:: bash
+
+ $ mach try auto
+
+Unlike other try selectors, tasks are not chosen locally. Rather they will be
+computed by the decision task.
+
+Like most other selectors, ``mach try auto`` supports many of the standard
+templates such as ``--artifact`` or ``--env``. See ``mach try auto --help`` for
+the full list of supported templates.
diff --git a/tools/tryselect/docs/selectors/chooser.rst b/tools/tryselect/docs/selectors/chooser.rst
new file mode 100644
index 0000000000..6c59d54009
--- /dev/null
+++ b/tools/tryselect/docs/selectors/chooser.rst
@@ -0,0 +1,32 @@
+Chooser Selector
+================
+
+When pushing to try, there are a very large number of builds and tests to choose from. Often too
+many to remember, making it easy to forget a set of tasks which should otherwise have been run.
+
+This selector allows you to select tasks from a web interface that lists all the possible build and
+test tasks and allows you to select them from a list. It is similar in concept to the old `try
+syntax chooser`_ page, except that the values are dynamically generated using the :ref:`taskgraph<TaskCluster Task-Graph Generation>` as an
+input. This ensures that it will never be out of date.
+
+To use:
+
+.. code-block:: shell
+
+ $ mach try chooser
+
+This will spin up a local web server (using Flask) which serves the chooser app. After making your
+selection, simply press ``Push`` and the rest will be handled from there. No need to copy/paste any
+syntax strings or the like.
+
+You can run:
+
+.. code-block:: shell
+
+ $ mach try chooser --full
+
+To generate the interface using the full :ref:`taskgraph<TaskCluster Task-Graph Generation>` instead. This will include tasks that don't run
+on mozilla-central.
+
+
+.. _try syntax chooser: https://mozilla-releng.net/trychooser
diff --git a/tools/tryselect/docs/selectors/compare.rst b/tools/tryselect/docs/selectors/compare.rst
new file mode 100644
index 0000000000..a87b263030
--- /dev/null
+++ b/tools/tryselect/docs/selectors/compare.rst
@@ -0,0 +1,17 @@
+Compare Selector
+================
+
+When this command runs it pushes two identical try jobs to treeherder. The first
+job is on the current commit you are on, and the second one is a commit
+specified in the command line arguments. This selector is aimed at helping
+engineers test performance enhancements or resolve performance regressions.
+
+Currently the only way you can select jobs is through the fuzzy selector, but we are
+planning to expand to other selection frameworks as well.
+
+You pass the commit you want to compare against as a commit hash as either
+``-cc`` or ``--compare-commit``; an example is shown below:
+
+.. code-block:: shell
+
+ $ mach try compare --compare-commit <commit-hash>
diff --git a/tools/tryselect/docs/selectors/empty.rst b/tools/tryselect/docs/selectors/empty.rst
new file mode 100644
index 0000000000..c3ea61b9ce
--- /dev/null
+++ b/tools/tryselect/docs/selectors/empty.rst
@@ -0,0 +1,21 @@
+Empty Selector
+==============
+
+The ``mach try empty`` subcommand is very simple, it won't schedule any additional tasks. You'll
+still see lint tasks and python-unittest tasks if applicable, this is due to a configuration option
+in taskcluster.
+
+Other than those, your try run will be empty. You can use treeherder's ``Add new jobs`` feature to
+selectively add additional tasks after the fact.
+
+.. note::
+
+ To use ``Add new jobs`` you'll need to be logged in and have commit access level 1, just as if
+ you were pushing to try.
+
+To do this:
+
+ 1. Click the drop-down arrow at the top right of your commit.
+ 2. Select ``Add new jobs`` (it may take a couple seconds to load).
+ 3. Choose your desired tasks by clicking them one at a time.
+ 4. At the top of your commit, select ``Trigger New Jobs``.
diff --git a/tools/tryselect/docs/selectors/fuzzy.rst b/tools/tryselect/docs/selectors/fuzzy.rst
new file mode 100644
index 0000000000..d50f801eb0
--- /dev/null
+++ b/tools/tryselect/docs/selectors/fuzzy.rst
@@ -0,0 +1,371 @@
+Fuzzy Selector
+==============
+
+The fuzzy selector uses a tool called `fzf`_. It allows you to filter down all of the task labels
+from a terminal based UI and an intelligent fuzzy finding algorithm. If the ``fzf`` binary is not
+installed, you'll be prompted to bootstrap it on first run.
+
+
+Understanding the Interface
+---------------------------
+
+When you run ``mach try fuzzy`` an interface similar to the one below will open. This is `fzf`_.
+
+.. image:: fzf.png
+
+There's a lot to unpack here, so let's examine each component a bit more closely.
+
+ A. The set of tasks that match the currently typed-out query. In the above image only tasks that
+ match the query ``'linux64 mochibrochr`` are displayed.
+
+ B. The set of selected tasks. These are the tasks that will be scheduled once you hit ``Enter``.
+ In other words, if the task you want does not appear here, *it won't be scheduled*.
+
+ C. Count information of the form ``x/y (z)``, where ``x`` is the number of tasks that match the
+ current query, ``y`` is the total number of tasks and ``z`` is the number of tasks you have
+ selected.
+
+ D. The input bar for entering queries. As you type you'll notice the list of tasks in ``A``
+ starts to update immediately. In the image above, the query ``'linux64 mochibrochr`` is entered.
+ Correspondingly only tasks matching that query are displayed.
+
+In general terms, you first find tasks on the left. Then you move them over to the right by
+selecting them. Once you are satisfied with your selection, press ``Enter`` to push to try.
+
+
+Selecting Tasks
+---------------
+
+There are few ways you can select tasks. If you are feeling a bit overwhelmed, it might be best to
+stick with the mouse to start:
+
+ 1. Enter a query (e.g. ``mochitest``) to reduce the task list a little.
+ 2. Scroll up and look for the task(s) you want.
+ 3. ``Right-Click`` as many tasks as desired to select them.
+ 4. Optionally delete your query, go back to step 1) and repeat.
+ 5. Press ``Enter`` to push (or ``Esc`` to cancel).
+
+.. note::
+
+ Dependencies are automatically filled in, so you can select a test task without needing
+ to select the build it depends on.
+
+As you ``Right-Click``, notice that a little arrow appears to the left of the task label. This
+indicates that the task is selected and exists in the preview pane to the right.
+
+Once you are a bit more comfortable with the interface, using the keyboard is much better at quickly
+selecting tasks. Here are the main shortcuts you'll need:
+
+.. code-block:: text
+
+ Ctrl-K / Up => Move cursor up
+ Ctrl-J / Down => Move cursor down
+ Tab => Select task + move cursor down
+ Shift-Tab => Select task + move cursor up
+ Ctrl-A => Select all currently filtered tasks
+ Ctrl-T => Toggle select all currently filtered tasks
+ Ctrl-D => De-select all selected tasks (both filtered and not)
+ Alt-Bspace => Clear query from input bar
+ Enter => Accept selection and exit
+ Ctrl-C / Esc => Cancel selection and exit
+ ? => Toggle preview pane
+
+
+The process for selecting tasks is otherwise the same as for a mouse. A particularly fast and
+powerful way to select tasks is to:
+
+.. code-block:: text
+
+ Write a precise query => Ctrl-A => Alt-Bspace => Repeat
+
+As before, when you are satisfied with your selection press ``Enter`` and all the tasks in the
+preview pane will be pushed to try. If you change your mind you can press ``Esc`` or ``Ctrl-C`` to
+exit the interface without pushing anything.
+
+.. note::
+
+ Initially ``fzf`` will automatically select whichever task is under your cursor. This is a
+ convenience feature for the case where you are only selecting a single task. This feature will be
+ turned off as soon as you *lock in* a selection with ``Right-Click``, ``Tab`` or ``Ctrl-A``.
+
+
+Writing Queries
+---------------
+
+Queries are built from a series of terms, each separated by a space. Terms are logically joined by
+the AND operator. For example:
+
+.. code-block:: text
+
+ 'windows 'mochitest
+
+This query has two terms, and is the equivalent of saying: Give me all the tasks that match both the
+term ``'windows'`` and the term ``'mochitest'``. In other words, this query matches all Windows
+mochitest tasks.
+
+The single quote prefix before each term tells ``fzf`` to use exact substring matches, so only tasks
+that contain both the literal string ``windows`` AND the literal string ``mochitest`` will be
+matched.
+
+Another thing to note is that the order of the terms makes no difference, so ``'windows 'mochitest``
+and ``'mochitest 'windows`` are equivalent.
+
+
+Fuzzy terms
+~~~~~~~~~~~
+
+If a term is *not* prefixed with a single quote, that makes it a fuzzy term. This means the
+characters in the term need to show up in order, but not necessarily consecutively. E.g. the fuzzy term ``max``
+would match the string ``mozilla firefox`` (as first there is an ``m``, then an ``a`` and finally an
+``x``), but not the string ``firefox by mozilla`` (since the ``x`` is now out of order). Here's a
+less contrived example:
+
+.. code-block:: text
+
+ wndws mchtst
+
+Like the query above, this one would also select all Windows mochitest tasks. But it will
+additionally select:
+
+.. code-block:: text
+
+ test-macosx1014-64-shippable/opt-talos-sessionrestore-many-windows-e10s
+
+This is because both sequences of letters (``wndws`` and ``mchtst``) independently appear in order
+somewhere in this string (remember the order of the terms makes no difference).
+
+At first fuzzy terms may not seem very useful, but they are actually extremely powerful! Let's use
+the term from the interface image above, ``'linux64 mochibrochr``, as an example. First, just notice
+how in the image ``fzf`` highlights the characters that constitute the match in green. Next, notice
+how typing ``mochibrochr`` can quickly get us all mochitest browser-chrome tasks. The power of fuzzy
+terms is that you don't need to memorize the exact task labels you are looking for. Just start
+typing something you think is vaguely correct and chances are you'll see the task you're looking for.
+
+
+Term Modifiers
+~~~~~~~~~~~~~~
+
+The following modifiers can be applied to a search term:
+
+.. code-block:: text
+
+ 'word => exact match (line must contain the literal string "word")
+ ^word => exact prefix match (line must start with literal "word")
+ word$ => exact suffix match (line must end with literal "word")
+ !word => exact negation match (line must not contain literal "word")
+ 'a | 'b => OR operator (joins two exact match operators together)
+
+For example:
+
+.. code-block:: text
+
+ ^start 'exact | 'other !ignore fuzzy end$
+
+would match the string:
+
+.. code-block:: text
+
+ starting to bake isn't exactly fun, but pizza is yummy in the end
+
+.. note::
+
+ The best way to learn how to write queries is to run ``mach try fuzzy --no-push`` and play
+ around with all of these modifiers!
+
+
+Specifying Queries on the Command Line
+--------------------------------------
+
+Sometimes it's more convenient to skip the interactive interface and specify a query on the command
+line with ``-q/--query``. This is equivalent to opening the interface then typing:
+``<query><ctrl-a><enter>``.
+
+For example:
+
+.. code-block:: shell
+
+ # selects all mochitest tasks
+ $ mach try fuzzy --query "mochitest"
+
+You can pass in multiple queries at once and the results of each will be joined together:
+
+.. code-block:: shell
+
+ # selects all mochitest and reftest tasks
+ $ mach try fuzzy -q "mochitest" -q "reftest"
+
+If instead you want the intersection of queries, you can pass in ``-x/--and``:
+
+.. code-block:: shell
+
+ # selects all windows mochitest tasks
+ $ mach try fuzzy --and -q "mochitest" -q "windows"
+
+
+Modifying Presets
+~~~~~~~~~~~~~~~~~
+
+:doc:`Presets <../presets>` make it easy to run a pre-determined set of tasks. But sometimes you
+might not want to run that set exactly as is, you may only want to use the preset as a starting
+point then add or remove tasks as needed. This can be accomplished with ``-q/--query`` or
+``-i/--interactive``.
+
+Here are some examples of adding tasks to a preset:
+
+.. code-block:: shell
+
+ # selects all perf tasks plus all mochitest-chrome tasks
+ $ mach try fuzzy --preset perf -q "mochitest-chrome"
+
+ # adds tasks to the perf preset interactively
+ $ mach try fuzzy --preset perf -i
+
+Similarly, ``-x/--and`` can be used to filter down a preset by taking the intersection of the two
+sets:
+
+.. code-block:: shell
+
+ # limits perf tasks to windows only
+ $ mach try fuzzy --preset perf -xq "windows"
+
+ # limits perf tasks interactively
+ $ mach try fuzzy --preset perf -xi
+
+
+Shell Conflicts
+~~~~~~~~~~~~~~~
+
+Unfortunately ``fzf``'s query language uses some characters (namely ``'``, ``!`` and ``$``) that can
+interfere with your shell when using ``-q/--query``. Below are some tips for how to type out a query
+on the command line.
+
+The ``!`` character is typically used for history expansion. If you don't use this feature, the
+easiest way to specify queries on the command line is to disable it:
+
+.. code-block:: shell
+
+ # bash
+ $ set +H
+ $ ./mach try fuzzy -q "'foo !bar"
+
+ # zsh
+ $ setopt no_banghist
+ $ ./mach try fuzzy -q "'foo !bar"
+
+If using ``bash``, add ``set +H`` to your ``~/.bashrc``, ``~/.bash_profile`` or equivalent. If using
+``zsh``, add ``setopt no_banghist`` to your ``~/.zshrc`` or equivalent.
+
+If you don't want to disable history expansion, you can escape your queries like this:
+
+.. code-block:: shell
+
+ # bash
+ $ ./mach try fuzzy -q $'\'foo !bar'
+
+ # zsh
+ $ ./mach try fuzzy -q "'foo \!bar"
+
+
+The third option is to use ``-e/--exact`` which reverses the behaviour of the ``'`` character (see
+:ref:`additional-arguments` for more details). Using this flag means you won't need to escape the
+``'`` character as often and allows you to run your queries like this:
+
+.. code-block:: shell
+
+ # bash and zsh
+ $ ./mach try fuzzy -eq 'foo !bar'
+
+This method is only useful if you find you almost always prefix terms with ``'`` (and rarely use
+fuzzy terms). Otherwise as soon as you want to use a fuzzy match you'll run into the same problem as
+before.
+
+.. note:: All the examples in these three approaches will select the same set of tasks.
+
+If you use ``fish`` shell, you won't need to escape ``!``, however you will need to escape ``$``:
+
+.. code-block:: shell
+
+ # fish
+ $ ./mach try fuzzy -q "'foo !bar baz\$"
+
+
+Test Paths
+----------
+
+One or more paths to a file or directory may be specified as positional arguments. When
+specifying paths, the list of available tasks to choose from is filtered down such that
+only suites that have tests in a specified path can be selected. Notably, only the first
+chunk of each suite/platform appears. When the tasks are scheduled, only tests that live
+under one of the specified paths will be run.
+
+.. note::
+
+ When using paths, be aware that all tests under the specified paths will run in the
+ same chunk. This might produce a different ordering from what gets run on production
+ branches, and may yield different results.
+
+ For suites that restart the browser between each manifest (like mochitest), this
+ shouldn't be as big of a concern.
+
+Paths can be used with the interactive ``fzf`` window, or using the ``-q/--query`` argument.
+For example, running:
+
+.. code-block:: shell
+
+ $ mach try fuzzy layout/reftests/reftest-sanity -q "!pgo !cov !asan 'linux64"
+
+Would produce the following ``try_task_config.json``:
+
+.. code-block:: json
+
+ {
+ "env":{
+ "MOZHARNESS_TEST_PATHS":"{\"reftest\":\"layout/reftests/reftest-sanity\"}"
+ },
+ "tasks":[
+ "test-linux64-qr/debug-reftest-e10s-1",
+ "test-linux64-qr/opt-reftest-e10s-1",
+ "test-linux64/debug-reftest-e10s-1",
+ "test-linux64/debug-reftest-no-accel-e10s-1",
+ "test-linux64/opt-reftest-e10s-1",
+        "test-linux64/opt-reftest-no-accel-e10s-1"
+ ]
+ }
+
+Inside of these tasks, the reftest harness will only run tests that live under
+``layout/reftests/reftest-sanity``.
+
+
+.. _additional-arguments:
+
+Additional Arguments
+--------------------
+
+There are a few additional command line arguments you may wish to use:
+
+``-e/--exact``
+By default, ``fzf`` treats terms as a fuzzy match and prefixing a term with ``'`` turns it into an exact
+match. If passing in ``--exact``, this behaviour is reversed. Non-prefixed terms become exact, and a
+``'`` prefix makes a term fuzzy.
+
+``--full``
+By default, only target tasks (e.g. tasks that would normally run on mozilla-central)
+are generated. Passing in ``--full`` allows you to select from all tasks. This is useful for
+things like nightly or release tasks.
+
+``-u/--update``
+Update the bootstrapped ``fzf`` binary to the latest version.
+
+For a full list of command line arguments, run:
+
+.. code-block:: shell
+
+ $ mach try fuzzy --help
+
+For more information on using ``fzf``, run:
+
+.. code-block:: shell
+
+ $ man fzf
+
+.. _fzf: https://github.com/junegunn/fzf
diff --git a/tools/tryselect/docs/selectors/fzf.png b/tools/tryselect/docs/selectors/fzf.png
new file mode 100644
index 0000000000..a64f4b04f3
--- /dev/null
+++ b/tools/tryselect/docs/selectors/fzf.png
Binary files differ
diff --git a/tools/tryselect/docs/selectors/index.rst b/tools/tryselect/docs/selectors/index.rst
new file mode 100644
index 0000000000..be618202d6
--- /dev/null
+++ b/tools/tryselect/docs/selectors/index.rst
@@ -0,0 +1,44 @@
+Selectors
+=========
+
+These are the currently implemented try selectors:
+
+* :doc:`auto <auto>`: Have tasks chosen for you automatically.
+* :doc:`fuzzy <fuzzy>`: Select tasks using a fuzzy finding algorithm and
+ a terminal interface.
+* :doc:`chooser <chooser>`: Select tasks using a web interface.
+* :doc:`again <again>`: Re-run a previous ``try_task_config.json`` based
+ push.
+* :doc:`empty <empty>`: Don't select any tasks. Taskcluster will still run
+ some tasks automatically (like lint and python unittest tasks). Further tasks
+ can be chosen with treeherder's ``Add New Jobs`` feature.
+* :doc:`syntax <syntax>`: Select tasks using classic try syntax.
+* :doc:`release <release>`: Prepare a tree for doing a staging release.
+* :doc:`scriptworker <scriptworker>`: Run scriptworker tasks against a recent release.
+* :doc:`compare <compare>`: Push two identical try jobs, one on your current commit and another of your choice
+
+You can run them with:
+
+.. code-block:: shell
+
+ $ mach try <selector>
+
+See selector specific options by running:
+
+.. code-block:: shell
+
+ $ mach try <selector> --help
+
+.. toctree::
+ :caption: Available Selectors
+ :maxdepth: 1
+ :hidden:
+
+ Auto <auto>
+ Fuzzy <fuzzy>
+ Chooser <chooser>
+ Again <again>
+ Empty <empty>
+ Syntax <syntax>
+ Release <release>
+ Scriptworker <scriptworker>
diff --git a/tools/tryselect/docs/selectors/release.rst b/tools/tryselect/docs/selectors/release.rst
new file mode 100644
index 0000000000..946266249b
--- /dev/null
+++ b/tools/tryselect/docs/selectors/release.rst
@@ -0,0 +1,31 @@
+Release Selector
+================
+
+This command configures the tree in preparation for doing a staging release,
+and pushes the result to try. The changes that are made include:
+
+- Updating the version number.
+- Applying the migrations that are done as part of merge day.
+- Disabling repacking most locales. (This can be disabled by passing ``--no-limit-locales``).
+
+For staging a beta release, run the following (with an appropriate version number):
+
+.. code-block:: shell
+
+ $ mach try release --version 64.0b5 --migration central-to-beta
+
+For staging a final release (rc or patch), run the following (with an appropriate version number)
+
+.. code-block:: shell
+
+ $ mach try release --version 64.0 --migration central-to-beta --migration beta-to-release
+
+Once the decision task on the push is complete, you can start the release
+through `staging ship-it instance <https://shipit.staging.mozilla-releng.net/new>`_\ [#shipit]_.
+
+.. note::
+
+ If pushing from beta or release, the corresponding migration should not be
+ passed, as they have already been applied.
+
+.. [#shipit] This is only available to release engineering and release management (as of 2018-10-15).
diff --git a/tools/tryselect/docs/selectors/scriptworker.rst b/tools/tryselect/docs/selectors/scriptworker.rst
new file mode 100644
index 0000000000..a3cba08cbe
--- /dev/null
+++ b/tools/tryselect/docs/selectors/scriptworker.rst
@@ -0,0 +1,31 @@
+Scriptworker Selector
+=====================
+
+This command runs a selection of scriptworker tasks against builds from a
+recent release. This is aimed at release engineering, to test changes to
+scriptworker implementations. It currently requires being connected to
+Mozilla's internal datacenter VPN with access to shipit\ [#shipit]_.
+
+There are a number of preset groups of tasks to run. To run a particular
+set of tasks, pass the name of the set to ``mach try scriptworker``:
+
+.. code-block:: shell
+
+ $ mach try scriptworker linux-signing
+
+To get the list of task sets, along with the list of tasks they will run:
+
+.. code-block:: shell
+
+ $ mach try scriptworker list
+
+The selector defaults to using tasks from the most recent beta; to use tasks
+from a different release, pass ``--release-type <release-type>``:
+
+.. code-block:: shell
+
+ $ mach try scriptworker --release-type release linux-signing
+
+
+.. [#shipit] The shipit API is not currently publicly available, and is used
+ to find the release graph to pull previous tasks from.
diff --git a/tools/tryselect/docs/selectors/syntax.rst b/tools/tryselect/docs/selectors/syntax.rst
new file mode 100644
index 0000000000..b64bb65ab7
--- /dev/null
+++ b/tools/tryselect/docs/selectors/syntax.rst
@@ -0,0 +1,41 @@
+Syntax Selector
+===============
+
+.. warning::
+
+ Try syntax is antiquated and hard to understand. If you aren't already
+ familiar with try syntax, you might want to use the ``fuzzy`` selector
+ instead.
+
+Try syntax is a command line string that goes into the commit message. Using
+``mach try syntax`` will automatically create a temporary commit with your
+chosen syntax and then delete it again after pushing to try.
+
+Try syntax can contain all kinds of different options parsed by various
+places in various repos, but the majority are parsed by `try_option_syntax.py`_.
+The most common arguments include:
+
+ * ``-b/--build`` - One of ``d``, ``o`` or ``do``. This is the build type,
+ either opt, debug or both (required).
+ * ``-p/--platform`` - The platforms you want to build and/or run tests on
+ (required).
+ * ``-u/--unittests`` - The test tasks you want to run (optional).
+ * ``-t/--talos`` - The talos tasks you want to run (optional).
+
+Here are some examples:
+
+.. code-block:: shell
+
+ $ mach try syntax -b do -p linux,macosx64 -u mochitest-e10s-1,crashtest -t none
+ $ mach try syntax -b d -p win64 -u all
+ $ mach try syntax -b o -p linux64
+
+Unfortunately, knowing the magic strings that make it work can be a bit of a
+guessing game. If you are unsure of what string will get your task to run, try
+using :doc:`mach try fuzzy <fuzzy>` instead.
+
+While using ``mach try syntax -b do -p all -u all -t all`` will work, heavy use
+of ``all`` is discouraged as it consumes a lot of unnecessary resources (some of
+which are hardware constrained).
+
+.. _try_option_syntax.py: https://searchfox.org/mozilla-central/source/taskcluster/gecko_taskgraph/try_option_syntax.py
diff --git a/tools/tryselect/docs/tasks.rst b/tools/tryselect/docs/tasks.rst
new file mode 100644
index 0000000000..61de9ec9ed
--- /dev/null
+++ b/tools/tryselect/docs/tasks.rst
@@ -0,0 +1,152 @@
+Task Generation
+===============
+
+Many selectors (including ``chooser``, ``coverage`` and ``fuzzy``) source their available tasks
+directly from the :ref:`taskgraph <TaskCluster Task-Graph Generation>` module by building the taskgraph
+locally. This means that the list of available tasks will never be stale. While this is very
+powerful, it comes with a large enough performance cost to get annoying (around twenty seconds).
+
+The result of the taskgraph generation will be cached, so this penalty will only be incurred
+whenever a file in the ``/taskcluster`` directory is modified. Unfortunately this directory changes
+pretty frequently, so developers can expect to rebuild the cache each time they pull in
+``mozilla-central``. Developers who regularly work on ``/taskcluster`` can expect to rebuild even
+more frequently.
+
+
+Configuring Watchman
+--------------------
+
+It's possible to bypass this penalty completely by using the file watching service `watchman`_. If
+you use the ``fsmonitor`` mercurial extension, you already have ``watchman`` installed.
+
+.. note::
+
+    If you aren't using `fsmonitor`_ but end up installing watchman anyway, you
+ might as well enable it for a faster Mercurial experience.
+
+Otherwise, `install watchman`_. If using Linux you'll likely run into the `inotify limits`_ outlined
+on that page due to the size of ``mozilla-central``. You can `read this page`_ for more information
+on how to bump the limits permanently.
+
+Next run the following commands:
+
+.. code-block:: shell
+
+ $ cd path/to/mozilla-central
+ $ watchman watch .
+ $ watchman -j < tools/tryselect/watchman.json
+
+You should see output like:
+
+.. code-block:: json
+
+ {
+ "triggerid": "rebuild-taskgraph-cache",
+ "disposition": "created",
+ "version": "20200920.192359.0"
+ }
+
+That's it. Now anytime a file under ``/taskcluster`` is modified (either by your editor, or by
+updating version control), the taskgraph cache will be rebuilt in the background, allowing you to
+skip the wait the next time you run ``mach try``.
+
+.. note::
+
+ Watchman triggers are persistent and don't need to be added more than once.
+ See `Managing Triggers`_ for how to remove a trigger.
+
+You can test that everything is working by running these commands:
+
+.. code-block:: shell
+
+ $ statedir=`mach python -c "from mach.util import get_state_dir; print(get_state_dir(specific_to_topsrcdir=True))"`
+ $ rm -rf $statedir/cache/taskgraph
+ $ touch taskcluster/mach_commands.py
+ # wait a minute for generation to trigger and finish
+ $ ls $statedir/cache/taskgraph
+
+If the ``target_task_set`` file exists, you are good to go. If not you can look at the ``watchman``
+log to see if there were any errors. This typically lives somewhere like
+``/usr/local/var/run/watchman/$USER-state/log``. In this case please file a bug under ``Firefox
+Build System :: Try`` and include the relevant portion of the log.
+
+
+Running Watchman on Startup
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Watchman is both a client and a service all in one. When running a ``watchman`` command, the client
+binary will start the service in the background if it isn't running. This means on reboot the
+service won't be running and you'll need to start the service each time by invoking the client
+binary (e.g. by running ``watchman version``).
+
+If you'd like this to happen automatically, you can use your favourite platform specific way of
+running commands at startup (``crontab``, ``rc.local``, etc.). Watchman stores separate state for
+each user, so be sure you run the command as the user that set up the triggers.
+
+Setting up a systemd Service
+++++++++++++++++++++++++++++
+
+If ``systemd`` is an option you can create a service:
+
+.. code-block:: ini
+
+ [Unit]
+ Description=Watchman for %i
+ After=network.target
+
+ [Service]
+ Type=simple
+ User=%i
+ ExecStart=/usr/local/bin/watchman --log-level 1 watch-list -f
+ ExecStop=/usr/local/bin/watchman shutdown-server
+
+ [Install]
+ WantedBy=multi-user.target
+
+Save this to a file called ``/etc/systemd/system/watchman@.service``. Then run:
+
+.. code-block:: shell
+
+ $ sudo systemctl enable watchman@$USER.service
+ $ sudo systemctl start watchman@$USER.service
+
+The next time you reboot, the watchman service should start automatically.
+
+
+Managing Triggers
+~~~~~~~~~~~~~~~~~
+
+When adding a trigger watchman writes it to disk. Typically it'll be a path similar to
+``/usr/local/var/run/watchman/$USER-state/state``. While editing that file by hand would work, the
+watchman binary provides an interface for managing your triggers.
+
+To see all directories you are currently watching:
+
+.. code-block:: shell
+
+ $ watchman watch-list
+
+To view triggers that are active in a specified watch:
+
+.. code-block:: shell
+
+ $ watchman trigger-list <path>
+
+To delete a trigger from a specified watch:
+
+.. code-block:: shell
+
+ $ watchman trigger-del <path> <name>
+
+In the above two examples, replace ``<path>`` with the path of the watch, presumably
+``mozilla-central``. Using ``.`` works as well if that is already your working directory. For more
+information on managing triggers and a reference of other commands, see the `official docs`_.
+
+
+.. _watchman: https://facebook.github.io/watchman/
+.. _fsmonitor: https://www.mercurial-scm.org/wiki/FsMonitorExtension
+.. _install watchman: https://facebook.github.io/watchman/docs/install.html
+.. _inotify limits: https://facebook.github.io/watchman/docs/install.html#linux-inotify-limits
+.. _read this page: https://github.com/guard/listen/wiki/Increasing-the-amount-of-inotify-watchers
+.. _this hint: https://github.com/facebook/watchman/commit/2985377eaf8c8538b28fae9add061b67991a87c2
+.. _official docs: https://facebook.github.io/watchman/docs/cmd/trigger.html
diff --git a/tools/tryselect/lando.py b/tools/tryselect/lando.py
new file mode 100644
index 0000000000..7abd2ddfae
--- /dev/null
+++ b/tools/tryselect/lando.py
@@ -0,0 +1,452 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""Implements Auth0 Device Code flow and Lando try submission.
+
+See https://auth0.com/blog/securing-a-python-cli-application-with-auth0/ for more.
+"""
+
+from __future__ import annotations
+
+import base64
+import configparser
+import json
+import os
+import time
+import webbrowser
+from dataclasses import (
+ dataclass,
+ field,
+)
+from pathlib import Path
+from typing import (
+ List,
+ Optional,
+ Tuple,
+ Union,
+)
+
+import requests
+from mach.util import get_state_dir
+from mozbuild.base import MozbuildObject
+from mozversioncontrol import (
+ GitRepository,
+ HgRepository,
+)
+
+from .task_config import (
+ try_config_commit,
+)
+
+TOKEN_FILE = (
+ Path(get_state_dir(specific_to_topsrcdir=False)) / "lando_auth0_user_token.json"
+)
+
+# The supported variants of `Repository` for this workflow.
+SupportedVcsRepository = Union[GitRepository, HgRepository]
+
+here = os.path.abspath(os.path.dirname(__file__))
+build = MozbuildObject.from_environment(cwd=here)
+
+
+def convert_bytes_patch_to_base64(patch_bytes: bytes) -> str:
+ """Return a base64 encoded `str` representing the passed `bytes` patch."""
+ return base64.b64encode(patch_bytes).decode("ascii")
+
+
+def load_token_from_disk() -> Optional[dict]:
+ """Load and validate an existing Auth0 token from disk.
+
+ Return the token as a `dict` if it can be validated, or return `None`
+ if any error was encountered.
+ """
+ if not TOKEN_FILE.exists():
+ print("No existing Auth0 token found.")
+ return None
+
+ try:
+ user_token = json.loads(TOKEN_FILE.read_bytes())
+ except json.JSONDecodeError:
+ print("Existing Auth0 token could not be decoded as JSON.")
+ return None
+
+ return user_token
+
+
+def get_stack_info(vcs: SupportedVcsRepository) -> Tuple[str, List[str]]:
+ """Retrieve information about the current stack for submission via Lando.
+
+ Returns a tuple of the current public base commit as a Mercurial SHA,
+ and a list of ordered base64 encoded patches.
+ """
+ base_commit = vcs.base_ref_as_hg()
+ if not base_commit:
+ raise ValueError(
+ "Could not determine base Mercurial commit hash for submission."
+ )
+ print("Using", base_commit, "as the hg base commit.")
+
+ # Reuse the base revision when on Mercurial to avoid multiple calls to `hg log`.
+ branch_nodes_kwargs = {}
+ if isinstance(vcs, HgRepository):
+ branch_nodes_kwargs["base_ref"] = base_commit
+
+ nodes = vcs.get_branch_nodes(**branch_nodes_kwargs)
+ if not nodes:
+ raise ValueError("Could not find any commit hashes for submission.")
+ elif len(nodes) == 1:
+ print("Submitting a single try config commit.")
+ elif len(nodes) == 2:
+ print("Submitting 1 node and the try commit.")
+ else:
+ print("Submitting stack of", len(nodes) - 1, "nodes and the try commit.")
+
+ patches = vcs.get_commit_patches(nodes)
+ base64_patches = [
+ convert_bytes_patch_to_base64(patch_bytes) for patch_bytes in patches
+ ]
+ print("Patches gathered for submission.")
+
+ return base_commit, base64_patches
+
+
+@dataclass
+class Auth0Config:
+ """Helper class to interact with Auth0."""
+
+ domain: str
+ client_id: str
+ audience: str
+ scope: str
+ algorithms: list[str] = field(default_factory=lambda: ["RS256"])
+
+ @property
+ def base_url(self) -> str:
+ """Auth0 base URL."""
+ return f"https://{self.domain}"
+
+ @property
+ def device_code_url(self) -> str:
+ """URL of the Device Code API endpoint."""
+ return f"{self.base_url}/oauth/device/code"
+
+ @property
+ def issuer(self) -> str:
+ """Token issuer URL."""
+ return f"{self.base_url}/"
+
+ @property
+ def jwks_url(self) -> str:
+ """URL of the JWKS file."""
+ return f"{self.base_url}/.well-known/jwks.json"
+
+ @property
+ def oauth_token_url(self) -> str:
+ """URL of the OAuth Token endpoint."""
+ return f"{self.base_url}/oauth/token"
+
+ def request_device_code(self) -> dict:
+ """Request authorization from Auth0 using the Device Code Flow.
+
+ See https://auth0.com/docs/api/authentication#get-device-code for more.
+ """
+ response = requests.post(
+ self.device_code_url,
+ headers={"Content-Type": "application/x-www-form-urlencoded"},
+ data={
+ "audience": self.audience,
+ "client_id": self.client_id,
+ "scope": self.scope,
+ },
+ )
+
+ response.raise_for_status()
+
+ return response.json()
+
+ def validate_token(self, user_token: dict) -> Optional[dict]:
+ """Verify the given user token is valid.
+
+ Validate the ID token, and validate the access token's expiration claim.
+ """
+ # Import `auth0-python` here to avoid `ImportError` in tests, since
+ # the `python-test` site won't have `auth0-python` installed.
+ import jwt
+ from auth0.authentication.token_verifier import (
+ AsymmetricSignatureVerifier,
+ TokenVerifier,
+ )
+ from auth0.exceptions import (
+ TokenValidationError,
+ )
+
+ signature_verifier = AsymmetricSignatureVerifier(self.jwks_url)
+ token_verifier = TokenVerifier(
+ audience=self.client_id,
+ issuer=self.issuer,
+ signature_verifier=signature_verifier,
+ )
+
+ try:
+ token_verifier.verify(user_token["id_token"])
+ except TokenValidationError as e:
+ print("Could not validate existing Auth0 ID token:", str(e))
+ return None
+
+ decoded_access_token = jwt.decode(
+ user_token["access_token"],
+ algorithms=self.algorithms,
+ options={"verify_signature": False},
+ )
+
+ access_token_expiration = decoded_access_token["exp"]
+
+ # Assert that the access token isn't expired or expiring within a minute.
+ if time.time() > access_token_expiration + 60:
+ print("Access token is expired.")
+ return None
+
+ user_token.update(
+ jwt.decode(
+ user_token["id_token"],
+ algorithms=self.algorithms,
+ options={"verify_signature": False},
+ )
+ )
+ print("Auth0 token validated.")
+ return user_token
+
+ def device_authorization_flow(self) -> dict:
+ """Perform the Device Authorization Flow.
+
+ See https://auth0.com/docs/get-started/authentication-and-authorization-flow/device-authorization-flow
+ for more.
+ """
+ start = time.perf_counter()
+
+ device_code_data = self.request_device_code()
+ print(
+ "1. On your computer or mobile device navigate to:",
+ device_code_data["verification_uri_complete"],
+ )
+ print("2. Enter the following code:", device_code_data["user_code"])
+
+ auth_msg = f"Auth0 token validation required at: {device_code_data['verification_uri_complete']}"
+ build.notify(auth_msg)
+
+ try:
+ webbrowser.open(device_code_data["verification_uri_complete"])
+ except webbrowser.Error:
+ print("Could not automatically open the web browser.")
+
+ device_code_lifetime_s = device_code_data["expires_in"]
+
+ # Print successive periods on the same line to avoid moving the link
+ # while the user is trying to click it.
+ print("Waiting...", end="", flush=True)
+ while time.perf_counter() - start < device_code_lifetime_s:
+ response = requests.post(
+ self.oauth_token_url,
+ data={
+ "client_id": self.client_id,
+ "device_code": device_code_data["device_code"],
+ "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
+ "scope": self.scope,
+ },
+ )
+ response_data = response.json()
+
+ if response.status_code == 200:
+ print("\nLogin successful.")
+ return response_data
+
+ if response_data["error"] not in ("authorization_pending", "slow_down"):
+ raise RuntimeError(response_data["error_description"])
+
+ time.sleep(device_code_data["interval"])
+ print(".", end="", flush=True)
+
+ raise ValueError("Timed out waiting for Auth0 device code authentication!")
+
+ def get_token(self) -> dict:
+ """Retrieve an access token for authentication.
+
+ If a cached token is found and can be confirmed to be valid, return it.
+ Otherwise, perform the Device Code Flow authorization to request a new
+ token, validate it and save it to disk.
+ """
+ # Load a cached token and validate it if one is available.
+ cached_token = load_token_from_disk()
+ user_token = self.validate_token(cached_token) if cached_token else None
+
+ # Login with the Device Authorization Flow if an existing token isn't found.
+ if not user_token:
+ new_token = self.device_authorization_flow()
+ user_token = self.validate_token(new_token)
+
+ if not user_token:
+ raise ValueError("Could not get an Auth0 token.")
+
+ # Save token to disk.
+ with TOKEN_FILE.open("w") as f:
+ json.dump(user_token, f, indent=2, sort_keys=True)
+
+ return user_token
+
+
+class LandoAPIException(Exception):
+ """Raised when Lando throws an exception."""
+
+ def __init__(self, detail: Optional[str] = None):
+ super().__init__(detail or "")
+
+
+@dataclass
+class LandoAPI:
+ """Helper class to interact with Lando-API."""
+
+ access_token: str
+ api_url: str
+
+ @property
+ def lando_try_api_url(self) -> str:
+ """URL of the Lando Try endpoint."""
+ return f"https://{self.api_url}/try/patches"
+
+ @property
+ def api_headers(self) -> dict[str, str]:
+ """Headers for use accessing and authenticating against the API."""
+ return {
+ "Authorization": f"Bearer {self.access_token}",
+ "Content-Type": "application/json",
+ }
+
+ @classmethod
+ def from_lando_config_file(cls, config_path: Path, section: str) -> LandoAPI:
+ """Build a `LandoConfig` from `section` in the file at `config_path`."""
+ if not config_path.exists():
+ raise ValueError(f"Could not find a Lando config file at `{config_path}`.")
+
+ lando_ini_contents = config_path.read_text()
+
+ parser = configparser.ConfigParser(delimiters="=")
+ parser.read_string(lando_ini_contents)
+
+ if not parser.has_section(section):
+ raise ValueError(f"Lando config file does not have a {section} section.")
+
+ auth0 = Auth0Config(
+ domain=parser.get(section, "auth0_domain"),
+ client_id=parser.get(section, "auth0_client_id"),
+ audience=parser.get(section, "auth0_audience"),
+ scope=parser.get(section, "auth0_scope"),
+ )
+
+ token = auth0.get_token()
+
+ return LandoAPI(
+ api_url=parser.get(section, "api_domain"),
+ access_token=token["access_token"],
+ )
+
+ def post(self, url: str, body: dict) -> dict:
+ """Make a POST request to Lando."""
+ response = requests.post(url, headers=self.api_headers, json=body)
+
+ try:
+ response_json = response.json()
+ except json.JSONDecodeError:
+ # If the server didn't send back a valid JSON object, raise a stack
+ # trace to the terminal which includes error details.
+ response.raise_for_status()
+
+ # Raise `ValueError` if the response wasn't JSON and we didn't raise
+ # from an invalid status.
+ raise LandoAPIException(
+ detail="Response was not valid JSON yet status was valid."
+ )
+
+ if response.status_code >= 400:
+ raise LandoAPIException(detail=response_json["detail"])
+
+ return response_json
+
+ def post_try_push_patches(
+ self,
+ patches: List[str],
+ patch_format: str,
+ base_commit: str,
+ ) -> dict:
+ """Send try push contents to Lando.
+
+ Send the list of base64-encoded `patches` in `patch_format` to Lando, to be applied to
+ the Mercurial `base_commit`, using the Auth0 `access_token` for authorization.
+ """
+ request_json_body = {
+ "base_commit": base_commit,
+ "patch_format": patch_format,
+ "patches": patches,
+ }
+
+ print("Submitting patches to Lando.")
+ response_json = self.post(self.lando_try_api_url, request_json_body)
+
+ return response_json
+
+
+def push_to_lando_try(vcs: SupportedVcsRepository, commit_message: str):
+ """Push a set of patches to Lando's try endpoint."""
+ # Map `Repository` subclasses to the `patch_format` value Lando expects.
+ PATCH_FORMAT_STRING_MAPPING = {
+ GitRepository: "git-format-patch",
+ HgRepository: "hgexport",
+ }
+ patch_format = PATCH_FORMAT_STRING_MAPPING.get(type(vcs))
+ if not patch_format:
+ # Other VCS types (namely `src`) are unsupported.
+ raise ValueError(f"Try push via Lando is not supported for `{vcs.name}`.")
+
+ # Use Lando Prod unless the `LANDO_TRY_USE_DEV` environment variable is defined.
+ lando_config_section = (
+ "lando-prod" if not os.getenv("LANDO_TRY_USE_DEV") else "lando-dev"
+ )
+
+ # Load Auth0 config from `.lando.ini`.
+ lando_ini_path = Path(vcs.path) / ".lando.ini"
+ lando_api = LandoAPI.from_lando_config_file(lando_ini_path, lando_config_section)
+
+ # Get the time when the push was initiated, not including Auth0 login time.
+ push_start_time = time.perf_counter()
+
+ with try_config_commit(vcs, commit_message):
+ try:
+ base_commit, patches = get_stack_info(vcs)
+ except ValueError as exc:
+ error_msg = "abort: error gathering patches for submission."
+ print(error_msg)
+ print(str(exc))
+ build.notify(error_msg)
+ return
+
+ try:
+ # Make the try request to Lando.
+ response_json = lando_api.post_try_push_patches(
+ patches, patch_format, base_commit
+ )
+ except LandoAPIException as exc:
+ error_msg = "abort: error submitting patches to Lando."
+ print(error_msg)
+ print(str(exc))
+ build.notify(error_msg)
+ return
+
+ duration = round(time.perf_counter() - push_start_time, ndigits=2)
+
+ job_id = response_json["id"]
+ success_msg = (
+ f"Lando try submission success, took {duration} seconds. "
+ f"Landing job id: {job_id}."
+ )
+ print(success_msg)
+ build.notify(success_msg)
diff --git a/tools/tryselect/mach_commands.py b/tools/tryselect/mach_commands.py
new file mode 100644
index 0000000000..3b74593423
--- /dev/null
+++ b/tools/tryselect/mach_commands.py
@@ -0,0 +1,514 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import argparse
+import importlib
+import os
+import sys
+
+from mach.decorators import Command, SubCommand
+from mach.util import get_state_dir
+from mozbuild.base import BuildEnvironmentNotFoundException
+from mozbuild.util import memoize
+
# Shown when paths/tags are passed to a selector but no configured objdir is
# available to resolve them (see `try_syntax` below).
CONFIG_ENVIRONMENT_NOT_FOUND = """
No config environment detected. This means we are unable to properly
detect test files in the specified paths or tags. Please run:

    $ mach configure

and try again.
""".lstrip()
+
+
class get_parser:
    """Lazily build the argument parser for a given try selector.

    Instances are callable so they can be passed as the ``parser=``
    argument of a mach (sub)command; the selector module is imported only
    when the parser is actually requested.
    """

    def __init__(self, selector):
        # Name of the selector module under `tryselect.selectors`.
        self.selector = selector

    def __call__(self):
        module_name = "tryselect.selectors.{}".format(self.selector)
        selector_module = importlib.import_module(module_name)
        parser_name = "{}Parser".format(self.selector.capitalize())
        return getattr(selector_module, parser_name)()
+
+
def generic_parser():
    """Return the parser for the bare `mach try` command.

    It accepts the common try options plus a trailing `argv` that is
    forwarded verbatim to whichever selector handles the push.
    """
    from tryselect.cli import BaseTryParser

    base = BaseTryParser()
    base.add_argument("argv", nargs=argparse.REMAINDER)
    return base
+
+
def init(command_context):
    """Propagate the `try.maxhistory` setting into the push module."""
    from tryselect import push

    max_history = command_context._mach_context.settings["try"]["maxhistory"]
    push.MAX_HISTORY = max_history
+
+
@memoize
def presets(command_context):
    """Return the (memoized) MergedHandler over all preset files."""
    from tryselect.preset import MergedHandler

    # Create our handler using both local and in-tree presets. The first
    # path in this list will be treated as the 'user' file for the purposes
    # of saving and editing. All subsequent paths are 'read-only'. We check
    # an environment variable first for testing purposes.
    if os.environ.get("MACH_TRY_PRESET_PATHS"):
        preset_paths = os.environ["MACH_TRY_PRESET_PATHS"].split(os.pathsep)
    else:
        preset_paths = [
            os.path.join(get_state_dir(), "try_presets.yml"),
            os.path.join(
                command_context.topsrcdir, "tools", "tryselect", "try_presets.yml"
            ),
        ]

    return MergedHandler(*preset_paths)
+
+
def handle_presets(
    command_context, preset_action=None, save=None, preset=None, **kwargs
):
    """Handle preset related arguments.

    This logic lives here so that the underlying selectors don't need
    special preset handling. They can all save and load presets the same
    way.

    Depending on the arguments this either lists/edits presets and exits,
    saves the current non-default options as a named preset and exits, or
    merges a loaded preset into ``kwargs`` and returns the result.
    """
    from tryselect.util.dicttools import merge

    # The first handler is the user's writable preset file.
    user_presets = presets(command_context).handlers[0]
    if preset_action == "list":
        presets(command_context).list()
        sys.exit()

    if preset_action == "edit":
        user_presets.edit()
        sys.exit()

    parser = command_context._mach_context.handler.parser
    subcommand = command_context._mach_context.handler.subcommand
    # Selectors without the 'preset' group don't support presets at all.
    if "preset" not in parser.common_groups:
        return kwargs

    default = parser.get_default
    if save:
        selector = (
            subcommand or command_context._mach_context.settings["try"]["default"]
        )

        # Only save non-default values for simplicity.
        kwargs = {k: v for k, v in kwargs.items() if v != default(k)}
        user_presets.save(save, selector=selector, **kwargs)
        print("preset saved, run with: --preset={}".format(save))
        sys.exit()

    if preset:
        if preset not in presets(command_context):
            command_context._mach_context.parser.error(
                "preset '{}' does not exist".format(preset)
            )

        name = preset
        preset = presets(command_context)[name]
        selector = preset.pop("selector")
        preset.pop("description", None)  # description isn't used by any selectors

        # A preset is bound to the selector it was saved with; refuse to
        # apply it under a different subcommand.
        if not subcommand:
            subcommand = selector
        elif subcommand != selector:
            print(
                "error: preset '{}' exists for a different selector "
                "(did you mean to run 'mach try {}' instead?)".format(name, selector)
            )
            sys.exit(1)

        # Order of precedence is defaults -> presets -> cli. Configuration
        # from the right overwrites configuration from the left.
        defaults = {}
        nondefaults = {}
        for k, v in kwargs.items():
            if v == default(k):
                defaults[k] = v
            else:
                nondefaults[k] = v

        kwargs = merge(defaults, preset, nondefaults)

    return kwargs
+
+
def handle_try_params(command_context, **kwargs):
    """Fold per-task-config CLI arguments into ``try_config_params``.

    Each registered task config derives parameters from its own CLI
    arguments; those arguments are then removed from ``kwargs`` so the
    selector only sees the merged ``try_config_params`` dict.
    """
    from tryselect.util.dicttools import merge

    to_validate = []
    kwargs.setdefault("try_config_params", {})
    for cls in command_context._mach_context.handler.parser.task_configs.values():
        params = cls.get_parameters(**kwargs)
        if params is not None:
            to_validate.append(cls)
            kwargs["try_config_params"] = merge(kwargs["try_config_params"], params)

        # Remove this config's own arguments from kwargs regardless of
        # whether it contributed any parameters.
        for name in cls.dests:
            del kwargs[name]

    # Validate task_configs after they have all been parsed to avoid
    # depending on the order they were processed.
    for cls in to_validate:
        cls.validate(**kwargs)
    return kwargs
+
+
def run(command_context, **kwargs):
    """Resolve presets/params, then dispatch to the active selector's run()."""
    kwargs = handle_presets(command_context, **kwargs)

    handler = command_context._mach_context.handler
    if handler.parser.task_configs:
        kwargs = handle_try_params(command_context, **kwargs)

    selector_module = importlib.import_module(
        "tryselect.selectors.{}".format(handler.subcommand)
    )
    return selector_module.run(**kwargs)
+
+
@Command(
    "try",
    category="ci",
    description="Push selected tasks to the try server",
    parser=generic_parser,
    virtualenv_name="try",
)
def try_default(command_context, argv=None, **kwargs):
    """Push selected tests to the try server.

    The |mach try| command is a frontend for scheduling tasks to
    run on try server using selectors. A selector is a subcommand
    that provides its own set of command line arguments and are
    listed below.

    If no subcommand is specified, the `auto` selector is run by
    default. Run |mach try auto --help| for more information on
    scheduling with the `auto` selector.
    """
    init(command_context)
    subcommand = command_context._mach_context.handler.subcommand
    # We do special handling of presets here so that `./mach try --preset foo`
    # works no matter what subcommand 'foo' was saved with.
    preset = kwargs["preset"]
    if preset:
        if preset not in presets(command_context):
            command_context._mach_context.handler.parser.error(
                "preset '{}' does not exist".format(preset)
            )

        subcommand = presets(command_context)[preset]["selector"]

    # Fall back to the configured default selector when none was given.
    sub = subcommand or command_context._mach_context.settings["try"]["default"]
    return command_context._mach_context.commands.dispatch(
        "try", command_context._mach_context, subcommand=sub, argv=argv, **kwargs
    )
+
+
@SubCommand(
    "try",
    "fuzzy",
    description="Select tasks on try using a fuzzy finder",
    parser=get_parser("fuzzy"),
    virtualenv_name="try",
)
def try_fuzzy(command_context, **kwargs):
    """Select which tasks to run with a fuzzy finding interface (fzf).

    When entering the fzf interface you'll be confronted by two panes. The
    one on the left contains every possible task you can schedule, the one
    on the right contains the list of selected tasks. In other words, the
    tasks that will be scheduled once you press <enter>.

    At first fzf will automatically select whichever task is under your
    cursor, which simplifies the case when you are looking for a single
    task. But normally you'll want to select many tasks. To accomplish
    you'll generally start by typing a query in the search bar to filter
    down the list of tasks (see Extended Search below). Then you'll either:

    A) Move the cursor to each task you want and press <tab> to select it.
    Notice it now shows up in the pane to the right.

    OR

    B) Press <ctrl-a> to select every task that matches your filter.

    You can delete your query, type a new one and select further tasks as
    many times as you like. Once you are happy with your selection, press
    <enter> to push the selected tasks to try.

    All selected task labels and their dependencies will be scheduled. This
    means you can select a test task and its build will automatically be
    filled in.


    Keyboard Shortcuts
    ------------------

    When in the fuzzy finder interface, start typing to filter down the
    task list. Then use the following keyboard shortcuts to select tasks:

      Ctrl-K / Up    => Move cursor up
      Ctrl-J / Down  => Move cursor down
      Tab            => Select task + move cursor down
      Shift-Tab      => Select task + move cursor up
      Ctrl-A         => Select all currently filtered tasks
      Ctrl-D         => De-select all currently filtered tasks
      Ctrl-T         => Toggle select all currently filtered tasks
      Alt-Bspace     => Clear query from input bar
      Enter          => Accept selection and exit
      Ctrl-C / Esc   => Cancel selection and exit
      ?              => Toggle preview pane

    There are many more shortcuts enabled by default, you can also define
    your own shortcuts by setting `--bind` in the $FZF_DEFAULT_OPTS
    environment variable. See `man fzf` for more info.


    Extended Search
    ---------------

    When typing in search terms, the following modifiers can be applied:

      'word: exact match (line must contain the literal string "word")
      ^word: exact prefix match (line must start with literal "word")
      word$: exact suffix match (line must end with literal "word")
      !word: exact negation match (line must not contain literal "word")
      'a | 'b: OR operator (joins two exact match operators together)

    For example:

      ^start 'exact | !ignore fuzzy end$


    Documentation
    -------------

    For more detailed documentation, please see:
    https://firefox-source-docs.mozilla.org/tools/try/selectors/fuzzy.html
    """
    init(command_context)
    # "INTERACTIVE" is a pseudo-query understood by the fuzzy selector.
    if kwargs.pop("interactive"):
        kwargs["query"].append("INTERACTIVE")

    # Intersection mode: every query must match, rather than any.
    if kwargs.pop("intersection"):
        kwargs["intersect_query"] = kwargs["query"]
        del kwargs["query"]

    if kwargs.get("save") and not kwargs.get("query"):
        # If saving preset without -q/--query, allow user to use the
        # interface to build the query.
        kwargs_copy = kwargs.copy()
        kwargs_copy["dry_run"] = True
        kwargs_copy["save"] = None
        kwargs["query"] = run(command_context, save_query=True, **kwargs_copy)
        if not kwargs["query"]:
            return

    if kwargs.get("paths"):
        kwargs["test_paths"] = kwargs["paths"]

    return run(command_context, **kwargs)
+
+
@SubCommand(
    "try",
    "chooser",
    description="Schedule tasks by selecting them from a web interface.",
    parser=get_parser("chooser"),
    virtualenv_name="try",
)
def try_chooser(command_context, **kwargs):
    """Push tasks selected from a web interface to try.

    This selector will build the taskgraph and spin up a dynamically
    created 'trychooser-like' web-page on the localhost. After a selection
    has been made, pressing the 'Push' button will automatically push the
    selection to try.
    """
    init(command_context)
    # The chooser serves a local web app; make sure the 'try' virtualenv's
    # extra dependencies are importable before running it.
    command_context.activate_virtualenv()

    return run(command_context, **kwargs)
+
+
@SubCommand(
    "try",
    "auto",
    description="Automatically determine which tasks to run. This runs the same "
    "set of tasks that would be run on autoland. This "
    "selector is EXPERIMENTAL.",
    parser=get_parser("auto"),
    virtualenv_name="try",
)
def try_auto(command_context, **kwargs):
    """Schedule the same set of tasks that would run on autoland."""
    init(command_context)
    return run(command_context, **kwargs)
+
+
@SubCommand(
    "try",
    "again",
    description="Schedule a previously generated (non try syntax) push again.",
    parser=get_parser("again"),
    virtualenv_name="try",
)
def try_again(command_context, **kwargs):
    """Re-push a previously generated (non try syntax) push."""
    init(command_context)
    return run(command_context, **kwargs)
+
+
@SubCommand(
    "try",
    "empty",
    description="Push to try without scheduling any tasks.",
    parser=get_parser("empty"),
    virtualenv_name="try",
)
def try_empty(command_context, **kwargs):
    """Push to try, running no builds or tests

    This selector does not prompt you to run anything, it just pushes
    your patches to try, running no builds or tests by default. After
    the push finishes, you can manually add desired jobs to your push
    via Treeherder's Add New Jobs feature, located in the per-push
    menu.
    """
    init(command_context)
    return run(command_context, **kwargs)
+
+
@SubCommand(
    "try",
    "syntax",
    description="Select tasks on try using try syntax",
    parser=get_parser("syntax"),
    virtualenv_name="try",
)
def try_syntax(command_context, **kwargs):
    """Push the current tree to try, with the specified syntax.

    Build options, platforms and regression tests may be selected
    using the usual try options (-b, -p and -u respectively). In
    addition, tests in a given directory may be automatically
    selected by passing that directory as a positional argument to the
    command. For example:

    mach try -b d -p linux64 dom testing/web-platform/tests/dom

    would schedule a try run for linux64 debug consisting of all
    tests under dom/ and testing/web-platform/tests/dom.

    Test selection using positional arguments is available for
    mochitests, reftests, xpcshell tests and web-platform-tests.

    Tests may be also filtered by passing --tag to the command,
    which will run only tests marked as having the specified
    tags e.g.

    mach try -b d -p win64 --tag media

    would run all tests tagged 'media' on Windows 64.

    If both positional arguments or tags and -u are supplied, the
    suites in -u will be run in full. Where tests are selected by
    positional argument they will be run in a single chunk.

    If no build option is selected, both debug and opt will be
    scheduled. If no platform is selected a default is taken from
    the AUTOTRY_PLATFORM_HINT environment variable, if set.

    The command requires either its own mercurial extension ("push-to-try",
    installable from mach vcs-setup) or a git repo using git-cinnabar
    (installable from mach vcs-setup).

    """
    init(command_context)
    try:
        if command_context.substs.get("MOZ_ARTIFACT_BUILDS"):
            kwargs["local_artifact_build"] = True
    except BuildEnvironmentNotFoundException:
        # If we don't have a build locally, we can't tell whether
        # an artifact build is desired, but we still want the
        # command to succeed, if possible.
        pass

    # Resolving paths/tags into test suites requires a configured objdir.
    config_status = os.path.join(command_context.topobjdir, "config.status")
    if (kwargs["paths"] or kwargs["tags"]) and not config_status:
        print(CONFIG_ENVIRONMENT_NOT_FOUND)
        sys.exit(1)

    return run(command_context, **kwargs)
+
+
@SubCommand(
    "try",
    "coverage",
    description="Select tasks on try using coverage data",
    parser=get_parser("coverage"),
    virtualenv_name="try",
)
def try_coverage(command_context, **kwargs):
    """Select which tasks to use using coverage data."""
    init(command_context)
    return run(command_context, **kwargs)
+
+
@SubCommand(
    "try",
    "release",
    description="Push the current tree to try, configured for a staging release.",
    parser=get_parser("release"),
    virtualenv_name="try",
)
def try_release(command_context, **kwargs):
    """Push the current tree to try, configured for a staging release."""
    init(command_context)
    return run(command_context, **kwargs)
+
+
@SubCommand(
    "try",
    "scriptworker",
    description="Run scriptworker tasks against a recent release.",
    parser=get_parser("scriptworker"),
    virtualenv_name="try",
)
def try_scriptworker(command_context, **kwargs):
    """Run scriptworker tasks against a recent release.

    Requires VPN and shipit access.
    """
    init(command_context)
    return run(command_context, **kwargs)
+
+
@SubCommand(
    "try",
    "compare",
    description="Push two try jobs, one on your current commit and another on the one you specify",
    parser=get_parser("compare"),
    virtualenv_name="try",
)
def try_compare(command_context, **kwargs):
    """Push two try jobs: one for the current commit, one for the specified one."""
    init(command_context)
    return run(command_context, **kwargs)
+
+
@SubCommand(
    "try",
    "perf",
    description="Try selector for running performance tests.",
    parser=get_parser("perf"),
    virtualenv_name="try",
)
def try_perf(command_context, **kwargs):
    """Select and push performance test tasks to try."""
    init(command_context)
    return run(command_context, **kwargs)
diff --git a/tools/tryselect/preset.py b/tools/tryselect/preset.py
new file mode 100644
index 0000000000..dc8cba5c57
--- /dev/null
+++ b/tools/tryselect/preset.py
@@ -0,0 +1,107 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import os
+import shlex
+import subprocess
+
+import yaml
+
+
# Ensure strings containing ' (like fzf query strings) get double-quoted:
def represent_str(self, data):
    """YAML representer that double-quotes strings containing a single quote."""
    tag = "tag:yaml.org,2002:str"
    if "'" in data:
        return self.represent_scalar(tag, data, style='"')
    return self.represent_scalar(tag, data)
+
+
+yaml.SafeDumper.add_representer(str, represent_str)
+
+
class PresetHandler:
    """Lazy read/write access to a single YAML preset file."""

    def __init__(self, path):
        # Path of the preset file; it need not exist yet.
        self.path = path
        self._presets = {}

    @property
    def presets(self):
        """Load and cache the presets from disk (empty dict if no file)."""
        if not self._presets and os.path.isfile(self.path):
            with open(self.path) as fh:
                self._presets = yaml.safe_load(fh) or {}
        return self._presets

    def __contains__(self, name):
        return name in self.presets

    def __getitem__(self, name):
        return self.presets[name]

    def __len__(self):
        return len(self.presets)

    def __str__(self):
        if not self.presets:
            return ""
        return yaml.safe_dump(self.presets, default_flow_style=False)

    def list(self):
        """Print all presets, or a notice when there are none."""
        if not self.presets:
            print("no presets found")
        else:
            print(self)

    def edit(self):
        """Open the preset file in $EDITOR (error message if unset)."""
        if "EDITOR" not in os.environ:
            print(
                "error: must set the $EDITOR environment variable to use --edit-presets"
            )
            return

        editor_cmd = shlex.split(os.environ["EDITOR"])
        subprocess.call(editor_cmd + [self.path])

    def save(self, name, **data):
        """Store ``data`` under ``name`` and rewrite the whole file."""
        self.presets[name] = data
        with open(self.path, "w") as fh:
            fh.write(str(self))
+
+
class MergedHandler:
    """Read-only view over several PresetHandler files at once."""

    def __init__(self, *paths):
        """Helper class for dealing with multiple preset files."""
        self.handlers = [PresetHandler(p) for p in paths]

    def __contains__(self, name):
        return any(name in handler for handler in self.handlers)

    def __getitem__(self, name):
        # First handler containing the name wins.
        for handler in self.handlers:
            if name in handler:
                return handler[name]
        raise KeyError(name)

    def __len__(self):
        return sum(len(h) for h in self.handlers)

    def __str__(self):
        all_presets = {
            k: v for handler in self.handlers for k, v in handler.presets.items()
        }
        return yaml.safe_dump(all_presets, default_flow_style=False)

    def list(self):
        """Print every preset grouped by the file it came from."""
        if len(self) == 0:
            print("no presets found")
            return

        for handler in self.handlers:
            val = str(handler)
            if val:
                val = "\n  ".join(
                    [""] + val.splitlines() + [""]
                )  # indent all lines by 2 spaces
                print("Presets from {}:".format(handler.path))
                print(val)
diff --git a/tools/tryselect/push.py b/tools/tryselect/push.py
new file mode 100644
index 0000000000..cf5e646c8c
--- /dev/null
+++ b/tools/tryselect/push.py
@@ -0,0 +1,257 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import json
+import os
+import sys
+import traceback
+
+import six
+from mach.util import get_state_dir
+from mozbuild.base import MozbuildObject
+from mozversioncontrol import MissingVCSExtension, get_repository_object
+
+from .lando import push_to_lando_try
+from .util.estimates import duration_summary
+from .util.manage_estimates import (
+ download_task_history_data,
+ make_trimmed_taskgraph_cache,
+)
+
# Help messages for missing VCS integrations and dirty checkouts.
GIT_CINNABAR_NOT_FOUND = """
Could not detect `git-cinnabar`.

The `mach try` command requires git-cinnabar to be installed when
pushing from git. Please install it by running:

    $ ./mach vcs-setup
""".lstrip()

HG_PUSH_TO_TRY_NOT_FOUND = """
Could not detect `push-to-try`.

The `mach try` command requires the push-to-try extension enabled
when pushing from hg. Please install it by running:

    $ ./mach vcs-setup
""".lstrip()

VCS_NOT_FOUND = """
Could not detect version control. Only `hg` or `git` are supported.
""".strip()

UNCOMMITTED_CHANGES = """
ERROR please commit changes before continuing
""".strip()

# Maximum number of entries kept in the push history file; overridden at
# runtime from the `try.maxhistory` setting (see mach_commands init).
MAX_HISTORY = 10

here = os.path.abspath(os.path.dirname(__file__))
# Module-level build/VCS handles shared by all push helpers below.
bu build = MozbuildObject.from_environment(cwd=here)
vcs = get_repository_object(build.topsrcdir)

# Location of the push-history file (one JSON [msg, config] pair per line).
history_path = os.path.join(
    get_state_dir(specific_to_topsrcdir=True), "history", "try_task_configs.json"
)
+)
+
+
def write_task_config(try_task_config):
    """Write ``try_task_config`` as JSON to the root of the checkout.

    Returns the path of the generated try_task_config.json file.
    """
    config_path = os.path.join(vcs.path, "try_task_config.json")
    with open(config_path, "w") as fh:
        json.dump(try_task_config, fh, indent=4, separators=(",", ": "), sort_keys=True)
        fh.write("\n")
    return config_path
+
+
def write_task_config_history(msg, try_task_config):
    """Prepend ``[msg, try_task_config]`` to the on-disk push history.

    The history file stores one JSON-encoded entry per line, newest first,
    truncated to ``MAX_HISTORY`` entries.
    """
    if os.path.isfile(history_path):
        with open(history_path) as fh:
            entries = fh.read().strip().splitlines()
    else:
        history_dir = os.path.dirname(history_path)
        if not os.path.isdir(history_dir):
            os.makedirs(history_dir)
        entries = []

    entries.insert(0, json.dumps([msg, try_task_config]))
    del entries[MAX_HISTORY:]
    with open(history_path, "w") as fh:
        fh.write("\n".join(entries))
+
+
def check_working_directory(push=True):
    """Exit with an error if pushing with uncommitted local changes.

    No-op when ``push`` is False (dry runs / staged changes).
    """
    if not push:
        return

    if not vcs.working_directory_clean():
        print(UNCOMMITTED_CHANGES)
        sys.exit(1)
+
+
def generate_try_task_config(method, labels, params=None, routes=None):
    """Build the version-2 try_task_config payload for a push.

    Args:
        method: Name of the selector that chose the tasks; recorded in the
            TRY_SELECTOR environment variable of the try config.
        labels: Task labels to schedule (stored sorted).
        params: Optional taskgraph parameters dict; extended in place.
        routes: Optional extra routes for the try config.

    Returns:
        ``{"version": 2, "parameters": {...}}``
    """
    if params is None:
        params = {}

    # The user has explicitly requested a set of jobs, so run them all
    # regardless of optimization (unless the selector explicitly sets this to
    # True). Their dependencies can be optimized though.
    params.setdefault("optimize_target_tasks", False)

    # Remove selected labels from 'existing_tasks' parameter if present
    if "existing_tasks" in params:
        params["existing_tasks"] = {
            task_label: task_id
            for task_label, task_id in params["existing_tasks"].items()
            if task_label not in labels
        }

    try_config = params.setdefault("try_task_config", {})
    try_config.setdefault("env", {})["TRY_SELECTOR"] = method
    try_config["tasks"] = sorted(labels)
    if routes:
        try_config["routes"] = routes

    return {"version": 2, "parameters": params}
+
+
def task_labels_from_try_config(try_task_config):
    """Extract the explicit task label list from a try config.

    Returns the label list for version 1/2 configs, or None when the
    config version is unknown or no explicit task list exists.
    """
    version = try_task_config["version"]
    if version == 2:
        try_config = try_task_config.get("parameters", {}).get("try_task_config")
        return try_config["tasks"] if try_config is not None else None
    if version == 1:
        return try_task_config.get("tasks", [])
    return None
+
+
def display_push_estimates(try_task_config):
    """Print task-count and duration estimates for the selected tasks.

    Silently returns when the task list can't be determined or when no
    local taskgraph cache is available to compute estimates from.
    """
    task_labels = task_labels_from_try_config(try_task_config)
    if task_labels is None:
        return

    cache_dir = os.path.join(
        get_state_dir(specific_to_topsrcdir=True), "cache", "taskgraph"
    )

    # Prefer the target task graph cache; fall back to the full graph.
    graph_cache = None
    dep_cache = None
    target_file = None
    for graph_cache_file in ["target_task_graph", "full_task_graph"]:
        graph_cache = os.path.join(cache_dir, graph_cache_file)
        if os.path.isfile(graph_cache):
            dep_cache = graph_cache.replace("task_graph", "task_dependencies")
            target_file = graph_cache.replace("task_graph", "task_set")
            break

    if not dep_cache:
        return

    download_task_history_data(cache_dir=cache_dir)
    make_trimmed_taskgraph_cache(graph_cache, dep_cache, target_file=target_file)

    durations = duration_summary(dep_cache, task_labels, cache_dir)

    print(
        "estimates: Runs {} tasks ({} selected, {} dependencies)".format(
            durations["dependency_count"] + durations["selected_count"],
            durations["selected_count"],
            durations["dependency_count"],
        )
    )
    print(
        "estimates: Total task duration {}".format(
            durations["dependency_duration"] + durations["selected_duration"]
        )
    )
    if "percentile" in durations:
        percentile = durations["percentile"]
        if percentile > 50:
            print("estimates: In the longest {}% of durations".format(100 - percentile))
        else:
            print("estimates: In the shortest {}% of durations".format(percentile))
    print(
        "estimates: Should take about {} (Finished around {})".format(
            durations["wall_duration_seconds"],
            durations["eta_datetime"].strftime("%Y-%m-%d %H:%M"),
        )
    )
+
+
def push_to_try(
    method,
    msg,
    try_task_config=None,
    stage_changes=False,
    dry_run=False,
    closed_tree=False,
    files_to_change=None,
    allow_log_capture=False,
    push_to_lando=False,
):
    """Push the current stack plus the generated try config to try.

    Args:
        method: Name of the selector that produced this push.
        msg: First line of the commit message.
        try_task_config: Optional dict written to try_task_config.json.
        stage_changes: Write the generated files but do not push.
        dry_run: Neither write files nor push; print what would be done.
        closed_tree: Mark the commit message as a deliberate closed-tree push.
        files_to_change: Optional mapping of repo-relative path -> content
            (bytes/str) for extra files to include in the push.
        allow_log_capture: Forwarded to the VCS push implementation.
        push_to_lando: Route the push through Lando instead of direct VCS.
    """
    push = not stage_changes and not dry_run
    check_working_directory(push)

    if try_task_config and method not in ("auto", "empty"):
        # Estimates are best-effort; never let them break the push.
        try:
            display_push_estimates(try_task_config)
        except Exception:
            traceback.print_exc()
            print("warning: unable to display push estimates")

    # Format the commit message
    closed_tree_string = " ON A CLOSED TREE" if closed_tree else ""
    commit_message = "{}{}\n\nPushed via `mach try {}`".format(
        msg,
        closed_tree_string,
        method,
    )

    config_path = None
    changed_files = []
    if try_task_config:
        # 'again'/'auto'/'empty' pushes are not recorded in history.
        if push and method not in ("again", "auto", "empty"):
            write_task_config_history(msg, try_task_config)
        config_path = write_task_config(try_task_config)
        changed_files.append(config_path)

    if (push or stage_changes) and files_to_change:
        for path, content in files_to_change.items():
            path = os.path.join(vcs.path, path)
            with open(path, "wb") as fh:
                fh.write(six.ensure_binary(content))
            changed_files.append(path)

    try:
        if not push:
            # Dry run / stage: show what would be pushed and stop.
            print("Commit message:")
            print(commit_message)
            if config_path:
                print("Calculated try_task_config.json:")
                with open(config_path) as fh:
                    print(fh.read())
            return

        vcs.add_remove_files(*changed_files)

        try:
            if push_to_lando:
                push_to_lando_try(vcs, commit_message)
            else:
                vcs.push_to_try(commit_message, allow_log_capture=allow_log_capture)
        except MissingVCSExtension as e:
            # Translate a missing VCS extension into setup instructions.
            if e.ext == "push-to-try":
                print(HG_PUSH_TO_TRY_NOT_FOUND)
            elif e.ext == "cinnabar":
                print(GIT_CINNABAR_NOT_FOUND)
            else:
                raise
            sys.exit(1)
    finally:
        # Never leave the generated try_task_config.json in the checkout.
        if config_path and os.path.isfile(config_path):
            os.remove(config_path)
diff --git a/tools/tryselect/selectors/__init__.py b/tools/tryselect/selectors/__init__.py
new file mode 100644
index 0000000000..c580d191c1
--- /dev/null
+++ b/tools/tryselect/selectors/__init__.py
@@ -0,0 +1,3 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
diff --git a/tools/tryselect/selectors/again.py b/tools/tryselect/selectors/again.py
new file mode 100644
index 0000000000..434aed7cc1
--- /dev/null
+++ b/tools/tryselect/selectors/again.py
@@ -0,0 +1,151 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import json
+import os
+
+from ..cli import BaseTryParser
+from ..push import history_path, push_to_try
+
+
class AgainParser(BaseTryParser):
    """CLI parser for |mach try again| (re-push from saved history)."""

    name = "again"
    arguments = [
        [
            ["--index"],
            {
                "default": 0,
                "const": "list",
                "nargs": "?",
                "help": "Index of entry in the history to re-push, "
                "where '0' is the most recent (default 0). "
                "Use --index without a value to display indices.",
            },
        ],
        [
            ["--list"],
            {
                "default": False,
                "action": "store_true",
                "dest": "list_configs",
                "help": "Display history and exit",
            },
        ],
        [
            ["--list-tasks"],
            {
                "default": 0,
                "action": "count",
                "dest": "list_tasks",
                "help": "Like --list, but display selected tasks "
                "for each history entry, up to 10. Repeat "
                "to display all selected tasks.",
            },
        ],
        [
            ["--purge"],
            {
                "default": False,
                "action": "store_true",
                "help": "Remove all history and exit",
            },
        ],
    ]
    common_groups = ["push"]
+
+
def run(
    index=0, purge=False, list_configs=False, list_tasks=0, message="{msg}", **pushargs
):
    """Re-push, list, or purge entries from the saved try-push history.

    Returns 1 on user error (bad index / missing history file), otherwise
    the result of ``push_to_try`` or None for the list/purge modes.
    """
    # `--index` without a value parses to the sentinel "list".
    if index == "list":
        list_configs = True
    else:
        try:
            index = int(index)
        except ValueError:
            print("error: '--index' must be an integer")
            return 1

    if purge:
        os.remove(history_path)
        return

    if not os.path.isfile(history_path):
        print("error: history file not found: {}".format(history_path))
        return 1

    # One JSON-encoded [msg, try_task_config] pair per line, newest first.
    with open(history_path) as fh:
        history = fh.readlines()

    if list_configs or list_tasks > 0:
        for i, data in enumerate(history):
            msg, config = json.loads(data)
            version = config.get("version", "1")
            settings = {}
            if version == 1:
                tasks = config["tasks"]
                settings = config
            elif version == 2:
                try_config = config.get("parameters", {}).get("try_task_config", {})
                tasks = try_config.get("tasks")
            else:
                tasks = None

            if tasks is not None:
                # Select only the things that are of interest to display.
                settings = settings.copy()
                env = settings.pop("env", {}).copy()
                env.pop("TRY_SELECTOR", None)
                for name in ("tasks", "version"):
                    settings.pop(name, None)

                def pluralize(n, noun):
                    return "{n} {noun}{s}".format(
                        n=n, noun=noun, s="" if n == 1 else "s"
                    )

                out = str(i) + ". (" + pluralize(len(tasks), "task")
                if env:
                    out += ", " + pluralize(len(env), "env var")
                if settings:
                    out += ", " + pluralize(len(settings), "setting")
                out += ") " + msg
                print(out)

                if list_tasks > 0:
                    indent = " " * 4
                    # A single --list-tasks shows up to 10 tasks; repeating
                    # it shows them all.
                    if list_tasks > 1:
                        shown_tasks = tasks
                    else:
                        shown_tasks = tasks[:10]
                    print(indent + ("\n" + indent).join(shown_tasks))

                    num_hidden_tasks = len(tasks) - len(shown_tasks)
                    if num_hidden_tasks > 0:
                        print("{}... and {} more".format(indent, num_hidden_tasks))

                if list_tasks and env:
                    for line in ("env: " + json.dumps(env, indent=2)).splitlines():
                        print("  " + line)

                if list_tasks and settings:
                    for line in (
                        "settings: " + json.dumps(settings, indent=2)
                    ).splitlines():
                        print("  " + line)
            else:
                print(
                    "{index}. {msg}".format(
                        index=i,
                        msg=msg,
                    )
                )

        return

    msg, try_task_config = json.loads(history[index])
    return push_to_try(
        "again", message.format(msg=msg), try_task_config=try_task_config, **pushargs
    )
diff --git a/tools/tryselect/selectors/auto.py b/tools/tryselect/selectors/auto.py
new file mode 100644
index 0000000000..e7cc6c508c
--- /dev/null
+++ b/tools/tryselect/selectors/auto.py
@@ -0,0 +1,118 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from taskgraph.util.python_path import find_object
+
+from ..cli import BaseTryParser
+from ..push import push_to_try
+from ..util.dicttools import merge
+
# Baseline taskgraph parameters for |mach try auto|; configures bugbug-based
# task/manifest selection in 'try_auto' mode.
TRY_AUTO_PARAMETERS = {
    "optimize_strategies": "gecko_taskgraph.optimize:tryselect.bugbug_reduced_manifests_config_selection_medium",  # noqa
    "optimize_target_tasks": True,
    "target_tasks_method": "try_auto",
    "test_manifest_loader": "bugbug",
    "try_mode": "try_auto",
    "try_task_config": {},
}
+
+
class AutoParser(BaseTryParser):
    """CLI parser for |mach try auto|."""

    name = "auto"
    common_groups = ["push"]
    task_configs = [
        "artifact",
        "env",
        "chemspill-prio",
        "disable-pgo",
        "worker-overrides",
    ]
    arguments = [
        [
            ["--strategy"],
            {
                "default": None,
                "help": "Override the default optimization strategy. Valid values "
                "are the experimental strategies defined at the bottom of "
                "`taskcluster/gecko_taskgraph/optimize/__init__.py`.",
            },
        ],
        [
            ["--tasks-regex"],
            {
                "default": [],
                "action": "append",
                "help": "Apply a regex filter to the tasks selected. Specifying "
                "multiple times schedules the union of computed tasks.",
            },
        ],
        [
            ["--tasks-regex-exclude"],
            {
                "default": [],
                "action": "append",
                "help": "Apply a regex filter to the tasks selected. Specifying "
                "multiple times excludes computed tasks matching any regex.",
            },
        ],
    ]

    def validate(self, args):
        """Extend base validation to resolve and check `--strategy`."""
        super().validate(args)

        if args.strategy:
            # Bare names are shorthand for the tryselect strategies module.
            if ":" not in args.strategy:
                args.strategy = "gecko_taskgraph.optimize:tryselect.{}".format(
                    args.strategy
                )

            try:
                obj = find_object(args.strategy)
            except (ImportError, AttributeError):
                self.error("invalid module path '{}'".format(args.strategy))

            if not isinstance(obj, dict):
                self.error("object at '{}' must be a dict".format(args.strategy))
+
+
def run(
    message="{msg}",
    stage_changes=False,
    dry_run=False,
    closed_tree=False,
    strategy=None,
    tasks_regex=None,
    tasks_regex_exclude=None,
    try_config_params=None,
    push_to_lando=False,
    **ignored
):
    """Push an autoland-equivalent (automatically selected) task set to try."""
    msg = message.format(msg="Tasks automatically selected.")

    # Start from the auto defaults, layered with any CLI-derived params.
    params = TRY_AUTO_PARAMETERS.copy()
    if try_config_params:
        params = merge(params, try_config_params)

    if strategy:
        params["optimize_strategies"] = strategy

    # Include/exclude regexes are applied server-side via the try config.
    if tasks_regex or tasks_regex_exclude:
        params.setdefault("try_task_config", {})["tasks-regex"] = {}
        params["try_task_config"]["tasks-regex"]["include"] = tasks_regex
        params["try_task_config"]["tasks-regex"]["exclude"] = tasks_regex_exclude

    task_config = {
        "version": 2,
        "parameters": params,
    }
    return push_to_try(
        "auto",
        msg,
        try_task_config=task_config,
        stage_changes=stage_changes,
        dry_run=dry_run,
        closed_tree=closed_tree,
        push_to_lando=push_to_lando,
    )
diff --git a/tools/tryselect/selectors/chooser/.eslintrc.js b/tools/tryselect/selectors/chooser/.eslintrc.js
new file mode 100644
index 0000000000..861d6bafc2
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/.eslintrc.js
@@ -0,0 +1,16 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
// ESLint configuration for the chooser's static scripts.
module.exports = {
  env: {
    // The chooser UI scripts rely on jQuery's global `$`.
    jquery: true,
  },
  globals: {
    // Defined in filter.js, used from select.js and inline handlers.
    apply: true,
    applyChunks: true,
    // `tasks` is defined inline by templates/chooser.html.
    tasks: true,
  },
};
diff --git a/tools/tryselect/selectors/chooser/__init__.py b/tools/tryselect/selectors/chooser/__init__.py
new file mode 100644
index 0000000000..d6a32e08d0
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/__init__.py
@@ -0,0 +1,120 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import multiprocessing
+import os
+import time
+import webbrowser
+from threading import Timer
+
+from gecko_taskgraph.target_tasks import filter_by_uncommon_try_tasks
+
+from tryselect.cli import BaseTryParser
+from tryselect.push import (
+ check_working_directory,
+ generate_try_task_config,
+ push_to_try,
+)
+from tryselect.tasks import generate_tasks
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+
class ChooserParser(BaseTryParser):
    """Argument parser for |mach try chooser|.

    Declares no selector-specific arguments; it only pulls in the shared
    "push" and "task" argument groups and the try_task_config templates
    listed in ``task_configs``.
    """

    name = "chooser"
    arguments = []
    common_groups = ["push", "task"]
    # Templates merged into the generated try_task_config.
    task_configs = [
        "artifact",
        "browsertime",
        "chemspill-prio",
        "disable-pgo",
        "env",
        "existing-tasks",
        "gecko-profile",
        "path",
        "pernosco",
        "rebuild",
        "worker-overrides",
    ]
+
+
def run(
    update=False,
    query=None,
    try_config_params=None,
    full=False,
    parameters=None,
    save=False,
    preset=None,
    mod_presets=False,
    stage_changes=False,
    dry_run=False,
    message="{msg}",
    closed_tree=False,
    push_to_lando=False,
):
    """Launch the web-based chooser, then push whatever tasks were picked.

    Serves the Flask app from a subprocess, opens a browser tab at it, and
    blocks until the user submits a selection (sent back over a
    multiprocessing queue). Returns the result of push_to_try, or None when
    nothing was selected.
    """
    from .app import create_application

    check_working_directory(not stage_changes and not dry_run)

    tg = generate_tasks(parameters, full)

    # Without --full, hide tasks that are rarely useful on try.
    if not full:
        uncommon = [
            label for label in tg.tasks if not filter_by_uncommon_try_tasks(label)
        ]
        for label in uncommon:
            del tg.tasks[label]

    queue = multiprocessing.Queue()

    if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
        # We're inside the werkzeug reloader child; just serve the app and
        # skip the browser/push logic.
        create_application(tg, queue).run()
        return

    # Give the server a moment to come up before pointing a browser at it.
    url = "http://127.0.0.1:5000"
    Timer(1, lambda: webbrowser.open(url)).start()
    print("Starting trychooser on {}".format(url))
    process = multiprocessing.Process(
        target=create_and_run_application, args=(tg, queue)
    )
    process.start()

    selected = queue.get()

    # Let the "close this page" response render before killing the server.
    time.sleep(1)
    process.terminate()

    if not selected:
        print("no tasks selected")
        return

    msg = "Try Chooser Enhanced ({} tasks selected)".format(len(selected))
    return push_to_try(
        "chooser",
        message.format(msg=msg),
        try_task_config=generate_try_task_config(
            "chooser", selected, params=try_config_params
        ),
        stage_changes=stage_changes,
        dry_run=dry_run,
        closed_tree=closed_tree,
        push_to_lando=push_to_lando,
    )
+
+
def create_and_run_application(tg, queue: multiprocessing.Queue):
    """Subprocess entry point: build the chooser Flask app and serve it."""
    from .app import create_application

    create_application(tg, queue).run()
diff --git a/tools/tryselect/selectors/chooser/app.py b/tools/tryselect/selectors/chooser/app.py
new file mode 100644
index 0000000000..99d63cd37f
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/app.py
@@ -0,0 +1,176 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import multiprocessing
+from abc import ABCMeta, abstractproperty
+from collections import defaultdict
+
+from flask import Flask, render_template, request
+
+SECTIONS = []
+SUPPORTED_KINDS = set()
+
+
def register_section(cls):
    """Class decorator: instantiate a Section subclass and register it.

    The instance is appended to SECTIONS and its kind(s) are added to
    SUPPORTED_KINDS. Returns the class so the decorated name still refers
    to the class (previously nothing was returned, rebinding the name to
    None).
    """
    assert issubclass(cls, Section)
    instance = cls()
    SECTIONS.append(instance)
    SUPPORTED_KINDS.update(instance.kind.split(","))
    return cls
+
+
class Section(object, metaclass=ABCMeta):
    """Base class for one tab of related tasks in the chooser UI.

    Subclasses declare which task kind(s) they cover and which task
    attributes are surfaced as filter values, and implement ``labelfn`` to
    map a task's attributes to a display label.

    NOTE: the original assigned ``__metaclass__ = ABCMeta``, which is
    Python 2 syntax and a no-op on Python 3, so the abstract properties
    were never enforced; ``metaclass=ABCMeta`` restores enforcement.
    """

    @abstractproperty
    def name(self):
        """Short identifier, used as the tab's DOM id."""

    @abstractproperty
    def kind(self):
        """Comma-separated task kinds this section covers."""

    @abstractproperty
    def title(self):
        """Human-readable tab title."""

    @abstractproperty
    def attrs(self):
        """Task attribute names surfaced as filter values."""

    def contains(self, task):
        # A task belongs to this section when its kind is one of ours.
        return task.kind in self.kind.split(",")

    def get_context(self, tasks):
        """Build the template context for this section.

        Groups *tasks* (a mapping of label -> Task) by ``labelfn``,
        collecting the distinct values of each attribute in ``attrs`` and
        the highest ``test_chunk`` seen per label.
        """
        labels = defaultdict(lambda: {"max_chunk": 0, "attrs": defaultdict(list)})

        for task in tasks.values():
            if not self.contains(task):
                continue

            # From here on we only need the task's attribute dict.
            task = task.attributes
            label = labels[self.labelfn(task)]
            for attr in self.attrs:
                if attr in task and task[attr] not in label["attrs"][attr]:
                    label["attrs"][attr].append(task[attr])

            if "test_chunk" in task:
                label["max_chunk"] = max(
                    label["max_chunk"], int(task["test_chunk"])
                )

        return {
            "name": self.name,
            "kind": self.kind,
            "title": self.title,
            "labels": labels,
        }
+
+
@register_section
class Platform(Section):
    """Tab listing build platforms."""

    name = "platform"
    kind = "build"
    title = "Platforms"
    attrs = ["build_platform"]

    def labelfn(self, task):
        return task["build_platform"]

    def contains(self, task):
        if not super().contains(task):
            return False
        # "android-stuff" builds aren't real platforms; they are listed in
        # the Analysis section instead.
        return task.task["tags"].get("android-stuff", False) != "true"
+
+
@register_section
class Test(Section):
    """Tab listing functional test suites (perf suites excluded)."""

    name = "test"
    kind = "test"
    title = "Test Suites"
    attrs = ["unittest_suite"]

    def labelfn(self, task):
        # Collapse chunked variants into a single label.
        suite = task["unittest_suite"].replace(" ", "-")
        if suite.endswith("-chunked"):
            suite = suite[: -len("-chunked")]
        return suite

    def contains(self, task):
        if not super().contains(task):
            return False
        # raptor/talos live in the Performance section instead.
        return task.attributes["unittest_suite"] not in ("raptor", "talos")
+
+
@register_section
class Perf(Section):
    """Tab listing raptor and talos performance suites."""

    name = "perf"
    kind = "test"
    title = "Performance"
    attrs = ["unittest_suite", "raptor_try_name", "talos_try_name"]

    def labelfn(self, task):
        suite = task["unittest_suite"]
        label = task["{}_try_name".format(suite)]

        # Ensure the label carries the suite prefix, then strip the
        # redundant -e10s suffix.
        if not label.startswith(suite):
            label = "{}-{}".format(suite, label)
        if label.endswith("-e10s"):
            label = label[: -len("-e10s")]
        return label

    def contains(self, task):
        if not super().contains(task):
            return False
        return task.attributes["unittest_suite"] in ("raptor", "talos")
+
+
@register_section
class Analysis(Section):
    """Tab for analysis tasks (plus the android-stuff pseudo builds)."""

    name = "analysis"
    kind = "build,static-analysis-autotest,hazard"
    title = "Analysis"
    attrs = ["build_platform"]

    def labelfn(self, task):
        return task["build_platform"]

    def contains(self, task):
        if not super().contains(task):
            return False
        # Of the build kind, only android-stuff tasks count as analysis.
        if task.kind == "build":
            return task.task["tags"].get("android-stuff", False) == "true"
        return True
+
+
def create_application(tg, queue: multiprocessing.Queue):
    """Build the chooser Flask app for the given task graph.

    The app serves the selection UI at '/' and, on POST, pushes the chosen
    task labels onto *queue* before rendering the close page.
    """
    tasks = {
        label: task
        for label, task in tg.tasks.items()
        if task.kind in SUPPORTED_KINDS
    }
    context = {
        "tasks": {label: task.attributes for label, task in tasks.items()},
        "sections": [section.get_context(tasks) for section in SECTIONS],
    }

    app = Flask(__name__)
    app.env = "development"
    app.tasks = []

    @app.route("/", methods=["GET", "POST"])
    def chooser():
        if request.method == "GET":
            return render_template("chooser.html", **context)

        # POST: "Push" submits the selection; "Cancel" submits nothing.
        if request.form["action"] == "Push":
            app.tasks.extend(request.form["selected-tasks"].splitlines())

        queue.put(app.tasks)
        return render_template("close.html")

    return app
diff --git a/tools/tryselect/selectors/chooser/static/filter.js b/tools/tryselect/selectors/chooser/static/filter.js
new file mode 100644
index 0000000000..2d8731e61f
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/static/filter.js
@@ -0,0 +1,116 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
/* Handles to the preview <textarea> and its task-count label. */
const selection = $("#selection")[0];
const count = $("#selection-count")[0];
// "1 task" / "2 tasks" style formatting. NOTE: the `count` parameter
// shadows the DOM element above inside this arrow function.
const pluralize = (count, noun, suffix = "s") =>
  `${count} ${noun}${count !== 1 ? suffix : ""}`;

// Task labels currently matched by the checkbox filters (before any chunk
// narrowing by applyChunks()).
var selected = [];

// Sync the .is-checked style of each label in the active tab with the
// checked state of its associated checkbox.
var updateLabels = () => {
  $(".tab-pane.active > .filter-label").each(function (index) {
    let box = $("#" + this.htmlFor)[0];
    let method = box.checked ? "add" : "remove";
    $(this)[method + "Class"]("is-checked");
  });
};
+
/**
 * Recompute the set of selected task labels from the checked filter boxes.
 *
 * Builds an attribute -> allowed-values map from every checked checkbox
 * (values are JSON blobs generated by Section.get_context() in app.py) and
 * keeps a task when, for each filtered attribute it carries, its value is
 * one of the allowed ones. The result is stored in `selected` and then
 * narrowed by applyChunks().
 */
var apply = () => {
  let filters = {};
  let kinds = [];

  $(".filter:checked").each(function (index) {
    for (let kind of this.name.split(",")) {
      if (!kinds.includes(kind)) {
        kinds.push(kind);
      }
    }

    // Checkbox element values are generated by Section.get_context() in app.py
    let attrs = JSON.parse(this.value);
    for (let attr in attrs) {
      if (!(attr in filters)) {
        filters[attr] = [];
      }

      let values = attrs[attr];
      filters[attr] = filters[attr].concat(values);
    }
  });
  updateLabels();

  // Nothing selected (or only a build type): empty the preview.
  if (
    !Object.keys(filters).length ||
    (Object.keys(filters).length == 1 && "build_type" in filters)
  ) {
    selection.value = "";
    // Use innerText for consistency with applyChunks() (was innerHTML).
    count.innerText = "0 tasks selected";
    return;
  }

  var taskMatches = label => {
    let task = tasks[label];

    // If no box for the given kind has been checked, this task is
    // automatically not selected.
    if (!kinds.includes(task.kind)) {
      return false;
    }

    for (let attr in filters) {
      let values = filters[attr];
      if (!(attr in task) || values.includes(task[attr])) {
        continue;
      }
      return false;
    }
    return true;
  };

  selected = Object.keys(tasks).filter(taskMatches);
  applyChunks();
};
+
/**
 * Narrow `selected` using any chunk-range text inputs, then render the
 * final selection into the preview textarea and update the task count.
 */
var applyChunks = () => {
  // For tasks that have a chunk filter applied, we handle that here.
  let filters = {};
  $(".filter:text").each(function (index) {
    let value = $(this).val();
    if (value === "") {
      return;
    }

    // Text input names are JSON attribute blobs (see chooser.html); key
    // chunk filters by suite+flavor.
    let attrs = JSON.parse(this.name);
    let key = `${attrs.unittest_suite}-${attrs.unittest_flavor}`;
    if (!(key in filters)) {
      filters[key] = [];
    }

    // Parse the chunk strings. These are formatted like printer page setups, e.g: "1,4-6,9"
    for (let item of value.split(",")) {
      if (!item.includes("-")) {
        filters[key].push(parseInt(item));
        continue;
      }

      let [start, end] = item.split("-");
      for (let i = parseInt(start); i <= parseInt(end); ++i) {
        filters[key].push(i);
      }
    }
  });

  // Keep tasks whose suite/flavor has no chunk filter, or whose chunk
  // number is listed in it.
  let chunked = selected.filter(function (label) {
    let task = tasks[label];
    let key = task.unittest_suite + "-" + task.unittest_flavor;
    if (key in filters && !filters[key].includes(parseInt(task.test_chunk))) {
      return false;
    }
    return true;
  });

  selection.value = chunked.join("\n");
  count.innerText = pluralize(chunked.length, "task") + " selected";
};
diff --git a/tools/tryselect/selectors/chooser/static/select.js b/tools/tryselect/selectors/chooser/static/select.js
new file mode 100644
index 0000000000..8a315c0a52
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/static/select.js
@@ -0,0 +1,46 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
/* Every multiselect label and its checkbox, in DOM order. */
const labels = $("label.multiselect");
const boxes = $("label.multiselect input:checkbox");
// Per-tab memory of the last toggled checkbox: the anchor for shift+click.
var lastChecked = {};

// implements shift+click
labels.click(function (e) {
  // Clicks landing directly on the <input> are handled natively by the
  // browser; only label clicks need the custom logic below.
  if (e.target.tagName === "INPUT") {
    return;
  }

  let box = $("#" + this.htmlFor)[0];
  let activeSection = $("div.tab-pane.active")[0].id;

  if (activeSection in lastChecked) {
    // Bug 559506 - In Firefox shift/ctrl/alt+clicking a label doesn't check the box.
    let isFirefox = navigator.userAgent.toLowerCase().indexOf("firefox") > -1;

    if (e.shiftKey) {
      if (isFirefox) {
        box.checked = !box.checked;
      }

      // Apply this box's state to the whole range between it and the
      // previous anchor (inclusive).
      let start = boxes.index(box);
      let end = boxes.index(lastChecked[activeSection]);

      boxes
        .slice(Math.min(start, end), Math.max(start, end) + 1)
        .prop("checked", box.checked);
      apply();
    }
  }

  lastChecked[activeSection] = box;
});
+
/* Check or uncheck every filter checkbox in the active tab, then refresh.
 * The "Select All" button carries value="true"; the "Deselect All" button
 * has no value attribute, so `checked` ends up false for it. */
function selectAll(btn) {
  const checked = Boolean(btn.value);
  $("div.active label.filter-label").each(function (index) {
    $(this).find("input:checkbox")[0].checked = checked;
  });
  apply();
}
diff --git a/tools/tryselect/selectors/chooser/static/style.css b/tools/tryselect/selectors/chooser/static/style.css
new file mode 100644
index 0000000000..6b2f96935b
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/static/style.css
@@ -0,0 +1,107 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+body {
+ padding-top: 70px;
+}
+
+/* Tabs */
+
+#tabbar .nav-link {
+ color: #009570;
+ font-size: 18px;
+ padding-bottom: 15px;
+ padding-top: 15px;
+}
+
+#tabbar .nav-link.active {
+ color: #212529;
+}
+
+#tabbar .nav-link:hover {
+ color: #0f5a3a;
+}
+
+/* Sections */
+
+.tab-content button {
+ font-size: 14px;
+ margin-bottom: 5px;
+ margin-top: 10px;
+}
+
+.filter-label {
+ display: block;
+ font-size: 16px;
+ position: relative;
+ padding-left: 15px;
+ padding-right: 15px;
+ padding-top: 10px;
+ padding-bottom: 10px;
+ margin-bottom: 0;
+ user-select: none;
+ vertical-align: middle;
+}
+
+.filter-label span {
+ display: flex;
+ min-height: 34px;
+ align-items: center;
+ justify-content: space-between;
+}
+
+.filter-label input[type="checkbox"] {
+ position: absolute;
+ opacity: 0;
+ height: 0;
+ width: 0;
+}
+
+.filter-label input[type="text"] {
+ width: 50px;
+}
+
+.filter-label:hover {
+ background-color: #91a0b0;
+}
+
+.filter-label.is-checked:hover {
+ background-color: #91a0b0;
+}
+
+.filter-label.is-checked {
+ background-color: #404c59;
+ color: white;
+}
+
+/* Preview pane */
+
+#preview {
+ position: fixed;
+ height: 100vh;
+ margin-left: 66%;
+ width: 100%;
+}
+
+#submit-tasks {
+ display: flex;
+ flex-direction: column;
+ height: 80%;
+}
+
+#buttons {
+ display: flex;
+ justify-content: space-between;
+}
+
+#push {
+ background-color: #00e9b7;
+ margin-left: 5px;
+ width: 100%;
+}
+
+#selection {
+ height: 100%;
+ width: 100%;
+}
diff --git a/tools/tryselect/selectors/chooser/templates/chooser.html b/tools/tryselect/selectors/chooser/templates/chooser.html
new file mode 100644
index 0000000000..4e009d94ac
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/templates/chooser.html
@@ -0,0 +1,78 @@
+<!-- This Source Code Form is subject to the terms of the Mozilla Public
+ - License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ - You can obtain one at http://mozilla.org/MPL/2.0/. -->
+
{% extends 'layout.html' %}
{% block content %}
<div class="container-fluid">
  <div class="row">
    <div class="col-8">
      <div class="form-group form-inline">
        <span class="col-form-label col-md-2 pt-1">Build Type</span>
        <div class="form-check form-check-inline">
          <input id="both" class="filter form-check-input" type="radio" name="buildtype" value='{}' onchange="apply();" checked>
          <label for="both" class="form-check-label">both</label>
        </div>
        {% for type in ["opt", "debug"] %}
        <div class="form-check form-check-inline">
          <input id="{{ type }}" class="filter form-check-input" type="radio" name="buildtype" value='{"build_type": "{{ type }}"}' onchange="apply();">
          <label for="{{ type }}" class="form-check-label">{{ type }}</label>
        </div>
        {% endfor %}
      </div>
      <ul class="nav nav-tabs" id="tabbar" role="tablist">
        {% for section in sections %}
        <li class="nav-item">
          {% if loop.first %}
          <a class="nav-link active" id="{{ section.name }}-tab" data-toggle="tab" href="#{{section.name }}" role="tab" aria-controls="{{ section.name }}" aria-selected="true">{{ section.title }}</a>
          {% else %}
          <a class="nav-link" id="{{ section.name }}-tab" data-toggle="tab" href="#{{section.name }}" role="tab" aria-controls="{{ section.name }}" aria-selected="false">{{ section.title }}</a>
          {% endif %}
        </li>
        {% endfor %}
      </ul>
      <div class="tab-content">
        <button type="button" class="btn btn-secondary" value="true" onclick="selectAll(this);">Select All</button>
        <button type="button" class="btn btn-secondary" onclick="selectAll(this);">Deselect All</button>
        {% for section in sections %}
        {% if loop.first %}
        <div class="tab-pane show active" id="{{ section.name }}" role="tabpanel" aria-labelledby="{{ section.name }}-tab">
        {% else %}
        <div class="tab-pane" id="{{ section.name }}" role="tabpanel" aria-labelledby="{{ section.name }}-tab">
        {% endif %}
          {% for label, meta in section.labels|dictsort %}
          <!-- id/for values are quoted: task labels may contain characters
               that are not valid in unquoted HTML attribute values. The
               onchange handler previously carried a leftover debug
               console.log; it now just calls apply(). -->
          <label class="multiselect filter-label" for="{{ label }}">
            <span>
              {{ label }}
              <input class="filter" type="checkbox" id="{{ label }}" name="{{ section.kind }}" value='{{ meta.attrs|tojson|safe }}' onchange="apply();">
              {% if meta.max_chunk > 1 %}
              <input class="filter" type="text" pattern="[0-9][0-9,\-]*" placeholder="1-{{ meta.max_chunk }}" name='{{ meta.attrs|tojson|safe }}' oninput="applyChunks();">
              {% endif %}
            </span>
          </label>
          {% endfor %}
        </div>
        {% endfor %}
      </div>
    </div>
    <div class="col-4" id="preview">
      <form id="submit-tasks" action="" method="POST">
        <textarea id="selection" name="selected-tasks" wrap="off"></textarea>
        <span id="selection-count">0 tasks selected</span><br>
        <span id="buttons">
          <input id="cancel" class="btn btn-default" type="submit" name="action" value="Cancel">
          <input id="push" class="btn btn-default" type="submit" name="action" value="Push">
        </span>
      </form>
    </div>
  </div>
</div>
{% endblock %}

{% block scripts %}
<script>
  const tasks = {{ tasks|tojson|safe }};
</script>
<script src="{{ url_for('static', filename='filter.js') }}"></script>
<script src="{{ url_for('static', filename='select.js') }}"></script>
{% endblock %}
diff --git a/tools/tryselect/selectors/chooser/templates/close.html b/tools/tryselect/selectors/chooser/templates/close.html
new file mode 100644
index 0000000000..9dc0a161f3
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/templates/close.html
@@ -0,0 +1,11 @@
+<!-- This Source Code Form is subject to the terms of the Mozilla Public
+ - License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ - You can obtain one at http://mozilla.org/MPL/2.0/. -->
+
{% extends 'layout.html' %}
{% block content %}
<div class="container-fluid">
  <div class="alert alert-primary" role="alert">
    You may now close this page.
  </div>
</div>
{% endblock %}
diff --git a/tools/tryselect/selectors/chooser/templates/layout.html b/tools/tryselect/selectors/chooser/templates/layout.html
new file mode 100644
index 0000000000..8553ae94df
--- /dev/null
+++ b/tools/tryselect/selectors/chooser/templates/layout.html
@@ -0,0 +1,71 @@
+<!-- This Source Code Form is subject to the terms of the Mozilla Public
+ - License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ - You can obtain one at http://mozilla.org/MPL/2.0/. -->
+
<!doctype html>
<html lang="en">
+ <head>
+ <meta charset="utf-8" />
+ <title>Try Chooser Enhanced</title>
+ <link
+ rel="stylesheet"
+ href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css"
+ />
+ <link
+ rel="stylesheet"
+ href="{{ url_for('static', filename='style.css') }}"
+ />
+ </head>
+ <body>
+ <nav class="navbar navbar-default fixed-top navbar-dark bg-dark">
+ <div class="container-fluid">
+ <span class="navbar-brand mb-0 h1">Try Chooser Enhanced</span>
+ <button
+ class="navbar-toggler"
+ type="button"
+ data-toggle="collapse"
+ data-target="#navbarSupportedContent"
+ aria-controls="navbarSupportedContent"
+ aria-expanded="false"
+ aria-label="Toggle navigation"
+ >
+ <span class="navbar-toggler-icon"></span>
+ </button>
+ <div class="collapse navbar-collapse" id="navbarSupportedContent">
+ <ul class="navbar-nav mr-auto">
+ <li class="nav-item">
+ <a
+ class="nav-link"
+ href="https://firefox-source-docs.mozilla.org/tools/try/index.html"
+ >Documentation</a
+ >
+ </li>
+ <li class="nav-item">
+ <a
+ class="nav-link"
+ href="https://treeherder.mozilla.org/#/jobs?repo=try"
+ >Treeherder</a
+ >
+ </li>
+ </ul>
+ </div>
+ </div>
+ </nav>
+ {% block content %}{% endblock %}
+ <script
+ src="https://code.jquery.com/jquery-3.3.1.slim.min.js"
+ integrity="sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo"
+ crossorigin="anonymous"
+ ></script>
+ <script
+ src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.3/umd/popper.min.js"
+ integrity="sha384-ZMP7rVo3mIykV+2+9J3UJ46jBk0WLaUAdn689aCwoqbBJiSnjAK/l8WvCWPIPm49"
+ crossorigin="anonymous"
+ ></script>
+ <script
+ src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
+ integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
+ crossorigin="anonymous"
+ ></script>
+ {% block scripts %}{% endblock %}
+ </body>
+</html>
diff --git a/tools/tryselect/selectors/compare.py b/tools/tryselect/selectors/compare.py
new file mode 100644
index 0000000000..ac468e0974
--- /dev/null
+++ b/tools/tryselect/selectors/compare.py
@@ -0,0 +1,66 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+from mozbuild.base import MozbuildObject
+from mozversioncontrol import get_repository_object
+
+from tryselect.cli import BaseTryParser
+
+from .again import run as again_run
+from .fuzzy import run as fuzzy_run
+
+here = os.path.abspath(os.path.dirname(__file__))
+build = MozbuildObject.from_environment(cwd=here)
+
+
class CompareParser(BaseTryParser):
    """Argument parser for |mach try compare|."""

    name = "compare"
    arguments = [
        [
            ["-cc", "--compare-commit"],
            {
                "default": None,
                "help": "The commit that you want to compare your current revision with",
            },
        ],
    ]
    common_groups = ["task"]
    task_configs = [
        "rebuild",
    ]

    # NOTE(review): this was a bare function in the class body; it only
    # worked because it is always called via the class object. Marking it
    # @staticmethod keeps that call site working and also makes instance
    # access safe.
    @staticmethod
    def get_revisions_to_run(vcs, compare_commit):
        """Return (base, current) revision refs for the comparison push.

        Falls back to the VCS base ref when no --compare-commit was given,
        and prefers the branch name over the raw head ref for the current
        revision so the checkout can be restored by name afterwards.
        """
        if compare_commit is None:
            compare_commit = vcs.base_ref
        if vcs.branch:
            current_revision_ref = vcs.branch
        else:
            current_revision_ref = vcs.head_ref

        return compare_commit, current_revision_ref
+
+
def run(compare_commit=None, **kwargs):
    """Push a pair of try runs for performance/behavior comparison.

    First pushes the current revision using the fuzzy selector (extra CLI
    options arrive via **kwargs), then updates the working copy to the
    comparison commit and re-pushes the identical selection via the
    "again" selector. The working copy is restored to the starting
    revision in all cases.
    """
    vcs = get_repository_object(build.topsrcdir)
    compare_commit, current_revision_ref = CompareParser.get_revisions_to_run(
        vcs, compare_commit
    )
    print("********************************************")
    print("* 2 commits are created with this command *")
    print("********************************************")

    try:
        # Base push: the current working revision, selected via fuzzy.
        fuzzy_run(**kwargs)
        print("********************************************")
        print("* The base commit can be found above *")
        print("********************************************")
        vcs.update(compare_commit)
        # Re-push the exact same selection on the comparison commit.
        again_run()
        print("*****************************************")
        print("* The compare commit can be found above *")
        print("*****************************************")
    finally:
        # Always return the checkout to where the user started.
        vcs.update(current_revision_ref)
diff --git a/tools/tryselect/selectors/coverage.py b/tools/tryselect/selectors/coverage.py
new file mode 100644
index 0000000000..f396e4618c
--- /dev/null
+++ b/tools/tryselect/selectors/coverage.py
@@ -0,0 +1,452 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import collections
+import datetime
+import hashlib
+import json
+import os
+import shutil
+import sqlite3
+import subprocess
+
+import requests
+import six
+from mach.util import get_state_dir
+from mozbuild.base import MozbuildObject
+from mozpack.files import FileFinder
+from moztest.resolve import TestResolver
+from mozversioncontrol import get_repository_object
+
+from ..cli import BaseTryParser
+from ..push import generate_try_task_config, push_to_try
+from ..tasks import filter_tasks_by_paths, generate_tasks, resolve_tests_by_suite
+
+here = os.path.abspath(os.path.dirname(__file__))
+build = None
+vcs = None
+CHUNK_MAPPING_FILE = None
+CHUNK_MAPPING_TAG_FILE = None
+
+
def setup_globals():
    """Lazily initialize the expensive module globals.

    Sets ``build``, ``vcs`` and the chunk-mapping cache paths. Kept out of
    import time deliberately (see the comment in the original).
    """
    # Avoid incurring expensive computation on import.
    global build, vcs, CHUNK_MAPPING_TAG_FILE, CHUNK_MAPPING_FILE
    build = MozbuildObject.from_environment(cwd=here)
    vcs = get_repository_object(build.topsrcdir)

    # Key the cache location on a hash of the checkout path so multiple
    # checkouts don't clobber each other's chunk mappings.
    root_hash = hashlib.sha256(
        six.ensure_binary(os.path.abspath(build.topsrcdir))
    ).hexdigest()
    cache_dir = os.path.join(get_state_dir(), "cache", root_hash, "chunk_mapping")
    os.makedirs(cache_dir, exist_ok=True)
    CHUNK_MAPPING_FILE = os.path.join(cache_dir, "chunk_mapping.sqlite")
    CHUNK_MAPPING_TAG_FILE = os.path.join(cache_dir, "chunk_mapping_tag.json")
+
+
+# Maps from platform names in the chunk_mapping sqlite database to respective
+# substrings in task names.
+PLATFORM_MAP = {
+ "linux": "test-linux64/opt",
+ "windows": "test-windows10-64/opt",
+}
+
+# List of platform/build type combinations that are included in pushes by |mach try coverage|.
+OPT_TASK_PATTERNS = [
+ "macosx64/opt",
+ "windows10-64/opt",
+ "windows7-32/opt",
+ "linux64/opt",
+]
+
+
class CoverageParser(BaseTryParser):
    """Argument parser for |mach try coverage|.

    Declares no selector-specific arguments; it reuses the shared "push"
    and "task" groups plus the listed try_task_config templates.
    """

    name = "coverage"
    arguments = []
    common_groups = ["push", "task"]
    # Templates merged into the generated try_task_config.
    task_configs = [
        "artifact",
        "env",
        "rebuild",
        "chemspill-prio",
        "disable-pgo",
        "worker-overrides",
    ]
+
+
def read_test_manifests():
    """Uses TestResolver to read all test manifests in the tree.

    Returns a (tests, support_files_map) tuple that describes the tests:
      tests - set of srcdir-relative test file paths
      support_files_map - dict mapping each support file to the list of
          test files that require it
    """
    setup_globals()
    resolver = TestResolver.from_environment(cwd=here)
    finder = FileFinder(build.topsrcdir)
    support_files_map = collections.defaultdict(list)
    tests = set()

    for test in resolver.resolve_tests(build.topsrcdir):
        test_path = test["srcdir_relpath"]
        tests.add(test_path)
        if "support-files" not in test:
            continue

        for pattern in test["support-files"].split():
            # Normalize the pattern to be topsrcdir-relative.
            if pattern.startswith("!/"):
                pattern = pattern[2:]
            elif pattern.startswith("/"):
                pattern = pattern[1:]
            else:
                pattern = os.path.normpath(
                    os.path.join(test["dir_relpath"], pattern)
                )

            if "*" not in pattern:
                # Simple case: a single support file, no glob expansion.
                support_files_map[pattern].append(test_path)
                continue

            for support_file, _ in finder.find(pattern):
                support_files_map[support_file].append(test_path)

    return tests, support_files_map
+
+
# TODO cache the output of this function
# NOTE(review): this call runs at import time (and invokes setup_globals()
# via read_test_manifests), which contradicts setup_globals' stated goal of
# avoiding expensive computation on import -- worth confirming whether it
# can be made lazy.
all_tests, all_support_files = read_test_manifests()
+
+
def download_coverage_mapping(base_revision):
    """Ensure a chunk-mapping database for *base_revision* is cached locally.

    Walks recent mozilla-central pushes (newest first) and downloads the
    first chunk_mapping.tar.xz artifact found, extracting it next to
    CHUNK_MAPPING_FILE and recording its source revision in
    CHUNK_MAPPING_TAG_FILE. Returns early when the cached mapping already
    targets *base_revision*. Raises Exception when the push query errors
    out or no suitable artifact exists.

    NOTE(review): assumes setup_globals() already ran so the CHUNK_MAPPING_*
    paths and ``build`` are set -- confirm callers guarantee that.
    """
    try:
        with open(CHUNK_MAPPING_TAG_FILE) as f:
            tags = json.load(f)
            if tags["target_revision"] == base_revision:
                return
            else:
                print("Base revision changed.")
    except (OSError, ValueError):
        # Missing or corrupt tag file: fall through and (re)download.
        print("Chunk mapping file not found.")

    CHUNK_MAPPING_URL_TEMPLATE = "https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/project.relman.code-coverage.production.cron.{}/artifacts/public/chunk_mapping.tar.xz"  # noqa
    JSON_PUSHES_URL_TEMPLATE = "https://hg.mozilla.org/mozilla-central/json-pushes?version=2&tipsonly=1&startdate={}"  # noqa

    # Get pushes from at most one month ago.
    PUSH_HISTORY_DAYS = 30
    delta = datetime.timedelta(days=PUSH_HISTORY_DAYS)
    start_time = (datetime.datetime.now() - delta).strftime("%Y-%m-%d")
    pushes_url = JSON_PUSHES_URL_TEMPLATE.format(start_time)
    pushes_data = requests.get(pushes_url + "&tochange={}".format(base_revision)).json()
    if "error" in pushes_data:
        if "unknown revision" in pushes_data["error"]:
            print(
                "unknown revision {}, trying with latest mozilla-central".format(
                    base_revision
                )
            )
            # Retry without pinning to the (unknown) base revision.
            pushes_data = requests.get(pushes_url).json()

        if "error" in pushes_data:
            raise Exception(pushes_data["error"])

    pushes = pushes_data["pushes"]

    print("Looking for coverage data. This might take a minute or two.")
    print("Base revision:", base_revision)
    # Walk the pushes newest-first until one has a chunk-mapping artifact.
    for push_id in sorted(pushes.keys())[::-1]:
        rev = pushes[push_id]["changesets"][0]
        url = CHUNK_MAPPING_URL_TEMPLATE.format(rev)
        print("push id: {},\trevision: {}".format(push_id, rev))

        # Cheap existence probe before committing to the full download.
        r = requests.head(url)
        if not r.ok:
            continue

        print("Chunk mapping found, downloading...")
        r = requests.get(url, stream=True)

        CHUNK_MAPPING_ARCHIVE = os.path.join(build.topsrcdir, "chunk_mapping.tar.xz")
        with open(CHUNK_MAPPING_ARCHIVE, "wb") as f:
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)

        subprocess.check_call(
            [
                "tar",
                "-xJf",
                CHUNK_MAPPING_ARCHIVE,
                "-C",
                os.path.dirname(CHUNK_MAPPING_FILE),
            ]
        )
        os.remove(CHUNK_MAPPING_ARCHIVE)
        assert os.path.isfile(CHUNK_MAPPING_FILE)
        # Tag the cache so future runs for this base revision are no-ops.
        with open(CHUNK_MAPPING_TAG_FILE, "w") as f:
            json.dump(
                {
                    "target_revision": base_revision,
                    "chunk_mapping_revision": rev,
                    "download_date": start_time,
                },
                f,
            )
        return
    raise Exception("Could not find suitable coverage data.")
+
+
def is_a_test(cursor, path):
    """Checks the all_tests global and the chunk mapping database to see if a
    given file is a test file.
    """
    if path in all_tests:
        return True

    # Fall back to the chunk-mapping database tables.
    for query in (
        "SELECT COUNT(*) from chunk_to_test WHERE path=?",
        "SELECT COUNT(*) from file_to_test WHERE test=?",
    ):
        cursor.execute(query, (path,))
        if cursor.fetchone()[0]:
            return True

    return False
+
+
+def tests_covering_file(cursor, path):
+ """Returns a set of tests that cover a given source file."""
+ cursor.execute("SELECT test FROM file_to_test WHERE source=?", (path,))
+ return {e[0] for e in cursor.fetchall()}
+
+
+def tests_in_chunk(cursor, platform, chunk):
+ """Returns a set of tests that are contained in a given chunk."""
+ cursor.execute(
+ "SELECT path FROM chunk_to_test WHERE platform=? AND chunk=?", (platform, chunk)
+ )
+ # Because of bug 1480103, some entries in this table contain both a file name and a test name,
+ # separated by a space. With the split, only the file name is kept.
+ return {e[0].split(" ")[0] for e in cursor.fetchall()}
+
+
def chunks_covering_file(cursor, path):
    """Returns a set of (platform, chunk) tuples with the chunks that cover a given source file."""
    rows = cursor.execute(
        "SELECT platform, chunk FROM file_to_chunk WHERE path=?", (path,)
    )
    return set(rows.fetchall())
+
+
def tests_supported_by_file(path):
    """Returns a set of tests that are using the given file as a support-file."""
    # all_support_files is a defaultdict(list); unknown paths yield an empty
    # set (and, as in the original, insert an empty entry as a side effect).
    supported = all_support_files[path]
    return set(supported)
+
+
def find_tests(changed_files):
    """Finds both individual tests and test chunks that should be run to test code changes.
    Argument: a list of file paths relative to the source checkout.

    Returns: a (test_files, test_chunks) tuple with two sets.
    test_files - contains tests that should be run to verify changes to changed_files.
    test_chunks - contains (platform, chunk) tuples with chunks that should be
                  run. These chunks do not support running a subset of the tests (like
                  cppunit or gtest), so the whole chunk must be run.
    """
    test_files = set()
    test_chunks = set()
    files_no_coverage = set()

    with sqlite3.connect(CHUNK_MAPPING_FILE) as conn:
        c = conn.cursor()
        for path in changed_files:
            # If path is a test, add it to the list and continue.
            if is_a_test(c, path):
                test_files.add(path)
                continue

            # Look at the chunk mapping and add all tests that cover this file.
            tests = tests_covering_file(c, path)
            chunks = chunks_covering_file(c, path)
            # If we found tests covering this, then it's not a support-file, so
            # save these and continue.
            if tests or chunks:
                test_files |= tests
                test_chunks |= chunks
                continue

            # Check if the path is a support-file for any test, by querying test manifests.
            tests = tests_supported_by_file(path)
            if tests:
                test_files |= tests
                continue

            # There is no coverage information for this file.
            files_no_coverage.add(path)

    files_covered = set(changed_files) - files_no_coverage
    # Chunk-mapping paths may use Windows separators; normalize to '/'.
    test_files = {s.replace("\\", "/") for s in test_files}

    _print_found_tests(files_covered, files_no_coverage, test_files, test_chunks)

    remaining_test_chunks = set()
    # For all test_chunks, try to find the tests contained by them in the
    # chunk_to_test mapping.
    # NOTE: `c` is still usable here even though the `with` block has exited:
    # sqlite3's connection context manager only commits/rolls back on exit,
    # it does not close the connection.
    for platform, chunk in test_chunks:
        tests = tests_in_chunk(c, platform, chunk)
        if tests:
            for test in tests:
                test_files.add(test.replace("\\", "/"))
        else:
            remaining_test_chunks.add((platform, chunk))

    return test_files, remaining_test_chunks
+
+
def _print_found_tests(files_covered, files_no_coverage, test_files, test_chunks):
    """Print a summary of what will be run to the user's terminal."""
    covered = sorted(files_covered)
    uncovered = sorted(files_no_coverage)
    tests = sorted(test_files)
    chunks = sorted(test_chunks)

    if covered:
        print("Found {} modified source files with test coverage:".format(len(covered)))
        for path in covered:
            print("\t", path)

    if uncovered:
        print("Found {} modified source files with no coverage:".format(len(uncovered)))
        for path in uncovered:
            print("\t", path)

    # Summarize the two extreme cases explicitly.
    if not covered:
        print("No modified source files are covered by tests.")
    elif not uncovered:
        print("All modified source files are covered by tests.")

    if tests:
        print("Running {} individual test files.".format(len(tests)))
    else:
        print("Could not find any individual tests to run.")

    if chunks:
        print("Running {} test chunks.".format(len(chunks)))
        for platform, chunk in chunks:
            print("\t", platform, chunk)
    else:
        print("Could not find any test chunks to run.")
+
+
def filter_tasks_by_chunks(tasks, chunks):
    """Find all tasks that will run the given chunks."""
    matched = set()
    for platform, chunk in chunks:
        # Translate the coverage-mapping platform name to a task-label prefix.
        platform = PLATFORM_MAP[platform]
        suffixes = [chunk, chunk + "-e10s"]

        found = None
        for candidate in tasks.keys():
            if not candidate.startswith(platform):
                continue

            tail = candidate[len(platform) + 1 :]
            if not any(tail.endswith(suffix) for suffix in suffixes):
                continue

            assert (
                found is None
            ), "Only one task should be selected for a given platform-chunk couple ({} - {}), {} and {} were selected".format(  # noqa
                platform, chunk, found, candidate
            )
            found = candidate

        if found is None:
            print("Warning: no task found for chunk", platform, chunk)
        else:
            matched.add(found)

    return list(matched)
+
+
def is_opt_task(task):
    """True if the task runs on a supported platform and build type combination.

    Used to remove -ccov/asan/pgo tasks, along with all /debug tasks.
    """
    for pattern in OPT_TASK_PATTERNS:
        if pattern in task:
            return True
    return False
+
+
def run(
    try_config_params=None,
    full=False,
    parameters=None,
    stage_changes=False,
    dry_run=False,
    message="{msg}",
    closed_tree=False,
    push_to_lando=False,
):
    """Push tasks to try that cover the locally changed files.

    Translates the outgoing files into test files and test chunks via the
    downloaded coverage mapping, selects the matching opt tasks, and pushes
    them to try. Returns 1 when nothing can be scheduled, otherwise the
    result of push_to_try().
    """
    # Fix: the original signature used a mutable default argument
    # (try_config_params={}) which is mutated below via
    # setdefault(...).update(...), leaking configuration between calls.
    # Default to None (matching the other selectors in this package) and
    # build a fresh dict per call.
    if try_config_params is None:
        try_config_params = {}

    setup_globals()
    download_coverage_mapping(vcs.base_ref)

    changed_sources = vcs.get_outgoing_files()
    test_files, test_chunks = find_tests(changed_sources)
    if not test_files and not test_chunks:
        print("ERROR Could not find any tests or chunks to run.")
        return 1

    tg = generate_tasks(parameters, full)
    all_tasks = tg.tasks

    tasks_by_chunks = filter_tasks_by_chunks(all_tasks, test_chunks)
    tasks_by_path = filter_tasks_by_paths(all_tasks, test_files)
    # Keep only opt-build tasks from the union of both selections.
    tasks = filter(is_opt_task, set(tasks_by_path) | set(tasks_by_chunks))
    tasks = list(tasks)

    if not tasks:
        print("ERROR Did not find any matching tasks after filtering.")
        return 1
    test_count_message = (
        "{test_count} test file{test_plural} that "
        + "cover{test_singular} these changes "
        + "({task_count} task{task_plural} to be scheduled)"
    ).format(
        test_count=len(test_files),
        test_plural="" if len(test_files) == 1 else "s",
        test_singular="s" if len(test_files) == 1 else "",
        task_count=len(tasks),
        task_plural="" if len(tasks) == 1 else "s",
    )
    print("Found " + test_count_message)

    # Set the test paths to be run by setting MOZHARNESS_TEST_PATHS.
    path_env = {
        "MOZHARNESS_TEST_PATHS": six.ensure_text(
            json.dumps(resolve_tests_by_suite(test_files))
        )
    }
    try_config_params.setdefault("try_task_config", {}).setdefault("env", {}).update(
        path_env
    )

    # Build commit message.
    msg = "try coverage - " + test_count_message
    return push_to_try(
        "coverage",
        message.format(msg=msg),
        try_task_config=generate_try_task_config("coverage", tasks, try_config_params),
        stage_changes=stage_changes,
        dry_run=dry_run,
        closed_tree=closed_tree,
        push_to_lando=push_to_lando,
    )
diff --git a/tools/tryselect/selectors/empty.py b/tools/tryselect/selectors/empty.py
new file mode 100644
index 0000000000..15a48fa5d2
--- /dev/null
+++ b/tools/tryselect/selectors/empty.py
@@ -0,0 +1,43 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from ..cli import BaseTryParser
+from ..push import generate_try_task_config, push_to_try
+
+
class EmptyParser(BaseTryParser):
    """Argument parser for the `empty` try selector.

    The empty selector pushes to try without selecting any tasks (see
    `run()` below, which pushes an empty task list); jobs are expected to
    be added afterwards via Treeherder's "Add New Jobs".
    """

    name = "empty"
    common_groups = ["push"]
    # try_task_config options this selector accepts.
    task_configs = [
        "artifact",
        "browsertime",
        "chemspill-prio",
        "disable-pgo",
        "env",
        "gecko-profile",
        "pernosco",
        "routes",
        "worker-overrides",
    ]
+
+
def run(
    message="{msg}",
    try_config_params=None,
    stage_changes=False,
    dry_run=False,
    closed_tree=False,
    push_to_lando=False,
):
    """Push to try with no tasks selected; jobs are added later via Treeherder."""
    commit_msg = message.format(
        msg='No try selector specified, use "Add New Jobs" to select tasks.'
    )
    task_config = generate_try_task_config("empty", [], params=try_config_params)
    return push_to_try(
        "empty",
        commit_msg,
        try_task_config=task_config,
        stage_changes=stage_changes,
        dry_run=dry_run,
        closed_tree=closed_tree,
        push_to_lando=push_to_lando,
    )
diff --git a/tools/tryselect/selectors/fuzzy.py b/tools/tryselect/selectors/fuzzy.py
new file mode 100644
index 0000000000..7a9bccc4b7
--- /dev/null
+++ b/tools/tryselect/selectors/fuzzy.py
@@ -0,0 +1,284 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import os
+import sys
+from pathlib import PurePath
+
+from gecko_taskgraph.target_tasks import filter_by_uncommon_try_tasks
+from mach.util import get_state_dir
+
+from ..cli import BaseTryParser
+from ..push import check_working_directory, generate_try_task_config, push_to_try
+from ..tasks import filter_tasks_by_paths, generate_tasks
+from ..util.fzf import (
+ FZF_NOT_FOUND,
+ PREVIEW_SCRIPT,
+ format_header,
+ fzf_bootstrap,
+ fzf_shortcuts,
+ run_fzf,
+)
+from ..util.manage_estimates import (
+ download_task_history_data,
+ make_trimmed_taskgraph_cache,
+)
+
+
class FuzzyParser(BaseTryParser):
    """Argument parser for the `fuzzy` try selector.

    Tasks are selected either interactively through the fzf fuzzy finder
    or non-interactively with one or more -q/--query strings (see `run()`
    below).
    """

    name = "fuzzy"
    arguments = [
        [
            ["-q", "--query"],
            {
                "metavar": "STR",
                "action": "append",
                "default": [],
                "help": "Use the given query instead of entering the selection "
                "interface. Equivalent to typing <query><ctrl-a><enter> "
                "from the interface. Specifying multiple times schedules "
                "the union of computed tasks.",
            },
        ],
        [
            ["-i", "--interactive"],
            {
                "action": "store_true",
                "default": False,
                "help": "Force running fzf interactively even when using presets or "
                "queries with -q/--query.",
            },
        ],
        [
            ["-x", "--and"],
            {
                "dest": "intersection",
                "action": "store_true",
                "default": False,
                "help": "When specifying queries on the command line with -q/--query, "
                "use the intersection of tasks rather than the union. This is "
                "especially useful for post filtering presets.",
            },
        ],
        [
            ["-e", "--exact"],
            {
                "action": "store_true",
                "default": False,
                "help": "Enable exact match mode. Terms will use an exact match "
                "by default, and terms prefixed with ' will become fuzzy.",
            },
        ],
        [
            ["-u", "--update"],
            {
                "action": "store_true",
                "default": False,
                "help": "Update fzf before running.",
            },
        ],
        [
            ["-s", "--show-estimates"],
            {
                "action": "store_true",
                "default": False,
                "help": "Show task duration estimates.",
            },
        ],
        [
            ["--disable-target-task-filter"],
            {
                "action": "store_true",
                "default": False,
                "help": "Some tasks run on mozilla-central but are filtered out "
                "of the default list due to resource constraints. This flag "
                "disables this filtering.",
            },
        ],
        [
            ["--show-chunk-numbers"],
            {
                "action": "store_true",
                "default": False,
                "help": "Chunk numbers are hidden to simplify the selection. This flag "
                "makes them appear again.",
            },
        ],
    ]
    common_groups = ["push", "task", "preset"]
    # try_task_config options this selector accepts.
    task_configs = [
        "artifact",
        "browsertime",
        "chemspill-prio",
        "disable-pgo",
        "env",
        "existing-tasks",
        "gecko-profile",
        "new-test-config",
        "path",
        "pernosco",
        "rebuild",
        "routes",
        "worker-overrides",
    ]
+
+
def run(
    update=False,
    query=None,
    intersect_query=None,
    full=False,
    parameters=None,
    try_config_params=None,
    save_query=False,
    stage_changes=False,
    dry_run=False,
    message="{msg}",
    test_paths=None,
    exact=False,
    closed_tree=False,
    show_estimates=False,
    disable_target_task_filter=False,
    push_to_lando=False,
    show_chunk_numbers=False,
    new_test_config=False,
):
    """Select tasks with fzf (interactively or via queries) and push to try.

    Returns 1 on failure, the list of query strings when save_query is set,
    None when nothing was selected, otherwise the result of push_to_try().
    """
    fzf = fzf_bootstrap(update)

    if not fzf:
        print(FZF_NOT_FOUND)
        return 1

    push = not stage_changes and not dry_run
    check_working_directory(push)
    tg = generate_tasks(
        parameters, full=full, disable_target_task_filter=disable_target_task_filter
    )
    all_tasks = tg.tasks

    # Graph cache created by generate_tasks; recreate the path to that file.
    cache_dir = os.path.join(
        get_state_dir(specific_to_topsrcdir=True), "cache", "taskgraph"
    )
    if full:
        graph_cache = os.path.join(cache_dir, "full_task_graph")
        dep_cache = os.path.join(cache_dir, "full_task_dependencies")
        target_set = os.path.join(cache_dir, "full_task_set")
    else:
        graph_cache = os.path.join(cache_dir, "target_task_graph")
        dep_cache = os.path.join(cache_dir, "target_task_dependencies")
        target_set = os.path.join(cache_dir, "target_task_set")

    if show_estimates:
        download_task_history_data(cache_dir=cache_dir)
        make_trimmed_taskgraph_cache(graph_cache, dep_cache, target_file=target_set)

    # Drop uncommon try tasks unless the user explicitly asked for them.
    if not full and not disable_target_task_filter:
        all_tasks = {
            task_name: task
            for task_name, task in all_tasks.items()
            if filter_by_uncommon_try_tasks(task_name)
        }

    if test_paths:
        all_tasks = filter_tasks_by_paths(all_tasks, test_paths)
        if not all_tasks:
            return 1

    key_shortcuts = [k + ":" + v for k, v in fzf_shortcuts.items()]
    base_cmd = [
        fzf,
        "-m",
        "--bind",
        ",".join(key_shortcuts),
        "--header",
        format_header(),
        "--preview-window=right:30%",
        "--print-query",
    ]

    if show_estimates:
        base_cmd.extend(
            [
                "--preview",
                '{} {} -g {} -s -c {} -t "{{+f}}"'.format(
                    str(PurePath(sys.executable)), PREVIEW_SCRIPT, dep_cache, cache_dir
                ),
            ]
        )
    else:
        base_cmd.extend(
            [
                "--preview",
                '{} {} -t "{{+f}}"'.format(
                    str(PurePath(sys.executable)), PREVIEW_SCRIPT
                ),
            ]
        )

    if exact:
        base_cmd.append("--exact")

    selected = set()
    queries = []

    def get_tasks(query_arg=None, candidate_tasks=all_tasks):
        # Run fzf over candidate_tasks; "INTERACTIVE" forces the interactive
        # interface instead of a -f filter run. Appends the fzf query string
        # to `queries` as a side effect.
        cmd = base_cmd[:]
        if query_arg and query_arg != "INTERACTIVE":
            cmd.extend(["-f", query_arg])

        if not show_chunk_numbers:
            fzf_tasks = set(task.chunk_pattern for task in candidate_tasks.values())
        else:
            fzf_tasks = set(candidate_tasks.keys())

        query_str, tasks = run_fzf(cmd, sorted(fzf_tasks))
        queries.append(query_str)
        return set(tasks)

    # Union of all -q/--query selections.
    for q in query or []:
        selected |= get_tasks(q)

    # Intersection queries (-x/--and) narrow the current selection.
    for q in intersect_query or []:
        if not selected:
            selected |= get_tasks(q)
        else:
            selected &= get_tasks(
                q,
                {
                    task_name: task
                    for task_name, task in all_tasks.items()
                    if task_name in selected or task.chunk_pattern in selected
                },
            )

    # No queries at all -> fully interactive selection.
    if not queries:
        selected = get_tasks()

    if not selected:
        print("no tasks selected")
        return

    if save_query:
        return queries

    # build commit message
    msg = "Fuzzy"
    args = ["query={}".format(q) for q in queries]
    if test_paths:
        args.append("paths={}".format(":".join(test_paths)))
    if args:
        msg = "{} {}".format(msg, "&".join(args))
    return push_to_try(
        "fuzzy",
        message.format(msg=msg),
        try_task_config=generate_try_task_config(
            "fuzzy", selected, params=try_config_params
        ),
        stage_changes=stage_changes,
        dry_run=dry_run,
        closed_tree=closed_tree,
        push_to_lando=push_to_lando,
    )
diff --git a/tools/tryselect/selectors/perf.py b/tools/tryselect/selectors/perf.py
new file mode 100644
index 0000000000..3c59e5949c
--- /dev/null
+++ b/tools/tryselect/selectors/perf.py
@@ -0,0 +1,1511 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import copy
+import itertools
+import json
+import os
+import pathlib
+import shutil
+import subprocess
+from contextlib import redirect_stdout
+from datetime import datetime, timedelta
+
+import requests
+from mach.util import get_state_dir
+from mozbuild.base import MozbuildObject
+from mozversioncontrol import get_repository_object
+
+from ..push import generate_try_task_config, push_to_try
+from ..util.fzf import (
+ FZF_NOT_FOUND,
+ build_base_cmd,
+ fzf_bootstrap,
+ run_fzf,
+ setup_tasks_for_fzf,
+)
+from .compare import CompareParser
+from .perfselector.classification import (
+ Apps,
+ ClassificationProvider,
+ Platforms,
+ Suites,
+ Variants,
+)
+from .perfselector.perfcomparators import get_comparator
+from .perfselector.utils import LogProcessor
+
here = os.path.abspath(os.path.dirname(__file__))
build = MozbuildObject.from_environment(cwd=here)
# Cache of previously pushed try revisions (see --clear-cache option below).
cache_file = pathlib.Path(get_state_dir(), "try_perf_revision_cache.json")
# Script used as the fzf preview pane for perf selections.
PREVIEW_SCRIPT = pathlib.Path(
    build.topsrcdir, "tools/tryselect/selectors/perf_preview.py"
)

PERFHERDER_BASE_URL = (
    "https://treeherder.mozilla.org/perfherder/"
    "compare?originalProject=try&originalRevision=%s&newProject=try&newRevision=%s"
)
PERFCOMPARE_BASE_URL = "https://beta--mozilla-perfcompare.netlify.app/compare-results?baseRev=%s&newRev=%s&baseRepo=try&newRepo=try"
TREEHERDER_TRY_BASE_URL = "https://treeherder.mozilla.org/jobs?repo=try&revision=%s"
TREEHERDER_ALERT_TASKS_URL = (
    "https://treeherder.mozilla.org/api/performance/alertsummary-tasks/?id=%s"
)

# Prevent users from running more than MAX_PERF_TASKS tests at once. It's
# possible, but it's more likely that a query is broken and is selecting far
# too much. NOTE(review): the comment previously said 300 while the constant
# is 600; the constant below is authoritative.
MAX_PERF_TASKS = 600

# Name of the base category with no variants applied to it
BASE_CATEGORY_NAME = "base"

# Add environment variable for firefox-android integration.
# This will let us find the APK to upload automatically. However,
# the following option will need to be supplied:
# --browsertime-upload-apk firefox-android
# OR --mozperftest-upload-apk firefox-android
MOZ_FIREFOX_ANDROID_APK_OUTPUT = os.getenv("MOZ_FIREFOX_ANDROID_APK_OUTPUT", None)
+
+
class InvalidCategoryException(Exception):
    """Thrown when a category is found to be invalid.

    See the `PerfParser.run_category_checks()` method for more info.
    """
+
+
class APKNotFound(Exception):
    """Raised when a user-supplied path to an APK is invalid."""
+
+
class InvalidRegressionDetectorQuery(Exception):
    """Thrown when the detector query produces anything other than 1 task."""
+
+
class PerfParser(CompareParser):
    """Selector for `mach try perf`.

    Builds a matrix of performance-test categories from the classification
    data in `perfselector.classification` and lets the user pick categories
    (or raw tasks with --show-all) to push to try.
    """

    name = "perf"
    common_groups = ["push", "task"]
    # try_task_config options this selector accepts.
    task_configs = [
        "artifact",
        "browsertime",
        "disable-pgo",
        "env",
        "gecko-profile",
        "path",
        "rebuild",
    ]

    # Classification data shared by all the matrix-building helpers below.
    provider = ClassificationProvider()
    platforms = provider.platforms
    apps = provider.apps
    variants = provider.variants
    suites = provider.suites
    categories = provider.categories

    arguments = [
        [
            ["--show-all"],
            {
                "action": "store_true",
                "default": False,
                "help": "Show all available tasks.",
            },
        ],
        [
            ["--android"],
            {
                "action": "store_true",
                "default": False,
                "help": "Show android test categories (disabled by default).",
            },
        ],
        [
            # Bug 1866047 - Remove once monorepo changes are complete
            ["--fenix"],
            {
                "action": "store_true",
                "default": False,
                "help": "Include Fenix in tasks to run (disabled by default). Must "
                "be used in conjunction with --android. Fenix isn't built on mozilla-central "
                "so we pull the APK being tested from the firefox-android project. This "
                "means that the fenix APK being tested in the two pushes is the same, and "
                "any local changes made won't impact it.",
            },
        ],
        [
            ["--chrome"],
            {
                "action": "store_true",
                "default": False,
                "help": "Show tests available for Chrome-based browsers "
                "(disabled by default).",
            },
        ],
        [
            ["--custom-car"],
            {
                "action": "store_true",
                "default": False,
                "help": "Show tests available for Custom Chromium-as-Release (disabled by default). "
                "Use with --android flag to select Custom CaR android tests (cstm-car-m)",
            },
        ],
        [
            ["--safari"],
            {
                "action": "store_true",
                "default": False,
                "help": "Show tests available for Safari (disabled by default).",
            },
        ],
        [
            ["--live-sites"],
            {
                "action": "store_true",
                "default": False,
                "help": "Run tasks with live sites (if possible). "
                "You can also use the `live-sites` variant.",
            },
        ],
        [
            ["--profile"],
            {
                "action": "store_true",
                "default": False,
                "help": "Run tasks with profiling (if possible). "
                "You can also use the `profiling` variant.",
            },
        ],
        [
            ["--single-run"],
            {
                "action": "store_true",
                "default": False,
                "help": "Run tasks without a comparison",
            },
        ],
        [
            ["-q", "--query"],
            {
                "type": str,
                "default": None,
                "help": "Query to run in either the perf-category selector, "
                "or the fuzzy selector if --show-all is provided.",
            },
        ],
        [
            # Bug 1866047 - Remove once monorepo changes are complete
            ["--browsertime-upload-apk"],
            {
                "type": str,
                "default": None,
                "help": "Path to an APK to upload. Note that this "
                "will replace the APK installed in all Android Performance "
                "tests. If the Activity, Binary Path, or Intents required "
                "change at all relative to the existing GeckoView, and Fenix "
                "tasks, then you will need to make fixes in the associated "
                "taskcluster files (e.g. taskcluster/ci/test/browsertime-mobile.yml). "
                "Alternatively, set MOZ_FIREFOX_ANDROID_APK_OUTPUT to a path to "
                "an APK, and then run the command with --browsertime-upload-apk "
                "firefox-android. This option will only copy the APK for browsertime, see "
                "--mozperftest-upload-apk to upload APKs for startup tests.",
            },
        ],
        [
            # Bug 1866047 - Remove once monorepo changes are complete
            ["--mozperftest-upload-apk"],
            {
                "type": str,
                "default": None,
                "help": "See --browsertime-upload-apk. This option does the same "
                "thing except it's for mozperftest tests such as the startup ones. "
                "Note that those tests only exist through --show-all, as they "
                "aren't contained in any existing categories.",
            },
        ],
        [
            ["--detect-changes"],
            {
                "action": "store_true",
                "default": False,
                "help": "Adds a task that detects performance changes using MWU.",
            },
        ],
        [
            ["--comparator"],
            {
                "type": str,
                "default": "BasePerfComparator",
                "help": "Either a path to a file to setup a custom comparison, "
                "or a builtin name. See the Firefox source docs for mach try perf for "
                "examples of how to build your own, along with the interface.",
            },
        ],
        [
            ["--comparator-args"],
            {
                "nargs": "*",
                "type": str,
                "default": [],
                "dest": "comparator_args",
                "help": "Arguments provided to the base, and new revision setup stages "
                "of the comparator.",
                "metavar": "ARG=VALUE",
            },
        ],
        [
            ["--variants"],
            {
                "nargs": "*",
                "type": str,
                "default": [BASE_CATEGORY_NAME],
                "dest": "requested_variants",
                "choices": list(variants.keys()),
                "help": "Select variants to display in the selector from: "
                + ", ".join(list(variants.keys())),
                "metavar": "",
            },
        ],
        [
            ["--platforms"],
            {
                "nargs": "*",
                "type": str,
                "default": [],
                "dest": "requested_platforms",
                "choices": list(platforms.keys()),
                "help": "Select specific platforms to target. Android only "
                "available with --android. Available platforms: "
                + ", ".join(list(platforms.keys())),
                "metavar": "",
            },
        ],
        [
            ["--apps"],
            {
                "nargs": "*",
                "type": str,
                "default": [],
                "dest": "requested_apps",
                "choices": list(apps.keys()),
                "help": "Select specific applications to target from: "
                + ", ".join(list(apps.keys())),
                "metavar": "",
            },
        ],
        [
            ["--clear-cache"],
            {
                "action": "store_true",
                "default": False,
                "help": "Deletes the try_perf_revision_cache file",
            },
        ],
        [
            ["--alert"],
            {
                "type": str,
                "default": None,
                "help": "Run tests that produced this alert summary.",
            },
        ],
        [
            ["--extra-args"],
            {
                "nargs": "*",
                "type": str,
                "default": [],
                "dest": "extra_args",
                "help": "Set the extra args "
                "(e.x, --extra-args verbose post-startup-delay=1)",
                "metavar": "",
            },
        ],
        [
            ["--perfcompare-beta"],
            {
                "action": "store_true",
                "default": False,
                "help": "Use PerfCompare Beta instead of CompareView.",
            },
        ],
    ]
+
    def get_tasks(base_cmd, queries, query_arg=None, candidate_tasks=None):
        """Run fzf over ``candidate_tasks`` and return the chosen items as a set.

        The query string fzf reports is appended to ``queries`` (mutated in
        place) so callers can reconstruct the selection later.

        NOTE(review): defined without ``self`` and invoked as
        ``PerfParser.get_tasks(...)`` — it is used as a plain function
        reached through the class namespace.
        """
        cmd = base_cmd[:]
        if query_arg:
            # Non-interactive filter mode.
            cmd.extend(["-f", query_arg])

        query_str, tasks = run_fzf(cmd, sorted(candidate_tasks))
        queries.append(query_str)
        return set(tasks)
+
    def get_perf_tasks(base_cmd, all_tg_tasks, perf_categories, query=None):
        """Let the user pick categories, then expand them into concrete tasks.

        Returns a (selected_tasks, selected_categories, queries) tuple:
        the union of tasks from all chosen categories, the category names
        the user picked, and every fzf query string executed along the way.
        """
        # Convert the categories to tasks
        selected_tasks = set()
        queries = []

        selected_categories = PerfParser.get_tasks(
            base_cmd, queries, query, perf_categories
        )

        for category, category_info in perf_categories.items():
            if category not in selected_categories:
                continue
            print("Gathering tasks for %s category" % category)

            category_tasks = set()
            for suite in PerfParser.suites:
                # Either perform a query to get the tasks (recommended), or
                # use a hardcoded task list
                suite_queries = category_info["queries"].get(suite)

                category_suite_tasks = set()
                if suite_queries:
                    print(
                        "Executing %s queries: %s" % (suite, ", ".join(suite_queries))
                    )

                    for perf_query in suite_queries:
                        if not category_suite_tasks:
                            # Get all tasks selected with the first query
                            category_suite_tasks |= PerfParser.get_tasks(
                                base_cmd, queries, perf_query, all_tg_tasks
                            )
                        else:
                            # Keep only those tasks that matched in all previous queries
                            category_suite_tasks &= PerfParser.get_tasks(
                                base_cmd, queries, perf_query, category_suite_tasks
                            )

                        if len(category_suite_tasks) == 0:
                            print("Failed to find any tasks for query: %s" % perf_query)
                            break

                    if category_suite_tasks:
                        category_tasks |= category_suite_tasks

            if category_info["tasks"]:
                # A hardcoded task list overrides anything the queries found.
                category_tasks = set(category_info["tasks"]) & all_tg_tasks
                if category_tasks != set(category_info["tasks"]):
                    print(
                        "Some expected tasks could not be found: %s"
                        % ", ".join(category_info["tasks"] - category_tasks)
                    )

            if not category_tasks:
                print("Could not find any tasks for category %s" % category)
            else:
                # Add the new tasks to the currently selected ones
                selected_tasks |= category_tasks

        return selected_tasks, selected_categories, queries
+
+ def _check_app(app, target):
+ """Checks if the app exists in the target."""
+ if app.value in target:
+ return True
+ return False
+
+ def _check_platform(platform, target):
+ """Checks if the platform, or it's type exists in the target."""
+ if (
+ platform.value in target
+ or PerfParser.platforms[platform.value]["platform"] in target
+ ):
+ return True
+ return False
+
+ def _build_initial_decision_matrix():
+ # Build first stage of matrix APPS X PLATFORMS
+ initial_decision_matrix = []
+ for platform in Platforms:
+ platform_row = []
+ for app in Apps:
+ if PerfParser._check_platform(
+ platform, PerfParser.apps[app.value]["platforms"]
+ ):
+ # This app can run on this platform
+ platform_row.append(True)
+ else:
+ platform_row.append(False)
+ initial_decision_matrix.append(platform_row)
+ return initial_decision_matrix
+
    def _build_intermediate_decision_matrix():
        # Second stage of matrix building applies the 2D matrix found above
        # to each suite, yielding one PLATFORMS x APPS grid per suite.
        initial_decision_matrix = PerfParser._build_initial_decision_matrix()

        intermediate_decision_matrix = []
        for suite in Suites:
            suite_matrix = copy.deepcopy(initial_decision_matrix)
            suite_info = PerfParser.suites[suite.value]

            # Restrict the platforms for this suite now
            for platform in Platforms:
                for app in Apps:
                    runnable = False
                    if PerfParser._check_app(
                        app, suite_info["apps"]
                    ) and PerfParser._check_platform(platform, suite_info["platforms"]):
                        runnable = True
                    # AND with the existing cell so a suite can only further
                    # restrict the first-stage matrix, never re-enable cells.
                    # (Enum members are used directly as list indices here —
                    # assumes int-valued enums; TODO confirm.)
                    suite_matrix[platform][app] = (
                        runnable and suite_matrix[platform][app]
                    )

            intermediate_decision_matrix.append(suite_matrix)
        return intermediate_decision_matrix
+
    def _build_variants_matrix():
        # Third stage is expanding the intermediate matrix
        # across all the variants (non-expanded). Start with the
        # intermediate matrix in the list since it provides our
        # base case with no variants
        intermediate_decision_matrix = PerfParser._build_intermediate_decision_matrix()

        variants_matrix = []
        for variant in Variants:
            variant_matrix = copy.deepcopy(intermediate_decision_matrix)

            for suite in Suites:
                if variant.value in PerfParser.suites[suite.value]["variants"]:
                    # Allow the variant through and set its platforms and apps
                    # based on how it sets it -> only restrict, don't make
                    # allowances here
                    for platform in Platforms:
                        for app in Apps:
                            if not (
                                PerfParser._check_platform(
                                    platform,
                                    PerfParser.variants[variant.value]["platforms"],
                                )
                                and PerfParser._check_app(
                                    app, PerfParser.variants[variant.value]["apps"]
                                )
                            ):
                                variant_matrix[suite][platform][app] = False
                else:
                    # This variant matrix needs to be completely False
                    variant_matrix[suite] = [
                        [False] * len(platform_row)
                        for platform_row in variant_matrix[suite]
                    ]

            variants_matrix.append(variant_matrix)

        # Also return the intermediate matrix: it serves as the "base"
        # (variant-free) case for the final stage.
        return variants_matrix, intermediate_decision_matrix
+
    def _build_decision_matrix():
        """Build the decision matrix.

        This method builds the decision matrix that is used
        to determine what categories will be shown to the user.
        This matrix has the following form (as lists):
        - Variants
        - Suites
        - Platforms
        - Apps

        Each element in the 4D Matrix is either True or False and tells us
        whether the particular combination is "runnable" according to
        the given specifications. This does not mean that the combination
        exists, just that it's fully configured in this selector.

        The ("base",) variant combination found in the matrix has
        no variants applied to it. At this stage, it's a catch-all for those
        categories. The query it uses is reduced further in later stages.
        """
        # Get the variants matrix (see methods above) and the intermediate decision
        # matrix to act as the base category
        (
            variants_matrix,
            intermediate_decision_matrix,
        ) = PerfParser._build_variants_matrix()

        # Get all possible combinations of the variants
        expanded_variants = [
            variant_combination
            for set_size in range(len(Variants) + 1)
            for variant_combination in itertools.combinations(list(Variants), set_size)
        ]

        # Final stage combines the intermediate matrix with the
        # expanded variants and leaves a "base" category which
        # doesn't have any variant specifications (it catches them all)
        decision_matrix = {(BASE_CATEGORY_NAME,): intermediate_decision_matrix}
        for variant_combination in expanded_variants:
            expanded_variant_matrix = []

            # Perform an AND operation on the combination of variants
            # to determine where this particular combination can run
            for suite in Suites:
                suite_matrix = []
                suite_variants = PerfParser.suites[suite.value]["variants"]

                # Disable the variant combination if none of them
                # are found in the suite
                disable_variant = not any(
                    [variant.value in suite_variants for variant in variant_combination]
                )

                for platform in Platforms:
                    if disable_variant:
                        platform_row = [False for _ in Apps]
                    else:
                        # A cell is runnable only if every variant in the
                        # combination (that the suite knows about) allows it.
                        platform_row = [
                            all(
                                variants_matrix[variant][suite][platform][app]
                                for variant in variant_combination
                                if variant.value in suite_variants
                            )
                            for app in Apps
                        ]
                    suite_matrix.append(platform_row)

                expanded_variant_matrix.append(suite_matrix)
            decision_matrix[variant_combination] = expanded_variant_matrix

        return decision_matrix
+
+ def _skip_with_restrictions(value, restrictions, requested=[]):
+ """Determines if we should skip an app, platform, or variant.
+
+ We add base here since it's the base category variant that
+ would always be displayed and it won't affect the app, or
+ platform selections.
+ """
+ if restrictions is not None and value not in restrictions + [
+ BASE_CATEGORY_NAME
+ ]:
+ return True
+ if requested and value not in requested + [BASE_CATEGORY_NAME]:
+ return True
+ return False
+
    def build_category_matrix(**kwargs):
        """Build a decision matrix for all the categories.

        It will have the form:
        - Category
        - Variants
        - ...

        Keyword arguments read here: ``requested_variants``,
        ``requested_platforms``, ``requested_apps`` (explicit user
        selections); all kwargs are also forwarded to any per-platform and
        per-app "restriction" callables.
        """
        requested_variants = kwargs.get("requested_variants", [BASE_CATEGORY_NAME])
        requested_platforms = kwargs.get("requested_platforms", [])
        requested_apps = kwargs.get("requested_apps", [])

        # Build the base decision matrix
        decision_matrix = PerfParser._build_decision_matrix()

        # Here, the variants are further restricted by the category settings
        # using the `_skip_with_restrictions` method. This part also handles
        # explicitly requested platforms, apps, and variants.
        category_decision_matrix = {}
        for category, category_info in PerfParser.categories.items():
            category_matrix = copy.deepcopy(decision_matrix)

            for variant_combination, variant_matrix in decision_matrix.items():
                variant_runnable = True
                if BASE_CATEGORY_NAME not in variant_combination:
                    # Make sure that all portions of the variant combination
                    # target at least one of the suites in the category
                    tmp_variant_combination = set(
                        [v.value for v in variant_combination]
                    )
                    for suite in Suites:
                        if suite.value not in category_info["suites"]:
                            continue
                        tmp_variant_combination = tmp_variant_combination - set(
                            [
                                variant.value
                                for variant in variant_combination
                                if variant.value
                                in PerfParser.suites[suite.value]["variants"]
                            ]
                        )
                    if tmp_variant_combination:
                        # If it's not empty, then some variants
                        # are non-existent
                        variant_runnable = False

                for suite, platform, app in itertools.product(Suites, Platforms, Apps):
                    runnable = variant_runnable

                    # Disable this combination if there are any variant
                    # restrictions for this suite, or if the user didn't request it
                    # (and did request some variants). The same is done below with
                    # the apps, and platforms.
                    if any(
                        PerfParser._skip_with_restrictions(
                            variant.value if not isinstance(variant, str) else variant,
                            category_info.get("variant-restrictions", {}).get(
                                suite.value, None
                            ),
                            requested_variants,
                        )
                        for variant in variant_combination
                    ):
                        runnable = False

                    if PerfParser._skip_with_restrictions(
                        platform.value,
                        category_info.get("platform-restrictions", None),
                        requested_platforms,
                    ):
                        runnable = False

                    # If the platform is restricted, check if the appropriate
                    # flags were provided (or appropriate conditions hit). We do
                    # the same thing for apps below.
                    if (
                        PerfParser.platforms[platform.value].get("restriction", None)
                        is not None
                    ):
                        runnable = runnable and PerfParser.platforms[platform.value][
                            "restriction"
                        ](**kwargs)

                    if PerfParser._skip_with_restrictions(
                        app.value,
                        category_info.get("app-restrictions", {}).get(
                            suite.value, None
                        ),
                        requested_apps,
                    ):
                        runnable = False
                    if PerfParser.apps[app.value].get("restriction", None) is not None:
                        runnable = runnable and PerfParser.apps[app.value][
                            "restriction"
                        ](**kwargs)

                    # Final cell value: category restrictions AND the
                    # underlying variant matrix.
                    category_matrix[variant_combination][suite][platform][app] = (
                        runnable and variant_matrix[suite][platform][app]
                    )

            category_decision_matrix[category] = category_matrix

        return category_decision_matrix
+
+ def _enable_restriction(restriction, **kwargs):
+ """Used to simplify checking a restriction."""
+ return restriction is not None and restriction(**kwargs)
+
+ def _category_suites(category_info):
+ """Returns all the suite enum entries in this category."""
+ return [suite for suite in Suites if suite.value in category_info["suites"]]
+
+ def _add_variant_queries(
+ category_info, variant_matrix, variant_combination, platform, queries, app=None
+ ):
+ """Used to add the variant queries to various categories."""
+ for variant in variant_combination:
+ for suite in PerfParser._category_suites(category_info):
+ if (app is not None and variant_matrix[suite][platform][app]) or (
+ app is None and any(variant_matrix[suite][platform])
+ ):
+ queries[suite.value].append(
+ PerfParser.variants[variant.value]["query"]
+ )
+
    def _build_categories(category, category_info, category_matrix):
        """Builds the categories to display.

        Expands one category's decision matrix into concrete displayable
        entries: one per runnable platform, plus one per runnable
        platform/app pair. Non-base entries (real variant combinations) get
        the joined variant names appended to their display name.

        :param category str: The base name of the category.
        :param category_info dict: The category definition (query, tasks,
            suites, description, ...).
        :param category_matrix dict: Maps a variant combination to a
            suite/platform/app truth table of runnability.
        :return dict: Mapping of display names to built category info.
        """
        categories = {}

        for variant_combination, variant_matrix in category_matrix.items():
            base_category = BASE_CATEGORY_NAME in variant_combination

            for platform in Platforms:
                if not any(
                    any(variant_matrix[suite][platform])
                    for suite in PerfParser._category_suites(category_info)
                ):
                    # There are no apps available on this platform in either
                    # of the requested suites
                    continue

                # This code has the effect of restricting all suites to
                # a platform. This means categories with mixed suites will
                # be available even if some suites will no longer run
                # given this platform constraint. The reasoning for this is that
                # it's unexpected to receive desktop tests when you explicitly
                # request android.
                platform_queries = {
                    suite: (
                        category_info["query"][suite]
                        + [PerfParser.platforms[platform.value]["query"]]
                    )
                    for suite in category_info["suites"]
                }

                platform_category_name = f"{category} {platform.value}"
                platform_category_info = {
                    "queries": platform_queries,
                    "tasks": category_info["tasks"],
                    "platform": platform,
                    "app": None,
                    "suites": category_info["suites"],
                    "base-category": base_category,
                    "base-category-name": category,
                    "description": category_info["description"],
                }
                for app in Apps:
                    if not any(
                        variant_matrix[suite][platform][app]
                        for suite in PerfParser._category_suites(category_info)
                    ):
                        # This app is not available on the given platform
                        # for any of the suites
                        continue

                    # Add the queries for the app for any suites that need it and
                    # the variant queries if needed.
                    # Deep copy so the app entry doesn't share query lists
                    # with the platform-level entry built above.
                    app_queries = copy.deepcopy(platform_queries)
                    for suite in Suites:
                        if suite.value not in app_queries:
                            continue
                        app_queries[suite.value].append(
                            PerfParser.apps[app.value]["query"]
                        )
                    if not base_category:
                        PerfParser._add_variant_queries(
                            category_info,
                            variant_matrix,
                            variant_combination,
                            platform,
                            app_queries,
                            app=app,
                        )

                    app_category_name = f"{platform_category_name} {app.value}"
                    if not base_category:
                        # Non-base entries carry the variant combination in
                        # their display name (e.g. "... fission+live-sites")
                        app_category_name = (
                            f"{app_category_name} "
                            f"{'+'.join([v.value for v in variant_combination])}"
                        )
                    categories[app_category_name] = {
                        "queries": app_queries,
                        "tasks": category_info["tasks"],
                        "platform": platform,
                        "app": app,
                        "suites": category_info["suites"],
                        "base-category": base_category,
                        "description": category_info["description"],
                    }

                if not base_category:
                    platform_category_name = (
                        f"{platform_category_name} "
                        f"{'+'.join([v.value for v in variant_combination])}"
                    )
                    PerfParser._add_variant_queries(
                        category_info,
                        variant_matrix,
                        variant_combination,
                        platform,
                        platform_queries,
                    )
                categories[platform_category_name] = platform_category_info

        return categories
+
    def _handle_variant_negations(category, category_info, **kwargs):
        """Handle variant negations.

        The reason why we're negating variants here instead of where we add
        them to the queries is because we need to iterate over all of the variants
        but when we add them, we only look at the variants in the combination. It's
        possible to combine these, but that increases the complexity of the code
        by quite a bit so it's best to do it separately.

        :param category str: The display name of the category (variant names
            embedded in it suppress their own negation).
        :param category_info dict: The built category info; mutated in place
            by appending negation queries to its "queries" lists.
        """
        for variant in Variants:
            if category_info["base-category"] and variant.value in kwargs.get(
                "requested_variants", [BASE_CATEGORY_NAME]
            ):
                # When some particular variant(s) are requested, and we are at a
                # base category, don't negate it. Otherwise, if the variant
                # wasn't requested negate it
                continue
            if variant.value in category:
                # If this variant is in the category name, skip negations
                continue
            if not PerfParser._check_platform(
                category_info["platform"],
                PerfParser.variants[variant.value]["platforms"],
            ):
                # Make sure the variant applies to the platform
                continue

            # Negate the variant in every suite of this category that
            # actually supports it
            for suite in category_info["suites"]:
                if variant.value not in PerfParser.suites[suite]["variants"]:
                    continue
                category_info["queries"][suite].append(
                    PerfParser.variants[variant.value]["negation"]
                )
+
    def _handle_app_negations(category, category_info, **kwargs):
        """Handle app negations.

        This is where the global chrome/safari negations get added. We use kwargs
        along with the app restriction method to make this decision.

        :param category str: The display name of the category (unused here,
            kept for symmetry with the other negation handlers).
        :param category_info dict: The built category info; mutated in place
            by appending app negation queries.
        """
        for app in Apps:
            if PerfParser.apps[app.value].get("negation", None) is None:
                # Apps without a negation query can never be negated
                continue
            elif any(
                PerfParser.apps[app.value]["negation"]
                in category_info["queries"][suite]
                for suite in category_info["suites"]
            ):
                # Already added the negations
                continue
            if category_info.get("app", None) is not None:
                # We only need to handle this for categories that
                # don't specify an app
                continue

            if PerfParser.apps[app.value].get("restriction", None) is None:
                # If this app has no restriction flag, it means we should select it
                # as much as possible and not negate it. However, if specific apps were requested,
                # we should allow the negation to proceed since a `negation` field
                # was provided (checked above), assuming this app was requested.
                requested_apps = kwargs.get("requested_apps", [])
                if requested_apps and app.value in requested_apps:
                    # Apps were requested, and this one is included
                    continue
                elif not requested_apps:
                    # Apps were not requested, so we should keep this one
                    continue

            if PerfParser._enable_restriction(
                PerfParser.apps[app.value].get("restriction", None), **kwargs
            ):
                # The app's restriction flag was passed, keep it selected
                continue

            # Negate the app in every suite of this category that runs it
            for suite in category_info["suites"]:
                if app.value not in PerfParser.suites[suite]["apps"]:
                    continue
                category_info["queries"][suite].append(
                    PerfParser.apps[app.value]["negation"]
                )
+
+ def _handle_negations(category, category_info, **kwargs):
+ """This method handles negations.
+
+ This method should only include things that should be globally applied
+ to all the queries. The apps are included as chrome is negated if
+ --chrome isn't provided, and the variants are negated here too.
+ """
+ PerfParser._handle_variant_negations(category, category_info, **kwargs)
+ PerfParser._handle_app_negations(category, category_info, **kwargs)
+
+ def get_categories(**kwargs):
+ """Get the categories to be displayed.
+
+ The categories are built using the decision matrices from `build_category_matrix`.
+ The methods above provide more detail on how this is done. Here, we use
+ this matrix to determine if we should show a category to a user.
+
+ We also apply the negations for restricted apps/platforms and variants
+ at the end before displaying the categories.
+ """
+ categories = {}
+
+ # Setup the restrictions, and ease-of-use variants requested (if any)
+ for variant in Variants:
+ if PerfParser._enable_restriction(
+ PerfParser.variants[variant.value].get("restriction", None), **kwargs
+ ):
+ kwargs.setdefault("requested_variants", []).append(variant.value)
+
+ category_decision_matrix = PerfParser.build_category_matrix(**kwargs)
+
+ # Now produce the categories by finding all the entries that are True
+ for category, category_matrix in category_decision_matrix.items():
+ categories.update(
+ PerfParser._build_categories(
+ category, PerfParser.categories[category], category_matrix
+ )
+ )
+
+ # Handle the restricted app queries, and variant negations
+ for category, category_info in categories.items():
+ PerfParser._handle_negations(category, category_info, **kwargs)
+
+ return categories
+
+ def inject_change_detector(base_cmd, all_tasks, selected_tasks):
+ query = "'perftest 'mwu 'detect"
+ mwu_task = PerfParser.get_tasks(base_cmd, [], query, all_tasks)
+
+ if len(mwu_task) > 1 or len(mwu_task) == 0:
+ raise InvalidRegressionDetectorQuery(
+ f"Expected 1 task from change detector "
+ f"query, but found {len(mwu_task)}"
+ )
+
+ selected_tasks |= set(mwu_task)
+
    def check_cached_revision(selected_tasks, base_commit=None):
        """
        If the base_commit parameter does not exist, remove expired cache data.
        Cache data format:
        {
            base_commit[str]: [
                {
                    "base_revision_treeherder": "2b04563b5",
                    "date": "2023-03-12",
                    "tasks": ["a-task"],
                },
                {
                    "base_revision_treeherder": "999998888",
                    "date": "2023-03-12",
                    "tasks": ["b-task"],
                },
            ]
        }

        The list represents different pushes with different task selections.
        A cached push is reusable when its task list is a superset of the
        currently selected tasks.

        TODO: See if we can request additional tests on a given base revision.

        :param selected_tasks list: The list of tasks selected by the user
        :param base_commit str: The base commit to search
        :return: The base_revision_treeherder if found, else None
        """
        today = datetime.now()
        # ISO dates (YYYY-MM-DD) compare correctly as plain strings
        expired_date = (today - timedelta(weeks=2)).strftime("%Y-%m-%d")
        today = today.strftime("%Y-%m-%d")

        if not cache_file.is_file():
            return

        with cache_file.open("r") as f:
            cache_data = json.load(f)

        # Remove expired cache data
        if base_commit is None:
            # Iterate over a copy of the keys since entries may be popped
            for cached_base_commit in list(cache_data):
                if not isinstance(cache_data[cached_base_commit], list):
                    # TODO: Remove in the future, this is for backwards-compatibility
                    # with the previous cache structure
                    cache_data.pop(cached_base_commit)
                else:
                    # Go through the pushes, and expire any that are too old
                    new_pushes = []
                    for push in cache_data[cached_base_commit]:
                        if push["date"] > expired_date:
                            new_pushes.append(push)
                    # If no pushes are left after expiration, expire the base commit
                    if new_pushes:
                        cache_data[cached_base_commit] = new_pushes
                    else:
                        cache_data.pop(cached_base_commit)
            # Persist the pruned cache immediately
            with cache_file.open("w") as f:
                json.dump(cache_data, f, indent=4)

        cached_base_commit = cache_data.get(base_commit, None)
        if cached_base_commit:
            for push in cached_base_commit:
                if set(selected_tasks) <= set(push["tasks"]):
                    return push["base_revision_treeherder"]
+
+ def save_revision_treeherder(selected_tasks, base_commit, base_revision_treeherder):
+ """
+ Save the base revision of treeherder to the cache.
+ See "check_cached_revision" for more information about the data structure.
+
+ :param selected_tasks list: The list of tasks selected by the user
+ :param base_commit str: The base commit to save
+ :param base_revision_treeherder str: The base revision of treeherder to save
+ :return: None
+ """
+ today = datetime.now().strftime("%Y-%m-%d")
+ new_revision = {
+ "base_revision_treeherder": base_revision_treeherder,
+ "date": today,
+ "tasks": list(selected_tasks),
+ }
+ cache_data = {}
+
+ if cache_file.is_file():
+ with cache_file.open("r") as f:
+ cache_data = json.load(f)
+ cache_data.setdefault(base_commit, []).append(new_revision)
+ else:
+ cache_data[base_commit] = [new_revision]
+
+ with cache_file.open(mode="w") as f:
+ json.dump(cache_data, f, indent=4)
+
+ def found_android_tasks(selected_tasks):
+ """
+ Check if any of the selected tasks are android.
+
+ :param selected_tasks list: List of tasks selected.
+ :return bool: True if android tasks were found, False otherwise.
+ """
+ return any("android" in task for task in selected_tasks)
+
+ def setup_try_config(
+ try_config_params, extra_args, selected_tasks, base_revision_treeherder=None
+ ):
+ """
+ Setup the try config for a push.
+
+ :param try_config_params dict: The current try config to be modified.
+ :param extra_args list: A list of extra options to add to the tasks being run.
+ :param selected_tasks list: List of tasks selected. Used for determining if android
+ tasks are selected to disable artifact mode.
+ :param base_revision_treeherder str: The base revision of treeherder to save
+ :return: None
+ """
+ if try_config_params is None:
+ try_config_params = {}
+
+ try_config = try_config_params.setdefault("try_task_config", {})
+ env = try_config.setdefault("env", {})
+ if extra_args:
+ args = " ".join(extra_args)
+ env["PERF_FLAGS"] = args
+ if base_revision_treeherder:
+ # Reset updated since we no longer need to worry
+ # about failing while we're on a base commit
+ env["PERF_BASE_REVISION"] = base_revision_treeherder
+ if PerfParser.found_android_tasks(selected_tasks) and try_config.get(
+ "use-artifact-builds", False
+ ):
+ # XXX: Fix artifact mode on android (no bug)
+ try_config["use-artifact-builds"] = False
+ print("Disabling artifact mode due to android task selection")
+
    def perf_push_to_try(
        selected_tasks,
        selected_categories,
        queries,
        try_config_params,
        dry_run,
        single_run,
        extra_args,
        comparator,
        comparator_args,
        alert_summary_id,
    ):
        """Perf-specific push to try method.

        This makes use of logic from the CompareParser to do something
        very similar except with log redirection. We get the comparison
        revisions, then use the repository object to update between revisions
        and the LogProcessor for parsing out the revisions that are used
        to build the Perfherder links.

        :param selected_tasks: Tasks to run on both pushes.
        :param selected_categories: Chosen category names (commit message only).
        :param queries: The queries used for selection (commit message only).
        :param try_config_params dict: Try task config for the pushes.
        :param dry_run bool: Don't actually push to try.
        :param single_run bool: Skip the base push entirely.
        :param extra_args list: Extra task options (become PERF_FLAGS).
        :param comparator str: Name of the comparator class to use.
        :param comparator_args: Arguments forwarded to the comparator.
        :param alert_summary_id: Optional alert id; replaces the commit message.
        :return: (base_revision_treeherder, new_revision_treeherder)
        """
        vcs = get_repository_object(build.topsrcdir)
        compare_commit, current_revision_ref = PerfParser.get_revisions_to_run(
            vcs, None
        )

        # Build commit message, and limit first line to 200 characters
        selected_categories_msg = ", ".join(selected_categories)
        if len(selected_categories_msg) > 200:
            selected_categories_msg = f"{selected_categories_msg[:200]}...\n...{selected_categories_msg[200:]}"
        msg = "Perf selections={} \nQueries={}".format(
            selected_categories_msg,
            json.dumps(queries, indent=4),
        )
        if alert_summary_id:
            msg = f"Perf alert summary id={alert_summary_id}"

        # Get the comparator to run
        comparator_klass = get_comparator(comparator)
        comparator_obj = comparator_klass(
            vcs, compare_commit, current_revision_ref, comparator_args
        )
        # Only the default comparator's base pushes are cacheable/reusable
        base_comparator = True
        if comparator_klass.__name__ != "BasePerfComparator":
            base_comparator = False

        new_revision_treeherder = ""
        base_revision_treeherder = ""
        try:
            # redirect_stdout allows us to feed each line into
            # a processor that we can use to catch the revision
            # while providing real-time output
            log_processor = LogProcessor()

            # Push the base revision first. This lets the new revision appear
            # first in the Treeherder view, and it also lets us enhance the new
            # revision with information about the base run.
            base_revision_treeherder = None
            if base_comparator:
                # Don't cache the base revision when a custom comparison is being performed
                # since the base revision is now unique and not general to all pushes
                base_revision_treeherder = PerfParser.check_cached_revision(
                    selected_tasks, compare_commit
                )

            # A cached base revision (or dry/single run) skips the base push
            if not (dry_run or single_run or base_revision_treeherder):
                # Setup the base revision, and try config. This lets us change the options
                # we run the tests with through the PERF_FLAGS environment variable.
                base_extra_args = list(extra_args)
                base_try_config_params = copy.deepcopy(try_config_params)
                comparator_obj.setup_base_revision(base_extra_args)
                PerfParser.setup_try_config(
                    base_try_config_params, base_extra_args, selected_tasks
                )

                with redirect_stdout(log_processor):
                    # XXX Figure out if we can use the `again` selector in some way
                    # Right now we would need to modify it to be able to do this.
                    # XXX Fix up the again selector for the perf selector (if it makes sense to)
                    push_to_try(
                        "perf-again",
                        "{msg}".format(msg=msg),
                        try_task_config=generate_try_task_config(
                            "fuzzy", selected_tasks, params=base_try_config_params
                        ),
                        stage_changes=False,
                        dry_run=dry_run,
                        closed_tree=False,
                        allow_log_capture=True,
                    )

                base_revision_treeherder = log_processor.revision
                if base_comparator:
                    PerfParser.save_revision_treeherder(
                        selected_tasks, compare_commit, base_revision_treeherder
                    )

                comparator_obj.teardown_base_revision()

            new_extra_args = list(extra_args)
            comparator_obj.setup_new_revision(new_extra_args)
            PerfParser.setup_try_config(
                try_config_params,
                new_extra_args,
                selected_tasks,
                base_revision_treeherder=base_revision_treeherder,
            )

            with redirect_stdout(log_processor):
                push_to_try(
                    "perf",
                    "{msg}".format(msg=msg),
                    # XXX Figure out if changing `fuzzy` to `perf` will break something
                    try_task_config=generate_try_task_config(
                        "fuzzy", selected_tasks, params=try_config_params
                    ),
                    stage_changes=False,
                    dry_run=dry_run,
                    closed_tree=False,
                    allow_log_capture=True,
                )

            new_revision_treeherder = log_processor.revision
            comparator_obj.teardown_new_revision()

        finally:
            # Always restore the working tree, even on failure
            comparator_obj.teardown()

        return base_revision_treeherder, new_revision_treeherder
+
    def run(
        update=False,
        show_all=False,
        parameters=None,
        try_config_params=None,
        dry_run=False,
        single_run=False,
        query=None,
        detect_changes=False,
        rebuild=1,
        clear_cache=False,
        **kwargs,
    ):
        """Select perf tasks (via alert, categories, or raw fzf) and push them.

        :param update bool: Forwarded to fzf_bootstrap (presumably updates
            the fzf binary — confirm).
        :param show_all bool: Skip category expansion and select from all tasks.
        :param parameters: Taskgraph parameters forwarded to setup_tasks_for_fzf.
        :param try_config_params dict: Try task config for the push.
        :param dry_run bool: Don't actually push to try.
        :param single_run bool: Only push the new revision (no base push).
        :param query str: Pre-set fzf query instead of interactive selection.
        :param detect_changes bool: Also inject the change-detection task.
        :param rebuild int: Number of reruns per task (caps total task count).
        :param clear_cache bool: Remove the cached base-revision file first.
        :return: The (base, new) revisions from perf_push_to_try, None when
            nothing was pushed, or 1 when fzf is unavailable.
        """
        # Setup fzf
        fzf = fzf_bootstrap(update)

        if not fzf:
            print(FZF_NOT_FOUND)
            return 1

        if clear_cache:
            print(f"Removing cached {cache_file} file")
            cache_file.unlink(missing_ok=True)

        all_tasks, dep_cache, cache_dir = setup_tasks_for_fzf(
            not dry_run,
            parameters,
            full=True,
            disable_target_task_filter=False,
        )
        base_cmd = build_base_cmd(
            fzf,
            dep_cache,
            cache_dir,
            show_estimates=False,
            preview_script=PREVIEW_SCRIPT,
        )

        # Perform the selection, then push to try and return the revisions
        queries = []
        selected_categories = []
        alert_summary_id = kwargs.get("alert")
        if alert_summary_id:
            # Selection path 1: take the task list from a Perfherder alert
            alert_tasks = requests.get(
                TREEHERDER_ALERT_TASKS_URL % alert_summary_id,
                headers={"User-Agent": "mozilla-central"},
            )
            if alert_tasks.status_code != 200:
                print(
                    "\nFailed to obtain tasks from alert due to:\n"
                    f"Alert ID: {alert_summary_id}\n"
                    f"Status Code: {alert_tasks.status_code}\n"
                    f"Response Message: {alert_tasks.json()}\n"
                )
                alert_tasks.raise_for_status()
            alert_tasks = set([task for task in alert_tasks.json()["tasks"] if task])
            # Only tasks that still exist in the taskgraph can be pushed
            selected_tasks = alert_tasks & set(all_tasks)
            if not selected_tasks:
                raise Exception("Alert ID has no task to run.")
            elif len(selected_tasks) != len(alert_tasks):
                print(
                    "\nAll the tasks of the Alert Summary couldn't be found in the taskgraph.\n"
                    f"Not exist tasks: {alert_tasks - set(all_tasks)}\n"
                )
        elif not show_all:
            # Selection path 2 (default): expand the categories first
            categories = PerfParser.get_categories(**kwargs)
            PerfParser.build_category_description(base_cmd, categories)

            selected_tasks, selected_categories, queries = PerfParser.get_perf_tasks(
                base_cmd, all_tasks, categories, query=query
            )
        else:
            # Selection path 3: raw fzf over every task
            selected_tasks = PerfParser.get_tasks(base_cmd, queries, query, all_tasks)

        if len(selected_tasks) == 0:
            print("No tasks selected")
            return None

        # Guard against accidentally scheduling an enormous number of runs
        total_task_count = len(selected_tasks) * rebuild
        if total_task_count > MAX_PERF_TASKS:
            print(
                "\n\n----------------------------------------------------------------------------------------------\n"
                f"You have selected {total_task_count} total test runs! (selected tasks({len(selected_tasks)}) * rebuild"
                f" count({rebuild}) \nThese tests won't be triggered as the current maximum for a single ./mach try "
                f"perf run is {MAX_PERF_TASKS}. \nIf this was unexpected, please file a bug in Testing :: Performance."
                "\n----------------------------------------------------------------------------------------------\n\n"
            )
            return None

        if detect_changes:
            PerfParser.inject_change_detector(base_cmd, all_tasks, selected_tasks)

        return PerfParser.perf_push_to_try(
            selected_tasks,
            selected_categories,
            queries,
            try_config_params,
            dry_run,
            single_run,
            kwargs.get("extra_args", []),
            kwargs.get("comparator", "BasePerfComparator"),
            kwargs.get("comparator_args", []),
            alert_summary_id,
        )
+
+ def run_category_checks():
+ # XXX: Add a jsonschema check for the category definition
+ # Make sure the queries don't specify variants in them
+ variant_queries = {
+ suite: [
+ PerfParser.variants[variant]["query"]
+ for variant in suite_info.get(
+ "variants", list(PerfParser.variants.keys())
+ )
+ ]
+ + [
+ PerfParser.variants[variant]["negation"]
+ for variant in suite_info.get(
+ "variants", list(PerfParser.variants.keys())
+ )
+ ]
+ for suite, suite_info in PerfParser.suites.items()
+ }
+
+ for category, category_info in PerfParser.categories.items():
+ for suite, query in category_info["query"].items():
+ if len(variant_queries[suite]) == 0:
+ # This suite has no variants
+ continue
+ if any(any(v in q for q in query) for v in variant_queries[suite]):
+ raise InvalidCategoryException(
+ f"The '{category}' category suite query for '{suite}' "
+ f"uses a variant in it's query '{query}'."
+ "If you don't want a particular variant use the "
+ "`variant-restrictions` field in the category."
+ )
+
+ return True
+
+ def setup_apk_upload(framework, apk_upload_path):
+ """Setup the APK for uploading to test on try.
+
+ There are two ways of performing the upload:
+ (1) Passing a path to an APK with:
+ --browsertime-upload-apk <PATH/FILE.APK>
+ --mozperftest-upload-apk <PATH/FILE.APK>
+ (2) Setting MOZ_FIREFOX_ANDROID_APK_OUTPUT to a path that will
+ always point to an APK (<PATH/FILE.APK>) that we can upload.
+
+ The file is always copied to testing/raptor/raptor/user_upload.apk to
+ integrate with minimal changes for simpler cases when using raptor-browsertime.
+
+ For mozperftest, the APK is always uploaded here for the same reasons:
+ python/mozperftest/mozperftest/user_upload.apk
+ """
+ frameworks_to_locations = {
+ "browsertime": pathlib.Path(
+ build.topsrcdir, "testing", "raptor", "raptor", "user_upload.apk"
+ ),
+ "mozperftest": pathlib.Path(
+ build.topsrcdir,
+ "python",
+ "mozperftest",
+ "mozperftest",
+ "user_upload.apk",
+ ),
+ }
+
+ print("Setting up custom APK upload")
+ if apk_upload_path in ("firefox-android"):
+ apk_upload_path = MOZ_FIREFOX_ANDROID_APK_OUTPUT
+ if apk_upload_path is None:
+ raise APKNotFound(
+ "MOZ_FIREFOX_ANDROID_APK_OUTPUT is not defined. It should "
+ "point to an APK to upload."
+ )
+ apk_upload_path = pathlib.Path(apk_upload_path)
+ if not apk_upload_path.exists() or apk_upload_path.is_dir():
+ raise APKNotFound(
+ "MOZ_FIREFOX_ANDROID_APK_OUTPUT needs to point to an APK."
+ )
+ else:
+ apk_upload_path = pathlib.Path(apk_upload_path)
+ if not apk_upload_path.exists():
+ raise APKNotFound(f"Path does not exist: {str(apk_upload_path)}")
+
+ print("\nCopying file in-tree for upload...")
+ shutil.copyfile(
+ str(apk_upload_path),
+ frameworks_to_locations[framework],
+ )
+
+ hg_cmd = ["hg", "add", str(frameworks_to_locations[framework])]
+ print(
+ f"\nRunning the following hg command (RAM warnings are expected):\n"
+ f" {hg_cmd}"
+ )
+ subprocess.check_output(hg_cmd)
+ print(
+ "\nAPK is setup for uploading. Please commit the changes, "
+ "and re-run this command. \nEnsure you supply the --android, "
+ "and select the correct tasks (fenix, geckoview) or use "
+ "--show-all for mozperftest task selection. \nFor Fenix, ensure "
+ "you also provide the --fenix flag."
+ )
+
+ def build_category_description(base_cmd, categories):
+ descriptions = {}
+
+ for category in categories:
+ if categories[category].get("description"):
+ descriptions[category] = categories[category].get("description")
+
+ description_file = pathlib.Path(
+ get_state_dir(), "try_perf_categories_info.json"
+ )
+ with description_file.open("w") as f:
+ json.dump(descriptions, f, indent=4)
+
+ preview_option = base_cmd.index("--preview") + 1
+ base_cmd[preview_option] = (
+ base_cmd[preview_option] + f' -d "{description_file}" -l "{{}}"'
+ )
+
+ for idx, cmd in enumerate(base_cmd):
+ if "--preview-window" in cmd:
+ base_cmd[idx] += ":wrap"
+
+
def get_compare_url(revisions, perfcompare_beta=False):
    """Setup the comparison link (PerfCompare beta or classic Perfherder)."""
    template = PERFCOMPARE_BASE_URL if perfcompare_beta else PERFHERDER_BASE_URL
    return template % revisions
+
+
def run(**kwargs):
    """Module entry point for the perf selector.

    Handles the APK-upload shortcut, validates the category definitions,
    prunes the base-revision cache, runs the selection/push, then prints the
    comparison links for the two try pushes.
    """
    if (
        kwargs.get("browsertime_upload_apk") is not None
        or kwargs.get("mozperftest_upload_apk") is not None
    ):
        framework = "browsertime"
        upload_apk = kwargs.get("browsertime_upload_apk")
        if upload_apk is None:
            framework = "mozperftest"
            upload_apk = kwargs.get("mozperftest_upload_apk")

        PerfParser.setup_apk_upload(framework, upload_apk)
        return

    # Make sure the categories are following
    # the rules we've setup
    PerfParser.run_category_checks()
    # No base commit given: this prunes expired cache entries
    PerfParser.check_cached_revision([])

    # Fix: `kwargs.get("try_config_params", {})` returns None when the key is
    # present with value None (its documented default), which would crash the
    # chained .get() calls — use `or {}` to cover both missing and None.
    try_task_config = (kwargs.get("try_config_params") or {}).get(
        "try_task_config", {}
    )
    revisions = PerfParser.run(
        profile=try_task_config.get("gecko-profile", False),
        rebuild=try_task_config.get("rebuild", 1),
        **kwargs,
    )

    if revisions is None:
        return

    # Provide link to perfherder for comparisons now
    if not kwargs.get("single_run", False):
        perfcompare_url = get_compare_url(
            revisions, perfcompare_beta=kwargs.get("perfcompare_beta", False)
        )
        original_try_url = TREEHERDER_TRY_BASE_URL % revisions[0]
        local_change_try_url = TREEHERDER_TRY_BASE_URL % revisions[1]
        print(
            "\n!!!NOTE!!!\n You'll be able to find a performance comparison here "
            "once the tests are complete (ensure you select the right "
            "framework): %s\n" % perfcompare_url
        )
        print("\n*******************************************************")
        print("* 2 commits/try-runs are created...                   *")
        print("*******************************************************")
        print(f"Base revision's try run: {original_try_url}")
        print(f"Local revision's try run: {local_change_try_url}\n")
        print(
            "If you need any help, you can find us in the #perf-help Matrix channel:\n"
            "https://matrix.to/#/#perf-help:mozilla.org\n"
        )
        print(
            "For more information on the performance tests, see our PerfDocs here:\n"
            "https://firefox-source-docs.mozilla.org/testing/perfdocs/"
        )
diff --git a/tools/tryselect/selectors/perf_preview.py b/tools/tryselect/selectors/perf_preview.py
new file mode 100644
index 0000000000..55219d3300
--- /dev/null
+++ b/tools/tryselect/selectors/perf_preview.py
@@ -0,0 +1,62 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""This script is intended to be called through fzf as a preview formatter."""
+
+
+import argparse
+import json
+import os
+import pathlib
+import sys
+
+here = os.path.abspath(os.path.dirname(__file__))
+sys.path.insert(0, os.path.join(os.path.dirname(here), "util"))
+
+
def process_args():
    """Process preview arguments.

    Returns the parsed namespace with `tasklist`, `description`, and `line`.
    """
    argparser = argparse.ArgumentParser()
    option_specs = (
        ("-t", "--tasklist", "Path to temporary file containing the selected tasks"),
        ("-d", "--description", "Path to description file containing the item description"),
        ("-l", "--line", "Current line that the user is pointing"),
    )
    for short_flag, long_flag, help_text in option_specs:
        argparser.add_argument(
            short_flag,
            long_flag,
            type=str,
            default=None,
            help=help_text,
        )
    return argparser.parse_args()
+
+
def plain_display(taskfile, description, line):
    """Render the preview window.

    Prints the sorted contents of the selected-tasks file, then — when a
    description file and a current line are provided — the description of
    the entry the cursor is on.
    """
    with open(taskfile) as f:
        tasks = [task.strip() for task in f]
    print("\n".join(sorted(tasks)))

    if description is None or line is None:
        return

    # fzf passes the current line quoted; strip the quotes for the lookup
    lookup = line.replace("'", "")
    with pathlib.Path(description).open("r") as f:
        descriptions = json.load(f)
    if lookup in descriptions:
        print(f"\n* Desc:\n{descriptions[lookup]}")
+
+
if __name__ == "__main__":
    # Parse the fzf-provided flags and render the preview
    cli_args = process_args()
    plain_display(cli_args.tasklist, cli_args.description, cli_args.line)
diff --git a/tools/tryselect/selectors/perfselector/__init__.py b/tools/tryselect/selectors/perfselector/__init__.py
new file mode 100644
index 0000000000..c580d191c1
--- /dev/null
+++ b/tools/tryselect/selectors/perfselector/__init__.py
@@ -0,0 +1,3 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
diff --git a/tools/tryselect/selectors/perfselector/classification.py b/tools/tryselect/selectors/perfselector/classification.py
new file mode 100644
index 0000000000..cabf2a323e
--- /dev/null
+++ b/tools/tryselect/selectors/perfselector/classification.py
@@ -0,0 +1,387 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import enum
+
+
+class ClassificationEnum(enum.Enum):
+ """This class provides the ability to use Enums as array indices."""
+
+ @property
+ def value(self):
+ return self._value_["value"]
+
+ def __index__(self):
+ return self._value_["index"]
+
+ def __int__(self):
+ return self._value_["index"]
+
+
+class Platforms(ClassificationEnum):
+ ANDROID_A51 = {"value": "android-a51", "index": 0}
+ ANDROID = {"value": "android", "index": 1}
+ WINDOWS = {"value": "windows", "index": 2}
+ LINUX = {"value": "linux", "index": 3}
+ MACOSX = {"value": "macosx", "index": 4}
+ DESKTOP = {"value": "desktop", "index": 5}
+
+
+class Apps(ClassificationEnum):
+ FIREFOX = {"value": "firefox", "index": 0}
+ CHROME = {"value": "chrome", "index": 1}
+ CHROMIUM = {"value": "chromium", "index": 2}
+ GECKOVIEW = {"value": "geckoview", "index": 3}
+ FENIX = {"value": "fenix", "index": 4}
+ CHROME_M = {"value": "chrome-m", "index": 5}
+ SAFARI = {"value": "safari", "index": 6}
+ CHROMIUM_RELEASE = {"value": "custom-car", "index": 7}
+ CHROMIUM_RELEASE_M = {"value": "cstm-car-m", "index": 8}
+
+
+class Suites(ClassificationEnum):
+ RAPTOR = {"value": "raptor", "index": 0}
+ TALOS = {"value": "talos", "index": 1}
+ AWSY = {"value": "awsy", "index": 2}
+
+
+class Variants(ClassificationEnum):
+ FISSION = {"value": "fission", "index": 0}
+ BYTECODE_CACHED = {"value": "bytecode-cached", "index": 1}
+ LIVE_SITES = {"value": "live-sites", "index": 2}
+ PROFILING = {"value": "profiling", "index": 3}
+ SWR = {"value": "swr", "index": 4}
+
+
+"""
+The following methods and constants are used for restricting
+certain platforms and applications such as chrome, safari, and
+android tests. These all require a flag such as --android to
+enable (see build_category_matrix for more info).
+"""
+
+
+def check_for_android(android=False, **kwargs):
+ return android
+
+
+def check_for_fenix(fenix=False, **kwargs):
+ return fenix or ("fenix" in kwargs.get("requested_apps", []))
+
+
+def check_for_chrome(chrome=False, **kwargs):
+ return chrome
+
+
+def check_for_custom_car(custom_car=False, **kwargs):
+ return custom_car
+
+
+def check_for_safari(safari=False, **kwargs):
+ return safari
+
+
+def check_for_live_sites(live_sites=False, **kwargs):
+ return live_sites
+
+
+def check_for_profile(profile=False, **kwargs):
+ return profile
+
+
+class ClassificationProvider:
+ @property
+ def platforms(self):
+ return {
+ Platforms.ANDROID_A51.value: {
+ "query": "'android 'a51 'shippable 'aarch64",
+ "restriction": check_for_android,
+ "platform": Platforms.ANDROID.value,
+ },
+ Platforms.ANDROID.value: {
+            # The android and android-a51 queries are expected to be the same;
+            # we don't want to run the tests on other mobile platforms.
+ "query": "'android 'a51 'shippable 'aarch64",
+ "restriction": check_for_android,
+ "platform": Platforms.ANDROID.value,
+ },
+ Platforms.WINDOWS.value: {
+ "query": "!-32 'windows 'shippable",
+ "platform": Platforms.DESKTOP.value,
+ },
+ Platforms.LINUX.value: {
+ "query": "!clang 'linux 'shippable",
+ "platform": Platforms.DESKTOP.value,
+ },
+ Platforms.MACOSX.value: {
+ "query": "'osx 'shippable",
+ "platform": Platforms.DESKTOP.value,
+ },
+ Platforms.DESKTOP.value: {
+ "query": "!android 'shippable !-32 !clang",
+ "platform": Platforms.DESKTOP.value,
+ },
+ }
+
+ @property
+ def apps(self):
+ return {
+ Apps.FIREFOX.value: {
+ "query": "!chrom !geckoview !fenix !safari !m-car",
+ "platforms": [Platforms.DESKTOP.value],
+ },
+ Apps.CHROME.value: {
+ "query": "'chrome",
+ "negation": "!chrom",
+ "restriction": check_for_chrome,
+ "platforms": [Platforms.DESKTOP.value],
+ },
+ Apps.CHROMIUM.value: {
+ "query": "'chromium",
+ "negation": "!chrom",
+ "restriction": check_for_chrome,
+ "platforms": [Platforms.DESKTOP.value],
+ },
+ Apps.GECKOVIEW.value: {
+ "query": "'geckoview",
+ "negation": "!geckoview",
+ "platforms": [Platforms.ANDROID.value],
+ },
+ Apps.FENIX.value: {
+ "query": "'fenix",
+ "negation": "!fenix",
+ "restriction": check_for_fenix,
+ "platforms": [Platforms.ANDROID.value],
+ },
+ Apps.CHROME_M.value: {
+ "query": "'chrome-m",
+ "negation": "!chrom",
+ "restriction": check_for_chrome,
+ "platforms": [Platforms.ANDROID.value],
+ },
+ Apps.SAFARI.value: {
+ "query": "'safari",
+ "negation": "!safari",
+ "restriction": check_for_safari,
+ "platforms": [Platforms.MACOSX.value],
+ },
+ Apps.CHROMIUM_RELEASE.value: {
+ "query": "'m-car",
+ "negation": "!m-car",
+ "restriction": check_for_custom_car,
+ "platforms": [
+ Platforms.LINUX.value,
+ Platforms.WINDOWS.value,
+ Platforms.MACOSX.value,
+ ],
+ },
+ Apps.CHROMIUM_RELEASE_M.value: {
+ "query": "'m-car",
+ "negation": "!m-car",
+ "restriction": check_for_custom_car,
+ "platforms": [Platforms.ANDROID.value],
+ },
+ }
+
+ @property
+ def variants(self):
+ return {
+ Variants.FISSION.value: {
+ "query": "!nofis",
+ "negation": "'nofis",
+ "platforms": [Platforms.ANDROID.value],
+ "apps": [Apps.FENIX.value, Apps.GECKOVIEW.value],
+ },
+ Variants.BYTECODE_CACHED.value: {
+ "query": "'bytecode",
+ "negation": "!bytecode",
+ "platforms": [Platforms.DESKTOP.value],
+ "apps": [Apps.FIREFOX.value],
+ },
+ Variants.LIVE_SITES.value: {
+ "query": "'live",
+ "negation": "!live",
+ "restriction": check_for_live_sites,
+ "platforms": [Platforms.DESKTOP.value, Platforms.ANDROID.value],
+ "apps": [ # XXX No live CaR tests
+ Apps.FIREFOX.value,
+ Apps.CHROME.value,
+ Apps.CHROMIUM.value,
+ Apps.FENIX.value,
+ Apps.GECKOVIEW.value,
+ Apps.SAFARI.value,
+ ],
+ },
+ Variants.PROFILING.value: {
+ "query": "'profil",
+ "negation": "!profil",
+ "restriction": check_for_profile,
+ "platforms": [Platforms.DESKTOP.value, Platforms.ANDROID.value],
+ "apps": [Apps.FIREFOX.value, Apps.GECKOVIEW.value, Apps.FENIX.value],
+ },
+ Variants.SWR.value: {
+ "query": "'swr",
+ "negation": "!swr",
+ "platforms": [Platforms.DESKTOP.value],
+ "apps": [Apps.FIREFOX.value],
+ },
+ }
+
+ @property
+ def suites(self):
+ return {
+ Suites.RAPTOR.value: {
+ "apps": list(self.apps.keys()),
+ "platforms": list(self.platforms.keys()),
+ "variants": [
+ Variants.FISSION.value,
+ Variants.LIVE_SITES.value,
+ Variants.PROFILING.value,
+ Variants.BYTECODE_CACHED.value,
+ ],
+ },
+ Suites.TALOS.value: {
+ "apps": [Apps.FIREFOX.value],
+ "platforms": [Platforms.DESKTOP.value],
+ "variants": [
+ Variants.PROFILING.value,
+ Variants.SWR.value,
+ ],
+ },
+ Suites.AWSY.value: {
+ "apps": [Apps.FIREFOX.value],
+ "platforms": [Platforms.DESKTOP.value],
+ "variants": [],
+ },
+ }
+
+ """
+ Here you can find the base categories that are defined for the perf
+ selector. The following fields are available:
+ * query: Set the queries to use for each suite you need.
+ * suites: The suites that are needed for this category.
+ * tasks: A hard-coded list of tasks to select.
+ * platforms: The platforms that it can run on.
+ * app-restrictions: A list of apps that the category can run.
+ * variant-restrictions: A list of variants available for each suite.
+
+ Note that setting the App/Variant-Restriction fields should be used to
+ restrict the available apps and variants, not expand them.
+ """
+
+ @property
+ def categories(self):
+ return {
+ "Pageload": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'tp6 !tp6-bench"],
+ },
+ "suites": [Suites.RAPTOR.value],
+ "tasks": [],
+            "description": "A group of tests that measures various important pageload metrics. More information "
+            "about what exactly is measured can be found here:"
+            " https://firefox-source-docs.mozilla.org/testing/perfdocs/raptor.html#desktop",
+ },
+ "Speedometer 3": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'speedometer3"],
+ },
+ "variant-restrictions": {Suites.RAPTOR.value: [Variants.FISSION.value]},
+ "suites": [Suites.RAPTOR.value],
+ "app-restrictions": {},
+ "tasks": [],
+            "description": "A group of Speedometer3 tests on various platforms and architectures, speedometer3 is "
+            "currently the best benchmark we have for a baseline on real-world web performance",
+ },
+ "Responsiveness": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'responsive"],
+ },
+ "suites": [Suites.RAPTOR.value],
+ "variant-restrictions": {Suites.RAPTOR.value: []},
+ "app-restrictions": {
+ Suites.RAPTOR.value: [
+ Apps.FIREFOX.value,
+ Apps.CHROME.value,
+ Apps.CHROMIUM.value,
+ Apps.FENIX.value,
+ Apps.GECKOVIEW.value,
+ ],
+ },
+ "tasks": [],
+            "description": "A group of tests that ensure that the interactive part of the browser stays fast and"
+            " responsive",
+ },
+ "Benchmarks": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'benchmark !tp6-bench"],
+ },
+ "suites": [Suites.RAPTOR.value],
+ "variant-restrictions": {Suites.RAPTOR.value: []},
+ "tasks": [],
+ "description": "A group of tests that benchmark how the browser performs in various categories. "
+ "More information about what exact benchmarks we run can be found here: "
+ "https://firefox-source-docs.mozilla.org/testing/perfdocs/raptor.html#benchmarks",
+ },
+ "DAMP (Devtools)": {
+ "query": {
+ Suites.TALOS.value: ["'talos 'damp"],
+ },
+ "suites": [Suites.TALOS.value],
+ "tasks": [],
+            "description": "The DAMP tests are a group of tests that measure the performance of the browser's "
+            "devtools under certain conditions. More information on the DAMP tests can be found"
+ " here: https://firefox-source-docs.mozilla.org/devtools/tests/performance-tests"
+ "-damp.html#what-does-it-do",
+ },
+ "Talos PerfTests": {
+ "query": {
+ Suites.TALOS.value: ["'talos"],
+ },
+ "suites": [Suites.TALOS.value],
+ "tasks": [],
+ "description": "This selects all of the talos performance tests. More information can be found here: "
+ "https://firefox-source-docs.mozilla.org/testing/perfdocs/talos.html#test-types",
+ },
+ "Resource Usage": {
+ "query": {
+ Suites.TALOS.value: ["'talos 'xperf | 'tp5"],
+ Suites.RAPTOR.value: ["'power 'osx"],
+ Suites.AWSY.value: ["'awsy"],
+ },
+ "suites": [Suites.TALOS.value, Suites.RAPTOR.value, Suites.AWSY.value],
+ "platform-restrictions": [Platforms.DESKTOP.value],
+ "variant-restrictions": {
+ Suites.RAPTOR.value: [],
+ Suites.TALOS.value: [],
+ },
+ "app-restrictions": {
+ Suites.RAPTOR.value: [Apps.FIREFOX.value],
+ Suites.TALOS.value: [Apps.FIREFOX.value],
+ },
+ "tasks": [],
+            "description": "A group of tests that monitor resource usage of various metrics like power, CPU, and"
+            " memory",
+ },
+ "Graphics, & Media Playback": {
+ "query": {
+ # XXX This might not be an exhaustive list for talos atm
+ Suites.TALOS.value: ["'talos 'svgr | 'bcv | 'webgl"],
+ Suites.RAPTOR.value: ["'browsertime 'youtube-playback"],
+ },
+ "suites": [Suites.TALOS.value, Suites.RAPTOR.value],
+ "variant-restrictions": {Suites.RAPTOR.value: [Variants.FISSION.value]},
+ "app-restrictions": {
+ Suites.RAPTOR.value: [
+ Apps.FIREFOX.value,
+ Apps.CHROME.value,
+ Apps.CHROMIUM.value,
+ Apps.FENIX.value,
+ Apps.GECKOVIEW.value,
+ ],
+ },
+ "tasks": [],
+ "description": "A group of tests that monitor key graphics and media metrics to keep the browser fast",
+ },
+ }
diff --git a/tools/tryselect/selectors/perfselector/perfcomparators.py b/tools/tryselect/selectors/perfselector/perfcomparators.py
new file mode 100644
index 0000000000..fce35fe562
--- /dev/null
+++ b/tools/tryselect/selectors/perfselector/perfcomparators.py
@@ -0,0 +1,258 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import importlib
+import inspect
+import pathlib
+
+BUILTIN_COMPARATORS = {}
+
+
+class ComparatorNotFound(Exception):
+ """Raised when we can't find the specified comparator.
+
+ Triggered when either the comparator name is incorrect for a builtin one,
+ or when a path to a specified comparator cannot be found.
+ """
+
+ pass
+
+
+class GithubRequestFailure(Exception):
+ """Raised when we hit a failure during PR link parsing."""
+
+ pass
+
+
+class BadComparatorArgs(Exception):
+ """Raised when the args given to the comparator are incorrect."""
+
+ pass
+
+
+def comparator(comparator_klass):
+ BUILTIN_COMPARATORS[comparator_klass.__name__] = comparator_klass
+ return comparator_klass
+
+
+@comparator
+class BasePerfComparator:
+ def __init__(self, vcs, compare_commit, current_revision_ref, comparator_args):
+ """Initialize the standard/default settings for Comparators.
+
+ :param vcs object: Used for updating the local repo.
+ :param compare_commit str: The base revision found for the local repo.
+ :param current_revision_ref str: The current revision of the local repo.
+ :param comparator_args list: List of comparator args in the format NAME=VALUE.
+ """
+ self.vcs = vcs
+ self.compare_commit = compare_commit
+ self.current_revision_ref = current_revision_ref
+ self.comparator_args = comparator_args
+
+ # Used to ensure that the local repo gets cleaned up appropriately on failures
+ self._updated = False
+
+ def setup_base_revision(self, extra_args):
+ """Setup the base try run/revision.
+
+ In this case, we update to the repo to the base revision and
+ push that to try. The extra_args can be used to set additional
+ arguments for Raptor (not available for other harnesses).
+
+ :param extra_args list: A list of extra arguments to pass to the try tasks.
+ """
+ self.vcs.update(self.compare_commit)
+ self._updated = True
+
+ def teardown_base_revision(self):
+ """Teardown the setup for the base revision."""
+ if self._updated:
+ self.vcs.update(self.current_revision_ref)
+ self._updated = False
+
+ def setup_new_revision(self, extra_args):
+ """Setup the new try run/revision.
+
+ Note that the extra_args are reset between the base, and new revision runs.
+
+ :param extra_args list: A list of extra arguments to pass to the try tasks.
+ """
+ pass
+
+ def teardown_new_revision(self):
+ """Teardown the new run/revision setup."""
+ pass
+
+ def teardown(self):
+ """Teardown for failures.
+
+ This method can be used for ensuring that the repo is cleaned up
+ when a failure is hit at any point in the process of doing the
+ new/base revision setups, or the pushes to try.
+ """
+ self.teardown_base_revision()
+
+
+def get_github_pull_request_info(link):
+ """Returns information about a PR link.
+
+ This method accepts a Github link in either of these formats:
+ https://github.com/mozilla-mobile/firefox-android/pull/1627,
+ https://github.com/mozilla-mobile/firefox-android/pull/1876/commits/17c7350cc37a4a85cea140a7ce54e9fd037b5365 #noqa
+
+ and returns the Github link, branch, and revision of the commit.
+ """
+ from urllib.parse import urlparse
+
+ import requests
+
+ # Parse the url, and get all the necessary info
+ parsed_url = urlparse(link)
+ path_parts = parsed_url.path.strip("/").split("/")
+ owner, repo = path_parts[0], path_parts[1]
+ pr_number = path_parts[-1]
+
+ if "/pull/" not in parsed_url.path:
+ raise GithubRequestFailure(
+ f"Link for Github PR is invalid (missing /pull/): {link}"
+ )
+
+ # Get the commit being targeted in the PR
+ pr_commit = None
+ if "/commits/" in parsed_url.path:
+ pr_commit = path_parts[-1]
+ pr_number = path_parts[-3]
+
+ # Make the request, and get the PR info, otherwise,
+ # raise an exception if the response code is not 200
+ api_url = f"https://api.github.com/repos/{owner}/{repo}/pulls/{pr_number}"
+ response = requests.get(api_url)
+ if response.status_code == 200:
+ link_info = response.json()
+ return (
+ link_info["head"]["repo"]["html_url"],
+ pr_commit if pr_commit else link_info["head"]["sha"],
+ link_info["head"]["ref"],
+ )
+
+ raise GithubRequestFailure(
+ f"The following url returned a non-200 status code: {api_url}"
+ )
+
+
+@comparator
+class BenchmarkComparator(BasePerfComparator):
+ def _get_benchmark_info(self, arg_prefix):
+ # Get the flag from the comparator args
+ benchmark_info = {"repo": None, "branch": None, "revision": None, "link": None}
+ for arg in self.comparator_args:
+ if arg.startswith(arg_prefix):
+ _, settings = arg.split(arg_prefix)
+ setting, val = settings.split("=")
+ if setting not in benchmark_info:
+ raise BadComparatorArgs(
+ f"Unknown argument provided `{setting}`. Only the following "
+ f"are available (prefixed with `{arg_prefix}`): "
+ f"{list(benchmark_info.keys())}"
+ )
+ benchmark_info[setting] = val
+
+ # Parse the link for any required information
+ if benchmark_info.get("link", None) is not None:
+ (
+ benchmark_info["repo"],
+ benchmark_info["revision"],
+ benchmark_info["branch"],
+ ) = get_github_pull_request_info(benchmark_info["link"])
+
+ return benchmark_info
+
+ def _setup_benchmark_args(self, extra_args, benchmark_info):
+ # Setup the arguments for Raptor
+ extra_args.append(f"benchmark-repository={benchmark_info['repo']}")
+ extra_args.append(f"benchmark-revision={benchmark_info['revision']}")
+
+ if benchmark_info.get("branch", None):
+ extra_args.append(f"benchmark-branch={benchmark_info['branch']}")
+
+ def setup_base_revision(self, extra_args):
+ """Sets up the options for a base benchmark revision run.
+
+ Checks for a `base-link` in the
+ command and adds the appropriate commands to the extra_args
+ which will be added to the PERF_FLAGS environment variable.
+
+ If that isn't provided, then you must provide the repo, branch,
+ and revision directly through these (branch is optional):
+
+ base-repo=https://github.com/mozilla-mobile/firefox-android
+ base-branch=main
+ base-revision=17c7350cc37a4a85cea140a7ce54e9fd037b5365
+
+ Otherwise, we'll use the default mach try perf
+ base behaviour.
+
+ TODO: Get the information automatically from a commit link. Github
+ API doesn't provide the branch name from a link like that.
+ """
+ base_info = self._get_benchmark_info("base-")
+
+ # If no options were provided, use the default BasePerfComparator behaviour
+ if not any(v is not None for v in base_info.values()):
+ raise BadComparatorArgs(
+ f"Could not find the correct base-revision arguments in: {self.comparator_args}"
+ )
+
+ self._setup_benchmark_args(extra_args, base_info)
+
+ def setup_new_revision(self, extra_args):
+ """Sets up the options for a new benchmark revision run.
+
+ Same as `setup_base_revision`, except it uses
+ `new-` as the prefix instead of `base-`.
+ """
+ new_info = self._get_benchmark_info("new-")
+
+ # If no options were provided, use the default BasePerfComparator behaviour
+ if not any(v is not None for v in new_info.values()):
+ raise BadComparatorArgs(
+ f"Could not find the correct new-revision arguments in: {self.comparator_args}"
+ )
+
+ self._setup_benchmark_args(extra_args, new_info)
+
+
+def get_comparator(comparator):
+ if comparator in BUILTIN_COMPARATORS:
+ return BUILTIN_COMPARATORS[comparator]
+
+ file = pathlib.Path(comparator)
+ if not file.exists():
+ raise ComparatorNotFound(
+ f"Expected either a path to a file containing a comparator, or a "
+ f"builtin comparator from this list: {BUILTIN_COMPARATORS.keys()}"
+ )
+
+ # Importing a source file directly
+ spec = importlib.util.spec_from_file_location(name=file.name, location=comparator)
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+
+ members = inspect.getmembers(
+ module,
+ lambda c: inspect.isclass(c)
+ and issubclass(c, BasePerfComparator)
+ and c != BasePerfComparator,
+ )
+
+ if not members:
+ raise ComparatorNotFound(
+ f"The path {comparator} was found but it was not a valid comparator. "
+ f"Ensure it is a subclass of BasePerfComparator and optionally contains the "
+ f"following methods: "
+ f"{', '.join(inspect.getmembers(BasePerfComparator, predicate=inspect.ismethod))}"
+ )
+
+ return members[0][-1]
diff --git a/tools/tryselect/selectors/perfselector/utils.py b/tools/tryselect/selectors/perfselector/utils.py
new file mode 100644
index 0000000000..105d003091
--- /dev/null
+++ b/tools/tryselect/selectors/perfselector/utils.py
@@ -0,0 +1,44 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+import sys
+
+REVISION_MATCHER = re.compile(r"remote:.*/try/rev/([\w]*)[ \t]*$")
+
+
+class LogProcessor:
+ def __init__(self):
+ self.buf = ""
+ self.stdout = sys.__stdout__
+ self._revision = None
+
+ @property
+ def revision(self):
+ return self._revision
+
+ def write(self, buf):
+ while buf:
+ try:
+ newline_index = buf.index("\n")
+ except ValueError:
+ # No newline, wait for next call
+ self.buf += buf
+ break
+
+ # Get data up to next newline and combine with previously buffered data
+ data = self.buf + buf[: newline_index + 1]
+ buf = buf[newline_index + 1 :]
+
+ # Reset buffer then output line
+ self.buf = ""
+ if data.strip() == "":
+ continue
+ self.stdout.write(data.strip("\n") + "\n")
+
+            # Check if a temporary commit was created
+ match = REVISION_MATCHER.match(data)
+ if match:
+ # Last line found is the revision we want
+ self._revision = match.group(1)
diff --git a/tools/tryselect/selectors/preview.py b/tools/tryselect/selectors/preview.py
new file mode 100644
index 0000000000..1d232af9e0
--- /dev/null
+++ b/tools/tryselect/selectors/preview.py
@@ -0,0 +1,102 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""This script is intended to be called through fzf as a preview formatter."""
+
+
+import argparse
+import os
+import sys
+
+here = os.path.abspath(os.path.dirname(__file__))
+sys.path.insert(0, os.path.join(os.path.dirname(here), "util"))
+from estimates import duration_summary
+
+
+def process_args():
+ """Process preview arguments."""
+ argparser = argparse.ArgumentParser()
+ argparser.add_argument(
+ "-s",
+ "--show-estimates",
+ action="store_true",
+ help="Show task duration estimates (default: False)",
+ )
+ argparser.add_argument(
+ "-g",
+ "--graph-cache",
+ type=str,
+ default=None,
+ help="Filename of task graph dependencies",
+ )
+ argparser.add_argument(
+ "-c",
+ "--cache_dir",
+ type=str,
+ default=None,
+ help="Path to cache directory containing task durations",
+ )
+ argparser.add_argument(
+ "-t",
+ "--tasklist",
+ type=str,
+ default=None,
+ help="Path to temporary file containing the selected tasks",
+ )
+ return argparser.parse_args()
+
+
+def plain_display(taskfile):
+ """Original preview window display."""
+ with open(taskfile) as f:
+ tasklist = [line.strip() for line in f]
+ print("\n".join(sorted(tasklist)))
+
+
+def duration_display(graph_cache_file, taskfile, cache_dir):
+ """Preview window display with task durations + metadata."""
+ with open(taskfile) as f:
+ tasklist = [line.strip() for line in f]
+
+ durations = duration_summary(graph_cache_file, tasklist, cache_dir)
+ output = ""
+ max_columns = int(os.environ["FZF_PREVIEW_COLUMNS"])
+
+ output += "\nSelected tasks take {}\n".format(durations["selected_duration"])
+ output += "+{} dependencies, total {}\n".format(
+ durations["dependency_count"],
+ durations["selected_duration"] + durations["dependency_duration"],
+ )
+
+ if durations.get("percentile"):
+ output += "This is in the top {}% of requests\n".format(
+ 100 - durations["percentile"]
+ )
+
+ output += "Estimated finish in {} at {}".format(
+ durations["wall_duration_seconds"], durations["eta_datetime"].strftime("%H:%M")
+ )
+
+ duration_width = 5 # show five numbers at most.
+ output += "{:>{width}}\n".format("Duration", width=max_columns)
+ for task in tasklist:
+ duration = durations["task_durations"].get(task, 0.0)
+ output += "{:{align}{width}} {:{nalign}{nwidth}}s\n".format(
+ task,
+ duration,
+ align="<",
+ width=max_columns - (duration_width + 2), # 2: space and 's'
+ nalign=">",
+ nwidth=duration_width,
+ )
+
+ print(output)
+
+
+if __name__ == "__main__":
+ args = process_args()
+ if args.show_estimates and os.path.isdir(args.cache_dir):
+ duration_display(args.graph_cache, args.tasklist, args.cache_dir)
+ else:
+ plain_display(args.tasklist)
diff --git a/tools/tryselect/selectors/release.py b/tools/tryselect/selectors/release.py
new file mode 100644
index 0000000000..994bbe644d
--- /dev/null
+++ b/tools/tryselect/selectors/release.py
@@ -0,0 +1,159 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import os
+
+import attr
+import yaml
+from mozilla_version.gecko import FirefoxVersion
+
+from ..cli import BaseTryParser
+from ..push import push_to_try, vcs
+
+TARGET_TASKS = {
+ "staging": "staging_release_builds",
+ "release-sim": "release_simulation",
+}
+
+
+def read_file(path):
+ with open(path) as fh:
+ return fh.read()
+
+
+class ReleaseParser(BaseTryParser):
+ name = "release"
+ arguments = [
+ [
+ ["-v", "--version"],
+ {
+ "metavar": "STR",
+ "required": True,
+ "action": "store",
+ "type": FirefoxVersion.parse,
+ "help": "The version number to use for the staging release.",
+ },
+ ],
+ [
+ ["--migration"],
+ {
+ "metavar": "STR",
+ "action": "append",
+ "dest": "migrations",
+ "choices": [
+ "central-to-beta",
+ "beta-to-release",
+ "early-to-late-beta",
+ "release-to-esr",
+ ],
+ "help": "Migration to run for the release (can be specified multiple times).",
+ },
+ ],
+ [
+ ["--no-limit-locales"],
+ {
+ "action": "store_false",
+ "dest": "limit_locales",
+ "help": "Don't build a limited number of locales in the staging release.",
+ },
+ ],
+ [
+ ["--tasks"],
+ {
+ "choices": TARGET_TASKS.keys(),
+ "default": "staging",
+ "help": "Which tasks to run on-push.",
+ },
+ ],
+ ]
+ common_groups = ["push"]
+ task_configs = ["disable-pgo", "worker-overrides"]
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.set_defaults(migrations=[])
+
+
+def run(
+ version,
+ migrations,
+ limit_locales,
+ tasks,
+ try_config_params=None,
+ stage_changes=False,
+ dry_run=False,
+ message="{msg}",
+ closed_tree=False,
+ push_to_lando=False,
+):
+ app_version = attr.evolve(version, beta_number=None, is_esr=False)
+
+ files_to_change = {
+ "browser/config/version.txt": "{}\n".format(app_version),
+ "browser/config/version_display.txt": "{}\n".format(version),
+ "config/milestone.txt": "{}\n".format(app_version),
+ }
+ with open("browser/config/version.txt") as f:
+ current_version = FirefoxVersion.parse(f.read())
+ format_options = {
+ "current_major_version": current_version.major_number,
+ "next_major_version": version.major_number,
+ "current_weave_version": current_version.major_number + 2,
+ "next_weave_version": version.major_number + 2,
+ }
+
+ if "beta-to-release" in migrations and "early-to-late-beta" not in migrations:
+ migrations.append("early-to-late-beta")
+
+ release_type = version.version_type.name.lower()
+ if release_type not in ("beta", "release", "esr"):
+ raise Exception(
+ "Can't do staging release for version: {} type: {}".format(
+ version, version.version_type
+ )
+ )
+ elif release_type == "esr":
+ release_type += str(version.major_number)
+ task_config = {"version": 2, "parameters": try_config_params or {}}
+ task_config["parameters"].update(
+ {
+ "target_tasks_method": TARGET_TASKS[tasks],
+ "optimize_target_tasks": True,
+ "release_type": release_type,
+ }
+ )
+
+ with open(os.path.join(vcs.path, "taskcluster/ci/config.yml")) as f:
+ migration_configs = yaml.safe_load(f)
+ for migration in migrations:
+ migration_config = migration_configs["merge-automation"]["behaviors"][migration]
+ for path, from_, to in migration_config["replacements"]:
+ if path in files_to_change:
+ contents = files_to_change[path]
+ else:
+ contents = read_file(path)
+ from_ = from_.format(**format_options)
+ to = to.format(**format_options)
+ files_to_change[path] = contents.replace(from_, to)
+
+ if limit_locales:
+ files_to_change["browser/locales/l10n-changesets.json"] = read_file(
+ os.path.join(vcs.path, "browser/locales/l10n-onchange-changesets.json")
+ )
+ files_to_change["browser/locales/shipped-locales"] = "en-US\n" + read_file(
+ os.path.join(vcs.path, "browser/locales/onchange-locales")
+ )
+
+ msg = "staging release: {}".format(version)
+ return push_to_try(
+ "release",
+ message.format(msg=msg),
+ stage_changes=stage_changes,
+ dry_run=dry_run,
+ closed_tree=closed_tree,
+ try_task_config=task_config,
+ files_to_change=files_to_change,
+ push_to_lando=push_to_lando,
+ )
diff --git a/tools/tryselect/selectors/scriptworker.py b/tools/tryselect/selectors/scriptworker.py
new file mode 100644
index 0000000000..08020390c2
--- /dev/null
+++ b/tools/tryselect/selectors/scriptworker.py
@@ -0,0 +1,174 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import sys
+
+import requests
+from gecko_taskgraph.util.taskgraph import find_existing_tasks
+from taskgraph.parameters import Parameters
+from taskgraph.util.taskcluster import find_task_id, get_artifact, get_session
+
+from ..cli import BaseTryParser
+from ..push import push_to_try
+
+TASK_TYPES = {
+ "linux-signing": [
+ "build-signing-linux-shippable/opt",
+ "build-signing-linux64-shippable/opt",
+ "build-signing-win64-shippable/opt",
+ "build-signing-win32-shippable/opt",
+ "repackage-signing-win64-shippable/opt",
+ "repackage-signing-win32-shippable/opt",
+ "repackage-signing-msi-win32-shippable/opt",
+ "repackage-signing-msi-win64-shippable/opt",
+ "mar-signing-linux64-shippable/opt",
+ ],
+ "linux-signing-partial": ["partials-signing-linux64-shippable/opt"],
+ "mac-signing": ["build-signing-macosx64-shippable/opt"],
+ "beetmover-candidates": ["beetmover-repackage-linux64-shippable/opt"],
+ "bouncer-submit": ["release-bouncer-sub-firefox"],
+ "balrog-submit": [
+ "release-balrog-submit-toplevel-firefox",
+ "balrog-linux64-shippable/opt",
+ ],
+ "tree": ["release-early-tagging-firefox", "release-version-bump-firefox"],
+}
+
+RELEASE_TO_BRANCH = {
+ "beta": "releases/mozilla-beta",
+ "release": "releases/mozilla-release",
+}
+
+
+class ScriptworkerParser(BaseTryParser):
+ name = "scriptworker"
+ arguments = [
+ [
+ ["task_type"],
+ {
+ "choices": ["list"] + list(TASK_TYPES.keys()),
+ "metavar": "TASK-TYPE",
+ "help": "Scriptworker task types to run. (Use `list` to show possibilities)",
+ },
+ ],
+ [
+ ["--release-type"],
+ {
+ "choices": ["nightly"] + list(RELEASE_TO_BRANCH.keys()),
+ "default": "beta",
+ "help": "Release type to run",
+ },
+ ],
+ ]
+
+ common_groups = ["push"]
+ task_configs = ["worker-overrides", "routes"]
+
+
+def get_releases(branch):
+ response = requests.get(
+ "https://shipitapi-public.services.mozilla.com/releases",
+ params={"product": "firefox", "branch": branch, "status": "shipped"},
+ headers={"Accept": "application/json"},
+ )
+ response.raise_for_status()
+ return response.json()
+
+
+def get_release_graph(release):
+ for phase in release["phases"]:
+ if phase["name"] in ("ship_firefox",):
+ return phase["actionTaskId"]
+ raise Exception("No ship phase.")
+
+
+def get_nightly_graph():
+ return find_task_id(
+ "gecko.v2.mozilla-central.latest.taskgraph.decision-nightly-desktop"
+ )
+
+
+def print_available_task_types():
+ print("Available task types:")
+ for task_type, tasks in TASK_TYPES.items():
+ print(" " * 4 + "{}:".format(task_type))
+ for task in tasks:
+ print(" " * 8 + "- {}".format(task))
+
+
+def get_hg_file(parameters, path):
+ session = get_session()
+ response = session.get(parameters.file_url(path))
+ response.raise_for_status()
+ return response.content
+
+
+def run(
+ task_type,
+ release_type,
+ try_config_params=None,
+ stage_changes=False,
+ dry_run=False,
+ message="{msg}",
+ closed_tree=False,
+ push_to_lando=False,
+):
+ if task_type == "list":
+ print_available_task_types()
+ sys.exit(0)
+
+ if release_type == "nightly":
+ previous_graph = get_nightly_graph()
+ else:
+ release = get_releases(RELEASE_TO_BRANCH[release_type])[-1]
+ previous_graph = get_release_graph(release)
+ existing_tasks = find_existing_tasks([previous_graph])
+
+ previous_parameters = Parameters(
+ strict=False, **get_artifact(previous_graph, "public/parameters.yml")
+ )
+
+ # Copy L10n configuration from the commit the release we are using was
+ # based on. This *should* ensure that the chunking of L10n tasks is the
+ # same between graphs.
+ files_to_change = {
+ path: get_hg_file(previous_parameters, path)
+ for path in [
+ "browser/locales/l10n-changesets.json",
+ "browser/locales/shipped-locales",
+ ]
+ }
+
+ task_config = {"version": 2, "parameters": try_config_params or {}}
+ task_config["parameters"]["optimize_target_tasks"] = True
+ task_config["parameters"]["existing_tasks"] = existing_tasks
+ for param in (
+ "app_version",
+ "build_number",
+ "next_version",
+ "release_history",
+ "release_product",
+ "release_type",
+ "version",
+ ):
+ task_config["parameters"][param] = previous_parameters[param]
+
+ try_config = task_config["parameters"].setdefault("try_task_config", {})
+ try_config["tasks"] = TASK_TYPES[task_type]
+ for label in try_config["tasks"]:
+ if label in existing_tasks:
+ del existing_tasks[label]
+
+ msg = "scriptworker tests: {}".format(task_type)
+ return push_to_try(
+ "scriptworker",
+ message.format(msg=msg),
+ stage_changes=stage_changes,
+ dry_run=dry_run,
+ closed_tree=closed_tree,
+ try_task_config=task_config,
+ files_to_change=files_to_change,
+ push_to_lando=push_to_lando,
+ )
diff --git a/tools/tryselect/selectors/syntax.py b/tools/tryselect/selectors/syntax.py
new file mode 100644
index 0000000000..29b80f519a
--- /dev/null
+++ b/tools/tryselect/selectors/syntax.py
@@ -0,0 +1,708 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import os
+import re
+import sys
+from collections import defaultdict
+
+import mozpack.path as mozpath
+from moztest.resolve import TestResolver
+
+from ..cli import BaseTryParser
+from ..push import build, push_to_try
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+
class SyntaxParser(BaseTryParser):
    """Argument parser for the legacy "try syntax" selector.

    Extends ``BaseTryParser`` with the options understood by try syntax
    (build types, platforms, unittest suites, talos suites, jobs, tags)
    plus a group of "pass-through" arguments that are echoed verbatim
    into the generated try commit message.
    """

    name = "syntax"
    arguments = [
        [
            ["paths"],
            {
                "nargs": "*",
                "default": [],
                "help": "Paths to search for tests to run on try.",
            },
        ],
        [
            ["-b", "--build"],
            {
                "dest": "builds",
                "default": "do",
                "help": "Build types to run (d for debug, o for optimized).",
            },
        ],
        [
            ["-p", "--platform"],
            {
                "dest": "platforms",
                "action": "append",
                "help": "Platforms to run (required if not found in the environment as "
                "AUTOTRY_PLATFORM_HINT).",
            },
        ],
        [
            ["-u", "--unittests"],
            {
                "dest": "tests",
                "action": "append",
                "help": "Test suites to run in their entirety.",
            },
        ],
        [
            ["-t", "--talos"],
            {
                "action": "append",
                "help": "Talos suites to run.",
            },
        ],
        [
            ["-j", "--jobs"],
            {
                "action": "append",
                "help": "Job tasks to run.",
            },
        ],
        [
            ["--tag"],
            {
                "dest": "tags",
                "action": "append",
                "help": "Restrict tests to the given tag (may be specified multiple times).",
            },
        ],
        [
            ["--and"],
            {
                "action": "store_true",
                "dest": "intersection",
                "help": "When -u and paths are supplied run only the intersection of the "
                "tests specified by the two arguments.",
            },
        ],
        [
            ["--no-artifact"],
            {
                "action": "store_true",
                "help": "Disable artifact builds even if --enable-artifact-builds is set "
                "in the mozconfig.",
            },
        ],
        [
            ["-v", "--verbose"],
            {
                "dest": "verbose",
                "action": "store_true",
                "default": False,
                "help": "Print detailed information about the resulting test selection "
                "and commands performed.",
            },
        ],
    ]

    # Arguments we will accept on the command line and pass through to try
    # syntax with no further intervention. The set is taken from
    # http://trychooser.pub.build.mozilla.org with a few additions.
    #
    # Note that the meaning of store_false and store_true arguments is
    # not preserved here, as we're only using these to echo the literal
    # arguments to another consumer. Specifying either store_false or
    # store_true here will have an equivalent effect.
    pass_through_arguments = {
        "--rebuild": {
            "action": "store",
            "dest": "rebuild",
            "help": "Re-trigger all test jobs (up to 20 times)",
        },
        "--rebuild-talos": {
            "action": "store",
            "dest": "rebuild_talos",
            "help": "Re-trigger all talos jobs",
        },
        "--interactive": {
            "action": "store_true",
            "dest": "interactive",
            "help": "Allow ssh-like access to running test containers",
        },
        "--no-retry": {
            "action": "store_true",
            "dest": "no_retry",
            "help": "Do not retrigger failed tests",
        },
        "--setenv": {
            "action": "append",
            "dest": "setenv",
            "help": "Set the corresponding variable in the test environment for "
            "applicable harnesses.",
        },
        "-f": {
            "action": "store_true",
            "dest": "failure_emails",
            "help": "Request failure emails only",
        },
        "--failure-emails": {
            "action": "store_true",
            "dest": "failure_emails",
            "help": "Request failure emails only",
        },
        "-e": {
            "action": "store_true",
            "dest": "all_emails",
            "help": "Request all emails",
        },
        "--all-emails": {
            "action": "store_true",
            "dest": "all_emails",
            "help": "Request all emails",
        },
        "--artifact": {
            "action": "store_true",
            "dest": "artifact",
            "help": "Force artifact builds where possible.",
        },
        "--upload-xdbs": {
            "action": "store_true",
            "dest": "upload_xdbs",
            "help": "Upload XDB compilation db files generated by hazard build",
        },
    }
    # This selector supports no task configs (see BaseTryParser) — all its
    # behavior is expressed through try syntax itself.
    task_configs = []

    def __init__(self, *args, **kwargs):
        """Register the base arguments plus the pass-through group."""
        BaseTryParser.__init__(self, *args, **kwargs)

        group = self.add_argument_group("pass-through arguments")
        for arg, opts in self.pass_through_arguments.items():
            group.add_argument(arg, **opts)
+
+
class TryArgumentTokenizer:
    """Lexer for try argument strings such as ``"foo[a, b],bar"``.

    ``tokenize`` yields ``(symbol, text)`` pairs for separators, list
    delimiters and items; whitespace between tokens is consumed silently.
    """

    symbols = [
        ("separator", ","),
        ("list_start", r"\["),
        ("list_end", r"\]"),
        ("item", r"([^,\[\]\s][^,\[\]]+)"),
        ("space", r"\s+"),
    ]
    # One alternation with a named group per symbol; ``lastgroup`` then
    # identifies which symbol matched.
    token_re = re.compile("|".join("(?P<{}>{})".format(*sym) for sym in symbols))

    def tokenize(self, data):
        """Yield (symbol, text) tuples for every non-space token in *data*."""
        for match in self.token_re.finditer(data):
            kind = match.lastgroup
            if kind != "space":
                yield kind, match.group(kind)
+
+
class TryArgumentParser:
    """Simple three-state parser for handling expressions
    of the from "foo[sub item, another], bar,baz". This takes
    input from the TryArgumentTokenizer and runs through a small
    state machine, returning a dictionary of {top-level-item:[sub_items]}
    i.e. the above would result in
    {"foo":["sub item", "another"], "bar": [], "baz": []}
    In the case of invalid input a ValueError is raised."""

    # Sentinel token type marking the end of input.
    EOF = object()

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all parser state so the instance can be reused."""
        self.tokens = None
        self.current_item = None
        self.data = {}
        self.token = None
        self.state = None

    def parse(self, tokens):
        """Consume *tokens* and return the {item: [subitems]} mapping."""
        self.reset()
        self.tokens = tokens
        self.consume()
        self.state = self.item_state
        while self.token[0] is not self.EOF:
            self.state()
        return self.data

    def consume(self):
        # Advance to the next token, substituting the EOF sentinel when
        # the token stream is exhausted.
        self.token = next(self.tokens, (self.EOF, None))

    def expect(self, *types):
        """Raise ValueError unless the current token is one of *types*."""
        kind = self.token[0]
        if kind not in types:
            raise ValueError("Error parsing try string, unexpected %s" % (kind))

    def item_state(self):
        # A top-level item, optionally followed by a bracketed subitem list.
        self.expect("item")
        name = self.token[1].strip()
        self.data.setdefault(name, [])
        self.current_item = name
        self.consume()
        kind = self.token[0]
        if kind == "separator":
            self.consume()
        elif kind == "list_start":
            self.consume()
            self.state = self.subitem_state
        elif kind is self.EOF:
            pass
        else:
            raise ValueError

    def subitem_state(self):
        # An item inside brackets, attached to the current top-level item.
        self.expect("item")
        self.data[self.current_item].append(self.token[1].strip())
        self.consume()
        kind = self.token[0]
        if kind == "separator":
            self.consume()
        elif kind == "list_end":
            self.consume()
            self.state = self.after_list_end_state
        else:
            raise ValueError

    def after_list_end_state(self):
        # Only a separator (then a new item) may follow a closing bracket.
        self.expect("separator")
        self.consume()
        self.state = self.item_state
+
+
def parse_arg(arg):
    """Tokenize and parse one try argument string into {item: [subitems]}."""
    return TryArgumentParser().parse(TryArgumentTokenizer().tokenize(arg))
+
+
class AutoTry:
    """Core of the legacy try syntax selector.

    Resolves the requested paths/tags into test flavors, combines them
    with explicitly requested suites/talos/jobs, renders the whole thing
    as a ``try:`` syntax message and pushes it via ``push_to_try``.
    """

    # Maps from flavors to the job names needed to run that flavour
    flavor_jobs = {
        "mochitest": ["mochitest-1", "mochitest-e10s-1"],
        "xpcshell": ["xpcshell"],
        "chrome": ["mochitest-o"],
        "browser-a11y": ["mochitest-ba"],
        "browser-media": ["mochitest-bmda"],
        "browser-chrome": [
            "mochitest-browser-chrome-1",
            "mochitest-e10s-browser-chrome-1",
            "mochitest-browser-chrome-e10s-1",
        ],
        "devtools-chrome": [
            "mochitest-devtools-chrome-1",
            "mochitest-e10s-devtools-chrome-1",
            "mochitest-devtools-chrome-e10s-1",
        ],
        "crashtest": ["crashtest", "crashtest-e10s"],
        "reftest": ["reftest", "reftest-e10s"],
        "remote": ["mochitest-remote"],
        "web-platform-tests": ["web-platform-tests-1"],
    }

    # Maps from flavors to the -u suite name that schedules them.
    flavor_suites = {
        "mochitest": "mochitests",
        "xpcshell": "xpcshell",
        "chrome": "mochitest-o",
        "browser-chrome": "mochitest-bc",
        "browser-a11y": "mochitest-ba",
        "browser-media": "mochitest-bmda",
        "devtools-chrome": "mochitest-dt",
        "crashtest": "crashtest",
        "reftest": "reftest",
        "web-platform-tests": "web-platform-tests",
    }

    # Suites that need compiled code and therefore can't run against an
    # artifact build.
    compiled_suites = [
        "cppunit",
        "gtest",
        "jittest",
    ]

    # Suites implied by `-u all`.
    common_suites = [
        "cppunit",
        "crashtest",
        "firefox-ui-functional",
        "geckoview",
        "geckoview-junit",
        "gtest",
        "jittest",
        "jsreftest",
        "marionette",
        "marionette-e10s",
        "mochitests",
        "reftest",
        "robocop",
        "web-platform-tests",
        "xpcshell",
    ]

    def __init__(self):
        self.topsrcdir = build.topsrcdir
        self._resolver = None

    @property
    def resolver(self):
        # Built lazily; constructing a TestResolver is expensive.
        if self._resolver is None:
            self._resolver = TestResolver.from_environment(cwd=here)
        return self._resolver

    @classmethod
    def split_try_string(cls, data):
        # Split on whitespace but keep bracketed groups (e.g. "foo[a, b]")
        # attached to their item.
        return re.findall(r"(?:\[.*?\]|\S)+", data)

    def paths_by_flavor(self, paths=None, tags=None):
        """Resolve *paths*/*tags* to tests and group their directories by flavor.

        Returns a dict mapping flavor name -> set of source-relative
        directories containing matching tests.
        """
        paths_by_flavor = defaultdict(set)

        if not (paths or tags):
            return dict(paths_by_flavor)

        tests = list(self.resolver.resolve_tests(paths=paths, tags=tags))

        for t in tests:
            if t["flavor"] in self.flavor_suites:
                flavor = t["flavor"]
                # Some subsuites are scheduled as their own flavor.
                if "subsuite" in t and t["subsuite"] == "devtools":
                    flavor = "devtools-chrome"

                if "subsuite" in t and t["subsuite"] == "a11y":
                    flavor = "browser-a11y"

                if "subsuite" in t and t["subsuite"] == "media-bc":
                    flavor = "browser-media"

                if flavor in ["crashtest", "reftest"]:
                    # These harnesses select by manifest directory.
                    manifest_relpath = os.path.relpath(t["manifest"], self.topsrcdir)
                    paths_by_flavor[flavor].add(os.path.dirname(manifest_relpath))
                elif "dir_relpath" in t:
                    paths_by_flavor[flavor].add(t["dir_relpath"])
                else:
                    file_relpath = os.path.relpath(t["path"], self.topsrcdir)
                    dir_relpath = os.path.dirname(file_relpath)
                    paths_by_flavor[flavor].add(dir_relpath)

        for flavor, path_set in paths_by_flavor.items():
            paths_by_flavor[flavor] = self.deduplicate_prefixes(path_set, paths)

        return dict(paths_by_flavor)

    def deduplicate_prefixes(self, path_set, input_paths):
        # Removes paths redundant to test selection in the given path set.
        # If a path was passed on the commandline that is the prefix of a
        # path in our set, we only need to include the specified prefix to
        # run the intended tests (every test in "layout/base" will run if
        # "layout" is passed to the reftest harness).
        removals = set()
        additions = set()

        for path in path_set:
            full_path = path
            while path:
                path, _ = os.path.split(path)
                if path in input_paths:
                    removals.add(full_path)
                    additions.add(path)

        return additions | (path_set - removals)

    def remove_duplicates(self, paths_by_flavor, tests):
        """Drop flavors whose suite was already requested explicitly via -u."""
        rv = {}
        for item in paths_by_flavor:
            if self.flavor_suites[item] not in tests:
                rv[item] = paths_by_flavor[item].copy()
        return rv

    def calc_try_syntax(
        self,
        platforms,
        tests,
        talos,
        jobs,
        builds,
        paths_by_flavor,
        tags,
        extras,
        intersection,
    ):
        """Render all selections into a single ``try:`` syntax string.

        Raises ValueError when the selection is contradictory (e.g. no
        tests match with --and, or compiled suites with --artifact).
        """
        parts = ["try:"]

        if platforms:
            parts.extend(["-b", builds, "-p", ",".join(platforms)])

        suites = tests if not intersection else {}
        paths = set()
        for flavor, flavor_tests in paths_by_flavor.items():
            suite = self.flavor_suites[flavor]
            if suite not in suites and (not intersection or suite in tests):
                for job_name in self.flavor_jobs[flavor]:
                    for test in flavor_tests:
                        paths.add("{}:{}".format(flavor, test))
                    suites[job_name] = tests.get(suite, [])

        # intersection implies tests are expected
        if intersection and not suites:
            raise ValueError("No tests found matching filters")

        if extras.get("artifact") and any([p.endswith("-nightly") for p in platforms]):
            print(
                'You asked for |--artifact| but "-nightly" platforms don\'t have artifacts. '
                "Running without |--artifact| instead."
            )
            del extras["artifact"]

        if extras.get("artifact"):
            rejected = []
            for suite in suites.keys():
                if any([suite.startswith(c) for c in self.compiled_suites]):
                    rejected.append(suite)
            if rejected:
                raise ValueError(
                    "You can't run {} with "
                    "--artifact option.".format(", ".join(rejected))
                )

        if extras.get("artifact") and "all" in suites.keys():
            non_compiled_suites = set(self.common_suites) - set(self.compiled_suites)
            message = (
                "You asked for |-u all| with |--artifact| but compiled-code tests ({tests})"
                " can't run against an artifact build. Running (-u {non_compiled_suites}) "
                "instead."
            )
            string_format = {
                "tests": ",".join(self.compiled_suites),
                "non_compiled_suites": ",".join(non_compiled_suites),
            }
            print(message.format(**string_format))
            del suites["all"]
            suites.update({suite_name: None for suite_name in non_compiled_suites})

        if suites:
            parts.append("-u")
            parts.append(
                ",".join(
                    "{}{}".format(k, "[%s]" % ",".join(v) if v else "")
                    for k, v in sorted(suites.items())
                )
            )

        if talos:
            parts.append("-t")
            parts.append(
                ",".join(
                    "{}{}".format(k, "[%s]" % ",".join(v) if v else "")
                    for k, v in sorted(talos.items())
                )
            )

        if jobs:
            parts.append("-j")
            parts.append(",".join(jobs))

        if tags:
            parts.append(" ".join("--tag %s" % t for t in tags))

        if paths:
            parts.append("--try-test-paths %s" % " ".join(sorted(paths)))

        # Echo any pass-through arguments verbatim, reconstructing the
        # original flag from its parser dest.
        args_by_dest = {
            v["dest"]: k for k, v in SyntaxParser.pass_through_arguments.items()
        }
        for dest, value in extras.items():
            assert dest in args_by_dest
            arg = args_by_dest[dest]
            action = SyntaxParser.pass_through_arguments[arg]["action"]
            if action == "store":
                parts.append(arg)
                parts.append(value)
            if action == "append":
                for e in value:
                    parts.append(arg)
                    parts.append(e)
            if action in ("store_true", "store_false"):
                parts.append(arg)

        return " ".join(parts)

    def normalise_list(self, items, allow_subitems=False):
        """Parse and merge a list of argument strings via ``parse_arg``.

        Returns just the keys unless *allow_subitems*, in which case the
        full {item: [subitems]} mapping is returned.
        """
        rv = defaultdict(list)
        for item in items:
            parsed = parse_arg(item)
            for key, values in parsed.items():
                rv[key].extend(values)

        if not allow_subitems:
            if not all(item == [] for item in rv.values()):
                raise ValueError("Unexpected subitems in argument")
            return rv.keys()
        else:
            return rv

    def validate_args(self, **kwargs):
        """Validate CLI kwargs and normalise them into structured values.

        Exits the process with a message on any invalid combination.
        """
        tests_selected = kwargs["tests"] or kwargs["paths"] or kwargs["tags"]
        if kwargs["platforms"] is None and (kwargs["jobs"] is None or tests_selected):
            # Fall back to the environment for a platform if one is set.
            if "AUTOTRY_PLATFORM_HINT" in os.environ:
                kwargs["platforms"] = [os.environ["AUTOTRY_PLATFORM_HINT"]]
            elif tests_selected:
                print("Must specify platform when selecting tests.")
                sys.exit(1)
            else:
                print(
                    "Either platforms or jobs must be specified as an argument to autotry."
                )
                sys.exit(1)

        try:
            platforms = (
                self.normalise_list(kwargs["platforms"]) if kwargs["platforms"] else {}
            )
        except ValueError as e:
            print("Error parsing -p argument:\n%s" % e)
            sys.exit(1)

        try:
            tests = (
                self.normalise_list(kwargs["tests"], allow_subitems=True)
                if kwargs["tests"]
                else {}
            )
        except ValueError as e:
            print("Error parsing -u argument ({}):\n{}".format(kwargs["tests"], e))
            sys.exit(1)

        try:
            talos = (
                self.normalise_list(kwargs["talos"], allow_subitems=True)
                if kwargs["talos"]
                else []
            )
        except ValueError as e:
            print("Error parsing -t argument:\n%s" % e)
            sys.exit(1)

        try:
            jobs = self.normalise_list(kwargs["jobs"]) if kwargs["jobs"] else {}
        except ValueError as e:
            print("Error parsing -j argument:\n%s" % e)
            sys.exit(1)

        paths = []
        for p in kwargs["paths"]:
            p = mozpath.normpath(os.path.abspath(p))
            if not (os.path.isdir(p) and p.startswith(self.topsrcdir)):
                print(
                    'Specified path "%s" is not a directory under the srcdir,'
                    " unable to specify tests outside of the srcdir" % p
                )
                sys.exit(1)
            if len(p) <= len(self.topsrcdir):
                print(
                    'Specified path "%s" is at the top of the srcdir and would'
                    " select all tests." % p
                )
                sys.exit(1)
            paths.append(os.path.relpath(p, self.topsrcdir))

        try:
            # NOTE(review): the flag is --tag but this message says --tags.
            tags = self.normalise_list(kwargs["tags"]) if kwargs["tags"] else []
        except ValueError as e:
            print("Error parsing --tags argument:\n%s" % e)
            sys.exit(1)

        extra_values = {k["dest"] for k in SyntaxParser.pass_through_arguments.values()}
        extra_args = {k: v for k, v in kwargs.items() if k in extra_values and v}

        return kwargs["builds"], platforms, tests, talos, jobs, paths, tags, extra_args

    def run(self, **kwargs):
        """Validate arguments, compute try syntax and push to try."""
        if not any(kwargs[item] for item in ("paths", "tests", "tags")):
            kwargs["paths"] = set()
            kwargs["tags"] = set()

        builds, platforms, tests, talos, jobs, paths, tags, extra = self.validate_args(
            **kwargs
        )

        if paths or tags:
            paths = [
                os.path.relpath(os.path.normpath(os.path.abspath(item)), self.topsrcdir)
                for item in paths
            ]
            paths_by_flavor = self.paths_by_flavor(paths=paths, tags=tags)

            if not paths_by_flavor and not tests:
                print(
                    "No tests were found when attempting to resolve paths:\n\n\t%s"
                    % paths
                )
                sys.exit(1)

            if not kwargs["intersection"]:
                paths_by_flavor = self.remove_duplicates(paths_by_flavor, tests)
        else:
            paths_by_flavor = {}

        # No point in dealing with artifacts if we aren't running any builds
        local_artifact_build = False
        if platforms:
            local_artifact_build = kwargs.get("local_artifact_build", False)

            # Add --artifact if --enable-artifact-builds is set ...
            if local_artifact_build:
                extra["artifact"] = True
            # ... unless --no-artifact is explicitly given.
            if kwargs["no_artifact"]:
                if "artifact" in extra:
                    del extra["artifact"]

        try:
            msg = self.calc_try_syntax(
                platforms,
                tests,
                talos,
                jobs,
                builds,
                paths_by_flavor,
                tags,
                extra,
                kwargs["intersection"],
            )
        except ValueError as e:
            print(e)
            sys.exit(1)

        if local_artifact_build and not kwargs["no_artifact"]:
            print(
                "mozconfig has --enable-artifact-builds; including "
                "--artifact flag in try syntax (use --no-artifact "
                "to override)"
            )

        if kwargs["verbose"] and paths_by_flavor:
            print("The following tests will be selected: ")
            for flavor, paths in paths_by_flavor.items():
                print("{}: {}".format(flavor, ",".join(paths)))

        if kwargs["verbose"]:
            print("The following try syntax was calculated:\n%s" % msg)

        push_to_try(
            "syntax",
            kwargs["message"].format(msg=msg),
            stage_changes=kwargs["stage_changes"],
            dry_run=kwargs["dry_run"],
            closed_tree=kwargs["closed_tree"],
            push_to_lando=kwargs["push_to_lando"],
        )
+
+
def run(**kwargs):
    """Entry point for the syntax selector: delegate to a fresh AutoTry."""
    return AutoTry().run(**kwargs)
diff --git a/tools/tryselect/task_config.py b/tools/tryselect/task_config.py
new file mode 100644
index 0000000000..f7a78cbfbf
--- /dev/null
+++ b/tools/tryselect/task_config.py
@@ -0,0 +1,642 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Templates provide a way of modifying the task definition of selected tasks.
+They are added to 'try_task_config.json' and processed by the transforms.
+"""
+
+
+import json
+import os
+import pathlib
+import subprocess
+import sys
+from abc import ABCMeta, abstractmethod, abstractproperty
+from argparse import SUPPRESS, Action
+from contextlib import contextmanager
+from textwrap import dedent
+
+import mozpack.path as mozpath
+import requests
+import six
+from mozbuild.base import BuildEnvironmentNotFoundException, MozbuildObject
+from mozversioncontrol import Repository
+from taskgraph.util import taskcluster
+
+from .tasks import resolve_tests_by_suite
+from .util.ssh import get_ssh_user
+
+here = pathlib.Path(__file__).parent
+build = MozbuildObject.from_environment(cwd=str(here))
+
+
@contextmanager
def try_config_commit(vcs: Repository, commit_message: str):
    """Context manager that creates and removes a try config commit.

    While the ``with`` body runs, the working copy has a temporary commit
    (including ``try_task_config.json`` if present) with *commit_message*;
    the commit is removed on exit even if the body raises.
    """
    # Add the `try_task_config.json` file if it exists.
    try_task_config_path = pathlib.Path(build.topsrcdir) / "try_task_config.json"
    if try_task_config_path.exists():
        vcs.add_remove_files("try_task_config.json")

    try:
        # Create a try config commit.
        vcs.create_try_commit(commit_message)

        yield
    finally:
        # Revert the try config commit.
        vcs.remove_current_commit()
+
+
class ParameterConfig(metaclass=ABCMeta):
    """Abstract base for options that contribute taskgraph parameters.

    Subclasses declare ``arguments`` (a list of ``[cli_names, kwargs]``
    pairs for ``argparse``) and implement ``get_parameters``, which maps
    the parsed values to a dict of taskgraph parameters (or None for a
    no-op).

    Bug fix: the original used the Python 2 ``__metaclass__ = ABCMeta``
    attribute, which is ignored by Python 3, so the abstract methods were
    never enforced. Declaring ``metaclass=ABCMeta`` restores that, and the
    deprecated ``abstractproperty`` is expressed as ``@property`` +
    ``@abstractmethod``.
    """

    def __init__(self):
        # Parser dests registered by add_arguments; used by callers to know
        # which kwargs belong to this config.
        self.dests = set()

    def add_arguments(self, parser):
        """Register this config's arguments on *parser*, recording dests."""
        for cli, kwargs in self.arguments:
            action = parser.add_argument(*cli, **kwargs)
            self.dests.add(action.dest)

    @property
    @abstractmethod
    def arguments(self):
        """List of [cli_names, argparse_kwargs] pairs (overridable as a
        plain class attribute in subclasses)."""

    @abstractmethod
    def get_parameters(self, **kwargs) -> dict:
        """Return the taskgraph parameters contributed by this config."""

    def validate(self, **kwargs):
        """Optional hook for cross-config validation; default is a no-op."""
+
+
class TryConfig(ParameterConfig):
    """A ParameterConfig whose output nests under ``try_task_config``."""

    @abstractmethod
    def try_config(self, **kwargs) -> dict:
        """Return this option's try_task_config contribution, or None."""

    def get_parameters(self, **kwargs):
        result = self.try_config(**kwargs)
        return None if result is None else {"try_task_config": result}
+
+
class Artifact(TryConfig):
    """--artifact / --no-artifact: control use of artifact builds."""

    arguments = [
        [
            ["--artifact"],
            {"action": "store_true", "help": "Force artifact builds where possible."},
        ],
        [
            ["--no-artifact"],
            {
                "action": "store_true",
                "help": "Disable artifact builds even if being used locally.",
            },
        ],
    ]

    def add_arguments(self, parser):
        # The two flags contradict each other, so register them in a
        # mutually exclusive group.
        return super().add_arguments(parser.add_mutually_exclusive_group())

    @classmethod
    def is_artifact_build(cls):
        """Whether the local mozconfig enables artifact builds."""
        try:
            return build.substs.get("MOZ_ARTIFACT_BUILDS", False)
        except BuildEnvironmentNotFoundException:
            return False

    def try_config(self, artifact, no_artifact, **kwargs):
        if not artifact:
            if no_artifact:
                return None
            if not self.is_artifact_build():
                return None
            # Enabled implicitly by the local mozconfig.
            print("Artifact builds enabled, pass --no-artifact to disable")
        return {"use-artifact-builds": True, "disable-pgo": True}
+
+
class Pernosco(TryConfig):
    """--pernosco / --no-pernosco: opt in/out of the Pernosco debugging service.

    Opting in verifies (best effort, via the configured ssh user) that the
    pusher has an @mozilla.com address, since Pernosco reports require one.
    """

    arguments = [
        [
            ["--pernosco"],
            {
                "action": "store_true",
                "default": None,
                "help": "Opt-in to analysis by the Pernosco debugging service.",
            },
        ],
        [
            ["--no-pernosco"],
            {
                "dest": "pernosco",
                "action": "store_false",
                "default": None,
                "help": "Opt-out of the Pernosco debugging service (if you are on the include list).",
            },
        ],
    ]

    def add_arguments(self, parser):
        # --pernosco and --no-pernosco contradict each other.
        group = parser.add_mutually_exclusive_group()
        return super().add_arguments(group)

    def try_config(self, pernosco, **kwargs):
        # Tri-state: None means neither flag was passed.
        if pernosco is None:
            return

        if pernosco:
            try:
                # The Pernosco service currently requires a Mozilla e-mail address to
                # log in. Prevent people with non-Mozilla addresses from using this
                # flag so they don't end up consuming time and resources only to
                # realize they can't actually log in and see the reports.
                address = get_ssh_user()
                if not address.endswith("@mozilla.com"):
                    print(
                        dedent(
                            """\
                        Pernosco requires a Mozilla e-mail address to view its reports. Please
                        push to try with an @mozilla.com address to use --pernosco.

                            Current user: {}
                    """.format(
                                address
                            )
                        )
                    )
                    sys.exit(1)

            except (subprocess.CalledProcessError, IndexError):
                # Could not determine the user; fall back to asking.
                print("warning: failed to detect current user for 'hg.mozilla.org'")
                print("Pernosco requires a Mozilla e-mail address to view its reports.")
                while True:
                    answer = input(
                        "Do you have an @mozilla.com address? [Y/n]: "
                    ).lower()
                    if answer == "n":
                        sys.exit(1)
                    elif answer == "y":
                        break

        return {
            "env": {
                "PERNOSCO": str(int(pernosco)),
            }
        }

    def validate(self, **kwargs):
        """Reject --pernosco combined with artifact builds."""
        try_config = kwargs["try_config_params"].get("try_task_config") or {}
        if try_config.get("use-artifact-builds"):
            print(
                "Pernosco does not support artifact builds at this time. "
                "Please try again with '--no-artifact'."
            )
            sys.exit(1)
+
+
class Path(TryConfig):
    """Positional test paths: select tasks running tests under them."""

    arguments = [
        [
            ["paths"],
            {
                "nargs": "*",
                "default": [],
                "help": "Run tasks containing tests under the specified path(s).",
            },
        ],
    ]

    def try_config(self, paths, **kwargs):
        if not paths:
            return None

        # Bail out early on anything that doesn't exist on disk.
        for candidate in paths:
            if not os.path.exists(candidate):
                print(
                    "error: '{}' is not a valid path.".format(candidate),
                    file=sys.stderr,
                )
                sys.exit(1)

        cwd = os.getcwd()
        relpaths = [
            mozpath.relpath(mozpath.join(cwd, candidate), build.topsrcdir)
            for candidate in paths
        ]
        # Test harnesses read the selection from this environment variable.
        test_paths = six.ensure_text(json.dumps(resolve_tests_by_suite(relpaths)))
        return {
            "env": {
                "MOZHARNESS_TEST_PATHS": test_paths,
            }
        }
+
+
class Environment(TryConfig):
    """--env FOO=BAR: inject environment variables into selected tasks."""

    arguments = [
        [
            ["--env"],
            {
                "action": "append",
                "default": None,
                "help": "Set an environment variable, of the form FOO=BAR. "
                "Can be passed in multiple times.",
            },
        ],
    ]

    def try_config(self, env, **kwargs):
        if not env:
            return None
        # Split each "NAME=VALUE" entry on the first '=' only, so values may
        # themselves contain '='.
        variables = {}
        for entry in env:
            name, value = entry.split("=", 1)
            variables[name] = value
        return {"env": variables}
+
+
class ExistingTasks(ParameterConfig):
    """-E/--use-existing-tasks: reuse tasks from a previous push.

    Accepted values:

    * no argument (const ``last_try_push``): the user's most recent try
      push, looked up on Treeherder by ssh user,
    * ``rev=<revision>``: the try push with that head revision,
    * ``task-id=<task id>``: the graph of that Decision task (works for
      non-try branches too).
    """

    TREEHERDER_PUSH_ENDPOINT = (
        "https://treeherder.mozilla.org/api/project/try/push/?count=1&author={user}"
    )
    TREEHERDER_PUSH_URL = (
        "https://treeherder.mozilla.org/jobs?repo={branch}&revision={revision}"
    )

    arguments = [
        [
            ["-E", "--use-existing-tasks"],
            {
                "const": "last_try_push",
                "default": None,
                "nargs": "?",
                "help": """
                Use existing tasks from a previous push. Without args this
                uses your most recent try push. You may also specify
                `rev=<revision>` where <revision> is the head revision of the
                try push or `task-id=<task id>` where <task id> is the Decision
                task id of the push. This last method even works for non-try
                branches.
                """,
            },
        ]
    ]

    def find_decision_task(self, use_existing_tasks):
        """Resolve *use_existing_tasks* to a Decision task id via the index.

        Raises on an unparseable value or when no matching push exists.
        """
        branch = "try"
        if use_existing_tasks == "last_try_push":
            # Use existing tasks from user's previous try push.
            user = get_ssh_user()
            url = self.TREEHERDER_PUSH_ENDPOINT.format(user=user)
            res = requests.get(url, headers={"User-Agent": "gecko-mach-try/1.0"})
            res.raise_for_status()
            data = res.json()
            if data["meta"]["count"] == 0:
                raise Exception(f"Could not find a try push for '{user}'!")
            revision = data["results"][0]["revision"]

        elif use_existing_tasks.startswith("rev="):
            revision = use_existing_tasks[len("rev=") :]

        else:
            # Bug fix: this message was missing its `f` prefix, so the
            # placeholder was printed literally instead of the bad value.
            raise Exception(f"Unable to parse '{use_existing_tasks}'!")

        url = self.TREEHERDER_PUSH_URL.format(branch=branch, revision=revision)
        print(f"Using existing tasks from: {url}")
        index_path = f"gecko.v2.{branch}.revision.{revision}.taskgraph.decision"
        return taskcluster.find_task_id(index_path)

    def get_parameters(self, use_existing_tasks, **kwargs):
        """Return {'existing_tasks': label->taskid} or None when unused."""
        if not use_existing_tasks:
            return None

        if use_existing_tasks.startswith("task-id="):
            tid = use_existing_tasks[len("task-id=") :]
        else:
            tid = self.find_decision_task(use_existing_tasks)

        label_to_task_id = taskcluster.get_artifact(tid, "public/label-to-taskid.json")
        return {"existing_tasks": label_to_task_id}
+
+
class RangeAction(Action):
    """argparse action rejecting values outside a [min, max] range."""

    def __init__(self, min, max, *args, **kwargs):
        self.min = min
        self.max = max
        # Show the accepted range in --help, e.g. "[2-20]".
        kwargs["metavar"] = f"[{self.min}-{self.max}]"
        super().__init__(*args, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        name = option_string or self.dest
        if not self.min <= values <= self.max:
            if values < self.min:
                parser.error("{} can not be less than {}".format(name, self.min))
            parser.error("{} can not be more than {}".format(name, self.max))
        setattr(namespace, self.dest, values)
+
+
class Rebuild(TryConfig):
    """--rebuild N: run every selected task N times (2-20)."""

    arguments = [
        [
            ["--rebuild"],
            {
                "action": RangeAction,
                "min": 2,
                "max": 20,
                "default": None,
                "type": int,
                "help": "Rebuild all selected tasks the specified number of times.",
            },
        ],
    ]

    def try_config(self, rebuild, **kwargs):
        if not rebuild:
            return None

        # Rebuilding a full graph many times is a huge amount of load; cap
        # it unless --new-test-config explicitly asked for more.
        should_cap = (
            not kwargs.get("new_test_config", False)
            and kwargs.get("full")
            and rebuild > 3
        )
        if should_cap:
            print(
                "warning: limiting --rebuild to 3 when using --full. "
                "Use custom push actions to add more."
            )
            rebuild = 3

        return {"rebuild": rebuild}
+
+
class Routes(TryConfig):
    """--route: attach extra routes to the selected tasks."""

    arguments = [
        [
            ["--route"],
            {
                "action": "append",
                "dest": "routes",
                "help": (
                    "Additional route to add to the tasks "
                    "(note: these will not be added to the decision task)"
                ),
            },
        ],
    ]

    def try_config(self, routes, **kwargs):
        if not routes:
            return None
        return {"routes": routes}
+
+
class ChemspillPrio(TryConfig):
    """--chemspill-prio: schedule at elevated priority (chemspills only)."""

    arguments = [
        [
            ["--chemspill-prio"],
            {
                "action": "store_true",
                "help": "Run at a higher priority than most try jobs (chemspills only).",
            },
        ],
    ]

    def try_config(self, chemspill_prio, **kwargs):
        return {"chemspill-prio": True} if chemspill_prio else None
+
+
class GeckoProfile(TryConfig):
    """Enable gecko profiling during talos/raptor tasks and tune the profiler.

    Profiling is turned on either explicitly (--gecko-profile and its
    hidden aliases) or implicitly by passing a features/threads option.
    """

    arguments = [
        [
            ["--gecko-profile"],
            {
                "dest": "profile",
                "action": "store_true",
                "default": False,
                "help": "Create and upload a gecko profile during talos/raptor tasks.",
            },
        ],
        [
            ["--gecko-profile-interval"],
            {
                "dest": "gecko_profile_interval",
                "type": float,
                "help": "How frequently to take samples (ms)",
            },
        ],
        [
            ["--gecko-profile-entries"],
            {
                "dest": "gecko_profile_entries",
                "type": int,
                "help": "How many samples to take with the profiler",
            },
        ],
        [
            ["--gecko-profile-features"],
            {
                "dest": "gecko_profile_features",
                "type": str,
                "default": None,
                "help": "Set the features enabled for the profiler.",
            },
        ],
        [
            ["--gecko-profile-threads"],
            {
                "dest": "gecko_profile_threads",
                "type": str,
                "help": "Comma-separated list of threads to sample.",
            },
        ],
        # For backwards compatibility
        [
            ["--talos-profile"],
            {
                "dest": "profile",
                "action": "store_true",
                "default": False,
                "help": SUPPRESS,
            },
        ],
        # This is added for consistency with the 'syntax' selector
        [
            ["--geckoProfile"],
            {
                "dest": "profile",
                "action": "store_true",
                "default": False,
                "help": SUPPRESS,
            },
        ],
    ]

    def try_config(
        self,
        profile,
        gecko_profile_interval,
        gecko_profile_entries,
        gecko_profile_features,
        gecko_profile_threads,
        **kwargs,
    ):
        # Passing features or threads implies profiling even without the flag.
        if profile or not all(
            s is None for s in (gecko_profile_features, gecko_profile_threads)
        ):
            cfg = {
                "gecko-profile": True,
                "gecko-profile-interval": gecko_profile_interval,
                "gecko-profile-entries": gecko_profile_entries,
                "gecko-profile-features": gecko_profile_features,
                "gecko-profile-threads": gecko_profile_threads,
            }
            # Drop unset options so task-side defaults apply.
            return {key: value for key, value in cfg.items() if value is not None}
+
+
class Browsertime(TryConfig):
    """--browsertime: run Raptor tasks with browsertime."""

    arguments = [
        [
            ["--browsertime"],
            {
                "action": "store_true",
                "help": "Use browsertime during Raptor tasks.",
            },
        ],
    ]

    def try_config(self, browsertime, **kwargs):
        return {"browsertime": True} if browsertime else None
+
+
class DisablePgo(TryConfig):
    """--disable-pgo: skip PGO builds."""

    arguments = [
        [
            ["--disable-pgo"],
            {
                "action": "store_true",
                "help": "Don't run PGO builds",
            },
        ],
    ]

    def try_config(self, disable_pgo, **kwargs):
        return {"disable-pgo": True} if disable_pgo else None
+
+
class NewConfig(TryConfig):
    """--new-test-config: restart the browser after a mochitest failure."""

    arguments = [
        [
            ["--new-test-config"],
            {
                "action": "store_true",
                "help": "When a test fails (mochitest only) restart the browser and start from the next test",
            },
        ],
    ]

    def try_config(self, new_test_config, **kwargs):
        return {"new-test-config": True} if new_test_config else None
+
+
class WorkerOverrides(TryConfig):
    """Override which worker pools selected tasks run on.

    ``--worker-override alias=pool`` replaces the pool for an alias
    outright; ``--worker-suffix alias=suffix`` appends a suffix to the
    pool the alias would normally resolve to. Conflicting overrides for
    the same alias abort the push.
    """

    arguments = [
        [
            ["--worker-override"],
            {
                "action": "append",
                "dest": "worker_overrides",
                "help": (
                    "Override the worker pool used for a given taskgraph worker alias. "
                    "The argument should be `<alias>=<worker-pool>`. "
                    "Can be specified multiple times."
                ),
            },
        ],
        [
            ["--worker-suffix"],
            {
                "action": "append",
                "dest": "worker_suffixes",
                "help": (
                    "Override the worker pool used for a given taskgraph worker alias, "
                    "by appending a suffix to the work-pool. "
                    "The argument should be `<alias>=<suffix>`. "
                    "Can be specified multiple times."
                ),
            },
        ],
    ]

    def try_config(self, worker_overrides, worker_suffixes, **kwargs):
        # Imported here (not module level) — presumably to avoid the cost or
        # availability issues of gecko_taskgraph at import time; confirm
        # before hoisting.
        from gecko_taskgraph.util.workertypes import get_worker_type
        from taskgraph.config import load_graph_config

        overrides = {}
        if worker_overrides:
            for override in worker_overrides:
                alias, worker_pool = override.split("=", 1)
                if alias in overrides:
                    print(
                        "Can't override worker alias {alias} more than once. "
                        "Already set to use {previous}, but also asked to use {new}.".format(
                            alias=alias, previous=overrides[alias], new=worker_pool
                        )
                    )
                    sys.exit(1)
                overrides[alias] = worker_pool

        if worker_suffixes:
            root = build.topsrcdir
            root = os.path.join(root, "taskcluster", "ci")
            # Resolve each alias to its default provisioner/worker-type so
            # the suffix can be appended to the real pool name.
            graph_config = load_graph_config(root)
            for worker_suffix in worker_suffixes:
                alias, suffix = worker_suffix.split("=", 1)
                if alias in overrides:
                    print(
                        "Can't override worker alias {alias} more than once. "
                        "Already set to use {previous}, but also asked "
                        "to add suffix {suffix}.".format(
                            alias=alias, previous=overrides[alias], suffix=suffix
                        )
                    )
                    sys.exit(1)
                provisioner, worker_type = get_worker_type(
                    graph_config, worker_type=alias, parameters={"level": "1"}
                )
                overrides[alias] = "{provisioner}/{worker_type}{suffix}".format(
                    provisioner=provisioner, worker_type=worker_type, suffix=suffix
                )

        if overrides:
            return {"worker-overrides": overrides}
+
+
# Registry of every available task config, keyed by the name selectors use
# to declare which configs they support.
all_task_configs = {
    "artifact": Artifact,
    "browsertime": Browsertime,
    "chemspill-prio": ChemspillPrio,
    "disable-pgo": DisablePgo,
    "env": Environment,
    "existing-tasks": ExistingTasks,
    "gecko-profile": GeckoProfile,
    "new-test-config": NewConfig,
    "path": Path,
    "pernosco": Pernosco,
    "rebuild": Rebuild,
    "routes": Routes,
    "worker-overrides": WorkerOverrides,
}
diff --git a/tools/tryselect/tasks.py b/tools/tryselect/tasks.py
new file mode 100644
index 0000000000..fa3eebc161
--- /dev/null
+++ b/tools/tryselect/tasks.py
@@ -0,0 +1,209 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import json
+import os
+import re
+import sys
+from collections import defaultdict
+
+import mozpack.path as mozpath
+import taskgraph
+from mach.util import get_state_dir
+from mozbuild.base import MozbuildObject
+from mozpack.files import FileFinder
+from moztest.resolve import TestManifestLoader, TestResolver, get_suite_definition
+from taskgraph.generator import TaskGraphGenerator
+from taskgraph.parameters import ParameterMismatch, parameters_loader
+from taskgraph.taskgraph import TaskGraph
+
+here = os.path.abspath(os.path.dirname(__file__))
+build = MozbuildObject.from_environment(cwd=here)
+
+PARAMETER_MISMATCH = """
+ERROR - The parameters being used to generate tasks differ from those expected
+by your working copy:
+
+ {}
+
+To fix this, either rebase onto the latest mozilla-central or pass in
+-p/--parameters. For more information on how to define parameters, see:
+https://firefox-source-docs.mozilla.org/taskcluster/taskcluster/mach.html#parameters
+"""
+
+
+def invalidate(cache):
+ try:
+ cmod = os.path.getmtime(cache)
+ except OSError as e:
+ # File does not exist. We catch OSError rather than use `isfile`
+ # because the recommended watchman hook could possibly invalidate the
+ # cache in-between the check to `isfile` and the call to `getmtime`
+ # below.
+ if e.errno == 2:
+ return
+ raise
+
+ tc_dir = os.path.join(build.topsrcdir, "taskcluster")
+ tmod = max(os.path.getmtime(os.path.join(tc_dir, p)) for p, _ in FileFinder(tc_dir))
+
+ if tmod > cmod:
+ os.remove(cache)
+
+
+def cache_key(attr, params, disable_target_task_filter):
+ key = attr
+ if params and params["project"] not in ("autoland", "mozilla-central"):
+ key += f"-{params['project']}"
+
+ if disable_target_task_filter and "full" not in attr:
+ key += "-uncommon"
+ return key
+
+
+def generate_tasks(params=None, full=False, disable_target_task_filter=False):
+ attr = "full_task_set" if full else "target_task_set"
+ target_tasks_method = (
+ "try_select_tasks"
+ if not disable_target_task_filter
+ else "try_select_tasks_uncommon"
+ )
+ params = parameters_loader(
+ params,
+ strict=False,
+ overrides={
+ "try_mode": "try_select",
+ "target_tasks_method": target_tasks_method,
+ },
+ )
+ root = os.path.join(build.topsrcdir, "taskcluster", "ci")
+ taskgraph.fast = True
+ generator = TaskGraphGenerator(root_dir=root, parameters=params)
+
+ def add_chunk_patterns(tg):
+ for task_name, task in tg.tasks.items():
+ chunk_index = -1
+ if task_name.endswith("-cf"):
+ chunk_index = -2
+
+ chunks = task.task.get("extra", {}).get("chunks", {})
+ if isinstance(chunks, int):
+ task.chunk_pattern = "{}-*/{}".format(
+ "-".join(task_name.split("-")[:chunk_index]), chunks
+ )
+ else:
+ assert isinstance(chunks, dict)
+ if chunks.get("total", 1) == 1:
+ task.chunk_pattern = task_name
+ else:
+ task.chunk_pattern = "{}-*".format(
+ "-".join(task_name.split("-")[:chunk_index])
+ )
+ return tg
+
+ cache_dir = os.path.join(
+ get_state_dir(specific_to_topsrcdir=True), "cache", "taskgraph"
+ )
+ key = cache_key(attr, generator.parameters, disable_target_task_filter)
+ cache = os.path.join(cache_dir, key)
+
+ invalidate(cache)
+ if os.path.isfile(cache):
+ with open(cache) as fh:
+ return add_chunk_patterns(TaskGraph.from_json(json.load(fh))[1])
+
+ if not os.path.isdir(cache_dir):
+ os.makedirs(cache_dir)
+
+ print("Task configuration changed, generating {}".format(attr.replace("_", " ")))
+
+ cwd = os.getcwd()
+ os.chdir(build.topsrcdir)
+
+ def generate(attr):
+ try:
+ tg = getattr(generator, attr)
+ except ParameterMismatch as e:
+ print(PARAMETER_MISMATCH.format(e.args[0]))
+ sys.exit(1)
+
+ # write cache
+ key = cache_key(attr, generator.parameters, disable_target_task_filter)
+ with open(os.path.join(cache_dir, key), "w") as fh:
+ json.dump(tg.to_json(), fh)
+ return add_chunk_patterns(tg)
+
+ # Cache both full_task_set and target_task_set regardless of whether or not
+ # --full was requested. Caching is cheap and can potentially save a lot of
+ # time.
+ tg_full = generate("full_task_set")
+ tg_target = generate("target_task_set")
+
+ # discard results from these, we only need cache.
+ if full:
+ generate("full_task_graph")
+ generate("target_task_graph")
+
+ os.chdir(cwd)
+ if full:
+ return tg_full
+ return tg_target
+
+
+def filter_tasks_by_paths(tasks, paths):
+ resolver = TestResolver.from_environment(cwd=here, loader_cls=TestManifestLoader)
+ run_suites, run_tests = resolver.resolve_metadata(paths)
+ flavors = {(t["flavor"], t.get("subsuite")) for t in run_tests}
+
+ task_regexes = set()
+ for flavor, subsuite in flavors:
+ _, suite = get_suite_definition(flavor, subsuite, strict=True)
+ if "task_regex" not in suite:
+ print(
+ "warning: no tasks could be resolved from flavor '{}'{}".format(
+ flavor, " and subsuite '{}'".format(subsuite) if subsuite else ""
+ )
+ )
+ continue
+
+ task_regexes.update(suite["task_regex"])
+
+ def match_task(task):
+ return any(re.search(pattern, task) for pattern in task_regexes)
+
+ return {
+ task_name: task for task_name, task in tasks.items() if match_task(task_name)
+ }
+
+
+def resolve_tests_by_suite(paths):
+ resolver = TestResolver.from_environment(cwd=here, loader_cls=TestManifestLoader)
+ _, run_tests = resolver.resolve_metadata(paths)
+
+ suite_to_tests = defaultdict(list)
+
+ # A dictionary containing all the input paths that we haven't yet
+ # assigned to a specific test flavor.
+ remaining_paths_by_suite = defaultdict(lambda: set(paths))
+
+ for test in run_tests:
+ key, _ = get_suite_definition(test["flavor"], test.get("subsuite"), strict=True)
+
+ test_path = test.get("srcdir_relpath")
+ if test_path is None:
+ continue
+ found_path = None
+ manifest_relpath = None
+ if "manifest_relpath" in test:
+ manifest_relpath = mozpath.normpath(test["manifest_relpath"])
+ for path in remaining_paths_by_suite[key]:
+ if test_path.startswith(path) or manifest_relpath == path:
+ found_path = path
+ break
+ if found_path:
+ suite_to_tests[key].append(found_path)
+ remaining_paths_by_suite[key].remove(found_path)
+
+ return suite_to_tests
diff --git a/tools/tryselect/test/conftest.py b/tools/tryselect/test/conftest.py
new file mode 100644
index 0000000000..d9cb7daee3
--- /dev/null
+++ b/tools/tryselect/test/conftest.py
@@ -0,0 +1,106 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+from unittest.mock import MagicMock
+
+import pytest
+import yaml
+from moztest.resolve import TestResolver
+from responses import RequestsMock
+from taskgraph.graph import Graph
+from taskgraph.task import Task
+from taskgraph.taskgraph import TaskGraph
+from tryselect import push
+
+
+@pytest.fixture
+def responses():
+ with RequestsMock() as rsps:
+ yield rsps
+
+
+@pytest.fixture
+def tg(request):
+ if not hasattr(request.module, "TASKS"):
+ pytest.fail(
+ "'tg' fixture used from a module that didn't define the TASKS variable"
+ )
+
+ tasks = request.module.TASKS
+ for task in tasks:
+ task.setdefault("task", {})
+ task["task"].setdefault("tags", {})
+
+ tasks = {t["label"]: Task(**t) for t in tasks}
+ return TaskGraph(tasks, Graph(tasks.keys(), set()))
+
+
+@pytest.fixture
+def patch_resolver(monkeypatch):
+ def inner(suites, tests):
+ def fake_test_metadata(*args, **kwargs):
+ return suites, tests
+
+ monkeypatch.setattr(TestResolver, "resolve_metadata", fake_test_metadata)
+
+ return inner
+
+
+@pytest.fixture(autouse=True)
+def patch_vcs(monkeypatch):
+ attrs = {
+ "path": push.vcs.path,
+ }
+ mock = MagicMock()
+ mock.configure_mock(**attrs)
+ monkeypatch.setattr(push, "vcs", mock)
+
+
+@pytest.fixture(scope="session")
+def run_mach():
+ import mach_initialize
+ from tryselect.tasks import build
+
+ mach = mach_initialize.initialize(build.topsrcdir)
+
+ def inner(args):
+ return mach.run(args)
+
+ return inner
+
+
+def pytest_generate_tests(metafunc):
+ if all(
+ fixture in metafunc.fixturenames
+ for fixture in ("task_config", "args", "expected")
+ ):
+
+ def load_tests():
+ for task_config, tests in metafunc.module.TASK_CONFIG_TESTS.items():
+ for args, expected in tests:
+ yield (task_config, args, expected)
+
+ tests = list(load_tests())
+ ids = ["{} {}".format(t[0], " ".join(t[1])).strip() for t in tests]
+ metafunc.parametrize("task_config,args,expected", tests, ids=ids)
+
+ elif all(
+ fixture in metafunc.fixturenames for fixture in ("shared_name", "shared_preset")
+ ):
+ preset_path = os.path.join(
+ push.build.topsrcdir, "tools", "tryselect", "try_presets.yml"
+ )
+ with open(preset_path, "r") as fh:
+ presets = list(yaml.safe_load(fh).items())
+
+ ids = [p[0] for p in presets]
+
+ # Mark fuzzy presets on Windows xfail due to fzf not being installed.
+ if os.name == "nt":
+ for i, preset in enumerate(presets):
+ if preset[1]["selector"] == "fuzzy":
+ presets[i] = pytest.param(*preset, marks=pytest.mark.xfail)
+
+ metafunc.parametrize("shared_name,shared_preset", presets, ids=ids)
diff --git a/tools/tryselect/test/cram.toml b/tools/tryselect/test/cram.toml
new file mode 100644
index 0000000000..5dd8c41b4e
--- /dev/null
+++ b/tools/tryselect/test/cram.toml
@@ -0,0 +1,5 @@
+["test_auto.t"]
+["test_empty.t"]
+["test_fuzzy.t"]
+["test_message.t"]
+["test_preset.t"]
diff --git a/tools/tryselect/test/python.toml b/tools/tryselect/test/python.toml
new file mode 100644
index 0000000000..f88156f69b
--- /dev/null
+++ b/tools/tryselect/test/python.toml
@@ -0,0 +1,31 @@
+[DEFAULT]
+subsuite = "try"
+
+["test_again.py"]
+
+["test_auto.py"]
+
+["test_chooser.py"]
+
+["test_fuzzy.py"]
+
+["test_mozharness_integration.py"]
+
+["test_perf.py"]
+
+["test_perfcomparators.py"]
+
+["test_presets.py"]
+# Modifies "task_duration_history.json" in .mozbuild. Since other tests depend on this file, this test
+# shouldn't be run in parallel with those other tests.
+sequential = true
+
+["test_push.py"]
+
+["test_release.py"]
+
+["test_scriptworker.py"]
+
+["test_task_configs.py"]
+
+["test_tasks.py"]
diff --git a/tools/tryselect/test/setup.sh b/tools/tryselect/test/setup.sh
new file mode 100644
index 0000000000..c883ec6e8a
--- /dev/null
+++ b/tools/tryselect/test/setup.sh
@@ -0,0 +1,101 @@
+export topsrcdir=$TESTDIR/../../../
+export MOZBUILD_STATE_PATH=$TMP/mozbuild
+export MACH_TRY_PRESET_PATHS=$MOZBUILD_STATE_PATH/try_presets.yml
+
+# This helps to find fzf when running these tests locally, since normally fzf
+# would be found via MOZBUILD_STATE_PATH pointing to $HOME/.mozbuild
+export PATH="$PATH:$HOME/.mozbuild/fzf/bin"
+
+export MACHRC=$TMP/machrc
+cat > $MACHRC << EOF
+[try]
+default=syntax
+EOF
+
+cmd="$topsrcdir/mach python -c 'from mach.util import get_state_dir; print(get_state_dir(specific_to_topsrcdir=True))'"
+# First run local state dir generation so it doesn't affect test output.
+eval $cmd > /dev/null 2>&1
+# Now run it again to get the actual directory.
+cachedir=$(eval $cmd)/cache/taskgraph
+mkdir -p $cachedir
+# Run `mach try --help` to generate virtualenv.
+eval "$topsrcdir/mach try --help" > /dev/null 2>&1
+
+cat > $cachedir/target_task_set << EOF
+{
+ "test/foo-opt": {
+ "kind": "test",
+ "label": "test/foo-opt",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ },
+ "test/foo-debug": {
+ "kind": "test",
+ "label": "test/foo-debug",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ },
+ "build-baz": {
+ "kind": "build",
+ "label": "build-baz",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ }
+}
+EOF
+
+cat > $cachedir/full_task_set << EOF
+{
+ "test/foo-opt": {
+ "kind": "test",
+ "label": "test/foo-opt",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ },
+ "test/foo-debug": {
+ "kind": "test",
+ "label": "test/foo-debug",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ },
+ "test/bar-opt": {
+ "kind": "test",
+ "label": "test/bar-opt",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ },
+ "test/bar-debug": {
+ "kind": "test",
+ "label": "test/bar-debug",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ },
+ "build-baz": {
+ "kind": "build",
+ "label": "build-baz",
+ "attributes": {},
+ "task": {},
+ "optimization": {},
+ "dependencies": {}
+ }
+}
+EOF
+
+# set mtime to the future so we don't re-generate tasks
+find $cachedir -type f -exec touch -d "next day" {} +
+
+export testargs="--no-push --no-artifact"
diff --git a/tools/tryselect/test/test_again.py b/tools/tryselect/test/test_again.py
new file mode 100644
index 0000000000..3c6b87cfdf
--- /dev/null
+++ b/tools/tryselect/test/test_again.py
@@ -0,0 +1,73 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+import mozunit
+import pytest
+from six.moves import reload_module as reload
+from tryselect import push
+from tryselect.selectors import again
+
+
+@pytest.fixture(autouse=True)
+def patch_history_path(tmpdir, monkeypatch):
+ monkeypatch.setattr(push, "history_path", tmpdir.join("history.json").strpath)
+ reload(again)
+
+
+def test_try_again(monkeypatch):
+ push.push_to_try(
+ "fuzzy",
+ "Fuzzy message",
+ try_task_config=push.generate_try_task_config(
+ "fuzzy",
+ ["foo", "bar"],
+ {"try_task_config": {"use-artifact-builds": True}},
+ ),
+ )
+
+ assert os.path.isfile(push.history_path)
+ with open(push.history_path, "r") as fh:
+ assert len(fh.readlines()) == 1
+
+ def fake_push_to_try(*args, **kwargs):
+ return args, kwargs
+
+ monkeypatch.setattr(push, "push_to_try", fake_push_to_try)
+ reload(again)
+
+ args, kwargs = again.run()
+
+ assert args[0] == "again"
+ assert args[1] == "Fuzzy message"
+
+ try_task_config = kwargs["try_task_config"]["parameters"].pop("try_task_config")
+ assert sorted(try_task_config.get("tasks")) == sorted(["foo", "bar"])
+ assert try_task_config.get("env") == {"TRY_SELECTOR": "fuzzy"}
+ assert try_task_config.get("use-artifact-builds")
+
+ with open(push.history_path, "r") as fh:
+ assert len(fh.readlines()) == 1
+
+
+def test_no_push_does_not_generate_history(tmpdir):
+ assert not os.path.isfile(push.history_path)
+
+ push.push_to_try(
+ "fuzzy",
+ "Fuzzy",
+ try_task_config=push.generate_try_task_config(
+ "fuzzy",
+ ["foo", "bar"],
+ {"use-artifact-builds": True},
+ ),
+ dry_run=True,
+ )
+ assert not os.path.isfile(push.history_path)
+ assert again.run() == 1
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_auto.py b/tools/tryselect/test/test_auto.py
new file mode 100644
index 0000000000..63f0fe6bd7
--- /dev/null
+++ b/tools/tryselect/test/test_auto.py
@@ -0,0 +1,31 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import mozunit
+import pytest
+from tryselect.selectors.auto import AutoParser
+
+
+def test_strategy_validation():
+ parser = AutoParser()
+ args = parser.parse_args(["--strategy", "relevant_tests"])
+ assert args.strategy == "gecko_taskgraph.optimize:tryselect.relevant_tests"
+
+ args = parser.parse_args(
+ ["--strategy", "gecko_taskgraph.optimize:experimental.relevant_tests"]
+ )
+ assert args.strategy == "gecko_taskgraph.optimize:experimental.relevant_tests"
+
+ with pytest.raises(SystemExit):
+ parser.parse_args(["--strategy", "gecko_taskgraph.optimize:tryselect"])
+
+ with pytest.raises(SystemExit):
+ parser.parse_args(["--strategy", "foo"])
+
+ with pytest.raises(SystemExit):
+ parser.parse_args(["--strategy", "foo:bar"])
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_auto.t b/tools/tryselect/test/test_auto.t
new file mode 100644
index 0000000000..c3fe797949
--- /dev/null
+++ b/tools/tryselect/test/test_auto.t
@@ -0,0 +1,61 @@
+
+ $ . $TESTDIR/setup.sh
+ $ cd $topsrcdir
+
+Test auto selector
+
+ $ ./mach try auto $testargs
+ Commit message:
+ Tasks automatically selected.
+
+ Pushed via `mach try auto`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_strategies": "gecko_taskgraph.optimize:tryselect.bugbug_reduced_manifests_config_selection_medium",
+ "optimize_target_tasks": true,
+ "target_tasks_method": "try_auto",
+ "test_manifest_loader": "bugbug",
+ "try_mode": "try_auto",
+ "try_task_config": {}
+ },
+ "version": 2
+ }
+
+
+ $ ./mach try auto $testargs --closed-tree
+ Commit message:
+ Tasks automatically selected. ON A CLOSED TREE
+
+ Pushed via `mach try auto`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_strategies": "gecko_taskgraph.optimize:tryselect.bugbug_reduced_manifests_config_selection_medium",
+ "optimize_target_tasks": true,
+ "target_tasks_method": "try_auto",
+ "test_manifest_loader": "bugbug",
+ "try_mode": "try_auto",
+ "try_task_config": {}
+ },
+ "version": 2
+ }
+
+ $ ./mach try auto $testargs --closed-tree -m "foo {msg} bar"
+ Commit message:
+ foo Tasks automatically selected. bar ON A CLOSED TREE
+
+ Pushed via `mach try auto`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_strategies": "gecko_taskgraph.optimize:tryselect.bugbug_reduced_manifests_config_selection_medium",
+ "optimize_target_tasks": true,
+ "target_tasks_method": "try_auto",
+ "test_manifest_loader": "bugbug",
+ "try_mode": "try_auto",
+ "try_task_config": {}
+ },
+ "version": 2
+ }
+
diff --git a/tools/tryselect/test/test_chooser.py b/tools/tryselect/test/test_chooser.py
new file mode 100644
index 0000000000..3d60a0f8d4
--- /dev/null
+++ b/tools/tryselect/test/test_chooser.py
@@ -0,0 +1,84 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import multiprocessing
+
+import mozunit
+import pytest
+from tryselect.selectors.chooser.app import create_application
+
+TASKS = [
+ {
+ "kind": "build",
+ "label": "build-windows",
+ "attributes": {
+ "build_platform": "windows",
+ },
+ },
+ {
+ "kind": "test",
+ "label": "test-windows-mochitest-e10s",
+ "attributes": {
+ "unittest_suite": "mochitest-browser-chrome",
+ "mochitest_try_name": "mochitest-browser-chrome",
+ },
+ },
+]
+
+
+@pytest.fixture
+def queue():
+ return multiprocessing.Queue()
+
+
+@pytest.fixture
+def app(tg, queue):
+ app = create_application(tg, queue)
+ app.config["TESTING"] = True
+
+ ctx = app.app_context()
+ ctx.push()
+ yield app
+ ctx.pop()
+
+
+def test_try_chooser(app, queue: multiprocessing.Queue):
+ client = app.test_client()
+
+ response = client.get("/")
+ assert response.status_code == 200
+
+ expected_output = [
+ b"""<title>Try Chooser Enhanced</title>""",
+ b"""<input class="filter" type="checkbox" id=windows name="build" value='{"build_platform": ["windows"]}' onchange="console.log('checkbox onchange triggered');apply();">""", # noqa
+ b"""<input class="filter" type="checkbox" id=mochitest-browser-chrome name="test" value='{"unittest_suite": ["mochitest-browser-chrome"]}' onchange="console.log('checkbox onchange triggered');apply();">""", # noqa
+ ]
+
+ for expected in expected_output:
+ assert expected in response.data
+
+ response = client.post("/", data={"action": "Cancel"})
+ assert response.status_code == 200
+ assert b"You may now close this page" in response.data
+ assert queue.get() == []
+
+ response = client.post("/", data={"action": "Push", "selected-tasks": ""})
+ assert response.status_code == 200
+ assert b"You may now close this page" in response.data
+ assert queue.get() == []
+
+ response = client.post(
+ "/",
+ data={
+ "action": "Push",
+ "selected-tasks": "build-windows\ntest-windows-mochitest-e10s",
+ },
+ )
+ assert response.status_code == 200
+ assert b"You may now close this page" in response.data
+ assert set(queue.get()) == set(["build-windows", "test-windows-mochitest-e10s"])
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_empty.t b/tools/tryselect/test/test_empty.t
new file mode 100644
index 0000000000..d7e9c22618
--- /dev/null
+++ b/tools/tryselect/test/test_empty.t
@@ -0,0 +1,62 @@
+ $ . $TESTDIR/setup.sh
+ $ cd $topsrcdir
+
+Test empty selector
+
+ $ ./mach try empty --no-push
+ Commit message:
+ No try selector specified, use "Add New Jobs" to select tasks.
+
+ Pushed via `mach try empty`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "empty"
+ },
+ "tasks": []
+ }
+ },
+ "version": 2
+ }
+
+ $ ./mach try empty --no-push --closed-tree
+ Commit message:
+ No try selector specified, use "Add New Jobs" to select tasks. ON A CLOSED TREE
+
+ Pushed via `mach try empty`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "empty"
+ },
+ "tasks": []
+ }
+ },
+ "version": 2
+ }
+
+ $ ./mach try empty --no-push --closed-tree -m "foo {msg} bar"
+ Commit message:
+ foo No try selector specified, use "Add New Jobs" to select tasks. bar ON A CLOSED TREE
+
+ Pushed via `mach try empty`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "empty"
+ },
+ "tasks": []
+ }
+ },
+ "version": 2
+ }
+
diff --git a/tools/tryselect/test/test_fuzzy.py b/tools/tryselect/test/test_fuzzy.py
new file mode 100644
index 0000000000..9ff1b386af
--- /dev/null
+++ b/tools/tryselect/test/test_fuzzy.py
@@ -0,0 +1,125 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import os
+
+import mozunit
+import pytest
+
+
+@pytest.mark.skipif(os.name == "nt", reason="fzf not installed on host")
+@pytest.mark.parametrize("show_chunk_numbers", [True, False])
+def test_query_paths(run_mach, capfd, show_chunk_numbers):
+ cmd = [
+ "try",
+ "fuzzy",
+ "--no-push",
+ "-q",
+ "^test-linux '64-qr/debug-mochitest-chrome-1proc-",
+ "caps/tests/mochitest/test_addonMayLoad.html",
+ ]
+ chunk = "*"
+ if show_chunk_numbers:
+ cmd.append("--show-chunk-numbers")
+ chunk = "1"
+
+ assert run_mach(cmd) == 0
+
+ output = capfd.readouterr().out
+ print(output)
+
+ delim = "Calculated try_task_config.json:"
+ index = output.find(delim)
+ result = json.loads(output[index + len(delim) :])
+
+    # If there is more than one task here, it means that something went wrong
+    # with the path filtering.
+ tasks = result["parameters"]["try_task_config"]["tasks"]
+ assert tasks == ["test-linux1804-64-qr/debug-mochitest-chrome-1proc-%s" % chunk]
+
+
+@pytest.mark.skipif(os.name == "nt", reason="fzf not installed on host")
+@pytest.mark.parametrize("show_chunk_numbers", [True, False])
+def test_query_paths_no_chunks(run_mach, capfd, show_chunk_numbers):
+ cmd = [
+ "try",
+ "fuzzy",
+ "--no-push",
+ "-q",
+ "^test-linux '64-qr/debug-cppunittest",
+ ]
+ if show_chunk_numbers:
+ cmd.append("--show-chunk-numbers")
+
+ assert run_mach(cmd) == 0
+
+ output = capfd.readouterr().out
+ print(output)
+
+ delim = "Calculated try_task_config.json:"
+ index = output.find(delim)
+ result = json.loads(output[index + len(delim) :])
+
+    # If there is more than one task here, it means that something went wrong
+    # with the path filtering.
+ tasks = result["parameters"]["try_task_config"]["tasks"]
+ assert tasks == ["test-linux1804-64-qr/debug-cppunittest-1proc"]
+
+
+@pytest.mark.skipif(os.name == "nt", reason="fzf not installed on host")
+@pytest.mark.parametrize("variant", ["", "spi-nw"])
+def test_query_paths_variants(run_mach, capfd, variant):
+ if variant:
+ variant = "-%s" % variant
+
+ cmd = [
+ "try",
+ "fuzzy",
+ "--no-push",
+ "-q",
+ "^test-linux '64-qr/debug-mochitest-browser-chrome%s-" % variant,
+ ]
+ assert run_mach(cmd) == 0
+
+ output = capfd.readouterr().out
+ print(output)
+
+ if variant:
+ expected = ["test-linux1804-64-qr/debug-mochitest-browser-chrome%s-*" % variant]
+ else:
+ expected = [
+ "test-linux1804-64-qr/debug-mochitest-browser-chrome-spi-nw-*",
+ "test-linux1804-64-qr/debug-mochitest-browser-chrome-swr-*",
+ ]
+
+ delim = "Calculated try_task_config.json:"
+ index = output.find(delim)
+ result = json.loads(output[index + len(delim) :])
+ tasks = result["parameters"]["try_task_config"]["tasks"]
+ assert tasks == expected
+
+
+@pytest.mark.skipif(os.name == "nt", reason="fzf not installed on host")
+@pytest.mark.parametrize("full", [True, False])
+def test_query(run_mach, capfd, full):
+ cmd = ["try", "fuzzy", "--no-push", "-q", "'source-test-python-taskgraph-tests-py3"]
+ if full:
+ cmd.append("--full")
+ assert run_mach(cmd) == 0
+
+ output = capfd.readouterr().out
+ print(output)
+
+ delim = "Calculated try_task_config.json:"
+ index = output.find(delim)
+ result = json.loads(output[index + len(delim) :])
+
+    # Should only ever match one task exactly.
+ tasks = result["parameters"]["try_task_config"]["tasks"]
+ assert tasks == ["source-test-python-taskgraph-tests-py3"]
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_fuzzy.t b/tools/tryselect/test/test_fuzzy.t
new file mode 100644
index 0000000000..843b053e08
--- /dev/null
+++ b/tools/tryselect/test/test_fuzzy.t
@@ -0,0 +1,252 @@
+ $ . $TESTDIR/setup.sh
+ $ cd $topsrcdir
+
+Test fuzzy selector
+
+ $ ./mach try fuzzy $testargs -q "'foo"
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+
+
+ $ ./mach try fuzzy $testargs -q "'bar"
+ no tasks selected
+ $ ./mach try fuzzy $testargs --full -q "'bar"
+ Commit message:
+ Fuzzy query='bar
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/bar-debug",
+ "test/bar-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+
+Test multiple selectors
+
+ $ ./mach try fuzzy $testargs --full -q "'foo" -q "'bar"
+ Commit message:
+ Fuzzy query='foo&query='bar
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/bar-debug",
+ "test/bar-opt",
+ "test/foo-debug",
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+
+Test query intersection
+
+ $ ./mach try fuzzy $testargs --and -q "'foo" -q "'opt"
+ Commit message:
+ Fuzzy query='foo&query='opt
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+
+Test intersection with preset containing multiple queries
+
+ $ ./mach try fuzzy --save foo -q "'test" -q "'opt"
+ preset saved, run with: --preset=foo
+
+ $ ./mach try fuzzy $testargs --preset foo -xq "'test"
+ Commit message:
+ Fuzzy query='test&query='opt&query='test
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+ $ ./mach try $testargs --preset foo -xq "'test"
+ Commit message:
+ Fuzzy query='test&query='opt&query='test
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+
+Test exact match
+
+ $ ./mach try fuzzy $testargs --full -q "testfoo | 'testbar"
+ Commit message:
+ Fuzzy query=testfoo | 'testbar
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+ $ ./mach try fuzzy $testargs --full --exact -q "testfoo | 'testbar"
+ Commit message:
+ Fuzzy query=testfoo | 'testbar
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/bar-debug",
+ "test/bar-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+
+Test task config
+
+ $ ./mach try fuzzy --no-push --artifact -q "'foo"
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "disable-pgo": true,
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ],
+ "use-artifact-builds": true
+ }
+ },
+ "version": 2
+ }
+
+ $ ./mach try fuzzy $testargs --env FOO=1 --env BAR=baz -q "'foo"
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "BAR": "baz",
+ "FOO": "1",
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
diff --git a/tools/tryselect/test/test_message.t b/tools/tryselect/test/test_message.t
new file mode 100644
index 0000000000..a707e410fb
--- /dev/null
+++ b/tools/tryselect/test/test_message.t
@@ -0,0 +1,73 @@
+ $ . $TESTDIR/setup.sh
+ $ cd $topsrcdir
+
+Test custom commit messages with fuzzy selector
+
+ $ ./mach try fuzzy $testargs -q foo --message "Foobar"
+ Commit message:
+ Foobar
+
+ Fuzzy query=foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+ $ ./mach try fuzzy $testargs -q foo -m "Foobar: {msg}"
+ Commit message:
+ Foobar: Fuzzy query=foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+ $ unset EDITOR
+ $ ./mach try fuzzy $testargs -q foo -m > /dev/null 2>&1
+ [2]
+
+
+Test custom commit messages with syntax selector
+
+ $ ./mach try syntax $testargs -p linux -u mochitests --message "Foobar"
+ Commit message:
+ Foobar
+
+ try: -b do -p linux -u mochitests
+
+ Pushed via `mach try syntax`
+ $ ./mach try syntax $testargs -p linux -u mochitests -m "Foobar: {msg}"
+ Commit message:
+ Foobar: try: -b do -p linux -u mochitests
+
+ Pushed via `mach try syntax`
+ $ unset EDITOR
+ $ ./mach try syntax $testargs -p linux -u mochitests -m > /dev/null 2>&1
+ [2]
diff --git a/tools/tryselect/test/test_mozharness_integration.py b/tools/tryselect/test/test_mozharness_integration.py
new file mode 100644
index 0000000000..abeaaf370e
--- /dev/null
+++ b/tools/tryselect/test/test_mozharness_integration.py
@@ -0,0 +1,145 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import os
+
+import mozunit
+import pytest
+from mozfile import load_source
+from tryselect.tasks import build, resolve_tests_by_suite
+
+MOZHARNESS_SCRIPTS = {
+ "android_emulator_unittest": {
+ "class_name": "AndroidEmulatorTest",
+ "configs": [
+ "android/android_common.py",
+ ],
+ "xfail": [
+ "cppunittest",
+ "crashtest-qr",
+ "gtest",
+ "geckoview-junit",
+ "jittest",
+ "jsreftest",
+ "reftest-qr",
+ ],
+ },
+ "desktop_unittest": {
+ "class_name": "DesktopUnittest",
+ "configs": [
+ "unittests/linux_unittest.py",
+ "unittests/mac_unittest.py",
+ "unittests/win_unittest.py",
+ ],
+ "xfail": [
+ "cppunittest",
+ "gtest",
+ "jittest",
+ "jittest-chunked",
+ "jittest1",
+ "jittest2",
+ "jsreftest",
+ "mochitest-valgrind-plain",
+ "reftest-no-accel",
+ "reftest-snapshot",
+ "xpcshell-msix",
+ ],
+ },
+}
+"""A suite being listed in a script's `xfail` list means it won't work
+properly with MOZHARNESS_TEST_PATHS (the mechanism |mach try fuzzy <path>|
+uses).
+"""
+
+
+def get_mozharness_test_paths(name):
+ scriptdir = os.path.join(build.topsrcdir, "testing", "mozharness")
+ mod = load_source(
+ "scripts." + name, os.path.join(scriptdir, "scripts", name + ".py")
+ )
+
+ class_name = MOZHARNESS_SCRIPTS[name]["class_name"]
+ cls = getattr(mod, class_name)
+ return cls(require_config_file=False)._get_mozharness_test_paths
+
+
+@pytest.fixture(scope="module")
+def all_suites():
+ from moztest.resolve import _test_flavors, _test_subsuites
+
+ all_suites = []
+ for flavor in _test_flavors:
+ all_suites.append({"flavor": flavor, "srcdir_relpath": "test"})
+
+ for flavor, subsuite in _test_subsuites:
+ all_suites.append(
+ {"flavor": flavor, "subsuite": subsuite, "srcdir_relpath": "test"}
+ )
+
+ return all_suites
+
+
+def generate_suites_from_config(path):
+ parent, name = os.path.split(path)
+ name = os.path.splitext(name)[0]
+
+ configdir = os.path.join(
+ build.topsrcdir, "testing", "mozharness", "configs", parent
+ )
+
+ mod = load_source(name, os.path.join(configdir, name + ".py"))
+
+ config = mod.config
+
+ for category in sorted(config["suite_definitions"]):
+ key = "all_{}_suites".format(category)
+ if key not in config:
+ yield category,
+ continue
+
+ for suite in sorted(config["all_{}_suites".format(category)]):
+ yield category, suite
+
+
+def generate_suites():
+ for name, script in MOZHARNESS_SCRIPTS.items():
+ seen = set()
+
+ for path in script["configs"]:
+ for suite in generate_suites_from_config(path):
+ if suite in seen:
+ continue
+ seen.add(suite)
+
+ item = (name, suite)
+
+ if suite[-1] in script["xfail"]:
+ item = pytest.param(item, marks=pytest.mark.xfail)
+
+ yield item
+
+
+def idfn(item):
+ name, suite = item
+ return "{}/{}".format(name, suite[-1])
+
+
+@pytest.mark.parametrize("item", generate_suites(), ids=idfn)
+def test_suites(item, patch_resolver, all_suites):
+ """An integration test to make sure the suites returned by
+ `tasks.resolve_tests_by_suite` match up with the names defined in
+ mozharness.
+ """
+ patch_resolver([], all_suites)
+ suites = resolve_tests_by_suite(["test"])
+ os.environ["MOZHARNESS_TEST_PATHS"] = json.dumps(suites)
+
+ name, suite = item
+ func = get_mozharness_test_paths(name)
+ assert func(*suite)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_perf.py b/tools/tryselect/test/test_perf.py
new file mode 100644
index 0000000000..0db45df83e
--- /dev/null
+++ b/tools/tryselect/test/test_perf.py
@@ -0,0 +1,1425 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import pathlib
+import shutil
+import tempfile
+from unittest import mock
+
+import mozunit
+import pytest
+from tryselect.selectors.perf import (
+ MAX_PERF_TASKS,
+ Apps,
+ InvalidCategoryException,
+ InvalidRegressionDetectorQuery,
+ PerfParser,
+ Platforms,
+ Suites,
+ Variants,
+ run,
+)
+from tryselect.selectors.perf_preview import plain_display
+from tryselect.selectors.perfselector.classification import (
+ check_for_live_sites,
+ check_for_profile,
+)
+
+TASKS = [
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-motionmark-animometer",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-wasm-firefox-wasm-godot-optimizing",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-webaudio",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-speedometer",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-wasm-firefox-wasm-misc",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-jetstream2",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-ares6",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-wasm-firefox-wasm-misc-optimizing",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-sunspider",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-matrix-react-bench",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-wasm-firefox-wasm-godot-baseline",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-twitch-animation",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-assorted-dom",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-stylebench",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-wasm-firefox-wasm-misc-baseline",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-motionmark-htmlsuite",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-firefox-unity-webgl",
+ "test-linux1804-64-shippable-qr/opt-browsertime-benchmark-wasm-firefox-wasm-godot",
+]
+
+# The TEST_VARIANTS, and TEST_CATEGORIES are used to force
+# a particular set of categories to show up in testing. Otherwise,
+# every time someone adds a category, or a variant, we'll need
+# to redo all the category counts. The platforms, and apps are
+# not forced because they change infrequently.
+TEST_VARIANTS = {
+ # Bug 1837058 - Switch this back to Variants.NO_FISSION when
+ # the default flips to fission on android
+ Variants.FISSION.value: {
+ "query": "'nofis",
+ "negation": "!nofis",
+ "platforms": [Platforms.ANDROID.value],
+ "apps": [Apps.FENIX.value, Apps.GECKOVIEW.value],
+ },
+ Variants.BYTECODE_CACHED.value: {
+ "query": "'bytecode",
+ "negation": "!bytecode",
+ "platforms": [Platforms.DESKTOP.value],
+ "apps": [Apps.FIREFOX.value],
+ },
+ Variants.LIVE_SITES.value: {
+ "query": "'live",
+ "negation": "!live",
+ "restriction": check_for_live_sites,
+ "platforms": [Platforms.DESKTOP.value, Platforms.ANDROID.value],
+ "apps": list(PerfParser.apps.keys()),
+ },
+ Variants.PROFILING.value: {
+ "query": "'profil",
+ "negation": "!profil",
+ "restriction": check_for_profile,
+ "platforms": [Platforms.DESKTOP.value, Platforms.ANDROID.value],
+ "apps": [Apps.FIREFOX.value, Apps.GECKOVIEW.value, Apps.FENIX.value],
+ },
+ Variants.SWR.value: {
+ "query": "'swr",
+ "negation": "!swr",
+ "platforms": [Platforms.DESKTOP.value],
+ "apps": [Apps.FIREFOX.value],
+ },
+}
+
+TEST_CATEGORIES = {
+ "Pageload": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'tp6"],
+ },
+ "suites": [Suites.RAPTOR.value],
+ "tasks": [],
+ "description": "",
+ },
+ "Pageload (essential)": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'tp6 'essential"],
+ },
+ "variant-restrictions": {Suites.RAPTOR.value: [Variants.FISSION.value]},
+ "suites": [Suites.RAPTOR.value],
+ "tasks": [],
+ "description": "",
+ },
+ "Responsiveness": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'responsive"],
+ },
+ "suites": [Suites.RAPTOR.value],
+ "variant-restrictions": {Suites.RAPTOR.value: []},
+ "tasks": [],
+ "description": "",
+ },
+ "Benchmarks": {
+ "query": {
+ Suites.RAPTOR.value: ["'browsertime 'benchmark"],
+ },
+ "suites": [Suites.RAPTOR.value],
+ "variant-restrictions": {Suites.RAPTOR.value: []},
+ "tasks": [],
+ "description": "",
+ },
+ "DAMP (Devtools)": {
+ "query": {
+ Suites.TALOS.value: ["'talos 'damp"],
+ },
+ "suites": [Suites.TALOS.value],
+ "tasks": [],
+ "description": "",
+ },
+ "Talos PerfTests": {
+ "query": {
+ Suites.TALOS.value: ["'talos"],
+ },
+ "suites": [Suites.TALOS.value],
+ "tasks": [],
+ "description": "",
+ },
+ "Resource Usage": {
+ "query": {
+ Suites.TALOS.value: ["'talos 'xperf | 'tp5"],
+ Suites.RAPTOR.value: ["'power 'osx"],
+ Suites.AWSY.value: ["'awsy"],
+ },
+ "suites": [Suites.TALOS.value, Suites.RAPTOR.value, Suites.AWSY.value],
+ "platform-restrictions": [Platforms.DESKTOP.value],
+ "variant-restrictions": {
+ Suites.RAPTOR.value: [],
+ Suites.TALOS.value: [],
+ },
+ "app-restrictions": {
+ Suites.RAPTOR.value: [Apps.FIREFOX.value],
+ Suites.TALOS.value: [Apps.FIREFOX.value],
+ },
+ "tasks": [],
+ "description": "",
+ },
+ "Graphics, & Media Playback": {
+ "query": {
+ # XXX This might not be an exhaustive list for talos atm
+ Suites.TALOS.value: ["'talos 'svgr | 'bcv | 'webgl"],
+ Suites.RAPTOR.value: ["'browsertime 'youtube-playback"],
+ },
+ "suites": [Suites.TALOS.value, Suites.RAPTOR.value],
+ "variant-restrictions": {Suites.RAPTOR.value: [Variants.FISSION.value]},
+ "tasks": [],
+ "description": "",
+ },
+}
+
+
+@pytest.mark.parametrize(
+ "category_options, expected_counts, unique_categories, missing",
+ [
+ # Default should show the premade live category, but no chrome or android
+ # The benchmark desktop category should be visible in all configurations
+ # except for when there are requested apps/variants/platforms
+ (
+ {},
+ 58,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!live",
+ "!profil",
+ "!chrom",
+ "!fenix",
+ "!safari",
+ "!m-car",
+ ]
+ },
+ "Pageload macosx": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'osx 'shippable",
+ "!bytecode",
+ "!live",
+ "!profil",
+ "!chrom",
+ "!fenix",
+ "!safari",
+ "!m-car",
+ ]
+ },
+ "Resource Usage desktop": {
+ "awsy": ["'awsy", "!android 'shippable !-32 !clang"],
+ "raptor": [
+ "'power 'osx",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!live",
+ "!profil",
+ "!chrom",
+ "!fenix",
+ "!safari",
+ "!m-car",
+ ],
+ "talos": [
+ "'talos 'xperf | 'tp5",
+ "!android 'shippable !-32 !clang",
+ "!profil",
+ "!swr",
+ ],
+ },
+ },
+ [
+ "Responsiveness android-p2 geckoview",
+ "Benchmarks desktop chromium",
+ ],
+ ), # Default settings
+ (
+ {"live_sites": True},
+ 66,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!profil",
+ "!chrom",
+ "!fenix",
+ "!safari",
+ "!m-car",
+ ]
+ },
+ "Pageload macosx": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'osx 'shippable",
+ "!bytecode",
+ "!profil",
+ "!chrom",
+ "!fenix",
+ "!safari",
+ "!m-car",
+ ]
+ },
+ "Pageload macosx live-sites": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'osx 'shippable",
+ "'live",
+ "!bytecode",
+ "!profil",
+ "!chrom",
+ "!fenix",
+ "!safari",
+ "!m-car",
+ ],
+ },
+ },
+ [
+ "Responsiveness android-p2 geckoview",
+ "Benchmarks desktop chromium",
+ "Benchmarks desktop firefox profiling",
+ "Talos desktop live-sites",
+ "Talos desktop profiling+swr",
+ "Benchmarks desktop firefox live-sites+profiling"
+ "Benchmarks desktop firefox live-sites",
+ ],
+ ),
+ (
+ {"live_sites": True, "safari": True},
+ 72,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!profil",
+ "!chrom",
+ "!fenix",
+ "!m-car",
+ ]
+ },
+ "Pageload macosx safari": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'osx 'shippable",
+ "'safari",
+ "!bytecode",
+ "!profil",
+ ]
+ },
+ "Pageload macosx safari live-sites": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'osx 'shippable",
+ "'safari",
+ "'live",
+ "!bytecode",
+ "!profil",
+ ],
+ },
+ },
+ [
+ "Pageload linux safari",
+ "Pageload desktop safari",
+ ],
+ ),
+ (
+ {"live_sites": True, "chrome": True},
+ 114,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!profil",
+ "!fenix",
+ "!safari",
+ "!m-car",
+ ]
+ },
+ "Pageload macosx live-sites": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'osx 'shippable",
+ "'live",
+ "!bytecode",
+ "!profil",
+ "!fenix",
+ "!safari",
+ "!m-car",
+ ],
+ },
+ "Benchmarks desktop chromium": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "'chromium",
+ "!bytecode",
+ "!profil",
+ ],
+ },
+ },
+ [
+ "Responsiveness android-p2 geckoview",
+ "Firefox Pageload linux chrome",
+ "Talos PerfTests desktop swr",
+ ],
+ ),
+ (
+ {"android": True},
+ 78,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!live",
+ "!profil",
+ "!chrom",
+ "!fenix",
+ "!safari",
+ "!m-car",
+ ],
+ },
+ "Responsiveness android-a51 geckoview": {
+ "raptor": [
+ "'browsertime 'responsive",
+ "'android 'a51 'shippable 'aarch64",
+ "'geckoview",
+ "!nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ },
+ [
+ "Responsiveness android-a51 chrome-m",
+ "Firefox Pageload android",
+ "Pageload android-a51 fenix",
+ ],
+ ),
+ (
+ {"android": True, "chrome": True},
+ 128,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!live",
+ "!profil",
+ "!fenix",
+ "!safari",
+ "!m-car",
+ ],
+ },
+ "Responsiveness android-a51 chrome-m": {
+ "raptor": [
+ "'browsertime 'responsive",
+ "'android 'a51 'shippable 'aarch64",
+ "'chrome-m",
+ "!nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ },
+ ["Responsiveness android-p2 chrome-m", "Resource Usage android"],
+ ),
+ (
+ {"android": True, "chrome": True, "profile": True},
+ 164,
+ {
+ "Benchmarks desktop": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!android 'shippable !-32 !clang",
+ "!bytecode",
+ "!live",
+ "!fenix",
+ "!safari",
+ "!m-car",
+ ]
+ },
+ "Talos PerfTests desktop profiling": {
+ "talos": [
+ "'talos",
+ "!android 'shippable !-32 !clang",
+ "'profil",
+ "!swr",
+ ]
+ },
+ },
+ [
+ "Resource Usage desktop profiling",
+ "DAMP (Devtools) desktop chrome",
+ "Resource Usage android",
+ "Resource Usage windows chromium",
+ ],
+ ),
+ (
+ {"android": True, "fenix": True},
+ 88,
+ {
+ "Pageload android-a51": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "!nofis",
+ "!live",
+ "!profil",
+ "!chrom",
+ "!safari",
+ "!m-car",
+ ]
+ },
+ "Pageload android-a51 fenix": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "!nofis",
+ "!live",
+ "!profil",
+ ]
+ },
+ },
+ [
+ "Resource Usage desktop profiling",
+ "DAMP (Devtools) desktop chrome",
+ "Resource Usage android",
+ "Resource Usage windows chromium",
+ ],
+ ),
+ # Show all available windows tests, no other platform should exist
+    # including the desktop category
+ (
+ {"requested_platforms": ["windows"]},
+ 14,
+ {
+ "Benchmarks windows firefox": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "!-32 'windows 'shippable",
+ "!chrom !geckoview !fenix !safari !m-car",
+ "!bytecode",
+ "!live",
+ "!profil",
+ ]
+ },
+ },
+ [
+ "Resource Usage desktop",
+ "Benchmarks desktop",
+ "Benchmarks linux firefox bytecode-cached+profiling",
+ ],
+ ),
+ # Can't have fenix on the windows platform
+ (
+ {"requested_platforms": ["windows"], "requested_apps": ["fenix"]},
+ 0,
+ {},
+ ["Benchmarks desktop"],
+ ),
+ # Android flag also needs to be supplied
+ (
+ {"requested_platforms": ["android"], "requested_apps": ["fenix"]},
+ 0,
+ {},
+ ["Benchmarks desktop"],
+ ),
+ # There should be no global categories available, only fenix
+ (
+ {
+ "requested_platforms": ["android"],
+ "requested_apps": ["fenix"],
+ "android": True,
+ },
+ 10,
+ {
+ "Pageload android fenix": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "!nofis",
+ "!live",
+ "!profil",
+ ],
+ }
+ },
+ ["Benchmarks desktop", "Pageload (live) android"],
+ ),
+ # Test with multiple apps
+ (
+ {
+ "requested_platforms": ["android"],
+ "requested_apps": ["fenix", "geckoview"],
+ "android": True,
+ },
+ 15,
+ {
+ "Benchmarks android geckoview": {
+ "raptor": [
+ "'browsertime 'benchmark",
+ "'android 'a51 'shippable 'aarch64",
+ "'geckoview",
+ "!nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ "Pageload android fenix": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "!nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ },
+ [
+ "Benchmarks desktop",
+ "Pageload android no-fission",
+ "Pageload android fenix live-sites",
+ ],
+ ),
+ # Variants are inclusive, so we'll see the variant alongside the
+ # base here for fenix
+ (
+ {
+ "requested_variants": ["fission"],
+ "requested_apps": ["fenix"],
+ "android": True,
+ },
+ 32,
+ {
+ "Pageload android-a51 fenix": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "!live",
+ "!profil",
+ ],
+ },
+ "Pageload android-a51 fenix fission": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "'nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ "Pageload (essential) android fenix fission": {
+ "raptor": [
+ "'browsertime 'tp6 'essential",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "'nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ },
+ [
+ "Benchmarks desktop",
+ "Pageload (live) android",
+ "Pageload android-p2 fenix live-sites",
+ ],
+ ),
+ # With multiple variants, we'll see the base variant (with no combinations)
+ # for each of them
+ (
+ {
+ "requested_variants": ["fission", "live-sites"],
+ "requested_apps": ["fenix"],
+ "android": True,
+ },
+ 40,
+ {
+ "Pageload android-a51 fenix": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "!profil",
+ ],
+ },
+ "Pageload android-a51 fenix fission": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "'nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ "Pageload android-a51 fenix live-sites": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "'live",
+ "!nofis",
+ "!profil",
+ ],
+ },
+ "Pageload (essential) android fenix fission": {
+ "raptor": [
+ "'browsertime 'tp6 'essential",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "'nofis",
+ "!live",
+ "!profil",
+ ],
+ },
+ "Pageload android fenix fission+live-sites": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "'android 'a51 'shippable 'aarch64",
+ "'fenix",
+ "'nofis",
+ "'live",
+ "!profil",
+ ],
+ },
+ },
+ [
+ "Benchmarks desktop",
+ "Pageload (live) android",
+ "Pageload android-p2 fenix live-sites",
+ "Pageload (essential) android fenix no-fission+live-sites",
+ ],
+ ),
+ # Make sure that no no-fission tasks are selected when a variant cannot
+ # run on a requested platform
+ (
+ {
+ "requested_variants": ["no-fission"],
+ "requested_platforms": ["windows"],
+ },
+ 14,
+ {
+ "Responsiveness windows firefox": {
+ "raptor": [
+ "'browsertime 'responsive",
+ "!-32 'windows 'shippable",
+ "!chrom !geckoview !fenix !safari !m-car",
+ "!bytecode",
+ "!live",
+ "!profil",
+ ],
+ },
+ },
+ ["Benchmarks desktop", "Responsiveness windows firefox no-fisson"],
+ ),
+ # We should only see the base and the live-site variants here for windows
+ (
+ {
+ "requested_variants": ["no-fission", "live-sites"],
+ "requested_platforms": ["windows"],
+ "android": True,
+ },
+ 16,
+ {
+ "Responsiveness windows firefox": {
+ "raptor": [
+ "'browsertime 'responsive",
+ "!-32 'windows 'shippable",
+ "!chrom !geckoview !fenix !safari !m-car",
+ "!bytecode",
+ "!profil",
+ ],
+ },
+ "Pageload windows live-sites": {
+ "raptor": [
+ "'browsertime 'tp6",
+ "!-32 'windows 'shippable",
+ "'live",
+ "!bytecode",
+ "!profil",
+ "!chrom",
+ "!fenix",
+ "!safari",
+ "!m-car",
+ ],
+ },
+ "Graphics, & Media Playback windows": {
+ "raptor": [
+ "'browsertime 'youtube-playback",
+ "!-32 'windows 'shippable",
+ "!bytecode",
+ "!profil",
+ "!chrom",
+ "!fenix",
+ "!safari",
+ "!m-car",
+ ],
+ "talos": [
+ "'talos 'svgr | 'bcv | 'webgl",
+ "!-32 'windows 'shippable",
+ "!profil",
+ "!swr",
+ ],
+ },
+ },
+ [
+ "Benchmarks desktop",
+ "Responsiveness windows firefox no-fisson",
+ "Pageload (live) android",
+ "Talos desktop live-sites",
+ "Talos android",
+ "Graphics, & Media Playback windows live-sites",
+ "Graphics, & Media Playback android no-fission",
+ ],
+ ),
+ ],
+)
+def test_category_expansion(
+ category_options, expected_counts, unique_categories, missing
+):
+ # Set the categories, and variants to expand
+ PerfParser.categories = TEST_CATEGORIES
+ PerfParser.variants = TEST_VARIANTS
+
+ # Expand the categories, then either check if the unique_categories,
+ # exist or are missing from the categories
+ expanded_cats = PerfParser.get_categories(**category_options)
+
+ assert len(expanded_cats) == expected_counts
+ assert not any([expanded_cats.get(ucat, None) is not None for ucat in missing])
+ assert all(
+ [expanded_cats.get(ucat, None) is not None for ucat in unique_categories.keys()]
+ )
+
+ # Ensure that the queries are as expected
+ for cat_name, cat_query in unique_categories.items():
+ # Don't use get here because these fields should always exist
+ assert cat_query == expanded_cats[cat_name]["queries"]
+
+
+@pytest.mark.parametrize(
+ "options, call_counts, log_ind, expected_log_message",
+ [
+ (
+ {},
+ [10, 2, 2, 10, 2, 1],
+ 2,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=revision&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ (
+ {"query": "'Pageload 'linux 'firefox"},
+ [10, 2, 2, 10, 2, 1],
+ 2,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=revision&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ (
+ {"cached_revision": "cached_base_revision"},
+ [10, 1, 1, 10, 2, 0],
+ 2,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=cached_base_revision&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ (
+ {"dry_run": True},
+ [10, 1, 1, 10, 2, 0],
+ 2,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ (
+ {"show_all": True},
+ [1, 2, 2, 8, 2, 1],
+ 0,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=revision&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ (
+ {"show_all": True, "query": "'shippable !32 speedometer 'firefox"},
+ [1, 2, 2, 8, 2, 1],
+ 0,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=revision&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ (
+ {"single_run": True},
+ [10, 1, 1, 4, 2, 0],
+ 2,
+ (
+ "If you need any help, you can find us in the #perf-help Matrix channel:\n"
+ "https://matrix.to/#/#perf-help:mozilla.org\n"
+ ),
+ ),
+ (
+ {"detect_changes": True},
+ [11, 2, 2, 10, 2, 1],
+ 2,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://treeherder.mozilla.org/perfherder/compare?originalProject=try&original"
+ "Revision=revision&newProject=try&newRevision=revision\n"
+ ),
+ ),
+ (
+ {"perfcompare_beta": True},
+ [10, 2, 2, 10, 2, 1],
+ 2,
+ (
+ "\n!!!NOTE!!!\n You'll be able to find a performance comparison "
+ "here once the tests are complete (ensure you select the right framework): "
+ "https://beta--mozilla-perfcompare.netlify.app/compare-results?"
+ "baseRev=revision&newRev=revision&baseRepo=try&newRepo=try\n"
+ ),
+ ),
+ ],
+)
+@pytest.mark.skipif(os.name == "nt", reason="fzf not installed on host")
+def test_full_run(options, call_counts, log_ind, expected_log_message):
+ with mock.patch("tryselect.selectors.perf.push_to_try") as ptt, mock.patch(
+ "tryselect.selectors.perf.run_fzf"
+ ) as fzf, mock.patch(
+ "tryselect.selectors.perf.get_repository_object", new=mock.MagicMock()
+ ), mock.patch(
+ "tryselect.selectors.perf.LogProcessor.revision",
+ new_callable=mock.PropertyMock,
+ return_value="revision",
+ ) as logger, mock.patch(
+ "tryselect.selectors.perf.PerfParser.check_cached_revision",
+ ) as ccr, mock.patch(
+ "tryselect.selectors.perf.PerfParser.save_revision_treeherder"
+ ) as srt, mock.patch(
+ "tryselect.selectors.perf.print",
+ ) as perf_print:
+ fzf_side_effects = [
+ ["", ["Benchmarks linux"]],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", ["Perftest Change Detector"]],
+ ]
+ # Number of side effects for fzf should always be greater than
+ # or equal to the number of calls expected
+ assert len(fzf_side_effects) >= call_counts[0]
+
+ fzf.side_effect = fzf_side_effects
+ ccr.return_value = options.get("cached_revision", "")
+
+ run(**options)
+
+ assert fzf.call_count == call_counts[0]
+ assert ptt.call_count == call_counts[1]
+ assert logger.call_count == call_counts[2]
+ assert perf_print.call_count == call_counts[3]
+ assert ccr.call_count == call_counts[4]
+ assert srt.call_count == call_counts[5]
+ assert perf_print.call_args_list[log_ind][0][0] == expected_log_message
+
+
+@pytest.mark.parametrize(
+ "options, call_counts, log_ind, expected_log_message, expected_failure",
+ [
+ (
+ {"detect_changes": True},
+ [11, 0, 0, 2, 1],
+ 1,
+ (
+ "Executing raptor queries: 'browsertime 'benchmark, !clang 'linux "
+ "'shippable, !bytecode, !live, !profil, !chrom, !fenix, !safari, !m-car"
+ ),
+ InvalidRegressionDetectorQuery,
+ ),
+ ],
+)
+@pytest.mark.skipif(os.name == "nt", reason="fzf not installed on host")
+def test_change_detection_task_injection_failure(
+ options,
+ call_counts,
+ log_ind,
+ expected_log_message,
+ expected_failure,
+):
+ with mock.patch("tryselect.selectors.perf.push_to_try") as ptt, mock.patch(
+ "tryselect.selectors.perf.run_fzf"
+ ) as fzf, mock.patch(
+ "tryselect.selectors.perf.get_repository_object", new=mock.MagicMock()
+ ), mock.patch(
+ "tryselect.selectors.perf.LogProcessor.revision",
+ new_callable=mock.PropertyMock,
+ return_value="revision",
+ ) as logger, mock.patch(
+ "tryselect.selectors.perf.PerfParser.check_cached_revision"
+ ) as ccr, mock.patch(
+ "tryselect.selectors.perf.print",
+ ) as perf_print:
+ fzf_side_effects = [
+ ["", ["Benchmarks linux"]],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ["", TASKS],
+ ]
+ assert len(fzf_side_effects) >= call_counts[0]
+
+ fzf.side_effect = fzf_side_effects
+
+ with pytest.raises(expected_failure):
+ run(**options)
+
+ assert fzf.call_count == call_counts[0]
+ assert ptt.call_count == call_counts[1]
+ assert logger.call_count == call_counts[2]
+ assert perf_print.call_count == call_counts[3]
+ assert ccr.call_count == call_counts[4]
+ assert perf_print.call_args_list[log_ind][0][0] == expected_log_message
+
+
+@pytest.mark.parametrize(
+ "query, should_fail",
+ [
+ (
+ {
+ "query": {
+ # Raptor has all variants available so it
+ # should fail on this category
+ "raptor": ["browsertime 'live 'no-fission"],
+ }
+ },
+ True,
+ ),
+ (
+ {
+ "query": {
+ # Awsy has no variants defined so it shouldn't fail
+ # on a query like this
+ "awsy": ["browsertime 'live 'no-fission"],
+ }
+ },
+ False,
+ ),
+ ],
+)
+def test_category_rules(query, should_fail):
+ # Set the categories, and variants to expand
+ PerfParser.categories = {"test-live": query}
+ PerfParser.variants = TEST_VARIANTS
+
+ if should_fail:
+ with pytest.raises(InvalidCategoryException):
+ PerfParser.run_category_checks()
+ else:
+ assert PerfParser.run_category_checks()
+
+ # Reset the categories, and variants to expand
+ PerfParser.categories = TEST_CATEGORIES
+ PerfParser.variants = TEST_VARIANTS
+
+
+@pytest.mark.parametrize(
+ "apk_name, apk_content, should_fail, failure_message",
+ [
+ (
+ "real-file",
+ "file-content",
+ False,
+ None,
+ ),
+ ("bad-file", None, True, "Path does not exist:"),
+ ],
+)
+def test_apk_upload(apk_name, apk_content, should_fail, failure_message):
+ with mock.patch("tryselect.selectors.perf.subprocess") as _, mock.patch(
+ "tryselect.selectors.perf.shutil"
+ ) as _:
+ temp_dir = None
+ try:
+ temp_dir = tempfile.mkdtemp()
+ sample_apk = pathlib.Path(temp_dir, apk_name)
+ if apk_content is not None:
+ with sample_apk.open("w") as f:
+ f.write(apk_content)
+
+ if should_fail:
+ with pytest.raises(Exception) as exc_info:
+ PerfParser.setup_apk_upload("browsertime", str(sample_apk))
+ assert failure_message in str(exc_info)
+ else:
+ PerfParser.setup_apk_upload("browsertime", str(sample_apk))
+ finally:
+ if temp_dir is not None:
+ shutil.rmtree(temp_dir)
+
+
+@pytest.mark.parametrize(
+ "args, load_data, return_value, call_counts, exists_cache_file",
+ [
+ (
+ (
+ [],
+ "base_commit",
+ ),
+ {
+ "base_commit": [
+ {
+ "base_revision_treeherder": "2b04563b5",
+ "date": "2023-03-31",
+ "tasks": [],
+ },
+ ],
+ },
+ "2b04563b5",
+ [1, 0],
+ True,
+ ),
+ (
+ (
+ ["task-a"],
+ "subset_base_commit",
+ ),
+ {
+ "subset_base_commit": [
+ {
+ "base_revision_treeherder": "2b04563b5",
+ "date": "2023-03-31",
+ "tasks": ["task-a", "task-b"],
+ },
+ ],
+ },
+ "2b04563b5",
+ [1, 0],
+ True,
+ ),
+ (
+ ([], "not_exist_cached_base_commit"),
+ {
+ "base_commit": [
+ {
+ "base_revision_treeherder": "2b04563b5",
+ "date": "2023-03-31",
+ "tasks": [],
+ },
+ ],
+ },
+ None,
+ [1, 0],
+ True,
+ ),
+ (
+ (
+ ["task-a", "task-b"],
+ "superset_base_commit",
+ ),
+ {
+ "superset_base_commit": [
+ {
+ "base_revision_treeherder": "2b04563b5",
+ "date": "2023-03-31",
+ "tasks": ["task-a"],
+ },
+ ],
+ },
+ None,
+ [1, 0],
+ True,
+ ),
+ (
+ ([], None),
+ {},
+ None,
+ [1, 1],
+ True,
+ ),
+ (
+ ([], None),
+ {},
+ None,
+ [0, 0],
+ False,
+ ),
+ ],
+)
+def test_check_cached_revision(
+ args, load_data, return_value, call_counts, exists_cache_file
+):
+ with mock.patch("tryselect.selectors.perf.json.load") as load, mock.patch(
+ "tryselect.selectors.perf.json.dump"
+ ) as dump, mock.patch(
+ "tryselect.selectors.perf.pathlib.Path.is_file"
+ ) as is_file, mock.patch(
+ "tryselect.selectors.perf.pathlib.Path.open"
+ ):
+ load.return_value = load_data
+ is_file.return_value = exists_cache_file
+ result = PerfParser.check_cached_revision(*args)
+
+ assert load.call_count == call_counts[0]
+ assert dump.call_count == call_counts[1]
+ assert result == return_value
+
+
+@pytest.mark.parametrize(
+ "args, call_counts, exists_cache_file",
+ [
+ (
+ ["base_commit", "base_revision_treeherder"],
+ [0, 1],
+ False,
+ ),
+ (
+ ["base_commit", "base_revision_treeherder"],
+ [1, 1],
+ True,
+ ),
+ ],
+)
+def test_save_revision_treeherder(args, call_counts, exists_cache_file):
+ with mock.patch("tryselect.selectors.perf.json.load") as load, mock.patch(
+ "tryselect.selectors.perf.json.dump"
+ ) as dump, mock.patch(
+ "tryselect.selectors.perf.pathlib.Path.is_file"
+ ) as is_file, mock.patch(
+ "tryselect.selectors.perf.pathlib.Path.open"
+ ):
+ is_file.return_value = exists_cache_file
+ PerfParser.save_revision_treeherder(TASKS, args[0], args[1])
+
+ assert load.call_count == call_counts[0]
+ assert dump.call_count == call_counts[1]
+
+
+@pytest.mark.parametrize(
+ "total_tasks, options, call_counts, expected_log_message, expected_failure",
+ [
+ (
+ MAX_PERF_TASKS + 1,
+ {},
+ [1, 0, 0, 1],
+ (
+ "\n\n----------------------------------------------------------------------------------------------\n"
+ f"You have selected {MAX_PERF_TASKS+1} total test runs! (selected tasks({MAX_PERF_TASKS+1}) * rebuild"
+ f" count(1) \nThese tests won't be triggered as the current maximum for a single ./mach try "
+ f"perf run is {MAX_PERF_TASKS}. \nIf this was unexpected, please file a bug in Testing :: Performance."
+ "\n----------------------------------------------------------------------------------------------\n\n"
+ ),
+ True,
+ ),
+ (
+ MAX_PERF_TASKS,
+ {"show_all": True},
+ [9, 0, 0, 8],
+ (
+ "For more information on the performance tests, see our "
+ "PerfDocs here:\nhttps://firefox-source-docs.mozilla.org/testing/perfdocs/"
+ ),
+ False,
+ ),
+ (
+ int((MAX_PERF_TASKS + 2) / 2),
+ {
+ "show_all": True,
+ "try_config_params": {"try_task_config": {"rebuild": 2}},
+ },
+ [1, 0, 0, 1],
+ (
+ "\n\n----------------------------------------------------------------------------------------------\n"
+ f"You have selected {int((MAX_PERF_TASKS + 2) / 2) * 2} total test runs! (selected tasks("
+ f"{int((MAX_PERF_TASKS + 2) / 2)}) * rebuild"
+ f" count(2) \nThese tests won't be triggered as the current maximum for a single ./mach try "
+ f"perf run is {MAX_PERF_TASKS}. \nIf this was unexpected, please file a bug in Testing :: Performance."
+ "\n----------------------------------------------------------------------------------------------\n\n"
+ ),
+ True,
+ ),
+ (0, {}, [1, 0, 0, 1], ("No tasks selected"), True),
+ ],
+)
+def test_max_perf_tasks(
+ total_tasks,
+ options,
+ call_counts,
+ expected_log_message,
+ expected_failure,
+):
+ # Set the categories, and variants to expand
+ PerfParser.categories = TEST_CATEGORIES
+ PerfParser.variants = TEST_VARIANTS
+
+ with mock.patch("tryselect.selectors.perf.push_to_try") as ptt, mock.patch(
+ "tryselect.selectors.perf.print",
+ ) as perf_print, mock.patch(
+ "tryselect.selectors.perf.LogProcessor.revision",
+ new_callable=mock.PropertyMock,
+ return_value="revision",
+ ), mock.patch(
+ "tryselect.selectors.perf.PerfParser.perf_push_to_try",
+ new_callable=mock.MagicMock,
+ return_value=("revision1", "revision2"),
+ ) as perf_push_to_try_mock, mock.patch(
+ "tryselect.selectors.perf.PerfParser.get_perf_tasks"
+ ) as get_perf_tasks_mock, mock.patch(
+ "tryselect.selectors.perf.PerfParser.get_tasks"
+ ) as get_tasks_mock, mock.patch(
+ "tryselect.selectors.perf.run_fzf"
+ ) as fzf, mock.patch(
+ "tryselect.selectors.perf.fzf_bootstrap", return_value=mock.MagicMock()
+ ):
+ tasks = ["a-task"] * total_tasks
+ get_tasks_mock.return_value = tasks
+ get_perf_tasks_mock.return_value = tasks, [], []
+
+ run(**options)
+
+ assert perf_push_to_try_mock.call_count == 0 if expected_failure else 1
+ assert ptt.call_count == call_counts[1]
+ assert perf_print.call_count == call_counts[3]
+ assert fzf.call_count == 0
+ assert perf_print.call_args_list[-1][0][0] == expected_log_message
+
+
+@pytest.mark.parametrize(
+ "try_config, selected_tasks, expected_try_config",
+ [
+ (
+ {"use-artifact-builds": True},
+ ["some-android-task"],
+ {"use-artifact-builds": False},
+ ),
+ (
+ {"use-artifact-builds": True},
+ ["some-desktop-task"],
+ {"use-artifact-builds": True},
+ ),
+ (
+ {"use-artifact-builds": False},
+ ["some-android-task"],
+ {"use-artifact-builds": False},
+ ),
+ (
+ {"use-artifact-builds": True},
+ ["some-desktop-task", "some-android-task"],
+ {"use-artifact-builds": False},
+ ),
+ ],
+)
+def test_artifact_mode_autodisable(try_config, selected_tasks, expected_try_config):
+ PerfParser.setup_try_config({"try_task_config": try_config}, [], selected_tasks)
+ assert (
+ try_config["use-artifact-builds"] == expected_try_config["use-artifact-builds"]
+ )
+
+
+def test_build_category_description():
+ base_cmd = ["--preview", '-t "{+f}"']
+
+ with mock.patch("tryselect.selectors.perf.json.dump") as dump:
+ PerfParser.build_category_description(base_cmd, "")
+
+ assert dump.call_count == 1
+ assert str(base_cmd).count("-d") == 1
+ assert str(base_cmd).count("-l") == 1
+
+
+@pytest.mark.parametrize(
+ "options, call_count",
+ [
+ ({}, [1, 1, 2]),
+ ({"show_all": True}, [0, 0, 1]),
+ ],
+)
+def test_preview_description(options, call_count):
+ with mock.patch("tryselect.selectors.perf.PerfParser.perf_push_to_try"), mock.patch(
+ "tryselect.selectors.perf.fzf_bootstrap"
+ ), mock.patch(
+ "tryselect.selectors.perf.PerfParser.get_perf_tasks"
+ ) as get_perf_tasks, mock.patch(
+ "tryselect.selectors.perf.PerfParser.get_tasks"
+ ), mock.patch(
+ "tryselect.selectors.perf.PerfParser.build_category_description"
+ ) as bcd:
+ get_perf_tasks.return_value = [], [], []
+
+ run(**options)
+
+ assert bcd.call_count == call_count[0]
+
+ base_cmd = ["--preview", '-t "{+f}"']
+ option = base_cmd[base_cmd.index("--preview") + 1].split(" ")
+ description, line = None, None
+ if call_count[0] == 1:
+ PerfParser.build_category_description(base_cmd, "")
+ option = base_cmd[base_cmd.index("--preview") + 1].split(" ")
+ description = option[option.index("-d") + 1]
+ line = "Current line"
+
+ taskfile = option[option.index("-t") + 1]
+
+ with mock.patch("tryselect.selectors.perf_preview.open"), mock.patch(
+ "tryselect.selectors.perf_preview.pathlib.Path.open"
+ ), mock.patch("tryselect.selectors.perf_preview.json.load") as load, mock.patch(
+ "tryselect.selectors.perf_preview.print"
+ ) as preview_print:
+ load.return_value = {line: "test description"}
+
+ plain_display(taskfile, description, line)
+
+ assert load.call_count == call_count[1]
+ assert preview_print.call_count == call_count[2]
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_perfcomparators.py b/tools/tryselect/test/test_perfcomparators.py
new file mode 100644
index 0000000000..51f0bdb287
--- /dev/null
+++ b/tools/tryselect/test/test_perfcomparators.py
@@ -0,0 +1,150 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import tempfile
+from unittest import mock
+
+import mozunit
+import pytest
+from tryselect.selectors.perfselector.perfcomparators import (
+ BadComparatorArgs,
+ BenchmarkComparator,
+ ComparatorNotFound,
+ get_comparator,
+)
+
+
+@pytest.mark.parametrize(
+ "test_link",
+ [
+ "https://github.com/mozilla-mobile/firefox-android/pull/1627",
+ "https://github.com/mozilla-mobile/firefox-android/pull/1876/"
+ "commits/17c7350cc37a4a85cea140a7ce54e9fd037b5365",
+ ],
+)
+def test_benchmark_comparator(test_link):
+ def _verify_extra_args(extra_args):
+ assert len(extra_args) == 3
+ if "commit" in test_link:
+ assert (
+ "benchmark-revision=17c7350cc37a4a85cea140a7ce54e9fd037b5365"
+ in extra_args
+ )
+ else:
+ assert "benchmark-revision=sha-for-link" in extra_args
+ assert "benchmark-repository=url-for-link" in extra_args
+ assert "benchmark-branch=ref-for-link" in extra_args
+
+ comparator = BenchmarkComparator(
+ None, None, None, [f"base-link={test_link}", f"new-link={test_link}"]
+ )
+
+ with mock.patch("requests.get") as mocked_get:
+ magic_get = mock.MagicMock()
+ magic_get.json.return_value = {
+ "head": {
+ "repo": {
+ "html_url": "url-for-link",
+ },
+ "sha": "sha-for-link",
+ "ref": "ref-for-link",
+ }
+ }
+ magic_get.status_code = 200
+ mocked_get.return_value = magic_get
+
+ extra_args = []
+ comparator.setup_base_revision(extra_args)
+ _verify_extra_args(extra_args)
+
+ extra_args = []
+ comparator.setup_new_revision(extra_args)
+ _verify_extra_args(extra_args)
+
+
+def test_benchmark_comparator_no_pr_links():
+ def _verify_extra_args(extra_args):
+ assert len(extra_args) == 3
+ assert "benchmark-revision=rev" in extra_args
+ assert "benchmark-repository=link" in extra_args
+ assert "benchmark-branch=fake" in extra_args
+
+ comparator = BenchmarkComparator(
+ None,
+ None,
+ None,
+ [
+ "base-repo=link",
+ "base-branch=fake",
+ "base-revision=rev",
+ "new-repo=link",
+ "new-branch=fake",
+ "new-revision=rev",
+ ],
+ )
+
+ with mock.patch("requests.get") as mocked_get:
+ magic_get = mock.MagicMock()
+ magic_get.json.return_value = {
+ "head": {
+ "repo": {
+ "html_url": "url-for-link",
+ },
+ "sha": "sha-for-link",
+ "ref": "ref-for-link",
+ }
+ }
+ magic_get.status_code = 200
+ mocked_get.return_value = magic_get
+
+ extra_args = []
+ comparator.setup_base_revision(extra_args)
+ _verify_extra_args(extra_args)
+
+ extra_args = []
+ comparator.setup_new_revision(extra_args)
+ _verify_extra_args(extra_args)
+
+
+def test_benchmark_comparator_bad_args():
+ comparator = BenchmarkComparator(
+ None,
+ None,
+ None,
+ [
+ "base-bad-args=val",
+ ],
+ )
+
+ with pytest.raises(BadComparatorArgs):
+ comparator.setup_base_revision([])
+
+
+def test_get_comparator_bad_name():
+ with pytest.raises(ComparatorNotFound):
+ get_comparator("BadName")
+
+
+def test_get_comparator_bad_script():
+ with pytest.raises(ComparatorNotFound):
+ with tempfile.NamedTemporaryFile() as tmpf:
+ tmpf.close()
+ get_comparator(tmpf.name)
+
+
+def test_get_comparator_benchmark_name():
+ comparator_klass = get_comparator("BenchmarkComparator")
+ assert comparator_klass.__name__ == "BenchmarkComparator"
+
+
+def test_get_comparator_benchmark_script():
+ # If the get_comparator method is working for scripts, then
+ # it should find the first defined class in this file, or the
+ # first imported class that matches it
+ comparator_klass = get_comparator(__file__)
+ assert comparator_klass.__name__ == "BenchmarkComparator"
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_preset.t b/tools/tryselect/test/test_preset.t
new file mode 100644
index 0000000000..13e6946d32
--- /dev/null
+++ b/tools/tryselect/test/test_preset.t
@@ -0,0 +1,390 @@
+ $ . $TESTDIR/setup.sh
+ $ cd $topsrcdir
+
+Test preset with no subcommand
+
+ $ ./mach try $testargs --save foo -b do -p linux -u mochitests -t none --tag foo
+ preset saved, run with: --preset=foo
+
+ $ ./mach try $testargs --preset foo
+ Commit message:
+ try: -b do -p linux -u mochitests -t none --tag foo
+
+ Pushed via `mach try syntax`
+
+ $ ./mach try syntax $testargs --preset foo
+ Commit message:
+ try: -b do -p linux -u mochitests -t none --tag foo
+
+ Pushed via `mach try syntax`
+
+ $ ./mach try $testargs --list-presets
+ Presets from */mozbuild/try_presets.yml: (glob)
+
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+
+ $ unset EDITOR
+ $ ./mach try $testargs --edit-presets
+ error: must set the $EDITOR environment variable to use --edit-presets
+ $ export EDITOR=cat
+ $ ./mach try $testargs --edit-presets
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+
+Test preset with syntax subcommand
+
+ $ ./mach try syntax $testargs --save bar -b do -p win32 -u none -t all --tag bar
+ preset saved, run with: --preset=bar
+
+ $ ./mach try syntax $testargs --preset bar
+ Commit message:
+ try: -b do -p win32 -u none -t all --tag bar
+
+ Pushed via `mach try syntax`
+
+ $ ./mach try $testargs --preset bar
+ Commit message:
+ try: -b do -p win32 -u none -t all --tag bar
+
+ Pushed via `mach try syntax`
+
+ $ ./mach try syntax $testargs --list-presets
+ Presets from */mozbuild/try_presets.yml: (glob)
+
+ bar:
+ dry_run: true
+ no_artifact: true
+ platforms:
+ - win32
+ selector: syntax
+ tags:
+ - bar
+ talos:
+ - all
+ tests:
+ - none
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+
+ $ ./mach try syntax $testargs --edit-presets
+ bar:
+ dry_run: true
+ no_artifact: true
+ platforms:
+ - win32
+ selector: syntax
+ tags:
+ - bar
+ talos:
+ - all
+ tests:
+ - none
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+
+Test preset with fuzzy subcommand
+
+ $ ./mach try fuzzy $testargs --save baz -q "'foo" --rebuild 5
+ preset saved, run with: --preset=baz
+
+ $ ./mach try fuzzy $testargs --preset baz
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "rebuild": 5,
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+
+ $ ./mach try $testargs --preset baz
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "rebuild": 5,
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+
+Queries can be appended to presets
+
+ $ ./mach try fuzzy $testargs --preset baz -q "'build"
+ Commit message:
+ Fuzzy query='foo&query='build
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "rebuild": 5,
+ "tasks": [
+ "build-baz",
+ "test/foo-debug",
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+
+ $ ./mach try $testargs --preset baz -xq "'opt"
+ Commit message:
+ Fuzzy query='foo&query='opt
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "rebuild": 5,
+ "tasks": [
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+
+ $ ./mach try fuzzy $testargs --list-presets
+ Presets from */mozbuild/try_presets.yml: (glob)
+
+ bar:
+ dry_run: true
+ no_artifact: true
+ platforms:
+ - win32
+ selector: syntax
+ tags:
+ - bar
+ talos:
+ - all
+ tests:
+ - none
+ baz:
+ dry_run: true
+ no_artifact: true
+ query:
+ - "'foo"
+ rebuild: 5
+ selector: fuzzy
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+
+ $ ./mach try fuzzy $testargs --edit-presets
+ bar:
+ dry_run: true
+ no_artifact: true
+ platforms:
+ - win32
+ selector: syntax
+ tags:
+ - bar
+ talos:
+ - all
+ tests:
+ - none
+ baz:
+ dry_run: true
+ no_artifact: true
+ query:
+ - "'foo"
+ rebuild: 5
+ selector: fuzzy
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+
+Test gecko-profile argument handling. Add in profiling to a preset.
+
+ $ ./mach try fuzzy $testargs --preset baz --gecko-profile-features=nostacksampling,cpu
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "gecko-profile": true,
+ "gecko-profile-features": "nostacksampling,cpu",
+ "rebuild": 5,
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+Check whether the gecko-profile flags can be used from a preset, and check
+dashes vs underscores (presets save with underscores to match ArgumentParser
+settings; everything else uses dashes.)
+
+ $ ./mach try fuzzy $testargs --save profile -q "'foo" --rebuild 5 --gecko-profile-features=nostacksampling,cpu
+ preset saved, run with: --preset=profile
+
+ $ ./mach try fuzzy $testargs --preset profile
+ Commit message:
+ Fuzzy query='foo
+
+ Pushed via `mach try fuzzy`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": false,
+ "try_task_config": {
+ "env": {
+ "TRY_SELECTOR": "fuzzy"
+ },
+ "gecko-profile": true,
+ "gecko-profile-features": "nostacksampling,cpu",
+ "rebuild": 5,
+ "tasks": [
+ "test/foo-debug",
+ "test/foo-opt"
+ ]
+ }
+ },
+ "version": 2
+ }
+
+ $ EDITOR=cat ./mach try fuzzy $testargs --edit-preset profile
+ bar:
+ dry_run: true
+ no_artifact: true
+ platforms:
+ - win32
+ selector: syntax
+ tags:
+ - bar
+ talos:
+ - all
+ tests:
+ - none
+ baz:
+ dry_run: true
+ no_artifact: true
+ query:
+ - "'foo"
+ rebuild: 5
+ selector: fuzzy
+ foo:
+ no_artifact: true
+ platforms:
+ - linux
+ selector: syntax
+ tags:
+ - foo
+ talos:
+ - none
+ tests:
+ - mochitests
+ profile:
+ dry_run: true
+ gecko_profile_features: nostacksampling,cpu
+ no_artifact: true
+ query:
+ - "'foo"
+ rebuild: 5
+ selector: fuzzy
+
+ $ rm $MOZBUILD_STATE_PATH/try_presets.yml
diff --git a/tools/tryselect/test/test_presets.py b/tools/tryselect/test/test_presets.py
new file mode 100644
index 0000000000..89cc810808
--- /dev/null
+++ b/tools/tryselect/test/test_presets.py
@@ -0,0 +1,58 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import mozunit
+import pytest
+
+TASKS = [
+ {
+ "kind": "build",
+ "label": "build-windows",
+ "attributes": {
+ "build_platform": "windows",
+ },
+ },
+ {
+ "kind": "test",
+ "label": "test-windows-mochitest-e10s",
+ "attributes": {
+ "unittest_suite": "mochitest",
+ "unittest_flavor": "browser-chrome",
+ "mochitest_try_name": "mochitest",
+ },
+ },
+]
+
+
+@pytest.fixture(autouse=True)
+def skip_taskgraph_generation(monkeypatch, tg):
+ def fake_generate_tasks(*args, **kwargs):
+ return tg
+
+ from tryselect import tasks
+
+ monkeypatch.setattr(tasks, "generate_tasks", fake_generate_tasks)
+
+
+@pytest.mark.xfail(
+ strict=False, reason="Bug 1635204: " "test_shared_presets[sample-suites] is flaky"
+)
+def test_shared_presets(run_mach, shared_name, shared_preset):
+ """This test makes sure that we don't break any of the in-tree presets when
+ renaming/removing variables in any of the selectors.
+ """
+ assert "description" in shared_preset
+ assert "selector" in shared_preset
+
+ selector = shared_preset["selector"]
+ if selector == "fuzzy":
+ assert "query" in shared_preset
+ assert isinstance(shared_preset["query"], list)
+
+ # Run the preset and assert there were no exceptions.
+ assert run_mach(["try", "--no-push", "--preset", shared_name]) == 0
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_push.py b/tools/tryselect/test/test_push.py
new file mode 100644
index 0000000000..97f2e047d7
--- /dev/null
+++ b/tools/tryselect/test/test_push.py
@@ -0,0 +1,54 @@
+import mozunit
+import pytest
+from tryselect import push
+
+
+@pytest.mark.parametrize(
+ "method,labels,params,routes,expected",
+ (
+ pytest.param(
+ "fuzzy",
+ ["task-foo", "task-bar"],
+ None,
+ None,
+ {
+ "parameters": {
+ "optimize_target_tasks": False,
+ "try_task_config": {
+ "env": {"TRY_SELECTOR": "fuzzy"},
+ "tasks": ["task-bar", "task-foo"],
+ },
+ },
+ "version": 2,
+ },
+ id="basic",
+ ),
+ pytest.param(
+ "fuzzy",
+ ["task-foo"],
+ {"existing_tasks": {"task-foo": "123", "task-bar": "abc"}},
+ None,
+ {
+ "parameters": {
+ "existing_tasks": {"task-bar": "abc"},
+ "optimize_target_tasks": False,
+ "try_task_config": {
+ "env": {"TRY_SELECTOR": "fuzzy"},
+ "tasks": ["task-foo"],
+ },
+ },
+ "version": 2,
+ },
+ id="existing_tasks",
+ ),
+ ),
+)
+def test_generate_try_task_config(method, labels, params, routes, expected):
+ assert (
+ push.generate_try_task_config(method, labels, params=params, routes=routes)
+ == expected
+ )
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_release.py b/tools/tryselect/test/test_release.py
new file mode 100644
index 0000000000..a1a0d348b2
--- /dev/null
+++ b/tools/tryselect/test/test_release.py
@@ -0,0 +1,43 @@
+# Any copyright is dedicated to the Public Domain.
+# https://creativecommons.org/publicdomain/zero/1.0/
+
+from textwrap import dedent
+
+import mozunit
+
+
+def test_release(run_mach, capfd):
+ cmd = [
+ "try",
+ "release",
+ "--no-push",
+ "--version=97.0",
+ ]
+ assert run_mach(cmd) == 0
+
+ output = capfd.readouterr().out
+ print(output)
+
+ expected = dedent(
+ """
+ Commit message:
+ staging release: 97.0
+
+ Pushed via `mach try release`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "optimize_target_tasks": true,
+ "release_type": "release",
+ "target_tasks_method": "staging_release_builds"
+ },
+ "version": 2
+ }
+
+ """
+ ).lstrip()
+ assert expected in output
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_scriptworker.py b/tools/tryselect/test/test_scriptworker.py
new file mode 100644
index 0000000000..e25279ace4
--- /dev/null
+++ b/tools/tryselect/test/test_scriptworker.py
@@ -0,0 +1,39 @@
+# Any copyright is dedicated to the Public Domain.
+# https://creativecommons.org/publicdomain/zero/1.0/
+
+import re
+from textwrap import dedent
+
+import mozunit
+
+
+def test_release(run_mach, capfd):
+ cmd = [
+ "try",
+ "scriptworker",
+ "--no-push",
+ "tree",
+ ]
+ assert run_mach(cmd) == 0
+
+ output = capfd.readouterr().out
+ print(output)
+
+ expected = re.compile(
+ dedent(
+ r"""
+ Pushed via `mach try scriptworker`
+ Calculated try_task_config.json:
+ {
+ "parameters": {
+ "app_version": "\d+\.\d+",
+ "build_number": \d+,
+ """
+ ).lstrip(),
+ re.MULTILINE,
+ )
+ assert expected.search(output)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_task_configs.py b/tools/tryselect/test/test_task_configs.py
new file mode 100644
index 0000000000..afa21bfabf
--- /dev/null
+++ b/tools/tryselect/test/test_task_configs.py
@@ -0,0 +1,257 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import inspect
+from argparse import ArgumentParser
+from textwrap import dedent
+
+import mozunit
+import pytest
+from tryselect.task_config import Pernosco, all_task_configs
+
+TC_URL = "https://firefox-ci-tc.services.mozilla.com"
+TH_URL = "https://treeherder.mozilla.org"
+
+# task configs have a list of tests of the form (input, expected)
+TASK_CONFIG_TESTS = {
+ "artifact": [
+ (["--no-artifact"], None),
+ (
+ ["--artifact"],
+ {"try_task_config": {"use-artifact-builds": True, "disable-pgo": True}},
+ ),
+ ],
+ "chemspill-prio": [
+ ([], None),
+ (["--chemspill-prio"], {"try_task_config": {"chemspill-prio": True}}),
+ ],
+ "env": [
+ ([], None),
+ (
+ ["--env", "foo=bar", "--env", "num=10"],
+ {"try_task_config": {"env": {"foo": "bar", "num": "10"}}},
+ ),
+ ],
+ "path": [
+ ([], None),
+ (
+ ["dom/indexedDB"],
+ {
+ "try_task_config": {
+ "env": {"MOZHARNESS_TEST_PATHS": '{"xpcshell": ["dom/indexedDB"]}'}
+ }
+ },
+ ),
+ (
+ ["dom/indexedDB", "testing"],
+ {
+ "try_task_config": {
+ "env": {
+ "MOZHARNESS_TEST_PATHS": '{"xpcshell": ["dom/indexedDB", "testing"]}'
+ }
+ }
+ },
+ ),
+ (["invalid/path"], SystemExit),
+ ],
+ "pernosco": [
+ ([], None),
+ ],
+ "rebuild": [
+ ([], None),
+ (["--rebuild", "10"], {"try_task_config": {"rebuild": 10}}),
+ (["--rebuild", "1"], SystemExit),
+ (["--rebuild", "21"], SystemExit),
+ ],
+ "worker-overrides": [
+ ([], None),
+ (
+ ["--worker-override", "alias=worker/pool"],
+ {"try_task_config": {"worker-overrides": {"alias": "worker/pool"}}},
+ ),
+ (
+ [
+ "--worker-override",
+ "alias=worker/pool",
+ "--worker-override",
+ "alias=other/pool",
+ ],
+ SystemExit,
+ ),
+ (
+ ["--worker-suffix", "b-linux=-dev"],
+ {
+ "try_task_config": {
+ "worker-overrides": {"b-linux": "gecko-1/b-linux-dev"}
+ }
+ },
+ ),
+ (
+ [
+ "--worker-override",
+ "b-linux=worker/pool" "--worker-suffix",
+ "b-linux=-dev",
+ ],
+ SystemExit,
+ ),
+ ],
+ "new-test-config": [
+ ([], None),
+ (["--new-test-config"], {"try_task_config": {"new-test-config": True}}),
+ ],
+}
+
+
+@pytest.fixture
+def config_patch_resolver(patch_resolver):
+ def inner(paths):
+ patch_resolver(
+ [], [{"flavor": "xpcshell", "srcdir_relpath": path} for path in paths]
+ )
+
+ return inner
+
+
+def test_task_configs(config_patch_resolver, task_config, args, expected):
+ parser = ArgumentParser()
+
+ cfg = all_task_configs[task_config]()
+ cfg.add_arguments(parser)
+
+ if inspect.isclass(expected) and issubclass(expected, BaseException):
+ with pytest.raises(expected):
+ args = parser.parse_args(args)
+ if task_config == "path":
+ config_patch_resolver(**vars(args))
+
+ cfg.get_parameters(**vars(args))
+ else:
+ args = parser.parse_args(args)
+ if task_config == "path":
+ config_patch_resolver(**vars(args))
+
+ params = cfg.get_parameters(**vars(args))
+ assert params == expected
+
+
+@pytest.fixture
+def patch_ssh_user(mocker):
+ def inner(user):
+ mock_stdout = mocker.Mock()
+ mock_stdout.stdout = dedent(
+ f"""
+ key1 foo
+ user {user}
+ key2 bar
+ """
+ )
+ return mocker.patch(
+ "tryselect.util.ssh.subprocess.run", return_value=mock_stdout
+ )
+
+ return inner
+
+
+def test_pernosco(patch_ssh_user):
+ patch_ssh_user("user@mozilla.com")
+ parser = ArgumentParser()
+
+ cfg = Pernosco()
+ cfg.add_arguments(parser)
+ args = parser.parse_args(["--pernosco"])
+ params = cfg.get_parameters(**vars(args))
+ assert params == {"try_task_config": {"env": {"PERNOSCO": "1"}}}
+
+
+def test_exisiting_tasks(responses, patch_ssh_user):
+ parser = ArgumentParser()
+ cfg = all_task_configs["existing-tasks"]()
+ cfg.add_arguments(parser)
+
+ user = "user@example.com"
+ rev = "a" * 40
+ task_id = "abc"
+ label_to_taskid = {"task-foo": "123", "task-bar": "456"}
+
+ args = ["--use-existing-tasks"]
+ args = parser.parse_args(args)
+
+ responses.add(
+ responses.GET,
+ f"{TH_URL}/api/project/try/push/?count=1&author={user}",
+ json={"meta": {"count": 1}, "results": [{"revision": rev}]},
+ )
+
+ responses.add(
+ responses.GET,
+ f"{TC_URL}/api/index/v1/task/gecko.v2.try.revision.{rev}.taskgraph.decision",
+ json={"taskId": task_id},
+ )
+
+ responses.add(
+ responses.GET,
+ f"{TC_URL}/api/queue/v1/task/{task_id}/artifacts/public/label-to-taskid.json",
+ json=label_to_taskid,
+ )
+
+ m = patch_ssh_user(user)
+ params = cfg.get_parameters(**vars(args))
+ assert params == {"existing_tasks": label_to_taskid}
+
+ m.assert_called_once_with(
+ ["ssh", "-G", "hg.mozilla.org"], text=True, check=True, capture_output=True
+ )
+
+
+def test_exisiting_tasks_task_id(responses):
+ parser = ArgumentParser()
+ cfg = all_task_configs["existing-tasks"]()
+ cfg.add_arguments(parser)
+
+ task_id = "abc"
+ label_to_taskid = {"task-foo": "123", "task-bar": "456"}
+
+ args = ["--use-existing-tasks", f"task-id={task_id}"]
+ args = parser.parse_args(args)
+
+ responses.add(
+ responses.GET,
+ f"{TC_URL}/api/queue/v1/task/{task_id}/artifacts/public/label-to-taskid.json",
+ json=label_to_taskid,
+ )
+
+ params = cfg.get_parameters(**vars(args))
+ assert params == {"existing_tasks": label_to_taskid}
+
+
+def test_exisiting_tasks_rev(responses):
+ parser = ArgumentParser()
+ cfg = all_task_configs["existing-tasks"]()
+ cfg.add_arguments(parser)
+
+ rev = "aaaaaa"
+ task_id = "abc"
+ label_to_taskid = {"task-foo": "123", "task-bar": "456"}
+
+ args = ["--use-existing-tasks", f"rev={rev}"]
+ args = parser.parse_args(args)
+
+ responses.add(
+ responses.GET,
+ f"{TC_URL}/api/index/v1/task/gecko.v2.try.revision.{rev}.taskgraph.decision",
+ json={"taskId": task_id},
+ )
+
+ responses.add(
+ responses.GET,
+ f"{TC_URL}/api/queue/v1/task/{task_id}/artifacts/public/label-to-taskid.json",
+ json=label_to_taskid,
+ )
+
+ params = cfg.get_parameters(**vars(args))
+ assert params == {"existing_tasks": label_to_taskid}
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/test/test_tasks.py b/tools/tryselect/test/test_tasks.py
new file mode 100644
index 0000000000..2e99c72d8b
--- /dev/null
+++ b/tools/tryselect/test/test_tasks.py
@@ -0,0 +1,93 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+import mozunit
+import pytest
+from tryselect.tasks import cache_key, filter_tasks_by_paths, resolve_tests_by_suite
+
+
+def test_filter_tasks_by_paths(patch_resolver):
+ tasks = {"foobar/xpcshell-1": {}, "foobar/mochitest": {}, "foobar/xpcshell": {}}
+
+ patch_resolver(["xpcshell"], {})
+ assert list(filter_tasks_by_paths(tasks, "dummy")) == []
+
+ patch_resolver([], [{"flavor": "xpcshell"}])
+ assert list(filter_tasks_by_paths(tasks, "dummy")) == [
+ "foobar/xpcshell-1",
+ "foobar/xpcshell",
+ ]
+
+
+@pytest.mark.parametrize(
+ "input, tests, expected",
+ (
+ pytest.param(
+ ["xpcshell.js"],
+ [{"flavor": "xpcshell", "srcdir_relpath": "xpcshell.js"}],
+ {"xpcshell": ["xpcshell.js"]},
+ id="single test",
+ ),
+ pytest.param(
+ ["xpcshell.ini"],
+ [
+ {
+ "flavor": "xpcshell",
+ "srcdir_relpath": "xpcshell.js",
+ "manifest_relpath": "xpcshell.ini",
+ },
+ ],
+ {"xpcshell": ["xpcshell.ini"]},
+ id="single manifest",
+ ),
+ pytest.param(
+ ["xpcshell.js", "mochitest.js"],
+ [
+ {"flavor": "xpcshell", "srcdir_relpath": "xpcshell.js"},
+ {"flavor": "mochitest", "srcdir_relpath": "mochitest.js"},
+ ],
+ {
+ "xpcshell": ["xpcshell.js"],
+ "mochitest-plain": ["mochitest.js"],
+ },
+ id="two tests",
+ ),
+ pytest.param(
+ ["test/xpcshell.ini"],
+ [
+ {
+ "flavor": "xpcshell",
+ "srcdir_relpath": "test/xpcshell.js",
+ "manifest_relpath": os.path.join("test", "xpcshell.ini"),
+ },
+ ],
+ {"xpcshell": ["test/xpcshell.ini"]},
+ id="mismatched path separators",
+ ),
+ ),
+)
+def test_resolve_tests_by_suite(patch_resolver, input, tests, expected):
+ patch_resolver([], tests)
+ assert resolve_tests_by_suite(input) == expected
+
+
+@pytest.mark.parametrize(
+ "attr,params,disable_target_task_filter,expected",
+ (
+ ("target_task_set", None, False, "target_task_set"),
+ ("target_task_set", {"project": "autoland"}, False, "target_task_set"),
+ ("target_task_set", {"project": "mozilla-central"}, False, "target_task_set"),
+ ("target_task_set", None, True, "target_task_set-uncommon"),
+ ("full_task_set", {"project": "pine"}, False, "full_task_set-pine"),
+ ("full_task_set", None, True, "full_task_set"),
+ ),
+)
+def test_cache_key(attr, params, disable_target_task_filter, expected):
+ assert cache_key(attr, params, disable_target_task_filter) == expected
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/tools/tryselect/try_presets.yml b/tools/tryselect/try_presets.yml
new file mode 100644
index 0000000000..ebdc94aa03
--- /dev/null
+++ b/tools/tryselect/try_presets.yml
@@ -0,0 +1,298 @@
+---
+# Presets defined here will be available to all users. Run them with:
+# $ mach try --preset <name>
+#
+# If editing this file, make sure to run:
+# $ mach python-test tools/tryselect/test/test_presets.py
+#
+# Descriptions are required. Please keep this in alphabetical order.
+
+# yamllint disable rule:line-length
+
+builds:
+ selector: fuzzy
+ description: >-
+ Run builds without any of the extras.
+ query:
+ - "^build- !fuzzing !notarization !reproduced !rusttests !signing !upload-symbols"
+
+builds-debug:
+ selector: fuzzy
+ description: >-
+ Run the bare minimum of debug build jobs to ensure builds work on
+ all tier-1 platforms.
+ query:
+ - "^build- 'debug !fuzzing !rusttests !signing !plain !asan !tsan !noopt !toolchain !upload-symbols"
+
+builds-debugopt:
+ selector: fuzzy
+ description: >-
+ Run the bare minimum of debug and opt build jobs to ensure builds work on
+ all tier-1 platforms.
+ query:
+ - "^build- !fuzzing !rusttests !signing !plain !asan !tsan !noopt !toolchain !upload-symbols"
+
+desktop-frontend:
+ description: >-
+ Run mochitest-browser, xpcshell, mochitest-chrome, mochitest-a11y,
+ marionette, firefox-ui-functional on all desktop platforms.
+ Excludes non-shipped/duplicate configurations like asan/tsan/msix
+ to reduce the runtime of the push as well as infra load.
+ Use with --artifact to speed up your trypush.
+ If this is green, you can be 99% sure that any frontend change will
+ stick on central.
+ selector: fuzzy
+ query:
+ # Runs 64-bit frontend-tests, plus win7. Tries to avoid running
+ # asan/tsan because they're not available as artifact builds, and
+ # rarely offer different results from debug/opt. It also avoids running
+ # msix/swr/a11y-checks/gpu/nofis/headless variants of otherwise
+ # identical tests, as again those are unlikely to show different
+ # results for frontend-only changes.
+ # This won't run 32-bit debug tests, which seems an acceptable
+ # trade-off for query complexity + runtime on infrastructure.
+ - "'browser-chrome 'windows7 | '64 !spi !asan !tsan !msix !a11y !swr | 'linux"
+ - "'mochitest-chrome 'windows7 | '64 !spi !asan !tsan !swr !gpu"
+ - "'xpcshell 'windows7 | '64 !spi !asan !tsan !msix !nofis !condprof"
+ - "'browser-a11y | 'mochitest-a11y 'windows7 | '64 !spi !asan !tsan !no-cache !swr"
+ - "'marionette 'windows7 | '64 !asan !source !headless !swr"
+ - "'firefox-ui-functional 'windows7 | '64 !asan !tsan"
+
+devtools:
+ selector: fuzzy
+ description: >-
+ Runs the tests relevant to the Firefox Devtools
+ query:
+ - "'node-debugger | 'node-devtools"
+ - "'mozlint-eslint"
+ # Windows: skip jobs on asan and 32 bits platforms
+ - "'mochitest-devtools-chrome | 'mochitest-chrome-1proc 'windows !asan !-32"
+ # macos: no extra platform to filter out
+ - "'mochitest-devtools-chrome | 'mochitest-chrome-1proc 'macosx"
+ # Linux is being named "linux1804" and may change over time, so use a more flexible search
+ - "'mochitest-devtools-chrome | 'mochitest-chrome-1proc 'linux '64-qr/ !swr"
+ - "'xpcshell 'linux !nofis '64-qr/"
+
+devtools-linux:
+ selector: fuzzy
+ description: >-
+ Runs the tests relevant to the Firefox Devtools, on Linux only.
+ query:
+ - "'node-debugger | 'node-devtools"
+ - "'mozlint-eslint"
+ - "'mochitest-devtools-chrome | 'mochitest-chrome-1proc 'linux '64-qr/ !swr"
+ - "'xpcshell 'linux !nofis '64-qr/"
+
+fpush-linux-android:
+ selector: fuzzy
+ description: >-
+ Runs correctness test suites on Linux and Android emulator platforms, as
+ well as builds across main platforms. The resulting jobs on TreeHerder
+ used to end up looking like a "F" shape (not so much these days) and so
+ this is typically referred to as an F-push. This is useful to do as a
+    general sanity check on changes to cross-platform Gecko code where you are
+ unsure of what tests might be affected. Linux and Android (emulator)
+ test coverage are relatively cheap to run and cover a lot of the
+ codebase, while the builds on other platforms catch compilation problems
+ that might result from only building locally on one platform.
+ query:
+ - "'test-linux1804 'debug- !-shippable !-asan"
+ - "'test-android-em 'debug"
+ - "^build !-shippable !-signing !-asan !-fuzzing !-rusttests !-base-toolchain !-aar-"
+
+geckodriver:
+ selector: fuzzy
+ description: >-
+ Runs the tests relevant to geckodriver, which implements the WebDriver
+ specification. This preset can be filtered down further to limit it to
+ a specific platform or other tasks only. For example:
+ |mach try --preset geckodriver -xq "'linux"|
+ query:
+ - "'rusttests"
+ - "'platform 'wdspec 'debug 'nofis"
+ - "'browsertime 'amazon 'shippable 'firefox 'nofis"
+
+layout:
+ selector: fuzzy
+ description: >-
+ Runs the tests most relevant to layout.
+ This preset can be filtered down further to limit it to
+ a specific platform or build configuration. For example:
+ |mach try --preset layout -xq "linux64 'opt"|
+ query:
+ # Most mochitest + reftest + crashtest + wpt
+ - "!asan !tsan !jsreftest !shippable !webgl !condprof !media !webgpu 'mochitest | 'web-platform | 'crashtest | 'reftest"
+ # Style system unit tests
+ - "'rusttests"
+
+media-full:
+ selector: fuzzy
+ description: >-
+ Runs tests that exercise media playback and WebRTC code.
+ query:
+ - "mochitest-media !dfpi !nofis"
+ - "mochitest-media android !spi !swr !lite"
+ - "mochitest-browser-chrome !dfpi !nofis !a11y"
+ - "mochitest-browser-media"
+ - "web-platform-tests !dfpi !nofis !shippable"
+ - "web-platform-tests android !wdspec !spi !swr !lite"
+ - "crashtest !wdspec !nofis"
+ - "crashtest android !wdspec !spi !swr !lite"
+ - "'gtest"
+
+mochitest-bc:
+ description: >-
+ Runs mochitest-browser-chrome on all Desktop platforms in both opt
+ and debug. Excludes jobs that require non-artifact builds (asan,
+ tsan, msix, etc.) and some non-default configurations. For frontend
+ only changes, use this with --artifact to speed up your trypushes.
+ query:
+ - "'browser-chrome 'windows7 | '64 !tsan !asan !msix !spi !a11y !swr | 'linux"
+ selector: fuzzy
+
+perf:
+ selector: fuzzy
+ description: >-
+ Runs all performance (raptor and talos) tasks across all platforms.
+ This preset can be filtered down further (e.g to limit it to a specific
+ platform) via |mach try --preset perf -xq "'windows"|.
+
+ Android hardware platforms are excluded due to resource limitations.
+ query:
+ - "^test- !android-hw 'raptor | 'talos"
+ rebuild: 5
+
+perf-chrome:
+ description: >-
+ Runs the talos tests most likely to change when making a change to
+ the browser chrome. This skips a number of talos jobs that are unlikely
+ to be affected in order to conserve resources.
+ query:
+ - "opt-talos- 'chrome | 'svg | 'session | 'tabswitch | 'other | 'g5"
+ rebuild: 6
+ selector: fuzzy
+
+remote-protocol:
+ selector: fuzzy
+ description: >-
+ Runs the tests relevant to the Remote protocol, which underpins
+ many test harnesses as well as our CDP and WebDriver implementations.
+ This preset can be filtered down further to limit it to a specific
+ platform or to opt/debug tasks only. For example:
+ |mach try --preset remote-protocol -xq "'linux 'opt"|
+ query:
+ - "'awsy-base"
+ - "'firefox-ui"
+ - "'marionette !swr | harness"
+ - "'mochitest-browser !spi !swr !nofis '-1$"
+ - "'mochitest-remote !spi !swr"
+ - "'platform 'reftest !swr !nofis | 'android !-lite -1$"
+ - "'platform 'wdspec !swr"
+ - "'platform !reftest !wdspec !swr !nofis | 'android !-lite -1$"
+ - "'puppeteer"
+ - "'reftest !platform !gpu !swr !no-accel !nofis | 'android !-lite -1$"
+ - "'xpcshell !spi !tsan !-lite"
+
+sample-suites:
+ selector: fuzzy
+ description: >-
+ Runs one chunk of every test suite plus all suites that aren't chunked.
+ It is useful for testing infrastructure changes that can affect the
+ harnesses themselves but are unlikely to break specific tests.
+ query:
+ - ^test- -1$
+ # Only run a single talos + raptor suite per platform
+ - ^test- !1$ !2$ !3$ !4$ !5$ !6$ !7$ !8$ !9$ !0$ !raptor !talos
+ - ^test- 'raptor-speedometer | 'talos-g1
+
+sm-shell-all:
+ selector: fuzzy
+  description: >-
+ Runs a set of tests aimed to give a reasonable level of confidence for
+ basic SpiderMonkey changes (shell only), all platforms
+ query:
+ - "'spidermonkey | 'shell-haz"
+ - "!shippable !android 'jittest" # macosx64 jittests
+
+sm-shell:
+ selector: fuzzy
+  description: >-
+ Runs a set of tests aimed to give a reasonable level of confidence for
+ basic SpiderMonkey changes (shell only) (linux only)
+ query:
+ - "!win !osx 'spidermonkey | 'shell-haz"
+
+
+sm-all:
+ selector: fuzzy
+  description: >-
+ Runs a set of tests aimed to give a reasonable level of confidence for
+ basic SpiderMonkey changes, including those that would require a
+ browser build.
+ query:
+ - "'spidermonkey | 'hazard"
+ - "!android !asan !shippable 'xpcshell"
+ - "!android !asan !shippable 'jsreftest"
+ - "!shippable !android 'jittest" # macosx64 jittests
+
+webextensions:
+ selector: fuzzy
+  description: >-
+ Runs most of the unit tests of WebExtension code across all desktop
+ platforms and Android, including mochitests, xpcshell and test-verify.
+ GeckoView JUnit tests are NOT run.
+ paths: # must be duplicate of test_paths, see bug 1556445
+ - browser/components/extensions/test/
+ - mobile/android/components/extensions/test/
+ - toolkit/components/extensions/test/
+ - toolkit/mozapps/extensions/test/
+ test_paths: # must be duplicate of paths, see bug 1556445
+ - browser/components/extensions/test/
+ - mobile/android/components/extensions/test/
+ - toolkit/components/extensions/test/
+ - toolkit/mozapps/extensions/test/
+ query:
+ - "'64-qr/ | 'windows11-64-2009-qr !wpt !gpu !msix"
+
+webgpu:
+ selector: fuzzy
+ description: >-
+ Runs the tests relevant to WebGPU.
+ query:
+ - "'webgpu"
+ - "source-test-mozlint-updatebot"
+ - "source-test-vendor-rust"
+
+webrender:
+ selector: fuzzy
+ description: >-
+ Runs the conformance tests relevant to WebRender.
+ query:
+ - "!talos !raptor !shippable !asan '-qr"
+ - "^webrender-"
+
+webrender-reftests:
+ selector: fuzzy
+ description: >-
+ Runs the reftests relevant to WebRender.
+ query:
+ - "!talos !raptor !shippable !asan !nofis 'reftest"
+
+webrender-reftests-linux:
+ selector: fuzzy
+ description: >-
+ Runs the reftests relevant to WebRender on linux only.
+ query:
+ - "!talos !raptor !shippable !asan !nofis 'linux 'reftest"
+
+webrender-perf:
+ selector: fuzzy
+ description: >-
+ Runs the performance tests relevant to WebRender.
+ query:
+ - "'-qr 'svgr"
+ - "'-qr 'g1"
+ - "'-qr 'g4"
+ - "'-qr 'tp5"
+ - "'-qr 'talos-webgl"
+ - "'-qr 'motionmark-animometer"
diff --git a/tools/tryselect/util/__init__.py b/tools/tryselect/util/__init__.py
new file mode 100644
index 0000000000..c580d191c1
--- /dev/null
+++ b/tools/tryselect/util/__init__.py
@@ -0,0 +1,3 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
diff --git a/tools/tryselect/util/dicttools.py b/tools/tryselect/util/dicttools.py
new file mode 100644
index 0000000000..465e4a43de
--- /dev/null
+++ b/tools/tryselect/util/dicttools.py
@@ -0,0 +1,50 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import copy
+
+
def merge_to(source, dest):
    """
    Recursively fold *source* into *dest* (modified in place and returned).

    Scalars in *source* replace those in *dest*, nested dicts are merged
    key by key, and lists from *source* are appended to the matching lists
    already in *dest*.

    :param dict source: to copy from
    :param dict dest: to copy to (modified in place)
    """
    for key, value in source.items():
        existing = dest.get(key)

        # A differing (or absent) type in dest is simply replaced.
        if type(value) != type(existing):  # noqa
            dest[key] = value
        elif isinstance(value, dict):
            merge_to(value, existing)
        elif isinstance(value, list):
            dest[key] = existing + value
        else:
            dest[key] = value

    return dest
+
+
def merge(*objects):
    """
    Merge the given objects, using the semantics described for merge_to, with
    objects later in the list taking precedence. From an inheritance
    perspective, "parents" should be listed before "children".

    Returns the result without modifying any arguments.
    """
    # Deep-copy the base object so no argument is mutated, then fold the
    # remaining overlays onto it in order (later wins).
    result = copy.deepcopy(objects[0])
    for overlay in objects[1:]:
        result = merge_to(overlay, result)
    return result
diff --git a/tools/tryselect/util/estimates.py b/tools/tryselect/util/estimates.py
new file mode 100644
index 0000000000..a15ad72831
--- /dev/null
+++ b/tools/tryselect/util/estimates.py
@@ -0,0 +1,124 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import json
+import os
+from datetime import datetime, timedelta
+
# Filenames (relative to the taskgraph cache dir) for the downloaded task
# duration data, the graph quantile boundaries, and the download-date tag.
TASK_DURATION_CACHE = "task_duration_history.json"
GRAPH_QUANTILE_CACHE = "graph_quantile_cache.csv"
TASK_DURATION_TAG_FILE = "task_duration_tag.json"
+
+
def find_all_dependencies(graph, tasklist):
    """Return the sorted list of tasks that the requested tasks pull in.

    *graph* maps a task label to the labels it depends on. Tasks missing
    from the graph (e.g. optimized out) contribute nothing. The explicitly
    requested tasks themselves are excluded from the result, since they
    are not *inherited* dependencies.
    """
    memo = {}

    def walk(task):
        # Memoized DFS over the dependency graph.
        if task in memo:
            return memo[task]
        if task not in graph:
            # Don't add tasks (and so durations) for things optimized out.
            return set()
        deps = {task}
        for child in graph.get(task, []):
            memo[child] = walk(child)
            deps |= memo[child]
        return deps

    reachable = set()
    for task in tasklist:
        reachable |= walk(task)

    return sorted(reachable - set(tasklist))
+
+
def find_longest_path(graph, tasklist, duration_data):
    """Estimate the critical-path duration (seconds) of the requested tasks.

    For each task the longest chain of dependency durations ending at (and
    including) it is computed; the maximum over *tasklist* is returned, or
    0 when the list is empty. Tasks without duration data count as 0.
    """
    memo = {}

    def chain_duration(task):
        # Longest dependency chain ending at `task`, memoized.
        if task not in memo:
            child_costs = [chain_duration(dep) for dep in graph.get(task, [])]
            memo[task] = max(child_costs, default=0.0) + duration_data.get(task, 0.0)
        return memo[task]

    return max((chain_duration(task) for task in tasklist), default=0)
+
+
def determine_percentile(quantiles_file, duration):
    """Estimate which percentile *duration* (a timedelta) falls into.

    *quantiles_file* is a CSV with a single header row followed by one
    quantile boundary (in seconds) per line; the return value is the
    percentage of boundaries at or below the bucket the duration lands in.
    """
    seconds = duration.total_seconds()

    with open(quantiles_file) as fh:
        fh.readline()  # column header, not data
        boundaries = sorted(float(line.strip()) for line in fh)

    for index, boundary in enumerate(boundaries):
        if seconds < boundary:
            break
    # Estimate percentile from the len(boundaries)-quantile bucket index.
    return int(100 * index / len(boundaries))
+
+
def task_duration_data(cache_dir):
    """Load the cached {task name: mean duration seconds} mapping."""
    cache_file = os.path.join(cache_dir, TASK_DURATION_CACHE)
    with open(cache_file) as fh:
        return json.load(fh)
+
+
def duration_summary(graph_cache_file, tasklist, cache_dir):
    """Summarize expected runtimes for the selected tasks.

    Returns a dict with the summed durations and counts of the selected
    tasks and their (inherited) dependencies, an optional percentile
    estimate, the critical-path ("wall") duration plus an ETA, and the
    per-task durations.
    """
    durations = task_duration_data(cache_dir)

    graph = {}
    if graph_cache_file:
        with open(graph_cache_file) as fh:
            graph = json.load(fh)

    dependencies = find_all_dependencies(graph, tasklist)
    longest_path = find_longest_path(graph, tasklist, durations)

    # Durations are truncated to whole seconds per task.
    dependency_duration = sum(int(durations.get(task, 0.0)) for task in dependencies)
    selected_seconds = sum(int(durations.get(task, 0.0)) for task in tasklist)

    total_requested_duration = timedelta(seconds=selected_seconds)
    total_dependency_duration = timedelta(seconds=dependency_duration)

    output = {
        "selected_duration": total_requested_duration,
        "dependency_duration": total_dependency_duration,
        "dependency_count": len(dependencies),
        "selected_count": len(tasklist),
    }

    graph_quantile_cache = os.path.join(cache_dir, GRAPH_QUANTILE_CACHE)
    if os.path.isfile(graph_quantile_cache):
        percentile = determine_percentile(
            graph_quantile_cache, total_dependency_duration + total_requested_duration
        )
        # A 0th-percentile estimate is treated the same as "no estimate".
        if percentile:
            output["percentile"] = percentile

    output["wall_duration_seconds"] = timedelta(seconds=int(longest_path))
    output["eta_datetime"] = datetime.now() + timedelta(seconds=longest_path)

    output["task_durations"] = {
        task: int(durations.get(task, 0.0)) for task in tasklist
    }

    return output
diff --git a/tools/tryselect/util/fzf.py b/tools/tryselect/util/fzf.py
new file mode 100644
index 0000000000..63318fce18
--- /dev/null
+++ b/tools/tryselect/util/fzf.py
@@ -0,0 +1,424 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import os
+import platform
+import shutil
+import subprocess
+import sys
+
+import mozfile
+import six
+from gecko_taskgraph.target_tasks import filter_by_uncommon_try_tasks
+from mach.util import get_state_dir
+from mozboot.util import http_download_and_save
+from mozbuild.base import MozbuildObject
+from mozterm import Terminal
+from packaging.version import Version
+
+from ..push import check_working_directory
+from ..tasks import generate_tasks
+from ..util.manage_estimates import (
+ download_task_history_data,
+ make_trimmed_taskgraph_cache,
+)
+
terminal = Terminal()

here = os.path.abspath(os.path.dirname(__file__))
build = MozbuildObject.from_environment(cwd=here)

# Script rendered in the fzf preview pane for the highlighted tasks.
PREVIEW_SCRIPT = os.path.join(build.topsrcdir, "tools/tryselect/selectors/preview.py")

# Minimum usable fzf (0.20.0 added passing selections via a temp file) and
# the version we download when installing or updating.
FZF_MIN_VERSION = "0.20.0"
FZF_CURRENT_VERSION = "0.29.0"

# It would make more sense to have the full filename be the key; but that makes
# the line too long and ./mach lint and black can't agree about what to do about that.
# You can get these from the github release, e.g.
# https://github.com/junegunn/fzf/releases/download/0.24.1/fzf_0.24.1_checksums.txt
# However the darwin releases may not be included, so double check you have everything
FZF_CHECKSUMS = {
    "linux_armv5.tar.gz": "61d3c2aa77b977ba694836fd1134da9272bd97ee490ececaf87959b985820111",
    "linux_armv6.tar.gz": "db6b30fcbbd99ac4cf7e3ff6c5db1d3c0afcbe37d10ec3961bdc43e8c4f2e4f9",
    "linux_armv7.tar.gz": "ed86f0e91e41d2cea7960a78e3eb175dc2a5fc1510380c195d0c3559bfdc701c",
    "linux_arm64.tar.gz": "47988d8b68905541cbc26587db3ed1cfa8bc3aa8da535120abb4229b988f259e",
    "linux_amd64.tar.gz": "0106f458b933be65edb0e8f0edb9a16291a79167836fd26a77ff5496269b5e9a",
    "windows_armv5.zip": "08eaac45b3600d82608d292c23e7312696e7e11b6278b292feba25e8eb91c712",
    "windows_armv6.zip": "8b6618726a9d591a45120fddebc29f4164e01ce6639ed9aa8fc79ab03eefcfed",
    "windows_armv7.zip": "c167117b4c08f4f098446291115871ce5f14a8a8b22f0ca70e1b4342452ab5d7",
    "windows_arm64.zip": "0cda7bf68850a3e867224a05949612405e63a4421d52396c1a6c9427d4304d72",
    "windows_amd64.zip": "f0797ceee089017108c80b09086c71b8eec43d4af11ce939b78b1d5cfd202540",
    "darwin_arm64.zip": "2571b4d381f1fc691e7603bbc8113a67116da2404751ebb844818d512dd62b4b",
    "darwin_amd64.zip": "bc541e8ae0feb94efa96424bfe0b944f746db04e22f5cccfe00709925839a57f",
    "openbsd_amd64.tar.gz": "b62343827ff83949c09d5e2c8ca0c1198d05f733c9a779ec37edd840541ccdab",
    "freebsd_amd64.tar.gz": "f0367f2321c070d103589c7c7eb6a771bc7520820337a6c2fbb75be37ff783a9",
}

# Base instructions appended to every fzf error message below.
FZF_INSTALL_MANUALLY = """
The `mach try fuzzy` command depends on fzf. Please install it following the
appropriate instructions for your platform:

    https://github.com/junegunn/fzf#installation

Only the binary is required, if you do not wish to install the shell and
editor integrations, download the appropriate binary and put it on your $PATH:

    https://github.com/junegunn/fzf/releases
""".lstrip()

FZF_COULD_NOT_DETERMINE_PLATFORM = (
    """
Could not automatically obtain the `fzf` binary because we could not determine
your Operating System.

""".lstrip()
    + FZF_INSTALL_MANUALLY
)

FZF_COULD_NOT_DETERMINE_MACHINE = (
    """
Could not automatically obtain the `fzf` binary because we could not determine
your machine type. It's reported as '%s' and we don't handle that case; but fzf
may still be available as a prebuilt binary.

""".lstrip()
    + FZF_INSTALL_MANUALLY
)
+
# Message for 32-bit x86 hosts, for which upstream publishes no binary.
FZF_NOT_SUPPORTED_X86 = (
    """
We don't believe that a prebuilt binary for `fzf` is available on %s, but we
could be wrong.

""".lstrip()
    + FZF_INSTALL_MANUALLY
)
+
FZF_NOT_FOUND = (
    """
Could not find the `fzf` binary.

""".lstrip()
    + FZF_INSTALL_MANUALLY
)

FZF_VERSION_FAILED = (
    """
Could not obtain the 'fzf' version; we require version > 0.20.0 for some of
the features.

""".lstrip()
    + FZF_INSTALL_MANUALLY
)

FZF_INSTALL_FAILED = (
    """
Failed to install fzf.

""".lstrip()
    + FZF_INSTALL_MANUALLY
)

# Header shown above the fzf prompt; `shortcuts` is filled by format_header().
FZF_HEADER = """
For more shortcuts, see {t.italic_white}mach help try fuzzy{t.normal} and {t.italic_white}man fzf
{shortcuts}
""".strip()

# Key bindings passed to fzf via --bind (key: fzf action).
fzf_shortcuts = {
    "ctrl-a": "select-all",
    "ctrl-d": "deselect-all",
    "ctrl-t": "toggle-all",
    "alt-bspace": "beginning-of-line+kill-line",
    "?": "toggle-preview",
}

# (action label, key) pairs rendered in the header for discoverability.
fzf_header_shortcuts = [
    ("select", "tab"),
    ("accept", "enter"),
    ("cancel", "ctrl-c"),
    ("select-all", "ctrl-a"),
    ("cursor-up", "up"),
    ("cursor-down", "down"),
]
+
+
def get_fzf_platform():
    """Return the fzf release-asset suffix for the current host.

    Exits (after printing instructions) on hosts for which upstream does
    not publish a prebuilt binary.
    """
    # 32-bit x86 builds of fzf are not published upstream.
    if platform.machine() in ("i386", "i686"):
        print(FZF_NOT_SUPPORTED_X86 % platform.machine())
        sys.exit(1)

    system = platform.system().lower()
    machine = platform.machine().lower()

    # OS name -> (asset prefix, archive extension).
    assets = {
        "windows": ("windows", ".zip"),
        "darwin": ("darwin", ".zip"),
        "linux": ("linux", ".tar.gz"),
    }
    if system not in assets:
        print(FZF_COULD_NOT_DETERMINE_PLATFORM)
        sys.exit(1)

    prefix, extension = assets[system]
    if machine in ("x86_64", "amd64"):
        return "%s_amd64%s" % (prefix, extension)
    if machine == "arm64":
        return "%s_arm64%s" % (prefix, extension)

    print(FZF_COULD_NOT_DETERMINE_MACHINE % platform.machine())
    sys.exit(1)
+
+
def get_fzf_state_dir():
    """Return the directory under the mach state dir where fzf is installed."""
    state_dir = get_state_dir()
    return os.path.join(state_dir, "fzf")
+
+
def get_fzf_filename():
    """Return the release archive filename for the pinned fzf version."""
    return "fzf-{}-{}".format(FZF_CURRENT_VERSION, get_fzf_platform())
+
+
def get_fzf_download_link():
    """Return the GitHub release URL for the pinned fzf archive."""
    return "https://github.com/junegunn/fzf/releases/download/{}/{}".format(
        FZF_CURRENT_VERSION,
        get_fzf_filename(),
    )
+
+
def clean_up_state_dir():
    """
    We used to have a checkout of fzf that we would update.
    Now we only download the bin and pin the hash; so if
    we find the old git checkout, wipe it.
    """

    fzf_path = os.path.join(get_state_dir(), "fzf")
    git_path = os.path.join(fzf_path, ".git")
    # A leftover `.git` directory identifies the legacy clone-based install.
    if os.path.isdir(git_path):
        shutil.rmtree(fzf_path, ignore_errors=True)

    # Also delete any existing fzf binary
    fzf_bin = shutil.which("fzf", path=fzf_path)
    if fzf_bin:
        mozfile.remove(fzf_bin)

    # Make sure the state dir is present
    if not os.path.isdir(fzf_path):
        os.makedirs(fzf_path)
+
+
def download_and_install_fzf():
    """Fetch the pinned fzf release, verify its checksum, and unpack it."""
    clean_up_state_dir()

    dest_dir = get_fzf_state_dir()
    archive = os.path.join(dest_dir, get_fzf_filename())

    # http_download_and_save verifies the download against our pinned hash.
    http_download_and_save(
        get_fzf_download_link(), archive, FZF_CHECKSUMS[get_fzf_platform()]
    )

    mozfile.extract(archive, dest_dir)
    mozfile.remove(archive)
+
+
def get_fzf_version(fzf_bin):
    """Return the version string reported by ``fzf_bin --version``.

    Exits with a helpful message if the binary fails to run.
    """
    try:
        raw = subprocess.check_output([fzf_bin, "--version"])
    except subprocess.CalledProcessError:
        print(FZF_VERSION_FAILED)
        sys.exit(1)

    # Some fzf builds append extra info, e.g. "0.18.0 (ff95134)"; keep only
    # the leading version token.
    return six.ensure_text(raw.split()[0])
+
+
def should_force_fzf_update(fzf_bin):
    """Return True when the installed fzf is older than FZF_MIN_VERSION.

    0.20.0 introduced passing selections through a temporary file, which
    is needed for large ctrl-a actions.
    """
    if Version(get_fzf_version(fzf_bin)) < Version(FZF_MIN_VERSION):
        print("fzf version is old, you must update to use ./mach try fuzzy.")
        return True
    return False
+
+
def fzf_bootstrap(update=False):
    """
    Bootstrap fzf if necessary and return the path to the executable.

    We fetch a new version of fzf if:
    1) an existing fzf is too outdated
    2) the user says --update and we are behind the recommended version
    3) no fzf can be found and
       3a) the user passes --update
       3b) the user agrees to a prompt
    """
    fzf_path = get_fzf_state_dir()

    # Prefer an fzf already on $PATH, falling back to our managed install.
    fzf_bin = shutil.which("fzf") or shutil.which("fzf", path=fzf_path)

    if fzf_bin and should_force_fzf_update(fzf_bin):  # Case (1)
        update = True

    if fzf_bin and not update:
        return fzf_bin

    if fzf_bin:
        # Case (2): a binary exists and an update was requested (or forced).
        # The redundant re-checks of `fzf_bin`/`update` from the original
        # version are dropped; both are known true here.
        if Version(get_fzf_version(fzf_bin)) < Version(FZF_CURRENT_VERSION):
            # Bug 1623197: only manage an fzf that lives in our state dir; a
            # system-wide install must be updated by the user's own package
            # manager.
            if not fzf_bin.startswith(fzf_path):
                print(
                    "fzf installed somewhere other than {}, please update manually".format(
                        fzf_path
                    )
                )
                sys.exit(1)

            download_and_install_fzf()
            print("Updated fzf to {}".format(FZF_CURRENT_VERSION))
        else:
            print("fzf is the recommended version and does not need an update")
    else:
        # Case (3): no fzf found at all.
        if not update:
            # Case 3b
            install = input("Could not detect fzf, install it now? [y/n]: ")
            if install.lower() != "y":
                return

        # Case 3a and 3b fall-through
        download_and_install_fzf()
        fzf_bin = shutil.which("fzf", path=fzf_path)
        print("Installed fzf to {}".format(fzf_path))

    return fzf_bin
+
+
def format_header():
    """Render the fzf header line listing the common keyboard shortcuts."""
    rendered = ", ".join(
        "{t.white}{action}{t.normal}: {t.yellow}<{key}>{t.normal}".format(
            t=terminal, action=action, key=key
        )
        for action, key in fzf_header_shortcuts
    )
    return FZF_HEADER.format(shortcuts=rendered, t=terminal)
+
+
def run_fzf(cmd, tasks):
    """Feed *tasks* to the fzf command *cmd* and return (query, selected).

    `query` is the final filter string (fzf runs with --print-query) and
    `selected` the chosen lines; (None, []) when fzf produced no output.
    """
    env = dict(os.environ)
    # Expose just enough of our sys.path for the preview script to import
    # its dependencies (requests and friends).
    env["PYTHONPATH"] = os.pathsep.join(p for p in sys.path if "requests" in p)

    # Make sure fzf uses Windows' shell rather than MozillaBuild bash or
    # whatever our caller uses, since it doesn't quote the arguments properly
    # and thus windows paths like: C:\moz\foo end up as C:mozfoo...
    if platform.system() == "Windows":
        env["SHELL"] = env["COMSPEC"]

    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stdin=subprocess.PIPE,
        env=env,
        universal_newlines=True,
    )
    out = proc.communicate("\n".join(tasks))[0].splitlines()

    if not out:
        return None, []
    return out[0], out[1:]
+
+
def setup_tasks_for_fzf(
    push,
    parameters,
    full=False,
    disable_target_task_filter=False,
    show_estimates=True,
):
    """Generate the candidate task list (and caches) for an fzf session.

    Returns (sorted task labels, dependency-cache path, cache dir).
    """
    check_working_directory(push)
    tg = generate_tasks(
        parameters, full=full, disable_target_task_filter=disable_target_task_filter
    )
    all_tasks = sorted(tg.tasks.keys())

    # generate_tasks writes its graph caches under the srcdir-specific state
    # dir; rebuild those paths here.
    cache_dir = os.path.join(
        get_state_dir(specific_to_topsrcdir=True), "cache", "taskgraph"
    )
    prefix = "full" if full else "target"
    graph_cache = os.path.join(cache_dir, "{}_task_graph".format(prefix))
    dep_cache = os.path.join(cache_dir, "{}_task_dependencies".format(prefix))
    target_set = os.path.join(cache_dir, "{}_task_set".format(prefix))

    if show_estimates:
        download_task_history_data(cache_dir=cache_dir)
        make_trimmed_taskgraph_cache(graph_cache, dep_cache, target_file=target_set)

    if not full and not disable_target_task_filter:
        # Put all_tasks into a list because it's used multiple times, and
        # "filter()" returns a consumable iterator.
        all_tasks = list(filter(filter_by_uncommon_try_tasks, all_tasks))

    return all_tasks, dep_cache, cache_dir
+
+
def build_base_cmd(
    fzf, dep_cache, cache_dir, show_estimates=True, preview_script=PREVIEW_SCRIPT
):
    """Assemble the common fzf command line used by the try selectors."""
    bindings = ",".join(
        "{}:{}".format(key, action) for key, action in fzf_shortcuts.items()
    )
    base_cmd = [
        fzf,
        "-m",
        "--bind",
        bindings,
        "--header",
        format_header(),
        "--preview-window=right:30%",
        "--print-query",
    ]

    # The preview pane runs preview.py over the current selection; duration
    # estimates additionally need the dependency cache and cache dir.
    if show_estimates:
        preview = '{} {} -g {} -s -c {} -t "{{+f}}"'.format(
            sys.executable, preview_script, dep_cache, cache_dir
        )
    else:
        preview = '{} {} -t "{{+f}}"'.format(sys.executable, preview_script)
    base_cmd.extend(["--preview", preview])

    return base_cmd
diff --git a/tools/tryselect/util/manage_estimates.py b/tools/tryselect/util/manage_estimates.py
new file mode 100644
index 0000000000..23fa481228
--- /dev/null
+++ b/tools/tryselect/util/manage_estimates.py
@@ -0,0 +1,132 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import json
+import os
+from datetime import datetime, timedelta
+
+import requests
+import six
+
# Periodically refreshed exports of task-duration data (from BigQuery)
# hosted on Google Cloud Storage.
TASK_DURATION_URL = (
    "https://storage.googleapis.com/mozilla-mach-data/task_duration_history.json"
)
GRAPH_QUANTILES_URL = (
    "https://storage.googleapis.com/mozilla-mach-data/machtry_quantiles.csv"
)
# NOTE(review): PEP 8 would place this import with the others at the top of
# the file rather than below the constants.
from .estimates import GRAPH_QUANTILE_CACHE, TASK_DURATION_CACHE, TASK_DURATION_TAG_FILE
+
+
def check_downloaded_history(tag_file, duration_cache, quantile_cache):
    """Return True when the cached task-history files exist and are fresh.

    Freshness comes from the download date recorded in *tag_file* (written
    by download_task_history_data). Anything missing, older than a week,
    unparseable, or in the obsolete list-based cache format triggers a
    re-download (i.e. returns False).
    """
    if not os.path.isfile(tag_file):
        return False

    try:
        with open(tag_file) as f:
            duration_tags = json.load(f)
        # Bugfix: this must be %m (month), not %M (minutes), to mirror the
        # "%Y-%m-%d" format used when the tag file is written; with %M the
        # month silently defaulted to January.
        download_date = datetime.strptime(
            duration_tags.get("download_date"), "%Y-%m-%d"
        )
        if download_date < datetime.now() - timedelta(days=7):
            return False
    except (OSError, TypeError, ValueError):
        # TypeError covers a tag file without a "download_date" key
        # (strptime(None, ...) raises TypeError, not ValueError).
        return False

    if not os.path.isfile(duration_cache):
        return False
    # Check for the old (list-based) format version of the file.
    with open(duration_cache) as f:
        data = json.load(f)
        if isinstance(data, list):
            return False

    if not os.path.isfile(quantile_cache):
        return False

    return True
+
+
def download_task_history_data(cache_dir):
    """Fetch task duration data exported from BigQuery.

    Refreshes the duration cache, quantile cache, and download-date tag
    under *cache_dir* unless the existing copies are still fresh. Network
    failures are non-fatal: the preview window simply won't show the
    corresponding estimates.
    """
    task_duration_cache = os.path.join(cache_dir, TASK_DURATION_CACHE)
    task_duration_tag_file = os.path.join(cache_dir, TASK_DURATION_TAG_FILE)
    graph_quantile_cache = os.path.join(cache_dir, GRAPH_QUANTILE_CACHE)

    if check_downloaded_history(
        task_duration_tag_file, task_duration_cache, graph_quantile_cache
    ):
        return

    # Remove each stale file individually: the previous single try block
    # stopped cleaning up as soon as one file turned out not to exist.
    removed = False
    for stale in (task_duration_tag_file, task_duration_cache, graph_quantile_cache):
        try:
            os.unlink(stale)
            removed = True
        except OSError:
            pass
    if not removed:
        print("No existing task history to clean up.")

    try:
        r = requests.get(TASK_DURATION_URL, stream=True)
        r.raise_for_status()
    except requests.exceptions.RequestException as exc:
        # This is fine, the durations just won't be in the preview window.
        print(
            "Error fetching task duration cache from {}: {}".format(
                TASK_DURATION_URL, exc
            )
        )
        return

    # The data retrieved from google storage is a newline-separated
    # list of json entries, which Python's json module can't parse directly.
    # Reformat to {name: mean seconds} as a list of dicts is slow to scan
    # in the preview window.
    duration_data = {}
    for line in r.text.splitlines():
        entry = json.loads(line)
        duration_data[entry["name"]] = entry["mean_duration_seconds"]

    with open(task_duration_cache, "w") as f:
        json.dump(duration_data, f, indent=4)

    try:
        r = requests.get(GRAPH_QUANTILES_URL, stream=True)
        r.raise_for_status()
    except requests.exceptions.RequestException as exc:
        # This is fine, the percentile just won't be in the preview window.
        print(
            "Error fetching task group percentiles from {}: {}".format(
                GRAPH_QUANTILES_URL, exc
            )
        )
        return

    with open(graph_quantile_cache, "w") as f:
        f.write(six.ensure_text(r.content))

    # Record the fetch date so check_downloaded_history can expire it.
    with open(task_duration_tag_file, "w") as f:
        json.dump({"download_date": datetime.now().strftime("%Y-%m-%d")}, f, indent=4)
+
+
def make_trimmed_taskgraph_cache(graph_cache, dep_cache, target_file=None):
    """Trim the taskgraph cache used for dependencies.

    Rewrites *graph_cache* (a full task-graph JSON) into *dep_cache* as a
    minimal {task: [dependency labels]} mapping restricted to the tasks
    named in *target_file*, keeping the fzf preview window fast. No-op if
    *graph_cache* does not exist.
    """
    if not os.path.isfile(graph_cache):
        return

    targets = set()
    if target_file and os.path.isfile(target_file):
        with open(target_file) as fh:
            targets = set(json.load(fh))

    with open(graph_cache) as fh:
        full_graph = json.load(fh)

    trimmed = {}
    for name, defn in full_graph.items():
        if name in targets:
            trimmed[name] = list(defn["dependencies"].values())

    with open(dep_cache, "w") as fh:
        json.dump(trimmed, fh, indent=4)
diff --git a/tools/tryselect/util/ssh.py b/tools/tryselect/util/ssh.py
new file mode 100644
index 0000000000..7682306bc7
--- /dev/null
+++ b/tools/tryselect/util/ssh.py
@@ -0,0 +1,24 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+import subprocess
+
+
def get_ssh_user(host="hg.mozilla.org"):
    """Return the ssh username configured for *host*.

    Shells out to ``ssh -G`` so the effective client configuration
    (including Host/Match blocks) is taken into account.

    :raises subprocess.CalledProcessError: when ``ssh -G`` fails.
    :raises Exception: when no ``user`` entry can be found.
    """
    ssh_config = subprocess.run(
        ["ssh", "-G", host],
        text=True,
        check=True,
        capture_output=True,
    ).stdout

    for line in ssh_config.splitlines():
        line = line.strip()
        if not line:
            continue
        # Some options may be printed without a value; partition() tolerates
        # that where the previous `split(" ", 1)` unpack raised ValueError.
        key, _, value = line.partition(" ")
        if key.lower() == "user":
            return value

    raise Exception(f"Could not detect ssh user for '{host}'!")
diff --git a/tools/tryselect/watchman.json b/tools/tryselect/watchman.json
new file mode 100644
index 0000000000..a41b1829d7
--- /dev/null
+++ b/tools/tryselect/watchman.json
@@ -0,0 +1,15 @@
+[
+ "trigger",
+ ".",
+ {
+ "name": "rebuild-taskgraph-cache",
+ "expression": ["match", "taskcluster/**", "wholename"],
+ "command": [
+ "./mach",
+ "python",
+ "-c",
+ "from tryselect.tasks import generate_tasks; generate_tasks()"
+ ],
+ "append_files": false
+ }
+]