author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit    26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree      f435a8308119effd964b339f76abb83a57c29483 /taskcluster/gecko_taskgraph/util
parent    Initial commit. (diff)
Adding upstream version 124.0.1. (upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'taskcluster/gecko_taskgraph/util')
-rw-r--r--  taskcluster/gecko_taskgraph/util/__init__.py                 0
-rw-r--r--  taskcluster/gecko_taskgraph/util/attributes.py             147
-rw-r--r--  taskcluster/gecko_taskgraph/util/backstop.py                84
-rw-r--r--  taskcluster/gecko_taskgraph/util/bugbug.py                 125
-rw-r--r--  taskcluster/gecko_taskgraph/util/cached_tasks.py            82
-rw-r--r--  taskcluster/gecko_taskgraph/util/chunking.py               351
-rw-r--r--  taskcluster/gecko_taskgraph/util/copy_task.py               40
-rw-r--r--  taskcluster/gecko_taskgraph/util/declarative_artifacts.py   92
-rw-r--r--  taskcluster/gecko_taskgraph/util/dependencies.py           156
-rw-r--r--  taskcluster/gecko_taskgraph/util/docker.py                 333
-rw-r--r--  taskcluster/gecko_taskgraph/util/hash.py                    68
-rw-r--r--  taskcluster/gecko_taskgraph/util/hg.py                     139
-rw-r--r--  taskcluster/gecko_taskgraph/util/partials.py               297
-rw-r--r--  taskcluster/gecko_taskgraph/util/partners.py               555
-rw-r--r--  taskcluster/gecko_taskgraph/util/perfile.py                104
-rw-r--r--  taskcluster/gecko_taskgraph/util/platforms.py               58
-rw-r--r--  taskcluster/gecko_taskgraph/util/scriptworker.py           865
-rw-r--r--  taskcluster/gecko_taskgraph/util/signed_artifacts.py       198
-rw-r--r--  taskcluster/gecko_taskgraph/util/taskcluster.py            128
-rw-r--r--  taskcluster/gecko_taskgraph/util/taskgraph.py               49
-rw-r--r--  taskcluster/gecko_taskgraph/util/templates.py               59
-rw-r--r--  taskcluster/gecko_taskgraph/util/verify.py                 454
-rw-r--r--  taskcluster/gecko_taskgraph/util/workertypes.py            103
23 files changed, 4487 insertions, 0 deletions
diff --git a/taskcluster/gecko_taskgraph/util/__init__.py b/taskcluster/gecko_taskgraph/util/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/__init__.py
diff --git a/taskcluster/gecko_taskgraph/util/attributes.py b/taskcluster/gecko_taskgraph/util/attributes.py
new file mode 100644
index 0000000000..2d01e9c5e0
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/attributes.py
@@ -0,0 +1,147 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import re
+
+INTEGRATION_PROJECTS = {
+ "autoland",
+}
+
+TRUNK_PROJECTS = INTEGRATION_PROJECTS | {"mozilla-central", "comm-central"}
+
+RELEASE_PROJECTS = {
+ "mozilla-central",
+ "mozilla-beta",
+ "mozilla-release",
+ "mozilla-esr115",
+ "comm-central",
+ "comm-beta",
+ "comm-release",
+ "comm-esr115",
+ # bug 1845368: pine is a permanent project branch used for testing
+ # nightly updates
+ "pine",
+ # bug 1877483: larch has similar needs for nightlies
+ "larch",
+}
+
+RELEASE_PROMOTION_PROJECTS = {
+ "jamun",
+ "maple",
+ "try",
+ "try-comm-central",
+} | RELEASE_PROJECTS
+
+TEMPORARY_PROJECTS = set(
+ {
+ # When using a "Disposable Project Branch" you can specify your branch here. e.g.:
+ "oak",
+ }
+)
+
+TRY_PROJECTS = {
+ "try",
+ "try-comm-central",
+}
+
+ALL_PROJECTS = RELEASE_PROMOTION_PROJECTS | TRUNK_PROJECTS | TEMPORARY_PROJECTS
+
+RUN_ON_PROJECT_ALIASES = {
+ # key is alias, value is lambda to test it against
+ "all": lambda project: True,
+ "integration": lambda project: (
+ project in INTEGRATION_PROJECTS or project == "toolchains"
+ ),
+ "release": lambda project: (project in RELEASE_PROJECTS or project == "toolchains"),
+ "trunk": lambda project: (project in TRUNK_PROJECTS or project == "toolchains"),
+ "trunk-only": lambda project: project in TRUNK_PROJECTS,
+ "autoland": lambda project: project in ("autoland", "toolchains"),
+ "autoland-only": lambda project: project == "autoland",
+ "mozilla-central": lambda project: project in ("mozilla-central", "toolchains"),
+ "mozilla-central-only": lambda project: project == "mozilla-central",
+}
+
+_COPYABLE_ATTRIBUTES = (
+ "accepted-mar-channel-ids",
+ "artifact_map",
+ "artifact_prefix",
+ "build_platform",
+ "build_type",
+ "l10n_chunk",
+ "locale",
+ "mar-channel-id",
+ "maven_packages",
+ "nightly",
+ "required_signoffs",
+ "shippable",
+ "shipping_phase",
+ "shipping_product",
+ "signed",
+ "stub-installer",
+ "update-channel",
+)
+
+
+def match_run_on_projects(project, run_on_projects):
+ """Determine whether the given project is included in the `run-on-projects`
+ parameter, applying expansions for things like "integration" mentioned in
+ the attribute documentation."""
+ aliases = RUN_ON_PROJECT_ALIASES.keys()
+ run_aliases = set(aliases) & set(run_on_projects)
+ if run_aliases:
+ if any(RUN_ON_PROJECT_ALIASES[alias](project) for alias in run_aliases):
+ return True
+
+ return project in run_on_projects
+
+
+def match_run_on_hg_branches(hg_branch, run_on_hg_branches):
+ """Determine whether the given project is included in the `run-on-hg-branches`
+ parameter. Allows 'all'."""
+ if "all" in run_on_hg_branches:
+ return True
+
+ for expected_hg_branch_pattern in run_on_hg_branches:
+ if re.match(expected_hg_branch_pattern, hg_branch):
+ return True
+
+ return False
+
+
+def copy_attributes_from_dependent_job(dep_job, denylist=()):
+ return {
+ attr: dep_job.attributes[attr]
+ for attr in _COPYABLE_ATTRIBUTES
+ if attr in dep_job.attributes and attr not in denylist
+ }
+
+
+def sorted_unique_list(*args):
+ """Join one or more lists, and return a sorted list of unique members"""
+ combined = set().union(*args)
+ return sorted(combined)
+
+
+def release_level(project):
+ """
+    Return whether the given project ships production or staging releases.
+
+ :return str: One of "production" or "staging".
+ """
+ return "production" if project in RELEASE_PROJECTS else "staging"
+
+
+def is_try(params):
+ """
+ Determine whether this graph is being built on a try project or for
+ `mach try fuzzy`.
+ """
+ return "try" in params["project"] or params["try_mode"] == "try_select"
+
+
+def task_name(task):
+ if task.label.startswith(task.kind + "-"):
+ return task.label[len(task.kind) + 1 :]
+ raise AttributeError(f"Task {task.label} does not have a name.")
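
Usage sketch (project names taken from the constants above): the run-on-projects alias expansion in match_run_on_projects is easiest to see with a concrete call.

    from gecko_taskgraph.util.attributes import match_run_on_projects

    # "trunk" expands to mozilla-central, comm-central and the integration
    # projects (plus the special "toolchains" pseudo-project).
    match_run_on_projects("autoland", ["trunk"])        # True
    match_run_on_projects("mozilla-beta", ["trunk"])    # False
    # Explicit project names still work alongside aliases.
    match_run_on_projects("mozilla-beta", ["release"])  # True
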
diff --git a/taskcluster/gecko_taskgraph/util/backstop.py b/taskcluster/gecko_taskgraph/util/backstop.py
new file mode 100644
index 0000000000..26c9a4fb91
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/backstop.py
@@ -0,0 +1,84 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from requests import HTTPError
+from taskgraph.util.taskcluster import find_task_id, get_artifact
+
+from gecko_taskgraph.util.attributes import INTEGRATION_PROJECTS, TRY_PROJECTS
+from gecko_taskgraph.util.taskcluster import state_task
+
+BACKSTOP_PUSH_INTERVAL = 20
+BACKSTOP_TIME_INTERVAL = 60 * 4 # minutes
+BACKSTOP_INDEX = "{trust-domain}.v2.{project}.latest.taskgraph.backstop"
+
+
+def is_backstop(
+ params,
+ push_interval=BACKSTOP_PUSH_INTERVAL,
+ time_interval=BACKSTOP_TIME_INTERVAL,
+ trust_domain="gecko",
+ integration_projects=INTEGRATION_PROJECTS,
+):
+ """Determines whether the given parameters represent a backstop push.
+
+ Args:
+        push_interval (int): Number of pushes between forced backstops.
+        time_interval (int): Minutes between forced backstops.
+            Use 0 to disable.
+        trust_domain (str): "gecko" for Firefox, "comm" for Thunderbird.
+        integration_projects (set): Projects that use the backstop optimization.
+ Returns:
+ bool: True if this is a backstop, otherwise False.
+ """
+ # In case this is being faked on try.
+ if params.get("backstop", False):
+ return True
+
+ project = params["project"]
+ pushid = int(params["pushlog_id"])
+ pushdate = int(params["pushdate"])
+
+ if project in TRY_PROJECTS:
+ return False
+ if project not in integration_projects:
+ return True
+
+    # On every Nth push, we want to run all tasks.
+ if pushid % push_interval == 0:
+ return True
+
+ if time_interval <= 0:
+ return False
+
+ # We also want to ensure we run all tasks at least once per N minutes.
+ subs = {"trust-domain": trust_domain, "project": project}
+ index = BACKSTOP_INDEX.format(**subs)
+
+ try:
+ last_backstop_id = find_task_id(index)
+ except KeyError:
+ # Index wasn't found, implying there hasn't been a backstop push yet.
+ return True
+
+ if state_task(last_backstop_id) in ("failed", "exception"):
+ # If the last backstop failed its decision task, make this a backstop.
+ return True
+
+ try:
+ last_pushdate = get_artifact(last_backstop_id, "public/parameters.yml")[
+ "pushdate"
+ ]
+ except HTTPError as e:
+ # If the last backstop decision task exists in the index, but
+ # parameters.yml isn't available yet, it means the decision task is
+ # still running. If that's the case, we can be pretty sure the time
+ # component will not cause a backstop, so just return False.
+ if e.response.status_code == 404:
+ return False
+ raise
+
+ if (pushdate - last_pushdate) / 60 >= time_interval:
+ return True
+ return False
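
A minimal sketch (hypothetical parameter values) of the early-exit logic in is_backstop; this path never consults the Taskcluster index.

    from gecko_taskgraph.util.backstop import is_backstop

    params = {
        "backstop": False,
        "project": "mozilla-central",  # not an integration project
        "pushlog_id": "12345",
        "pushdate": "1713485275",
    }
    # Non-integration, non-try projects always count as a backstop push.
    assert is_backstop(params)
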
diff --git a/taskcluster/gecko_taskgraph/util/bugbug.py b/taskcluster/gecko_taskgraph/util/bugbug.py
new file mode 100644
index 0000000000..50e02d69c6
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/bugbug.py
@@ -0,0 +1,125 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import json
+import os
+import sys
+import time
+
+import requests
+from mozbuild.util import memoize
+from taskgraph import create
+from taskgraph.util.taskcluster import requests_retry_session
+
+try:
+ # TODO(py3): use time.monotonic()
+ from time import monotonic
+except ImportError:
+ from time import time as monotonic
+
+BUGBUG_BASE_URL = "https://bugbug.herokuapp.com"
+RETRY_TIMEOUT = 9 * 60 # seconds
+RETRY_INTERVAL = 10 # seconds
+
+# Preset confidence thresholds.
+CT_LOW = 0.7
+CT_MEDIUM = 0.8
+CT_HIGH = 0.9
+
+GROUP_TRANSLATIONS = {
+ "testing/web-platform/tests": "",
+ "testing/web-platform/mozilla/tests": "/_mozilla",
+}
+
+
+def translate_group(group):
+ for prefix, value in GROUP_TRANSLATIONS.items():
+ if group.startswith(prefix):
+ return group.replace(prefix, value)
+
+ return group
+
+
+class BugbugTimeoutException(Exception):
+ pass
+
+
+@memoize
+def get_session():
+ s = requests.Session()
+ s.headers.update({"X-API-KEY": "gecko-taskgraph"})
+ return requests_retry_session(retries=5, session=s)
+
+
+def _write_perfherder_data(lower_is_better):
+ if os.environ.get("MOZ_AUTOMATION", "0") == "1":
+ perfherder_data = {
+ "framework": {"name": "build_metrics"},
+ "suites": [
+ {
+ "name": suite,
+ "value": value,
+ "lowerIsBetter": True,
+ "shouldAlert": False,
+ "subtests": [],
+ }
+ for suite, value in lower_is_better.items()
+ ],
+ }
+ print(f"PERFHERDER_DATA: {json.dumps(perfherder_data)}", file=sys.stderr)
+
+
+@memoize
+def push_schedules(branch, rev):
+ # Noop if we're in test-action-callback
+ if create.testing:
+ return
+
+ url = BUGBUG_BASE_URL + "/push/{branch}/{rev}/schedules".format(
+ branch=branch, rev=rev
+ )
+ start = monotonic()
+ session = get_session()
+
+ # On try there is no fallback and pulling is slower, so we allow bugbug more
+ # time to compute the results.
+ # See https://github.com/mozilla/bugbug/issues/1673.
+ timeout = RETRY_TIMEOUT
+ if branch == "try":
+ timeout += int(timeout / 3)
+
+ attempts = timeout / RETRY_INTERVAL
+ i = 0
+ while i < attempts:
+ r = session.get(url)
+ r.raise_for_status()
+
+ if r.status_code != 202:
+ break
+
+ time.sleep(RETRY_INTERVAL)
+ i += 1
+ end = monotonic()
+
+ _write_perfherder_data(
+ lower_is_better={
+ "bugbug_push_schedules_time": end - start,
+ "bugbug_push_schedules_retries": i,
+ }
+ )
+
+ data = r.json()
+ if r.status_code == 202:
+ raise BugbugTimeoutException(f"Timed out waiting for result from '{url}'")
+
+ if "groups" in data:
+ data["groups"] = {translate_group(k): v for k, v in data["groups"].items()}
+
+ if "config_groups" in data:
+ data["config_groups"] = {
+ translate_group(k): v for k, v in data["config_groups"].items()
+ }
+
+ return data
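
translate_group only rewrites the known web-platform-tests prefixes; a quick sketch of its effect on a few example group names:

    from gecko_taskgraph.util.bugbug import translate_group

    translate_group("testing/web-platform/tests/html/canvas")
    # -> "/html/canvas"
    translate_group("testing/web-platform/mozilla/tests/webgpu")
    # -> "/_mozilla/webgpu"
    translate_group("dom/indexedDB/test/mochitest.toml")
    # -> unchanged, no prefix matches
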
diff --git a/taskcluster/gecko_taskgraph/util/cached_tasks.py b/taskcluster/gecko_taskgraph/util/cached_tasks.py
new file mode 100644
index 0000000000..fff9bb9844
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/cached_tasks.py
@@ -0,0 +1,82 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import hashlib
+import time
+
+TARGET_CACHE_INDEX = "{trust_domain}.cache.level-{level}.{type}.{name}.hash.{digest}"
+EXTRA_CACHE_INDEXES = [
+ "{trust_domain}.cache.level-{level}.{type}.{name}.latest",
+ "{trust_domain}.cache.level-{level}.{type}.{name}.pushdate.{build_date_long}",
+]
+
+
+def add_optimization(
+ config, taskdesc, cache_type, cache_name, digest=None, digest_data=None
+):
+ """
+ Allow the results of this task to be cached. This adds index routes to the
+ task so it can be looked up for future runs, and optimization hints so that
+ cached artifacts can be found. Exactly one of `digest` and `digest_data`
+ must be passed.
+
+ :param TransformConfig config: The configuration for the kind being transformed.
+ :param dict taskdesc: The description of the current task.
+ :param str cache_type: The type of task result being cached.
+ :param str cache_name: The name of the object being cached.
+    :param digest: A unique string identifying this version of the artifacts
+ being generated. Typically this will be the hash of inputs to the task.
+ :type digest: bytes or None
+ :param digest_data: A list of bytes representing the inputs of this task.
+ They will be concatenated and hashed to create the digest for this
+ task.
+ :type digest_data: list of bytes or None
+ """
+ cached_task = taskdesc.get("attributes", {}).get("cached_task")
+ if cached_task is False:
+ return
+
+ if (digest is None) == (digest_data is None):
+ raise Exception("Must pass exactly one of `digest` and `digest_data`.")
+ if digest is None:
+ digest = hashlib.sha256("\n".join(digest_data).encode("utf-8")).hexdigest()
+
+ subs = {
+ "trust_domain": config.graph_config["trust-domain"],
+ "type": cache_type,
+ "name": cache_name,
+ "digest": digest,
+ }
+
+ # We'll try to find a cached version of the toolchain at levels above
+ # and including the current level, starting at the highest level.
+ index_routes = []
+ for level in reversed(range(int(config.params["level"]), 4)):
+ subs["level"] = level
+ index_routes.append(TARGET_CACHE_INDEX.format(**subs))
+ taskdesc["optimization"] = {"index-search": index_routes}
+
+ # ... and cache at the lowest level.
+ taskdesc.setdefault("routes", []).append(
+ f"index.{TARGET_CACHE_INDEX.format(**subs)}"
+ )
+
+ # ... and add some extra routes for humans
+ subs["build_date_long"] = time.strftime(
+ "%Y.%m.%d.%Y%m%d%H%M%S", time.gmtime(config.params["build_date"])
+ )
+ taskdesc["routes"].extend(
+ [f"index.{route.format(**subs)}" for route in EXTRA_CACHE_INDEXES]
+ )
+
+ taskdesc["attributes"]["cached_task"] = {
+ "type": cache_type,
+ "name": cache_name,
+ "digest": digest,
+ }
+
+ # Allow future pushes to find this task before it completes
+ # Implementation in morphs
+ taskdesc["attributes"]["eager_indexes"] = [TARGET_CACHE_INDEX.format(**subs)]
diff --git a/taskcluster/gecko_taskgraph/util/chunking.py b/taskcluster/gecko_taskgraph/util/chunking.py
new file mode 100644
index 0000000000..a0ed56de78
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/chunking.py
@@ -0,0 +1,351 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+"""Utility functions to handle test chunking."""
+
+import json
+import logging
+import os
+from abc import ABCMeta, abstractmethod
+
+from manifestparser import TestManifest
+from manifestparser.filters import chunk_by_runtime, tags
+from mozbuild.util import memoize
+from moztest.resolve import TEST_SUITES, TestManifestLoader, TestResolver
+from taskgraph.util.yaml import load_yaml
+
+from gecko_taskgraph import GECKO
+from gecko_taskgraph.util.bugbug import CT_LOW, BugbugTimeoutException, push_schedules
+
+logger = logging.getLogger(__name__)
+here = os.path.abspath(os.path.dirname(__file__))
+resolver = TestResolver.from_environment(cwd=here, loader_cls=TestManifestLoader)
+
+TEST_VARIANTS = {}
+if os.path.exists(os.path.join(GECKO, "taskcluster", "ci", "test", "variants.yml")):
+ TEST_VARIANTS = load_yaml(GECKO, "taskcluster", "ci", "test", "variants.yml")
+
+WPT_SUBSUITES = {
+ "canvas": "html/canvas",
+ "webgpu": "_mozilla/webgpu",
+ "privatebrowsing": "/service-workers/cache-storage",
+}
+
+
+def guess_mozinfo_from_task(task, repo=""):
+ """Attempt to build a mozinfo dict from a task definition.
+
+ This won't be perfect and many values used in the manifests will be missing. But
+ it should cover most of the major ones and be "good enough" for chunking in the
+ taskgraph.
+
+ Args:
+ task (dict): A task definition.
+
+ Returns:
+ A dict that can be used as a mozinfo replacement.
+ """
+ setting = task["test-setting"]
+ runtime_keys = setting["runtime"].keys()
+ arch = setting["platform"]["arch"]
+ p_os = setting["platform"]["os"]
+
+ info = {
+ "asan": setting["build"].get("asan", False),
+ "bits": 32 if "32" in arch else 64,
+ "ccov": setting["build"].get("ccov", False),
+ "debug": setting["build"]["type"] in ("debug", "debug-isolated-process"),
+ "tsan": setting["build"].get("tsan", False),
+ "nightly_build": repo in ["mozilla-central", "autoland", "try", ""], # trunk
+ }
+
+ for platform in ("android", "linux", "mac", "win"):
+ if p_os["name"].startswith(platform):
+ info["os"] = platform
+ break
+ else:
+ raise ValueError("{} is not a known platform!".format(p_os["name"]))
+
+ # crashreporter is disabled for asan / tsan builds
+ if info["asan"] or info["tsan"]:
+ info["crashreporter"] = False
+ else:
+ info["crashreporter"] = True
+
+ info["appname"] = "fennec" if info["os"] == "android" else "firefox"
+
+ # guess processor
+ if arch == "aarch64":
+ info["processor"] = "aarch64"
+ elif info["os"] == "android" and "arm" in arch:
+ info["processor"] = "arm"
+ elif info["bits"] == 32:
+ info["processor"] = "x86"
+ else:
+ info["processor"] = "x86_64"
+
+ # guess toolkit
+ if info["os"] == "android":
+ info["toolkit"] = "android"
+ elif info["os"] == "win":
+ info["toolkit"] = "windows"
+ elif info["os"] == "mac":
+ info["toolkit"] = "cocoa"
+ else:
+ info["toolkit"] = "gtk"
+
+ # guess os_version
+ os_versions = {
+ ("linux", "1804"): "18.04",
+ ("macosx", "1015"): "10.15",
+ ("macosx", "1100"): "11.00",
+ ("windows", "7"): "6.1",
+ ("windows", "10"): "10.0",
+ }
+ for (name, old_ver), new_ver in os_versions.items():
+ if p_os["name"] == name and p_os["version"] == old_ver:
+ info["os_version"] = new_ver
+ break
+
+ for variant in TEST_VARIANTS:
+ tag = TEST_VARIANTS[variant].get("mozinfo", "")
+ if tag == "":
+ continue
+
+ value = variant in runtime_keys
+
+ if variant == "1proc":
+ value = not value
+ elif "fission" in variant:
+ value = any(
+ "1proc" not in key or "no-fission" not in key for key in runtime_keys
+ )
+ if "no-fission" not in variant:
+ value = not value
+ elif tag == "xorigin":
+ value = any("xorigin" in key for key in runtime_keys)
+
+ info[tag] = value
+
+    # wpt has canvas and webgpu as tags, let's find those
+ for tag in WPT_SUBSUITES.keys():
+ if tag in task["test-name"]:
+ info[tag] = True
+ else:
+ info[tag] = False
+ return info
+
+
+@memoize
+def get_runtimes(platform, suite_name):
+ if not suite_name or not platform:
+ raise TypeError("suite_name and platform cannot be empty.")
+
+ base = os.path.join(GECKO, "testing", "runtimes", "manifest-runtimes-{}.json")
+ for key in ("android", "windows"):
+ if key in platform:
+ path = base.format(key)
+ break
+ else:
+ path = base.format("unix")
+
+ if not os.path.exists(path):
+ raise OSError(f"manifest runtime file at {path} not found.")
+
+ with open(path) as fh:
+ return json.load(fh)[suite_name]
+
+
+def chunk_manifests(suite, platform, chunks, manifests):
+ """Run the chunking algorithm.
+
+ Args:
+ platform (str): Platform used to find runtime info.
+ chunks (int): Number of chunks to split manifests into.
+ manifests(list): Manifests to chunk.
+
+ Returns:
+ A list of length `chunks` where each item contains a list of manifests
+ that run in that chunk.
+ """
+ ini_manifests = set([x.replace(".toml", ".ini") for x in manifests])
+
+ if "web-platform-tests" not in suite:
+ runtimes = {
+ k: v for k, v in get_runtimes(platform, suite).items() if k in ini_manifests
+ }
+ retVal = []
+ for c in chunk_by_runtime(None, chunks, runtimes).get_chunked_manifests(
+ ini_manifests
+ ):
+            retVal.append(
+                [m if m in manifests else m.replace(".ini", ".toml") for m in c[1]]
+            )
+        return retVal
+
+ # Keep track of test paths for each chunk, and the runtime information.
+ chunked_manifests = [[] for _ in range(chunks)]
+
+ # Spread out the test manifests evenly across all chunks.
+ for index, key in enumerate(sorted(manifests)):
+ chunked_manifests[index % chunks].append(key)
+
+ # One last sort by the number of manifests. Chunk size should be more or less
+ # equal in size.
+ chunked_manifests.sort(key=lambda x: len(x))
+
+ # Return just the chunked test paths.
+ return chunked_manifests
+
+
+class BaseManifestLoader(metaclass=ABCMeta):
+ def __init__(self, params):
+ self.params = params
+
+ @abstractmethod
+ def get_manifests(self, flavor, subsuite, mozinfo):
+ """Compute which manifests should run for the given flavor, subsuite and mozinfo.
+
+ This function returns skipped manifests separately so that more balanced
+ chunks can be achieved by only considering "active" manifests in the
+ chunking algorithm.
+
+ Args:
+ flavor (str): The suite to run. Values are defined by the 'build_flavor' key
+ in `moztest.resolve.TEST_SUITES`.
+ subsuite (str): The subsuite to run or 'undefined' to denote no subsuite.
+ mozinfo (frozenset): Set of data in the form of (<key>, <value>) used
+ for filtering.
+
+ Returns:
+            A tuple of two manifest lists. The first is the list of active manifests
+            (manifests that will run at least one test). The second is the list of
+            skipped manifests (manifests whose tests are all skipped).
+ """
+
+
+class DefaultLoader(BaseManifestLoader):
+ """Load manifests using metadata from the TestResolver."""
+
+ @memoize
+ def get_tests(self, suite):
+ suite_definition = TEST_SUITES[suite]
+ return list(
+ resolver.resolve_tests(
+ flavor=suite_definition["build_flavor"],
+ subsuite=suite_definition.get("kwargs", {}).get(
+ "subsuite", "undefined"
+ ),
+ )
+ )
+
+ @memoize
+ def get_manifests(self, suite, mozinfo):
+ mozinfo = dict(mozinfo)
+ # Compute all tests for the given suite/subsuite.
+ tests = self.get_tests(suite)
+
+ # TODO: the only exception here is we schedule webgpu as that is a --tag
+ if "web-platform-tests" in suite:
+ manifests = set()
+ subsuite = [x for x in WPT_SUBSUITES.keys() if mozinfo[x]]
+ for t in tests:
+ if subsuite:
+ # add specific directories
+ if WPT_SUBSUITES[subsuite[0]] in t["manifest"]:
+ manifests.add(t["manifest"])
+ else:
+ if any(x in t["manifest"] for x in WPT_SUBSUITES.values()):
+ continue
+ manifests.add(t["manifest"])
+ return {
+ "active": list(manifests),
+ "skipped": [],
+ "other_dirs": dict.fromkeys(manifests, ""),
+ }
+
+ manifests = {chunk_by_runtime.get_manifest(t) for t in tests}
+
+ filters = None
+ if mozinfo["condprof"]:
+ filters = [tags(["condprof"])]
+
+ # Compute the active tests.
+ m = TestManifest()
+ m.tests = tests
+ tests = m.active_tests(disabled=False, exists=False, filters=filters, **mozinfo)
+ active = {}
+ # map manifests and 'other' directories included
+ for t in tests:
+ mp = chunk_by_runtime.get_manifest(t)
+ active.setdefault(mp, [])
+
+ if not mp.startswith(t["dir_relpath"]):
+ active[mp].append(t["dir_relpath"])
+
+ skipped = manifests - set(active.keys())
+ other = {}
+ for m in active:
+ if len(active[m]) > 0:
+ other[m] = list(set(active[m]))
+ return {
+ "active": list(active.keys()),
+ "skipped": list(skipped),
+ "other_dirs": other,
+ }
+
+
+class BugbugLoader(DefaultLoader):
+ """Load manifests using metadata from the TestResolver, and then
+ filter them based on a query to bugbug."""
+
+ CONFIDENCE_THRESHOLD = CT_LOW
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.timedout = False
+
+ @memoize
+ def get_manifests(self, suite, mozinfo):
+ manifests = super().get_manifests(suite, mozinfo)
+
+ # Don't prune any manifests if we're on a backstop push or there was a timeout.
+ if self.params["backstop"] or self.timedout:
+ return manifests
+
+ try:
+ data = push_schedules(self.params["project"], self.params["head_rev"])
+ except BugbugTimeoutException:
+ logger.warning("Timed out waiting for bugbug, loading all test manifests.")
+ self.timedout = True
+ return self.get_manifests(suite, mozinfo)
+
+ bugbug_manifests = {
+ m
+ for m, c in data.get("groups", {}).items()
+ if c >= self.CONFIDENCE_THRESHOLD
+ }
+
+ manifests["active"] = list(set(manifests["active"]) & bugbug_manifests)
+ manifests["skipped"] = list(set(manifests["skipped"]) & bugbug_manifests)
+ return manifests
+
+
+manifest_loaders = {
+ "bugbug": BugbugLoader,
+ "default": DefaultLoader,
+}
+
+_loader_cache = {}
+
+
+def get_manifest_loader(name, params):
+ # Ensure we never create more than one instance of the same loader type for
+ # performance reasons.
+ if name in _loader_cache:
+ return _loader_cache[name]
+
+ loader = manifest_loaders[name](dict(params))
+ _loader_cache[name] = loader
+ return loader
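
For web-platform-tests suites (where no runtime data is used), chunk_manifests falls through to a simple round-robin split; a small standalone sketch of that behaviour with made-up manifest names:

    manifests = ["a.ini", "b.ini", "c.ini", "d.ini", "e.ini"]
    chunks = 2

    chunked_manifests = [[] for _ in range(chunks)]
    for index, key in enumerate(sorted(manifests)):
        chunked_manifests[index % chunks].append(key)
    chunked_manifests.sort(key=lambda x: len(x))
    # -> [["b.ini", "d.ini"], ["a.ini", "c.ini", "e.ini"]]
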
diff --git a/taskcluster/gecko_taskgraph/util/copy_task.py b/taskcluster/gecko_taskgraph/util/copy_task.py
new file mode 100644
index 0000000000..0aaf43361e
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/copy_task.py
@@ -0,0 +1,40 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from mozbuild.util import ReadOnlyDict
+from taskgraph.task import Task
+
+immutable_types = {int, float, bool, str, type(None), ReadOnlyDict}
+
+
+def copy_task(obj):
+ """
+ Perform a deep copy of a task that has a tree-like structure.
+
+ Unlike copy.deepcopy, this does *not* support copying graph-like structure,
+ but it does it more efficiently than deepcopy.
+ """
+ ty = type(obj)
+ if ty in immutable_types:
+ return obj
+ if ty is dict:
+ return {k: copy_task(v) for k, v in obj.items()}
+ if ty is list:
+ return [copy_task(elt) for elt in obj]
+ if ty is Task:
+ task = Task(
+ kind=copy_task(obj.kind),
+ label=copy_task(obj.label),
+ attributes=copy_task(obj.attributes),
+ task=copy_task(obj.task),
+ description=copy_task(obj.description),
+ optimization=copy_task(obj.optimization),
+ dependencies=copy_task(obj.dependencies),
+ soft_dependencies=copy_task(obj.soft_dependencies),
+ if_dependencies=copy_task(obj.if_dependencies),
+ )
+ if obj.task_id:
+ task.task_id = obj.task_id
+ return task
+ raise NotImplementedError(f"copying '{ty}' from '{obj}'")
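
A small usage sketch showing that copy_task gives an independent copy of nested dicts and lists:

    from gecko_taskgraph.util.copy_task import copy_task

    original = {"payload": {"env": {"FOO": "1"}}, "routes": ["index.a"]}
    clone = copy_task(original)
    clone["payload"]["env"]["FOO"] = "2"
    assert original["payload"]["env"]["FOO"] == "1"
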
diff --git a/taskcluster/gecko_taskgraph/util/declarative_artifacts.py b/taskcluster/gecko_taskgraph/util/declarative_artifacts.py
new file mode 100644
index 0000000000..24689ae94c
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/declarative_artifacts.py
@@ -0,0 +1,92 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import re
+
+from gecko_taskgraph.util.scriptworker import (
+ generate_beetmover_artifact_map,
+ generate_beetmover_upstream_artifacts,
+)
+
+_ARTIFACT_ID_PER_PLATFORM = {
+ "android-aarch64-opt": "{package}-default-omni-arm64-v8a",
+ "android-arm-opt": "{package}-default-omni-armeabi-v7a",
+ "android-x86-opt": "{package}-default-omni-x86",
+ "android-x86_64-opt": "{package}-default-omni-x86_64",
+ "android-geckoview-fat-aar-opt": "{package}-default",
+ "android-aarch64-shippable": "{package}{update_channel}-omni-arm64-v8a",
+ "android-aarch64-shippable-lite": "{package}{update_channel}-arm64-v8a",
+ "android-arm-shippable": "{package}{update_channel}-omni-armeabi-v7a",
+ "android-arm-shippable-lite": "{package}{update_channel}-armeabi-v7a",
+ "android-x86-shippable": "{package}{update_channel}-omni-x86",
+ "android-x86-shippable-lite": "{package}{update_channel}-x86",
+ "android-x86_64-shippable": "{package}{update_channel}-omni-x86_64",
+ "android-x86_64-shippable-lite": "{package}{update_channel}-x86_64",
+ "android-geckoview-fat-aar-shippable": "{package}{update_channel}-omni",
+ "android-geckoview-fat-aar-shippable-lite": "{package}{update_channel}",
+}
+
+
+def get_geckoview_artifact_map(config, job):
+ return generate_beetmover_artifact_map(
+ config,
+ job,
+ **get_geckoview_template_vars(
+ config,
+ job["attributes"]["build_platform"],
+ job["maven-package"],
+ job["attributes"].get("update-channel"),
+ ),
+ )
+
+
+def get_geckoview_upstream_artifacts(config, job, package, platform=""):
+ if not platform:
+ platform = job["attributes"]["build_platform"]
+ upstream_artifacts = generate_beetmover_upstream_artifacts(
+ config,
+ job,
+ platform="",
+ **get_geckoview_template_vars(
+ config, platform, package, job["attributes"].get("update-channel")
+ ),
+ )
+ return [
+ {key: value for key, value in upstream_artifact.items() if key != "locale"}
+ for upstream_artifact in upstream_artifacts
+ ]
+
+
+def get_geckoview_template_vars(config, platform, package, update_channel):
+ version_groups = re.match(r"(\d+).(\d+).*", config.params["version"])
+ if version_groups:
+ major_version, minor_version = version_groups.groups()
+
+ return {
+ "artifact_id": get_geckoview_artifact_id(
+ config,
+ platform,
+ package,
+ update_channel,
+ ),
+ "build_date": config.params["moz_build_date"],
+ "major_version": major_version,
+ "minor_version": minor_version,
+ }
+
+
+def get_geckoview_artifact_id(config, platform, package, update_channel=None):
+ if update_channel == "release":
+ update_channel = ""
+ elif update_channel is not None:
+ update_channel = f"-{update_channel}"
+ else:
+ # For shippable builds, mozharness defaults to using
+ # "nightly-{project}" for the update channel. For other builds, the
+ # update channel is not set, but the value is not substituted.
+ update_channel = "-nightly-{}".format(config.params["project"])
+ return _ARTIFACT_ID_PER_PLATFORM[platform].format(
+ update_channel=update_channel, package=package
+ )
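
The artifact-id templates above are plain str.format substitutions; a sketch of the shippable case with hypothetical package and channel values:

    template = "{package}{update_channel}-omni-arm64-v8a"  # android-aarch64-shippable
    template.format(package="geckoview", update_channel="-beta")
    # -> "geckoview-beta-omni-arm64-v8a"
    # With update_channel == "release", the channel component is dropped entirely:
    template.format(package="geckoview", update_channel="")
    # -> "geckoview-omni-arm64-v8a"
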
diff --git a/taskcluster/gecko_taskgraph/util/dependencies.py b/taskcluster/gecko_taskgraph/util/dependencies.py
new file mode 100644
index 0000000000..bf747926d8
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/dependencies.py
@@ -0,0 +1,156 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from taskgraph.util.dependencies import group_by
+
+
+def skip_only_or_not(config, task):
+ """Return True if we should skip this task based on `only_` or `not_` config."""
+ only_platforms = config.get("only-for-build-platforms")
+ not_platforms = config.get("not-for-build-platforms")
+ only_attributes = config.get("only-for-attributes")
+ not_attributes = config.get("not-for-attributes")
+ task_attrs = task.attributes
+ if only_platforms or not_platforms:
+ platform = task_attrs.get("build_platform")
+ build_type = task_attrs.get("build_type")
+ if not platform or not build_type:
+ return True
+ combined_platform = f"{platform}/{build_type}"
+ if only_platforms and combined_platform not in only_platforms:
+ return True
+ if not_platforms and combined_platform in not_platforms:
+ return True
+ if only_attributes:
+ if not set(only_attributes) & set(task_attrs):
+ # make sure any attribute exists
+ return True
+ if not_attributes:
+ if set(not_attributes) & set(task_attrs):
+ return True
+ return False
+
+
+@group_by("single-with-filters")
+def single_grouping(config, tasks):
+ for task in tasks:
+ if skip_only_or_not(config.config, task):
+ continue
+ yield [task]
+
+
+@group_by("platform")
+def platform_grouping(config, tasks):
+ groups = {}
+ for task in tasks:
+ if task.kind not in config.config.get("kind-dependencies", []):
+ continue
+ if skip_only_or_not(config.config, task):
+ continue
+ platform = task.attributes.get("build_platform")
+ build_type = task.attributes.get("build_type")
+ product = task.attributes.get(
+ "shipping_product", task.task.get("shipping-product")
+ )
+
+ groups.setdefault((platform, build_type, product), []).append(task)
+ return groups.values()
+
+
+@group_by("single-locale")
+def single_locale_grouping(config, tasks):
+ """Split by a single locale (but also by platform, build-type, product)
+
+ The locale can be `None` (en-US build/signing/repackage), a single locale,
+ or multiple locales per task, e.g. for l10n chunking. In the case of a task
+ with, say, five locales, the task will show up in all five locale groupings.
+
+ This grouping is written for non-partner-repack beetmover, but might also
+ be useful elsewhere.
+
+ """
+ groups = {}
+
+ for task in tasks:
+ if task.kind not in config.config.get("kind-dependencies", []):
+ continue
+ if skip_only_or_not(config.config, task):
+ continue
+ platform = task.attributes.get("build_platform")
+ build_type = task.attributes.get("build_type")
+ product = task.attributes.get(
+ "shipping_product", task.task.get("shipping-product")
+ )
+ task_locale = task.attributes.get("locale")
+ chunk_locales = task.attributes.get("chunk_locales")
+ locales = chunk_locales or [task_locale]
+
+ for locale in locales:
+ locale_key = (platform, build_type, product, locale)
+ groups.setdefault(locale_key, [])
+ if task not in groups[locale_key]:
+ groups[locale_key].append(task)
+
+ return groups.values()
+
+
+@group_by("chunk-locales")
+def chunk_locale_grouping(config, tasks):
+ """Split by a chunk_locale (but also by platform, build-type, product)
+
+ This grouping is written for mac signing with notarization, but might also
+ be useful elsewhere.
+
+ """
+ groups = {}
+
+ for task in tasks:
+ if task.kind not in config.config.get("kind-dependencies", []):
+ continue
+ if skip_only_or_not(config.config, task):
+ continue
+ platform = task.attributes.get("build_platform")
+ build_type = task.attributes.get("build_type")
+ product = task.attributes.get(
+ "shipping_product", task.task.get("shipping-product")
+ )
+ chunk_locales = tuple(sorted(task.attributes.get("chunk_locales", [])))
+
+ chunk_locale_key = (platform, build_type, product, chunk_locales)
+ groups.setdefault(chunk_locale_key, [])
+ if task not in groups[chunk_locale_key]:
+ groups[chunk_locale_key].append(task)
+
+ return groups.values()
+
+
+@group_by("partner-repack-ids")
+def partner_repack_ids_grouping(config, tasks):
+ """Split by partner_repack_ids (but also by platform, build-type, product)
+
+ This grouping is written for release-{eme-free,partner}-repack-signing.
+
+ """
+ groups = {}
+
+ for task in tasks:
+ if task.kind not in config.config.get("kind-dependencies", []):
+ continue
+ if skip_only_or_not(config.config, task):
+ continue
+ platform = task.attributes.get("build_platform")
+ build_type = task.attributes.get("build_type")
+ product = task.attributes.get(
+ "shipping_product", task.task.get("shipping-product")
+ )
+ partner_repack_ids = tuple(
+ sorted(task.task.get("extra", {}).get("repack_ids", []))
+ )
+
+ partner_repack_ids_key = (platform, build_type, product, partner_repack_ids)
+ groups.setdefault(partner_repack_ids_key, [])
+ if task not in groups[partner_repack_ids_key]:
+ groups[partner_repack_ids_key].append(task)
+
+ return groups.values()
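
skip_only_or_not matches platforms against the combined "<build_platform>/<build_type>" key; a minimal sketch of that check with hypothetical kind configuration:

    only_platforms = ["linux64/opt", "macosx64/debug"]

    platform, build_type = "linux64", "opt"
    combined_platform = f"{platform}/{build_type}"
    skip = combined_platform not in only_platforms  # False: the task is kept
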
diff --git a/taskcluster/gecko_taskgraph/util/docker.py b/taskcluster/gecko_taskgraph/util/docker.py
new file mode 100644
index 0000000000..e8de7d1fdb
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/docker.py
@@ -0,0 +1,333 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import hashlib
+import json
+import os
+import re
+import sys
+from collections.abc import Mapping
+from urllib.parse import quote, urlencode, urlunparse
+
+import requests
+import requests_unixsocket
+from mozbuild.util import memoize
+from mozpack.archive import create_tar_gz_from_files
+from mozpack.files import GeneratedFile
+from taskgraph.util.yaml import load_yaml
+
+from .. import GECKO
+
+IMAGE_DIR = os.path.join(GECKO, "taskcluster", "docker")
+
+
+def docker_url(path, **kwargs):
+ docker_socket = os.environ.get("DOCKER_SOCKET", "/var/run/docker.sock")
+ return urlunparse(
+ ("http+unix", quote(docker_socket, safe=""), path, "", urlencode(kwargs), "")
+ )
+
+
+def post_to_docker(tar, api_path, **kwargs):
+ """POSTs a tar file to a given docker API path.
+
+ The tar argument can be anything that can be passed to requests.post()
+ as data (e.g. iterator or file object).
+ The extra keyword arguments are passed as arguments to the docker API.
+ """
+ # requests-unixsocket doesn't honor requests timeouts
+ # See https://github.com/msabramo/requests-unixsocket/issues/44
+ # We have some large docker images that trigger the default timeout,
+ # so we increase the requests-unixsocket timeout here.
+ session = requests.Session()
+ session.mount(
+ requests_unixsocket.DEFAULT_SCHEME,
+ requests_unixsocket.UnixAdapter(timeout=120),
+ )
+ req = session.post(
+ docker_url(api_path, **kwargs),
+ data=tar,
+ stream=True,
+ headers={"Content-Type": "application/x-tar"},
+ )
+ if req.status_code != 200:
+ message = req.json().get("message")
+ if not message:
+ message = f"docker API returned HTTP code {req.status_code}"
+ raise Exception(message)
+ status_line = {}
+
+ buf = b""
+ for content in req.iter_content(chunk_size=None):
+ if not content:
+ continue
+        # Sometimes a chunk of content is not complete JSON, so we accumulate it
+        # with the leftovers from previous iterations.
+ buf += content
+ try:
+ data = json.loads(buf)
+ except Exception:
+ continue
+ buf = b""
+ # data is sometimes an empty dict.
+ if not data:
+ continue
+        # Mimic how docker itself presents the output. This code was tested
+ # with API version 1.18 and 1.26.
+ if "status" in data:
+ if "id" in data:
+ if sys.stderr.isatty():
+ total_lines = len(status_line)
+ line = status_line.setdefault(data["id"], total_lines)
+ n = total_lines - line
+ if n > 0:
+ # Move the cursor up n lines.
+ sys.stderr.write(f"\033[{n}A")
+ # Clear line and move the cursor to the beginning of it.
+ sys.stderr.write("\033[2K\r")
+ sys.stderr.write(
+ "{}: {} {}\n".format(
+ data["id"], data["status"], data.get("progress", "")
+ )
+ )
+ if n > 1:
+ # Move the cursor down n - 1 lines, which, considering
+ # the carriage return on the last write, gets us back
+ # where we started.
+ sys.stderr.write(f"\033[{n - 1}B")
+ else:
+ status = status_line.get(data["id"])
+ # Only print status changes.
+ if status != data["status"]:
+ sys.stderr.write("{}: {}\n".format(data["id"], data["status"]))
+ status_line[data["id"]] = data["status"]
+ else:
+ status_line = {}
+ sys.stderr.write("{}\n".format(data["status"]))
+ elif "stream" in data:
+ sys.stderr.write(data["stream"])
+ elif "aux" in data:
+ sys.stderr.write(repr(data["aux"]))
+ elif "error" in data:
+ sys.stderr.write("{}\n".format(data["error"]))
+ # Sadly, docker doesn't give more than a plain string for errors,
+ # so the best we can do to propagate the error code from the command
+ # that failed is to parse the error message...
+ errcode = 1
+ m = re.search(r"returned a non-zero code: (\d+)", data["error"])
+ if m:
+ errcode = int(m.group(1))
+ sys.exit(errcode)
+ else:
+ raise NotImplementedError(repr(data))
+ sys.stderr.flush()
+
+
+def docker_image(name, by_tag=False):
+ """
+ Resolve in-tree prebuilt docker image to ``<registry>/<repository>@sha256:<digest>``,
+ or ``<registry>/<repository>:<tag>`` if `by_tag` is `True`.
+ """
+ try:
+ with open(os.path.join(IMAGE_DIR, name, "REGISTRY")) as f:
+ registry = f.read().strip()
+ except OSError:
+ with open(os.path.join(IMAGE_DIR, "REGISTRY")) as f:
+ registry = f.read().strip()
+
+ if not by_tag:
+ hashfile = os.path.join(IMAGE_DIR, name, "HASH")
+ try:
+ with open(hashfile) as f:
+ return f"{registry}/{name}@{f.read().strip()}"
+ except OSError:
+ raise Exception(f"Failed to read HASH file {hashfile}")
+
+ try:
+ with open(os.path.join(IMAGE_DIR, name, "VERSION")) as f:
+ tag = f.read().strip()
+ except OSError:
+ tag = "latest"
+ return f"{registry}/{name}:{tag}"
+
+
+class VoidWriter:
+ """A file object with write capabilities that does nothing with the written
+ data."""
+
+ def write(self, buf):
+ pass
+
+
+def generate_context_hash(topsrcdir, image_path, image_name, args):
+ """Generates a sha256 hash for context directory used to build an image."""
+
+ return stream_context_tar(
+ topsrcdir, image_path, VoidWriter(), image_name, args=args
+ )
+
+
+class HashingWriter:
+ """A file object with write capabilities that hashes the written data at
+ the same time it passes down to a real file object."""
+
+ def __init__(self, writer):
+ self._hash = hashlib.sha256()
+ self._writer = writer
+
+ def write(self, buf):
+ self._hash.update(buf)
+ self._writer.write(buf)
+
+ def hexdigest(self):
+ return self._hash.hexdigest()
+
+
+def create_context_tar(topsrcdir, context_dir, out_path, image_name, args):
+ """Create a context tarball.
+
+ A directory ``context_dir`` containing a Dockerfile will be assembled into
+ a gzipped tar file at ``out_path``.
+
+ We also scan the source Dockerfile for special syntax that influences
+ context generation.
+
+ If a line in the Dockerfile has the form ``# %include <path>``,
+ the relative path specified on that line will be matched against
+ files in the source repository and added to the context under the
+ path ``topsrcdir/``. If an entry is a directory, we add all files
+ under that directory.
+
+ Returns the SHA-256 hex digest of the created archive.
+ """
+ with open(out_path, "wb") as fh:
+ return stream_context_tar(
+ topsrcdir,
+ context_dir,
+ fh,
+ image_name=image_name,
+ args=args,
+ )
+
+
+def stream_context_tar(topsrcdir, context_dir, out_file, image_name, args):
+ """Like create_context_tar, but streams the tar file to the `out_file` file
+ object."""
+ archive_files = {}
+ content = []
+
+ context_dir = os.path.join(topsrcdir, context_dir)
+
+ for root, dirs, files in os.walk(context_dir):
+ for f in files:
+ source_path = os.path.join(root, f)
+ archive_path = source_path[len(context_dir) + 1 :]
+ archive_files[archive_path] = source_path
+
+ # Parse Dockerfile for special syntax of extra files to include.
+ with open(os.path.join(context_dir, "Dockerfile"), "r") as fh:
+ for line in fh:
+ content.append(line)
+
+ if not line.startswith("# %include"):
+ continue
+
+ p = line[len("# %include ") :].strip()
+ if os.path.isabs(p):
+ raise Exception("extra include path cannot be absolute: %s" % p)
+
+ fs_path = os.path.normpath(os.path.join(topsrcdir, p))
+ # Check for filesystem traversal exploits.
+ if not fs_path.startswith(topsrcdir):
+ raise Exception("extra include path outside topsrcdir: %s" % p)
+
+ if not os.path.exists(fs_path):
+ raise Exception("extra include path does not exist: %s" % p)
+
+ if os.path.isdir(fs_path):
+ for root, dirs, files in os.walk(fs_path):
+ for f in files:
+ source_path = os.path.join(root, f)
+ rel = source_path[len(fs_path) + 1 :]
+ archive_path = os.path.join("topsrcdir", p, rel)
+ archive_files[archive_path] = source_path
+ else:
+ archive_path = os.path.join("topsrcdir", p)
+ archive_files[archive_path] = fs_path
+
+ archive_files["Dockerfile"] = GeneratedFile("".join(content).encode("utf-8"))
+
+ writer = HashingWriter(out_file)
+ create_tar_gz_from_files(writer, archive_files, f"{image_name}.tar")
+ return writer.hexdigest()
+
+
+class ImagePathsMap(Mapping):
+ """ImagePathsMap contains the mapping of Docker image names to their
+ context location in the filesystem. The register function allows Thunderbird
+ to define additional images under comm/taskcluster.
+ """
+
+ def __init__(self, config_path, image_dir=IMAGE_DIR):
+ config = load_yaml(GECKO, config_path)
+ self.__update_image_paths(config["jobs"], image_dir)
+
+ def __getitem__(self, key):
+ return self.__dict__[key]
+
+ def __iter__(self):
+ return iter(self.__dict__)
+
+ def __len__(self):
+ return len(self.__dict__)
+
+ def __update_image_paths(self, jobs, image_dir):
+ self.__dict__.update(
+ {
+ k: os.path.join(image_dir, v.get("definition", k))
+ for k, v in jobs.items()
+ }
+ )
+
+ def register(self, jobs_config_path, image_dir):
+ """Register additional image_paths. In this case, there is no 'jobs'
+ key in the loaded YAML as this file is loaded via jobs-from in kind.yml."""
+ jobs = load_yaml(GECKO, jobs_config_path)
+ self.__update_image_paths(jobs, image_dir)
+
+
+image_paths = ImagePathsMap("taskcluster/ci/docker-image/kind.yml")
+
+
+def image_path(name):
+ if name in image_paths:
+ return image_paths[name]
+ return os.path.join(IMAGE_DIR, name)
+
+
+@memoize
+def parse_volumes(image):
+ """Parse VOLUME entries from a Dockerfile for an image."""
+ volumes = set()
+
+ path = image_path(image)
+
+ with open(os.path.join(path, "Dockerfile"), "rb") as fh:
+ for line in fh:
+ line = line.strip()
+ # We assume VOLUME definitions don't use ARGS.
+ if not line.startswith(b"VOLUME "):
+ continue
+
+ v = line.split(None, 1)[1]
+ if v.startswith(b"["):
+ raise ValueError(
+ "cannot parse array syntax for VOLUME; "
+ "convert to multiple entries"
+ )
+
+ volumes |= {v.decode("utf-8") for v in v.split()}
+
+ return volumes
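
docker_url turns an API path plus query arguments into a requests-unixsocket URL; a sketch assuming DOCKER_SOCKET is unset so the default socket path is used:

    from gecko_taskgraph.util.docker import docker_url

    docker_url("/images/create", fromImage="ubuntu", tag="22.04")
    # -> "http+unix://%2Fvar%2Frun%2Fdocker.sock/images/create?fromImage=ubuntu&tag=22.04"
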
diff --git a/taskcluster/gecko_taskgraph/util/hash.py b/taskcluster/gecko_taskgraph/util/hash.py
new file mode 100644
index 0000000000..485c9a7c48
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/hash.py
@@ -0,0 +1,68 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import hashlib
+
+import mozpack.path as mozpath
+from mozbuild.util import memoize
+from mozversioncontrol import get_repository_object
+
+
+@memoize
+def hash_path(path):
+ """Hash a single file.
+
+ Returns the SHA-256 hash in hex form.
+ """
+ with open(path, mode="rb") as fh:
+ return hashlib.sha256(fh.read()).hexdigest()
+
+
+@memoize
+def get_file_finder(base_path):
+ from pathlib import Path
+
+ repo = get_repository_object(base_path)
+ if repo:
+ files = repo.get_tracked_files_finder(base_path)
+ if files:
+ return files
+ else:
+ return None
+ else:
+ return get_repository_object(Path(base_path)).get_tracked_files_finder(
+ base_path
+ )
+
+
+def hash_paths(base_path, patterns):
+ """
+ Give a list of path patterns, return a digest of the contents of all
+ the corresponding files, similarly to git tree objects or mercurial
+ manifests.
+
+ Each file is hashed. The list of all hashes and file paths is then
+ itself hashed to produce the result.
+ """
+ finder = get_file_finder(base_path)
+ h = hashlib.sha256()
+ files = {}
+ if finder:
+ for pattern in patterns:
+ found = list(finder.find(pattern))
+ if found:
+ files.update(found)
+ else:
+ raise Exception("%s did not match anything" % pattern)
+ for path in sorted(files.keys()):
+ if path.endswith((".pyc", ".pyd", ".pyo")):
+ continue
+ h.update(
+ "{} {}\n".format(
+ hash_path(mozpath.abspath(mozpath.join(base_path, path))),
+ mozpath.normsep(path),
+ ).encode("utf-8")
+ )
+
+ return h.hexdigest()
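
A usage sketch for hash_paths, hashing every tracked file matching a pattern relative to the source checkout (the pattern here is illustrative):

    from gecko_taskgraph import GECKO
    from gecko_taskgraph.util.hash import hash_paths

    digest = hash_paths(GECKO, ["taskcluster/gecko_taskgraph/util/**"])
    # 64-character hex digest, stable as long as none of the matched files change.
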
diff --git a/taskcluster/gecko_taskgraph/util/hg.py b/taskcluster/gecko_taskgraph/util/hg.py
new file mode 100644
index 0000000000..18a92fbd0d
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/hg.py
@@ -0,0 +1,139 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import logging
+import subprocess
+
+import requests
+from mozbuild.util import memoize
+from redo import retry
+
+logger = logging.getLogger(__name__)
+
+PUSHLOG_CHANGESET_TMPL = (
+ "{repository}/json-pushes?version=2&changeset={revision}&tipsonly=1"
+)
+PUSHLOG_PUSHES_TMPL = (
+ "{repository}/json-pushes/?version=2&startID={push_id_start}&endID={push_id_end}"
+)
+
+
+def _query_pushlog(url):
+ response = retry(
+ requests.get,
+ attempts=5,
+ sleeptime=10,
+ args=(url,),
+ kwargs={"timeout": 60, "headers": {"User-Agent": "TaskCluster"}},
+ )
+
+ return response.json()["pushes"]
+
+
+def find_hg_revision_push_info(repository, revision):
+ """Given the parameters for this action and a revision, find the
+ pushlog_id of the revision."""
+ url = PUSHLOG_CHANGESET_TMPL.format(repository=repository, revision=revision)
+
+ pushes = _query_pushlog(url)
+
+ if len(pushes) != 1:
+ raise RuntimeError(
+ "Found {} pushlog_ids, expected 1, for {} revision {}: {}".format(
+ len(pushes), repository, revision, pushes
+ )
+ )
+
+ pushid = list(pushes.keys())[0]
+ return {
+ "pushdate": pushes[pushid]["date"],
+ "pushid": pushid,
+ "user": pushes[pushid]["user"],
+ }
+
+
+@memoize
+def get_push_data(repository, project, push_id_start, push_id_end):
+ url = PUSHLOG_PUSHES_TMPL.format(
+ repository=repository,
+ push_id_start=push_id_start - 1,
+ push_id_end=push_id_end,
+ )
+
+ try:
+ pushes = _query_pushlog(url)
+
+ return {
+ push_id: pushes[str(push_id)]
+ for push_id in range(push_id_start, push_id_end + 1)
+ }
+
+    # If the request times out, requests will raise a Timeout exception.
+ except requests.exceptions.Timeout:
+ logger.warning("json-pushes timeout")
+
+ # In the event of a network problem (e.g. DNS failure, refused connection, etc),
+ # requests will raise a ConnectionError.
+ except requests.exceptions.ConnectionError:
+ logger.warning("json-pushes connection error")
+
+    # In the event of a rare invalid HTTP response (e.g. 404, 401),
+ # requests will raise an HTTPError exception
+ except requests.exceptions.HTTPError:
+ logger.warning("Bad Http response")
+
+ # When we get invalid JSON (i.e. 500 error), it results in a ValueError (bug 1313426)
+ except ValueError as error:
+ logger.warning(f"Invalid JSON, possible server error: {error}")
+
+    # Log any other request exception we did not specifically handle above.
+ except requests.exceptions.RequestException as error:
+ logger.warning(error)
+
+ return None
+
+
+@memoize
+def get_json_automationrelevance(repository, revision):
+ url = "{}/json-automationrelevance/{}".format(repository.rstrip("/"), revision)
+ logger.debug("Querying version control for metadata: %s", url)
+
+ def get_automationrelevance():
+ response = requests.get(url, timeout=30)
+ return response.json()
+
+ return retry(get_automationrelevance, attempts=10, sleeptime=10)
+
+
+def get_hg_revision_branch(root, revision):
+ """Given the parameters for a revision, find the hg_branch (aka
+ relbranch) of the revision."""
+ return subprocess.check_output(
+ [
+ "hg",
+ "identify",
+ "-T",
+ "{branch}",
+ "--rev",
+ revision,
+ ],
+ cwd=root,
+ universal_newlines=True,
+ )
+
+
+# For these functions, we assume that run-task has correctly checked out the
+# revision indicated by GECKO_HEAD_REF, so all that remains is to see what the
+# current revision is. Mercurial refers to that as `.`.
+def get_hg_commit_message(root, rev="."):
+ return subprocess.check_output(
+ ["hg", "log", "-r", rev, "-T", "{desc}"], cwd=root, universal_newlines=True
+ )
+
+
+def calculate_head_rev(root):
+ return subprocess.check_output(
+ ["hg", "log", "-r", ".", "-T", "{node}"], cwd=root, universal_newlines=True
+ )
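
A hedged usage sketch for the local-checkout helpers above (the checkout path is hypothetical and must point at a Mercurial clone):

    from gecko_taskgraph.util.hg import calculate_head_rev, get_hg_commit_message

    root = "/builds/worker/checkouts/gecko"  # hypothetical checkout location
    head_rev = calculate_head_rev(root)       # 40-char node of "."
    commit_msg = get_hg_commit_message(root)  # description of "."
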
diff --git a/taskcluster/gecko_taskgraph/util/partials.py b/taskcluster/gecko_taskgraph/util/partials.py
new file mode 100644
index 0000000000..1a3affcc42
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/partials.py
@@ -0,0 +1,297 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import logging
+
+import redo
+import requests
+
+from gecko_taskgraph.util.scriptworker import (
+ BALROG_SCOPE_ALIAS_TO_PROJECT,
+ BALROG_SERVER_SCOPES,
+)
+
+logger = logging.getLogger(__name__)
+
+PLATFORM_RENAMES = {
+ "windows2012-32": "win32",
+ "windows2012-64": "win64",
+ "windows2012-aarch64": "win64-aarch64",
+ "osx-cross": "macosx64",
+ "osx": "macosx64",
+}
+
+BALROG_PLATFORM_MAP = {
+ "linux": ["Linux_x86-gcc3"],
+ "linux32": ["Linux_x86-gcc3"],
+ "linux64": ["Linux_x86_64-gcc3"],
+ "linux64-asan-reporter": ["Linux_x86_64-gcc3-asan"],
+ "macosx64": [
+ "Darwin_x86_64-gcc3-u-i386-x86_64",
+ "Darwin_x86-gcc3-u-i386-x86_64",
+ "Darwin_aarch64-gcc3",
+ "Darwin_x86-gcc3",
+ "Darwin_x86_64-gcc3",
+ ],
+ "win32": ["WINNT_x86-msvc", "WINNT_x86-msvc-x86", "WINNT_x86-msvc-x64"],
+ "win64": ["WINNT_x86_64-msvc", "WINNT_x86_64-msvc-x64"],
+ "win64-asan-reporter": ["WINNT_x86_64-msvc-x64-asan"],
+ "win64-aarch64": [
+ "WINNT_aarch64-msvc-aarch64",
+ ],
+}
+
+FTP_PLATFORM_MAP = {
+ "Darwin_x86-gcc3": "mac",
+ "Darwin_x86-gcc3-u-i386-x86_64": "mac",
+ "Darwin_x86_64-gcc3": "mac",
+ "Darwin_x86_64-gcc3-u-i386-x86_64": "mac",
+ "Darwin_aarch64-gcc3": "mac",
+ "Linux_x86-gcc3": "linux-i686",
+ "Linux_x86_64-gcc3": "linux-x86_64",
+ "Linux_x86_64-gcc3-asan": "linux-x86_64-asan-reporter",
+ "WINNT_x86_64-msvc-x64-asan": "win64-asan-reporter",
+ "WINNT_x86-msvc": "win32",
+ "WINNT_x86-msvc-x64": "win32",
+ "WINNT_x86-msvc-x86": "win32",
+ "WINNT_x86_64-msvc": "win64",
+ "WINNT_x86_64-msvc-x64": "win64",
+ "WINNT_aarch64-msvc-aarch64": "win64-aarch64",
+}
+
+
+def get_balrog_platform_name(platform):
+ """Convert build platform names into balrog platform names.
+
+ Remove known values instead to catch aarch64 and other platforms
+ that may be added.
+ """
+ removals = ["-devedition", "-shippable"]
+ for remove in removals:
+ platform = platform.replace(remove, "")
+ return PLATFORM_RENAMES.get(platform, platform)
+
+
+def _sanitize_platform(platform):
+ platform = get_balrog_platform_name(platform)
+ if platform not in BALROG_PLATFORM_MAP:
+ return platform
+ return BALROG_PLATFORM_MAP[platform][0]
+
+
+def get_builds(release_history, platform, locale):
+ """Examine cached balrog release history and return the list of
+ builds we need to generate diffs from"""
+ platform = _sanitize_platform(platform)
+ return release_history.get(platform, {}).get(locale, {})
+
+
+def get_partials_artifacts_from_params(release_history, platform, locale):
+ platform = _sanitize_platform(platform)
+ return [
+ (artifact, details.get("previousVersion", None))
+ for artifact, details in release_history.get(platform, {})
+ .get(locale, {})
+ .items()
+ ]
+
+
+def get_partials_info_from_params(release_history, platform, locale):
+ platform = _sanitize_platform(platform)
+
+ artifact_map = {}
+ for k in release_history.get(platform, {}).get(locale, {}):
+ details = release_history[platform][locale][k]
+ attributes = ("buildid", "previousBuildNumber", "previousVersion")
+ artifact_map[k] = {
+ attr: details[attr] for attr in attributes if attr in details
+ }
+ return artifact_map
+
+
+def _retry_on_http_errors(url, verify, params, errors):
+ if params:
+ params_str = "&".join("=".join([k, str(v)]) for k, v in params.items())
+ else:
+ params_str = ""
+ logger.info("Connecting to %s?%s", url, params_str)
+ for _ in redo.retrier(sleeptime=5, max_sleeptime=30, attempts=10):
+ try:
+ req = requests.get(url, verify=verify, params=params, timeout=10)
+ req.raise_for_status()
+ return req
+ except requests.HTTPError as e:
+ if e.response.status_code in errors:
+ logger.exception(
+ "Got HTTP %s trying to reach %s", e.response.status_code, url
+ )
+ else:
+ raise
+ else:
+ raise Exception(f"Cannot connect to {url}!")
+
+
+def get_sorted_releases(product, branch):
+ """Returns a list of release names from Balrog.
+ :param product: product name, AKA appName
+ :param branch: branch name, e.g. mozilla-central
+ :return: a sorted list of release names, most recent first.
+ """
+ url = f"{_get_balrog_api_root(branch)}/releases"
+ params = {
+ "product": product,
+ # Adding -nightly-2 (2 stands for the beginning of build ID
+ # based on date) should filter out release and latest blobs.
+ # This should be changed to -nightly-3 in 3000 ;)
+ "name_prefix": f"{product}-{branch}-nightly-2",
+ "names_only": True,
+ }
+ req = _retry_on_http_errors(url=url, verify=True, params=params, errors=[500])
+ releases = req.json()["names"]
+ releases = sorted(releases, reverse=True)
+ return releases
+
+
+def get_release_builds(release, branch):
+ url = f"{_get_balrog_api_root(branch)}/releases/{release}"
+ req = _retry_on_http_errors(url=url, verify=True, params=None, errors=[500])
+ return req.json()
+
+
+def _get_balrog_api_root(branch):
+    # Query the scopes scriptworker uses to make sure we check against the same
+    # balrog server that our jobs would use.
+ scope = None
+ for alias, projects in BALROG_SCOPE_ALIAS_TO_PROJECT:
+ if branch in projects and alias in BALROG_SERVER_SCOPES:
+ scope = BALROG_SERVER_SCOPES[alias]
+ break
+ else:
+ scope = BALROG_SERVER_SCOPES["default"]
+
+ if scope == "balrog:server:dep":
+ return "https://stage.balrog.nonprod.cloudops.mozgcp.net/api/v1"
+ return "https://aus5.mozilla.org/api/v1"
+
+
+def find_localtest(fileUrls):
+ for channel in fileUrls:
+ if "-localtest" in channel:
+ return channel
+
+
+def populate_release_history(
+ product, branch, maxbuilds=4, maxsearch=10, partial_updates=None
+):
+ # Assuming we are using release branches when we know the list of previous
+ # releases in advance
+ if partial_updates is not None:
+ return _populate_release_history(
+ product, branch, partial_updates=partial_updates
+ )
+ return _populate_nightly_history(
+ product, branch, maxbuilds=maxbuilds, maxsearch=maxsearch
+ )
+
+
+def _populate_nightly_history(product, branch, maxbuilds=4, maxsearch=10):
+ """Find relevant releases in Balrog
+ Not all releases have all platforms and locales, due
+ to Taskcluster migration.
+
+ Args:
+ product (str): capitalized product name, AKA appName, e.g. Firefox
+ branch (str): branch name (mozilla-central)
+ maxbuilds (int): Maximum number of historical releases to populate
+ maxsearch(int): Traverse at most this many releases, to avoid
+ working through the entire history.
+ Returns:
+ json object based on data from balrog api
+
+ results = {
+ 'platform1': {
+ 'locale1': {
+ 'buildid1': mar_url,
+ 'buildid2': mar_url,
+ 'buildid3': mar_url,
+ },
+ 'locale2': {
+ 'target.partial-1.mar': {'buildid1': 'mar_url'},
+ }
+ },
+ 'platform2': {
+ }
+ }
+ """
+ last_releases = get_sorted_releases(product, branch)
+
+ partial_mar_tmpl = "target.partial-{}.mar"
+
+ builds = dict()
+ for release in last_releases[:maxsearch]:
+ # maxbuilds in all categories, don't make any more queries
+ full = len(builds) > 0 and all(
+ len(builds[platform][locale]) >= maxbuilds
+ for platform in builds
+ for locale in builds[platform]
+ )
+ if full:
+ break
+ history = get_release_builds(release, branch)
+
+ for platform in history["platforms"]:
+ if "alias" in history["platforms"][platform]:
+ continue
+ if platform not in builds:
+ builds[platform] = dict()
+ for locale in history["platforms"][platform]["locales"]:
+ if locale not in builds[platform]:
+ builds[platform][locale] = dict()
+ if len(builds[platform][locale]) >= maxbuilds:
+ continue
+ buildid = history["platforms"][platform]["locales"][locale]["buildID"]
+ url = history["platforms"][platform]["locales"][locale]["completes"][0][
+ "fileUrl"
+ ]
+ nextkey = len(builds[platform][locale]) + 1
+ builds[platform][locale][partial_mar_tmpl.format(nextkey)] = {
+ "buildid": buildid,
+ "mar_url": url,
+ }
+ return builds
+
+
+def _populate_release_history(product, branch, partial_updates):
+ builds = dict()
+ for version, release in partial_updates.items():
+ prev_release_blob = "{product}-{version}-build{build_number}".format(
+ product=product, version=version, build_number=release["buildNumber"]
+ )
+ partial_mar_key = f"target-{version}.partial.mar"
+ history = get_release_builds(prev_release_blob, branch)
+ # use one of the localtest channels to avoid relying on bouncer
+ localtest = find_localtest(history["fileUrls"])
+ url_pattern = history["fileUrls"][localtest]["completes"]["*"]
+
+ for platform in history["platforms"]:
+ if "alias" in history["platforms"][platform]:
+ continue
+ if platform not in builds:
+ builds[platform] = dict()
+ for locale in history["platforms"][platform]["locales"]:
+ if locale not in builds[platform]:
+ builds[platform][locale] = dict()
+ buildid = history["platforms"][platform]["locales"][locale]["buildID"]
+ url = url_pattern.replace(
+ "%OS_FTP%", FTP_PLATFORM_MAP[platform]
+ ).replace("%LOCALE%", locale)
+ builds[platform][locale][partial_mar_key] = {
+ "buildid": buildid,
+ "mar_url": url,
+ "previousVersion": version,
+ "previousBuildNumber": release["buildNumber"],
+ "product": product,
+ }
+ return builds
diff --git a/taskcluster/gecko_taskgraph/util/partners.py b/taskcluster/gecko_taskgraph/util/partners.py
new file mode 100644
index 0000000000..2546e1ae88
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/partners.py
@@ -0,0 +1,555 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import json
+import logging
+import os
+import xml.etree.ElementTree as ET
+from urllib.parse import urlencode
+
+import requests
+import yaml
+from redo import retry
+from taskgraph.util.schema import resolve_keyed_by
+
+from gecko_taskgraph.util.attributes import release_level
+from gecko_taskgraph.util.copy_task import copy_task
+
+# Suppress chatty requests logging
+logging.getLogger("requests").setLevel(logging.WARNING)
+
+log = logging.getLogger(__name__)
+
+GITHUB_API_ENDPOINT = "https://api.github.com/graphql"
+
+"""
+LOGIN_QUERY, MANIFEST_QUERY, and REPACK_CFG_QUERY are all written to the Github v4 API,
+which uses GraphQL. See https://developer.github.com/v4/
+"""
+
+LOGIN_QUERY = """query {
+ viewer {
+ login
+ name
+ }
+}
+"""
+
+# Returns the contents of default.xml from a manifest repository
+MANIFEST_QUERY = """query {
+ repository(owner:"%(owner)s", name:"%(repo)s") {
+ object(expression: "master:%(file)s") {
+ ... on Blob {
+ text
+ }
+ }
+ }
+}
+"""
+# Example response:
+# {
+# "data": {
+# "repository": {
+# "object": {
+# "text": "<?xml version=\"1.0\" ?>\n<manifest>\n " +
+# "<remote fetch=\"git@github.com:mozilla-partners/\" name=\"mozilla-partners\"/>\n " +
+# "<remote fetch=\"git@github.com:mozilla/\" name=\"mozilla\"/>\n\n " +
+# "<project name=\"repack-scripts\" path=\"scripts\" remote=\"mozilla-partners\" " +
+# "revision=\"master\"/>\n <project name=\"build-tools\" path=\"scripts/tools\" " +
+# "remote=\"mozilla\" revision=\"master\"/>\n <project name=\"mozilla-EME-free\" " +
+# "path=\"partners/mozilla-EME-free\" remote=\"mozilla-partners\" " +
+# "revision=\"master\"/>\n</manifest>\n"
+# }
+# }
+# }
+# }
+
+# Returns the contents of desktop/*/repack.cfg for a partner repository
+REPACK_CFG_QUERY = """query{
+ repository(owner:"%(owner)s", name:"%(repo)s") {
+ object(expression: "%(revision)s:desktop/"){
+ ... on Tree {
+ entries {
+ name
+ object {
+ ... on Tree {
+ entries {
+ name
+ object {
+ ... on Blob {
+ text
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+"""
+# Example response:
+# {
+# "data": {
+# "repository": {
+# "object": {
+# "entries": [
+# {
+# "name": "mozilla-EME-free",
+# "object": {
+# "entries": [
+# {
+# "name": "distribution",
+# "object": {}
+# },
+# {
+# "name": "repack.cfg",
+# "object": {
+# "text": "aus=\"mozilla-EMEfree\"\ndist_id=\"mozilla-EMEfree\"\n" +
+# "dist_version=\"1.0\"\nlinux-i686=true\nlinux-x86_64=true\n" +
+# " locales=\"ach af de en-US\"\nmac=true\nwin32=true\nwin64=true\n" +
+# "output_dir=\"%(platform)s-EME-free/%(locale)s\"\n\n" +
+# "# Upload params\nbucket=\"net-mozaws-prod-delivery-firefox\"\n" +
+# "upload_to_candidates=true\n"
+# }
+# }
+# ]
+# }
+# }
+# ]
+# }
+# }
+# }
+# }
+
+# Map platforms in repack.cfg into their equivalents in taskcluster
+TC_PLATFORM_PER_FTP = {
+ "linux-i686": "linux-shippable",
+ "linux-x86_64": "linux64-shippable",
+ "mac": "macosx64-shippable",
+ "win32": "win32-shippable",
+ "win64": "win64-shippable",
+ "win64-aarch64": "win64-aarch64-shippable",
+}
+
+TASKCLUSTER_PROXY_SECRET_ROOT = "http://taskcluster/secrets/v1/secret"
+
+LOCALES_FILE = os.path.join(
+ os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),
+ "browser",
+ "locales",
+ "l10n-changesets.json",
+)
+
+# cache data at the module level
+partner_configs = {}
+
+
+def get_token(params):
+ """We use a Personal Access Token from Github to lookup partner config. No extra scopes are
+ needed on the token to read public repositories, but need the 'repo' scope to see private
+ repositories. This is not fine grained and also grants r/w access, but is revoked at the repo
+ level.
+ """
+
+ # Allow for local taskgraph debugging
+ if os.environ.get("GITHUB_API_TOKEN"):
+ return os.environ["GITHUB_API_TOKEN"]
+
+ # The 'usual' method - via taskClusterProxy for decision tasks
+ url = "{secret_root}/project/releng/gecko/build/level-{level}/partner-github-api".format(
+ secret_root=TASKCLUSTER_PROXY_SECRET_ROOT, **params
+ )
+ try:
+ resp = retry(
+ requests.get,
+ attempts=2,
+ sleeptime=10,
+ args=(url,),
+ kwargs={"timeout": 60, "headers": ""},
+ )
+ j = resp.json()
+ return j["secret"]["key"]
+ except (requests.ConnectionError, ValueError, KeyError):
+ raise RuntimeError("Could not get Github API token to lookup partner data")
+
+
+def query_api(query, token):
+ """Make a query with a Github auth header, returning the json"""
+ headers = {"Authorization": "bearer %s" % token}
+ r = requests.post(GITHUB_API_ENDPOINT, json={"query": query}, headers=headers)
+ r.raise_for_status()
+
+ j = r.json()
+ if "errors" in j:
+        raise RuntimeError("Github query error - %s" % j["errors"])
+ return j
+
+
+def check_login(token):
+ log.debug("Checking we have a valid login")
+ query_api(LOGIN_QUERY, token)
+
+
+def get_repo_params(repo):
+ """Parse the organisation and repo name from an https or git url for a repo"""
+ if repo.startswith("https"):
+ # eg https://github.com/mozilla-partners/mozilla-EME-free
+ return repo.rsplit("/", 2)[-2:]
+ if repo.startswith("git@"):
+ # eg git@github.com:mozilla-partners/mailru.git
+ repo = repo.replace(".git", "")
+ return repo.split(":")[-1].split("/")
+
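+# For example, both URL styles parse to ["mozilla-partners", "<repo name>"]:
+#   get_repo_params("https://github.com/mozilla-partners/mozilla-EME-free")
+#   -> ["mozilla-partners", "mozilla-EME-free"]
+#   get_repo_params("git@github.com:mozilla-partners/mailru.git")
+#   -> ["mozilla-partners", "mailru"]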
+
+def get_partners(manifestRepo, token):
+ """Given the url to a manifest repository, retrieve the default.xml and parse it into a
+ list of partner repos.
+ """
+ log.debug("Querying for manifest default.xml in %s", manifestRepo)
+ owner, repo = get_repo_params(manifestRepo)
+ query = MANIFEST_QUERY % {"owner": owner, "repo": repo, "file": "default.xml"}
+ raw_manifest = query_api(query, token)
+ log.debug("Raw manifest: %s", raw_manifest)
+ if not raw_manifest["data"]["repository"]:
+ raise RuntimeError(
+ "Couldn't load partner manifest at %s, insufficient permissions ?"
+ % manifestRepo
+ )
+ e = ET.fromstring(raw_manifest["data"]["repository"]["object"]["text"])
+
+ remotes = {}
+ partners = {}
+ for child in e:
+ if child.tag == "remote":
+ name = child.attrib["name"]
+ url = child.attrib["fetch"]
+ remotes[name] = url
+ log.debug("Added remote %s at %s", name, url)
+ elif child.tag == "project":
+ # we don't need to check any code repos
+ if "scripts" in child.attrib["path"]:
+ continue
+ owner, _ = get_repo_params(remotes[child.attrib["remote"]] + "_")
+ partner_url = {
+ "owner": owner,
+ "repo": child.attrib["name"],
+ "revision": child.attrib["revision"],
+ }
+ partners[child.attrib["name"]] = partner_url
+ log.debug(
+ "Added partner %s at revision %s"
+ % (partner_url["repo"], partner_url["revision"])
+ )
+ return partners
+
+
+def parse_config(data):
+ """Parse a single repack.cfg file into a python dictionary.
+    data is the contents of the file, in "foo=bar\nbaz=buzz" style. We do some translation on
+    the locales and platforms data; other values are passed through unchanged.
+ """
+ ALLOWED_KEYS = (
+ "locales",
+ "platforms",
+ "upload_to_candidates",
+ "repack_stub_installer",
+ "publish_to_releases",
+ )
+ config = {"platforms": []}
+ for l in data.splitlines():
+ if "=" in l:
+ l = str(l)
+ key, value = l.split("=", 1)
+ value = value.strip("'\"").rstrip("'\"")
+ if key in TC_PLATFORM_PER_FTP.keys():
+ if value.lower() == "true":
+ config["platforms"].append(TC_PLATFORM_PER_FTP[key])
+ continue
+ if key not in ALLOWED_KEYS:
+ continue
+ if key == "locales":
+ # a list please
+ value = value.split(" ")
+ config[key] = value
+ return config
+
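+# For example, a repack.cfg containing (illustrative values):
+#   locales="de en-US"
+#   win32=true
+#   win64=true
+#   upload_to_candidates=true
+# parses to:
+#   {"platforms": ["win32-shippable", "win64-shippable"],
+#    "locales": ["de", "en-US"],
+#    "upload_to_candidates": "true"}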
+
+def get_repack_configs(repackRepo, token):
+ """For a partner repository, retrieve all the repack.cfg files and parse them into a dict"""
+ log.debug("Querying for configs in %s", repackRepo)
+ query = REPACK_CFG_QUERY % repackRepo
+ raw_configs = query_api(query, token)
+ raw_configs = raw_configs["data"]["repository"]["object"]["entries"]
+
+ configs = {}
+ for sub_config in raw_configs:
+ name = sub_config["name"]
+ for file in sub_config["object"].get("entries", []):
+ if file["name"] != "repack.cfg":
+ continue
+ configs[name] = parse_config(file["object"]["text"])
+ return configs
+
+
+def get_attribution_config(manifestRepo, token):
+ log.debug("Querying for manifest attribution_config.yml in %s", manifestRepo)
+ owner, repo = get_repo_params(manifestRepo)
+ query = MANIFEST_QUERY % {
+ "owner": owner,
+ "repo": repo,
+ "file": "attribution_config.yml",
+ }
+ raw_manifest = query_api(query, token)
+ if not raw_manifest["data"]["repository"]:
+ raise RuntimeError(
+ "Couldn't load partner manifest at %s, insufficient permissions ?"
+ % manifestRepo
+ )
+ # no file has been set up, gracefully continue
+ if raw_manifest["data"]["repository"]["object"] is None:
+ log.debug("No attribution_config.yml file found")
+ return {}
+
+ return yaml.safe_load(raw_manifest["data"]["repository"]["object"]["text"])
+
+
+def get_partner_config_by_url(manifest_url, kind, token, partner_subset=None):
+ """Retrieve partner data starting from the manifest url, which points to a repository
+    containing a default.xml that is intended to drive the Google tool 'repo'. It
+    descends into each partner repo to look up and parse the repack.cfg file(s).
+
+ If partner_subset is a list of sub_config names only return data for those.
+
+    Supports caching data by kind to avoid repeated requests, relying on the related kinds
+    (partner repacking, signing, repackage, repackage signing) all having the same kind prefix.
+ """
+ if not manifest_url:
+ raise RuntimeError(f"Manifest url for {kind} not defined")
+ if kind not in partner_configs:
+ log.info("Looking up data for %s from %s", kind, manifest_url)
+ check_login(token)
+ if kind == "release-partner-attribution":
+ partner_configs[kind] = get_attribution_config(manifest_url, token)
+ else:
+ partners = get_partners(manifest_url, token)
+
+ partner_configs[kind] = {}
+ for partner, partner_url in partners.items():
+ if partner_subset and partner not in partner_subset:
+ continue
+ partner_configs[kind][partner] = get_repack_configs(partner_url, token)
+
+ return partner_configs[kind]
+
+
+def check_if_partners_enabled(config, tasks):
+ if (
+ (
+ config.params["release_enable_partner_repack"]
+ and config.kind.startswith("release-partner-repack")
+ )
+ or (
+ config.params["release_enable_partner_attribution"]
+ and config.kind.startswith("release-partner-attribution")
+ )
+ or (
+ config.params["release_enable_emefree"]
+ and config.kind.startswith("release-eme-free-")
+ )
+ ):
+ yield from tasks
+
+
+def get_partner_config_by_kind(config, kind):
+ """Retrieve partner data starting from the manifest url, which points to a repository
+    containing a default.xml that is intended to drive the Google tool 'repo'. It
+    descends into each partner repo to look up and parse the repack.cfg file(s).
+
+    Supports caching data by kind to avoid repeated requests, relying on the related kinds
+    (partner repacking, signing, repackage, repackage signing) all having the same kind prefix.
+ """
+ partner_subset = config.params["release_partners"]
+ partner_configs = config.params["release_partner_config"] or {}
+
+ # TODO eme-free should be a partner; we shouldn't care about per-kind
+ for k in partner_configs:
+ if kind.startswith(k):
+ kind_config = partner_configs[k]
+ break
+ else:
+ return {}
+ # if we're only interested in a subset of partners we remove the rest
+ if partner_subset:
+ if kind.startswith("release-partner-repack"):
+ # TODO - should be fatal to have an unknown partner in partner_subset
+ for partner in [p for p in kind_config.keys() if p not in partner_subset]:
+ del kind_config[partner]
+ elif kind.startswith("release-partner-attribution") and isinstance(
+ kind_config, dict
+ ):
+ all_configs = copy_task(kind_config.get("configs", []))
+ kind_config["configs"] = []
+ for this_config in all_configs:
+ if this_config["campaign"] in partner_subset:
+ kind_config["configs"].append(this_config)
+ return kind_config
+
+
+def _fix_subpartner_locales(orig_config, all_locales):
+ subpartner_config = copy_task(orig_config)
+ # Get an ordered list of subpartner locales that is a subset of all_locales
+ subpartner_config["locales"] = sorted(
+ list(set(orig_config["locales"]) & set(all_locales))
+ )
+ return subpartner_config
+
+
+def fix_partner_config(orig_config):
+ pc = {}
+ with open(LOCALES_FILE) as fh:
+ all_locales = list(json.load(fh).keys())
+ # l10n-changesets.json doesn't include en-US, but the repack list does
+ if "en-US" not in all_locales:
+ all_locales.append("en-US")
+ for kind, kind_config in orig_config.items():
+ if kind == "release-partner-attribution":
+ pc[kind] = {}
+ if kind_config:
+ pc[kind] = {"defaults": kind_config["defaults"]}
+ for config in kind_config["configs"]:
+ # Make sure our locale list is a subset of all_locales
+ pc[kind].setdefault("configs", []).append(
+ _fix_subpartner_locales(config, all_locales)
+ )
+ else:
+ for partner, partner_config in kind_config.items():
+ for subpartner, subpartner_config in partner_config.items():
+ # get rid of empty subpartner configs
+ if not subpartner_config:
+ continue
+ # Make sure our locale list is a subset of all_locales
+ pc.setdefault(kind, {}).setdefault(partner, {})[
+ subpartner
+ ] = _fix_subpartner_locales(subpartner_config, all_locales)
+ return pc
+
+
+# seems likely this exists elsewhere already
+def get_ftp_platform(platform):
+ if platform.startswith("win32"):
+ return "win32"
+ if platform.startswith("win64-aarch64"):
+ return "win64-aarch64"
+ if platform.startswith("win64"):
+ return "win64"
+ if platform.startswith("macosx"):
+ return "mac"
+ if platform.startswith("linux-"):
+ return "linux-i686"
+ if platform.startswith("linux64"):
+ return "linux-x86_64"
+ raise ValueError(f"Unimplemented platform {platform}")
+
+
+# Ugh
+def locales_per_build_platform(build_platform, locales):
+ if build_platform.startswith("mac"):
+ exclude = ["ja"]
+ else:
+ exclude = ["ja-JP-mac"]
+ return [locale for locale in locales if locale not in exclude]
+
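+# e.g. locales_per_build_platform("macosx64-shippable", ["de", "ja", "ja-JP-mac"])
+# returns ["de", "ja-JP-mac"]; any other platform drops "ja-JP-mac" instead.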
+
+def get_partner_url_config(parameters, graph_config):
+ partner_url_config = copy_task(graph_config["partner-urls"])
+ substitutions = {
+ "release-product": parameters["release_product"],
+ "release-level": release_level(parameters["project"]),
+ "release-type": parameters["release_type"],
+ }
+ resolve_keyed_by(
+ partner_url_config,
+ "release-eme-free-repack",
+ "eme-free manifest_url",
+ **substitutions,
+ )
+ resolve_keyed_by(
+ partner_url_config,
+ "release-partner-repack",
+ "partner manifest url",
+ **substitutions,
+ )
+ resolve_keyed_by(
+ partner_url_config,
+ "release-partner-attribution",
+ "partner attribution url",
+ **substitutions,
+ )
+ return partner_url_config
+
+
+def get_repack_ids_by_platform(config, build_platform):
+ partner_config = get_partner_config_by_kind(config, config.kind)
+ combinations = []
+ for partner, subconfigs in partner_config.items():
+ for sub_config_name, sub_config in subconfigs.items():
+ if build_platform not in sub_config.get("platforms", []):
+ continue
+ locales = locales_per_build_platform(
+ build_platform, sub_config.get("locales", [])
+ )
+ for locale in locales:
+ combinations.append(f"{partner}/{sub_config_name}/{locale}")
+ return sorted(combinations)
+
+
+def get_partners_to_be_published(config):
+ # hardcoded kind because release-bouncer-aliases doesn't match otherwise
+ partner_config = get_partner_config_by_kind(config, "release-partner-repack")
+ partners = []
+ for partner, subconfigs in partner_config.items():
+ for sub_config_name, sub_config in subconfigs.items():
+ if sub_config.get("publish_to_releases"):
+ partners.append((partner, sub_config_name, sub_config["platforms"]))
+ return partners
+
+
+def apply_partner_priority(config, jobs):
+ priority = None
+ # Reduce the priority of the partner repack jobs because they don't block QE. Meanwhile
+ # leave EME-free jobs alone because they do, and they'll get the branch priority like the rest
+ # of the release. Only bother with this in production, not on staging releases on try.
+    # "medium" is the same priority as mozilla-central (see taskcluster/ci/config.yml), i.e.
+    # higher than integration branches, because we don't want to wait long for the graph to be
+    # done, but when multiple releases are in flight the partner tasks always wait for non-partner.
+ if (
+ config.kind.startswith(
+ ("release-partner-repack", "release-partner-attribution")
+ )
+ and release_level(config.params["project"]) == "production"
+ ):
+ priority = "medium"
+ for job in jobs:
+ if priority:
+ job["priority"] = priority
+ yield job
+
+
+def generate_attribution_code(defaults, partner):
+ params = {
+ "medium": defaults["medium"],
+ "source": defaults["source"],
+ "campaign": partner["campaign"],
+ "content": partner["content"],
+ }
+ if partner.get("variation"):
+ params["variation"] = partner["variation"]
+ if partner.get("experiment"):
+ params["experiment"] = partner["experiment"]
+
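+    # e.g. (hypothetical partner values) {"medium": "distribution", "source": "mozilla",
+    # "campaign": "acme", "content": "acme-001"} urlencodes to
+    # "medium=distribution&source=mozilla&campaign=acme&content=acme-001"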
+ code = urlencode(params)
+ return code
diff --git a/taskcluster/gecko_taskgraph/util/perfile.py b/taskcluster/gecko_taskgraph/util/perfile.py
new file mode 100644
index 0000000000..4e82d87dad
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/perfile.py
@@ -0,0 +1,104 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import itertools
+import json
+import logging
+import math
+
+import taskgraph
+from mozbuild.util import memoize
+from mozpack.path import match as mozpackmatch
+
+from gecko_taskgraph import files_changed
+
+from .. import GECKO
+
+logger = logging.getLogger(__name__)
+
+
+@memoize
+def perfile_number_of_chunks(is_try, try_task_config, head_repository, head_rev, type):
+ if taskgraph.fast and not is_try:
+ # When iterating on taskgraph changes, the exact number of chunks that
+ # test-verify runs usually isn't important, so skip it when going fast.
+ return 3
+ tests_per_chunk = 10.0
+ if type.startswith("test-coverage"):
+ tests_per_chunk = 30.0
+
+ if type.startswith("test-verify-wpt") or type.startswith("test-coverage-wpt"):
+ file_patterns = [
+ "testing/web-platform/tests/**",
+ "testing/web-platform/mozilla/tests/**",
+ ]
+ elif type.startswith("test-verify-gpu") or type.startswith("test-coverage-gpu"):
+ file_patterns = [
+ "**/*webgl*/**/test_*",
+ "**/dom/canvas/**/test_*",
+ "**/gfx/tests/**/test_*",
+ "**/devtools/canvasdebugger/**/browser_*",
+ "**/reftest*/**",
+ ]
+ elif type.startswith("test-verify") or type.startswith("test-coverage"):
+ file_patterns = [
+ "**/test_*",
+ "**/browser_*",
+ "**/crashtest*/**",
+ "js/src/tests/test/**",
+ "js/src/tests/non262/**",
+ "js/src/tests/test262/**",
+ ]
+ else:
+        # Not a per-file test-verify/coverage task; there are no matching files to
+        # count, so run it as a single chunk.
+        return 1
+
+ changed_files = set()
+ if try_task_config:
+ suite_to_paths = json.loads(try_task_config)
+ specified_files = itertools.chain.from_iterable(suite_to_paths.values())
+ changed_files.update(specified_files)
+
+ if is_try:
+ changed_files.update(files_changed.get_locally_changed_files(GECKO))
+ else:
+ changed_files.update(files_changed.get_changed_files(head_repository, head_rev))
+
+ test_count = 0
+ for pattern in file_patterns:
+ for path in changed_files:
+ # TODO: consider running tests if a manifest changes
+ if path.endswith(".list") or path.endswith(".ini"):
+ continue
+ if path.endswith("^headers^"):
+ continue
+
+ if mozpackmatch(path, pattern):
+ gpu = False
+ if type == "test-verify-e10s" or type == "test-coverage-e10s":
+                    # file_patterns for test-verify will pick up some gpu tests, so ignore
+                    # them here; in the case of reftest we will not have any in the regular case
+ gpu_dirs = [
+ "dom/canvas",
+ "gfx/tests",
+ "devtools/canvasdebugger",
+ "webgl",
+ ]
+ for gdir in gpu_dirs:
+ if len(path.split(gdir)) > 1:
+ gpu = True
+
+ if not gpu:
+ test_count += 1
+
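+    # e.g. 25 matching changed test files at 10.0 tests per chunk
+    # -> ceil(25 / 10.0) = 3 chunks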
+ chunks = test_count / tests_per_chunk
+ chunks = int(math.ceil(chunks))
+
+ # Never return 0 chunks on try, so that per-file tests can be pushed to try with
+ # an explicit path, and also so "empty" runs can be checked on try.
+ if is_try and chunks == 0:
+ chunks = 1
+
+ return chunks
diff --git a/taskcluster/gecko_taskgraph/util/platforms.py b/taskcluster/gecko_taskgraph/util/platforms.py
new file mode 100644
index 0000000000..2c423223fe
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/platforms.py
@@ -0,0 +1,58 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import re
+
+from taskgraph.util.attributes import keymatch
+
+# platform family is extracted from build platform by taking the alphabetic prefix
+# and then translating win -> windows
+_platform_re = re.compile(r"^[a-z]*")
+_renames = {"win": "windows"}
+
+
+_archive_formats = {
+ "linux": ".tar.bz2",
+ "macosx": ".tar.gz",
+ "windows": ".zip",
+}
+
+_executable_extension = {
+ "linux": "",
+ "macosx": "",
+ "windows": ".exe",
+}
+
+_architectures = {
+ r"linux\b.*": "x86",
+ r"linux64\b.*": "x86_64",
+ r"macosx64\b.*": "macos-x86_64-aarch64",
+ r"win32\b.*": "x86",
+ r"win64\b(?!-aarch64).*": "x86_64",
+ r"win64-aarch64\b.*": "aarch64",
+}
+
+
+def platform_family(build_platform):
+ """Given a build platform, return the platform family (linux, macosx, etc.)"""
+ family = _platform_re.match(build_platform).group(0)
+ return _renames.get(family, family)
+
+
+def archive_format(build_platform):
+ """Given a build platform, return the archive format used on the platform."""
+ return _archive_formats[platform_family(build_platform)]
+
+
+def executable_extension(build_platform):
+ """Given a build platform, return the executable extension used on the platform."""
+ return _executable_extension[platform_family(build_platform)]
+
+
+def architecture(build_platform):
+ matches = keymatch(_architectures, build_platform)
+ if len(matches) == 1:
+ return matches[0]
+ raise Exception(f"Could not determine architecture of platform `{build_platform}`.")
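+
+
+# Illustrative results (assuming a hypothetical build platform string):
+#   platform_family("win64-aarch64-shippable") -> "windows"
+#   archive_format("win64-aarch64-shippable") -> ".zip"
+#   architecture("win64-aarch64-shippable") -> "aarch64"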
diff --git a/taskcluster/gecko_taskgraph/util/scriptworker.py b/taskcluster/gecko_taskgraph/util/scriptworker.py
new file mode 100644
index 0000000000..0d2e4b805b
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/scriptworker.py
@@ -0,0 +1,865 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""Make scriptworker.cot.verify more user friendly by making scopes dynamic.
+
+Scriptworker uses certain scopes to determine which sets of credentials to use.
+Certain scopes are restricted by branch in chain of trust verification, and are
+checked again at the script level. This file provides functions to adjust
+these scopes automatically by project; this makes pushing to try, forking a
+project branch, and merge day uplifts more user friendly.
+
+In the future, we may adjust scopes by other settings as well, e.g. different
+scopes for `push-to-candidates` rather than `push-to-releases`, even if both
+happen on mozilla-beta and mozilla-release.
+
+Additional configuration is found in the :ref:`graph config <taskgraph-graph-config>`.
+"""
+import functools
+import itertools
+import json
+import os
+from datetime import datetime
+
+import jsone
+from mozbuild.util import memoize
+from taskgraph.util.schema import resolve_keyed_by
+from taskgraph.util.taskcluster import get_artifact_prefix
+from taskgraph.util.yaml import load_yaml
+
+from gecko_taskgraph.util.copy_task import copy_task
+
+# constants {{{1
+"""Map signing scope aliases to sets of projects.
+
+Currently m-c and DevEdition on m-b use nightly signing; Beta on m-b and m-r
+use release signing. These data structures aren't set up to handle different
+scopes on the same repo, so we use a different set of them for DevEdition, and
+callers are responsible for using the correct one (by calling the appropriate
+helper below). More context on this in https://bugzilla.mozilla.org/show_bug.cgi?id=1358601.
+
+We will need to add esr support at some point. Eventually we want to add
+nuance so certain m-b and m-r tasks use dep or nightly signing, and we only
+release sign when we have a signed-off set of candidate builds. This current
+approach works for now, though.
+
+This is a list of list-pairs, for ordering.
+"""
+SIGNING_SCOPE_ALIAS_TO_PROJECT = [
+ [
+ "all-nightly-branches",
+ {
+ "mozilla-central",
+ "comm-central",
+ # bug 1845368: pine is a permanent project branch used for testing
+ # nightly updates
+ "pine",
+ # bug 1877483: larch has similar needs for nightlies
+ "larch",
+ },
+ ],
+ [
+ "all-release-branches",
+ {
+ "mozilla-beta",
+ "mozilla-release",
+ "mozilla-esr115",
+ "comm-beta",
+ "comm-release",
+ "comm-esr115",
+ },
+ ],
+]
+
+"""Map the signing scope aliases to the actual scopes.
+"""
+SIGNING_CERT_SCOPES = {
+ "all-release-branches": "signing:cert:release-signing",
+ "all-nightly-branches": "signing:cert:nightly-signing",
+ "default": "signing:cert:dep-signing",
+}
+
+DEVEDITION_SIGNING_SCOPE_ALIAS_TO_PROJECT = [
+ [
+ "beta",
+ {
+ "mozilla-beta",
+ },
+ ]
+]
+
+DEVEDITION_SIGNING_CERT_SCOPES = {
+ "beta": "signing:cert:nightly-signing",
+ "default": "signing:cert:dep-signing",
+}
+
+"""Map beetmover scope aliases to sets of projects.
+"""
+BEETMOVER_SCOPE_ALIAS_TO_PROJECT = [
+ [
+ "all-nightly-branches",
+ {
+ "mozilla-central",
+ "comm-central",
+ # bug 1845368: pine is a permanent project branch used for testing
+ # nightly updates
+ "pine",
+ # bug 1877483: larch has similar needs for nightlies
+ "larch",
+ },
+ ],
+ [
+ "all-release-branches",
+ {
+ "mozilla-beta",
+ "mozilla-release",
+ "mozilla-esr115",
+ "comm-beta",
+ "comm-release",
+ "comm-esr115",
+ },
+ ],
+]
+
+"""Map the beetmover scope aliases to the actual scopes.
+"""
+BEETMOVER_BUCKET_SCOPES = {
+ "all-release-branches": "beetmover:bucket:release",
+ "all-nightly-branches": "beetmover:bucket:nightly",
+ "default": "beetmover:bucket:dep",
+}
+
+"""Map the beetmover scope aliases to the actual scopes.
+These are the scopes needed to import artifacts into the product delivery APT repos.
+"""
+BEETMOVER_APT_REPO_SCOPES = {
+ "all-release-branches": "beetmover:apt-repo:release",
+ "all-nightly-branches": "beetmover:apt-repo:nightly",
+ "default": "beetmover:apt-repo:dep",
+}
+
+"""Map the beetmover tasks aliases to the actual action scopes.
+"""
+BEETMOVER_ACTION_SCOPES = {
+ "nightly": "beetmover:action:push-to-nightly",
+ # bug 1845368: pine is a permanent project branch used for testing
+ # nightly updates
+ "nightly-pine": "beetmover:action:push-to-nightly",
+ # bug 1877483: larch has similar needs for nightlies
+ "nightly-larch": "beetmover:action:push-to-nightly",
+ "default": "beetmover:action:push-to-candidates",
+}
+
+"""Map the beetmover tasks aliases to the actual action scopes.
+The action scopes are generic across different repo types.
+"""
+BEETMOVER_REPO_ACTION_SCOPES = {
+ "default": "beetmover:action:import-from-gcs-to-artifact-registry",
+}
+
+"""Known balrog actions."""
+BALROG_ACTIONS = (
+ "submit-locale",
+ "submit-toplevel",
+ "schedule",
+ "v2-submit-locale",
+ "v2-submit-toplevel",
+)
+
+"""Map balrog scope aliases to sets of projects.
+
+This is a list of list-pairs, for ordering.
+"""
+BALROG_SCOPE_ALIAS_TO_PROJECT = [
+ [
+ "nightly",
+ {
+ "mozilla-central",
+ "comm-central",
+ # bug 1845368: pine is a permanent project branch used for testing
+ # nightly updates
+ "pine",
+ # bug 1877483: larch has similar needs for nightlies
+ "larch",
+ },
+ ],
+ [
+ "beta",
+ {
+ "mozilla-beta",
+ "comm-beta",
+ },
+ ],
+ [
+ "release",
+ {
+ "mozilla-release",
+ "comm-release",
+ },
+ ],
+ [
+ "esr115",
+ {
+ "mozilla-esr115",
+ "comm-esr115",
+ },
+ ],
+]
+
+"""Map the balrog scope aliases to the actual scopes.
+"""
+BALROG_SERVER_SCOPES = {
+ "nightly": "balrog:server:nightly",
+ "aurora": "balrog:server:aurora",
+ "beta": "balrog:server:beta",
+ "release": "balrog:server:release",
+ "esr115": "balrog:server:esr",
+ "default": "balrog:server:dep",
+}
+
+
+""" The list of the release promotion phases which we send notifications for
+"""
+RELEASE_NOTIFICATION_PHASES = ("promote", "push", "ship")
+
+
+def add_scope_prefix(config, scope):
+ """
+ Prepends the scriptworker scope prefix from the :ref:`graph config
+ <taskgraph-graph-config>`.
+
+ Args:
+ config (TransformConfig): The configuration for the kind being transformed.
+ scope (string): The suffix of the scope
+
+ Returns:
+ string: the scope to use.
+ """
+ return "{prefix}:{scope}".format(
+ prefix=config.graph_config["scriptworker"]["scope-prefix"],
+ scope=scope,
+ )
+
+
+def with_scope_prefix(f):
+ """
+ Wraps a function, calling :py:func:`add_scope_prefix` on the result of
+ calling the wrapped function.
+
+ Args:
+ f (callable): A function that takes a ``config`` and some keyword
+ arguments, and returns a scope suffix.
+
+ Returns:
+ callable: the wrapped function
+ """
+
+ @functools.wraps(f)
+ def wrapper(config, **kwargs):
+ scope_or_scopes = f(config, **kwargs)
+ if isinstance(scope_or_scopes, list):
+ return map(functools.partial(add_scope_prefix, config), scope_or_scopes)
+ return add_scope_prefix(config, scope_or_scopes)
+
+ return wrapper
+
+
+# scope functions {{{1
+@with_scope_prefix
+def get_scope_from_project(config, alias_to_project_map, alias_to_scope_map):
+ """Determine the restricted scope from `config.params['project']`.
+
+ Args:
+ config (TransformConfig): The configuration for the kind being transformed.
+ alias_to_project_map (list of lists): each list pair contains the
+ alias and the set of projects that match. This is ordered.
+        alias_to_scope_map (dict): maps the alias to a scope
+
+ Returns:
+ string: the scope to use.
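+
+        For example, with `project` set to "mozilla-beta" and the signing maps
+        above, the alias "all-release-branches" matches and the unprefixed
+        scope is "signing:cert:release-signing"; `with_scope_prefix` then
+        prepends the graph-config scope prefix.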
+ """
+ for alias, projects in alias_to_project_map:
+ if config.params["project"] in projects and alias in alias_to_scope_map:
+ return alias_to_scope_map[alias]
+ return alias_to_scope_map["default"]
+
+
+@with_scope_prefix
+def get_scope_from_release_type(config, release_type_to_scope_map):
+ """Determine the restricted scope from `config.params['target_tasks_method']`.
+
+ Args:
+ config (TransformConfig): The configuration for the kind being transformed.
+        release_type_to_scope_map (dict): maps release types to scopes
+
+ Returns:
+ string: the scope to use.
+ """
+ return release_type_to_scope_map.get(
+ config.params["release_type"], release_type_to_scope_map["default"]
+ )
+
+
+def get_phase_from_target_method(config, alias_to_tasks_map, alias_to_phase_map):
+ """Determine the phase from `config.params['target_tasks_method']`.
+
+ Args:
+ config (TransformConfig): The configuration for the kind being transformed.
+ alias_to_tasks_map (list of lists): each list pair contains the
+ alias and the set of target methods that match. This is ordered.
+ alias_to_phase_map (dict): the alias to phase map
+
+ Returns:
+ string: the phase to use.
+ """
+ for alias, tasks in alias_to_tasks_map:
+ if (
+ config.params["target_tasks_method"] in tasks
+ and alias in alias_to_phase_map
+ ):
+ return alias_to_phase_map[alias]
+ return alias_to_phase_map["default"]
+
+
+get_signing_cert_scope = functools.partial(
+ get_scope_from_project,
+ alias_to_project_map=SIGNING_SCOPE_ALIAS_TO_PROJECT,
+ alias_to_scope_map=SIGNING_CERT_SCOPES,
+)
+
+get_devedition_signing_cert_scope = functools.partial(
+ get_scope_from_project,
+ alias_to_project_map=DEVEDITION_SIGNING_SCOPE_ALIAS_TO_PROJECT,
+ alias_to_scope_map=DEVEDITION_SIGNING_CERT_SCOPES,
+)
+
+get_beetmover_bucket_scope = functools.partial(
+ get_scope_from_project,
+ alias_to_project_map=BEETMOVER_SCOPE_ALIAS_TO_PROJECT,
+ alias_to_scope_map=BEETMOVER_BUCKET_SCOPES,
+)
+
+get_beetmover_apt_repo_scope = functools.partial(
+ get_scope_from_project,
+ alias_to_project_map=BEETMOVER_SCOPE_ALIAS_TO_PROJECT,
+ alias_to_scope_map=BEETMOVER_APT_REPO_SCOPES,
+)
+
+get_beetmover_repo_action_scope = functools.partial(
+ get_scope_from_release_type,
+ release_type_to_scope_map=BEETMOVER_REPO_ACTION_SCOPES,
+)
+
+get_beetmover_action_scope = functools.partial(
+ get_scope_from_release_type,
+ release_type_to_scope_map=BEETMOVER_ACTION_SCOPES,
+)
+
+get_balrog_server_scope = functools.partial(
+ get_scope_from_project,
+ alias_to_project_map=BALROG_SCOPE_ALIAS_TO_PROJECT,
+ alias_to_scope_map=BALROG_SERVER_SCOPES,
+)
+
+cached_load_yaml = memoize(load_yaml)
+
+
+# release_config {{{1
+def get_release_config(config):
+ """Get the build number and version for a release task.
+
+ Currently only applies to beetmover tasks.
+
+ Args:
+ config (TransformConfig): The configuration for the kind being transformed.
+
+ Returns:
+ dict: containing both `build_number` and `version`. This can be used to
+ update `task.payload`.
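+
+        For example, with `PARTIAL_UPDATES='{"123.0": {"buildNumber": 1}}'` in
+        the environment (hypothetical values), `partial_versions` is rendered
+        as "123.0build1".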
+ """
+ release_config = {}
+
+ partial_updates = os.environ.get("PARTIAL_UPDATES", "")
+ if partial_updates != "" and config.kind in (
+ "release-bouncer-sub",
+ "release-bouncer-check",
+ "release-update-verify-config",
+ "release-secondary-update-verify-config",
+ "release-balrog-submit-toplevel",
+ "release-secondary-balrog-submit-toplevel",
+ ):
+ partial_updates = json.loads(partial_updates)
+ release_config["partial_versions"] = ", ".join(
+ [
+ "{}build{}".format(v, info["buildNumber"])
+ for v, info in partial_updates.items()
+ ]
+ )
+ if release_config["partial_versions"] == "{}":
+ del release_config["partial_versions"]
+
+ release_config["version"] = config.params["version"]
+ release_config["appVersion"] = config.params["app_version"]
+
+ release_config["next_version"] = config.params["next_version"]
+ release_config["build_number"] = config.params["build_number"]
+ return release_config
+
+
+def get_signing_cert_scope_per_platform(build_platform, is_shippable, config):
+ if "devedition" in build_platform:
+ return get_devedition_signing_cert_scope(config)
+ if is_shippable:
+ return get_signing_cert_scope(config)
+ return add_scope_prefix(config, "signing:cert:dep-signing")
+
+
+# generate_beetmover_upstream_artifacts {{{1
+def generate_beetmover_upstream_artifacts(
+ config, job, platform, locale=None, dependencies=None, **kwargs
+):
+ """Generate the upstream artifacts for beetmover, using the artifact map.
+
+ Currently only applies to beetmover tasks.
+
+ Args:
+ job (dict): The current job being generated
+ dependencies (list): A list of the job's dependency labels.
+ platform (str): The current build platform
+ locale (str): The current locale being beetmoved.
+
+ Returns:
+ list: A list of dictionaries conforming to the upstream_artifacts spec.
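+
+        A single entry looks roughly like (dependency label, task type and
+        paths are illustrative):
+
+            {
+                "taskId": {"task-reference": "<build-signing>"},
+                "taskType": "signing",
+                "paths": ["public/build/en-US/target.tar.gz"],
+                "locale": "en-US",
+            }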
+ """
+ base_artifact_prefix = get_artifact_prefix(job)
+ resolve_keyed_by(
+ job,
+ "attributes.artifact_map",
+ "artifact map",
+ **{
+ "release-type": config.params["release_type"],
+ "platform": platform,
+ },
+ )
+ map_config = copy_task(cached_load_yaml(job["attributes"]["artifact_map"]))
+ upstream_artifacts = list()
+
+ if not locale:
+ locales = map_config["default_locales"]
+ elif isinstance(locale, list):
+ locales = locale
+ else:
+ locales = [locale]
+
+ if not dependencies:
+ if job.get("dependencies"):
+ dependencies = job["dependencies"].keys()
+ else:
+            raise Exception(f"No dependencies provided or found on the job. Got job: {job}")
+
+ for locale, dep in itertools.product(locales, dependencies):
+ paths = list()
+
+ for filename in map_config["mapping"]:
+ resolve_keyed_by(
+ map_config["mapping"][filename],
+ "from",
+ f"beetmover filename {filename}",
+ platform=platform,
+ )
+ if dep not in map_config["mapping"][filename]["from"]:
+ continue
+ if locale != "en-US" and not map_config["mapping"][filename]["all_locales"]:
+ continue
+ if (
+ "only_for_platforms" in map_config["mapping"][filename]
+ and platform
+ not in map_config["mapping"][filename]["only_for_platforms"]
+ ):
+ continue
+ if (
+ "not_for_platforms" in map_config["mapping"][filename]
+ and platform in map_config["mapping"][filename]["not_for_platforms"]
+ ):
+ continue
+ if "partials_only" in map_config["mapping"][filename]:
+ continue
+ # The next time we look at this file it might be a different locale.
+ file_config = copy_task(map_config["mapping"][filename])
+ resolve_keyed_by(
+ file_config,
+ "source_path_modifier",
+ "source path modifier",
+ locale=locale,
+ )
+
+ kwargs["locale"] = locale
+
+ paths.append(
+ os.path.join(
+ base_artifact_prefix,
+ jsone.render(file_config["source_path_modifier"], kwargs),
+ jsone.render(filename, kwargs),
+ )
+ )
+
+ if (
+ job.get("dependencies")
+ and getattr(job["dependencies"][dep], "attributes", None)
+ and job["dependencies"][dep].attributes.get("release_artifacts")
+ ):
+ paths = [
+ path
+ for path in paths
+ if path in job["dependencies"][dep].attributes["release_artifacts"]
+ ]
+
+ if not paths:
+ continue
+
+ upstream_artifacts.append(
+ {
+ "taskId": {"task-reference": f"<{dep}>"},
+ "taskType": map_config["tasktype_map"].get(dep),
+ "paths": sorted(paths),
+ "locale": locale,
+ }
+ )
+
+ upstream_artifacts.sort(key=lambda u: u["paths"])
+ return upstream_artifacts
+
+
+def generate_artifact_registry_gcs_sources(dep):
+ gcs_sources = []
+ locale = dep.attributes.get("locale")
+ if not locale:
+ repackage_deb_reference = "<repackage-deb>"
+ repackage_deb_artifact = "public/build/target.deb"
+ else:
+ repackage_deb_reference = "<repackage-deb-l10n>"
+ repackage_deb_artifact = f"public/build/{locale}/target.langpack.deb"
+ for config in dep.task["payload"]["artifactMap"]:
+ if (
+ config["taskId"]["task-reference"] == repackage_deb_reference
+ and repackage_deb_artifact in config["paths"]
+ ):
+ gcs_sources.append(
+ config["paths"][repackage_deb_artifact]["destinations"][0]
+ )
+ return gcs_sources
+
+
+# generate_beetmover_artifact_map {{{1
+def generate_beetmover_artifact_map(config, job, **kwargs):
+ """Generate the beetmover artifact map.
+
+ Currently only applies to beetmover tasks.
+
+ Args:
+ config (): Current taskgraph configuration.
+ job (dict): The current job being generated
+ Common kwargs:
+ platform (str): The current build platform
+ locale (str): The current locale being beetmoved.
+
+ Returns:
+ list: A list of dictionaries containing source->destination
+ maps for beetmover.
+ """
+ platform = kwargs.get("platform", "")
+ resolve_keyed_by(
+ job,
+ "attributes.artifact_map",
+ job["label"],
+ **{
+ "release-type": config.params["release_type"],
+ "platform": platform,
+ },
+ )
+ map_config = copy_task(cached_load_yaml(job["attributes"]["artifact_map"]))
+ base_artifact_prefix = map_config.get(
+ "base_artifact_prefix", get_artifact_prefix(job)
+ )
+
+ artifacts = list()
+
+ dependencies = job["dependencies"].keys()
+
+ if kwargs.get("locale"):
+ if isinstance(kwargs["locale"], list):
+ locales = kwargs["locale"]
+ else:
+ locales = [kwargs["locale"]]
+ else:
+ locales = map_config["default_locales"]
+
+ resolve_keyed_by(map_config, "s3_bucket_paths", job["label"], platform=platform)
+
+ for locale, dep in sorted(itertools.product(locales, dependencies)):
+ paths = dict()
+ for filename in map_config["mapping"]:
+ # Relevancy checks
+ resolve_keyed_by(
+ map_config["mapping"][filename], "from", "blah", platform=platform
+ )
+ if dep not in map_config["mapping"][filename]["from"]:
+ # We don't get this file from this dependency.
+ continue
+ if locale != "en-US" and not map_config["mapping"][filename]["all_locales"]:
+ # This locale either doesn't produce or shouldn't upload this file.
+ continue
+ if (
+ "only_for_platforms" in map_config["mapping"][filename]
+ and platform
+ not in map_config["mapping"][filename]["only_for_platforms"]
+ ):
+ # This platform either doesn't produce or shouldn't upload this file.
+ continue
+ if (
+ "not_for_platforms" in map_config["mapping"][filename]
+ and platform in map_config["mapping"][filename]["not_for_platforms"]
+ ):
+ # This platform either doesn't produce or shouldn't upload this file.
+ continue
+ if "partials_only" in map_config["mapping"][filename]:
+ continue
+
+ # copy_task because the next time we look at this file the locale will differ.
+ file_config = copy_task(map_config["mapping"][filename])
+
+ for field in [
+ "destinations",
+ "locale_prefix",
+ "source_path_modifier",
+ "update_balrog_manifest",
+ "pretty_name",
+ "checksums_path",
+ ]:
+ resolve_keyed_by(
+ file_config, field, job["label"], locale=locale, platform=platform
+ )
+
+ # This format string should ideally be in the configuration file,
+ # but this would mean keeping variable names in sync between code + config.
+ destinations = [
+ "{s3_bucket_path}/{dest_path}/{locale_prefix}{filename}".format(
+ s3_bucket_path=bucket_path,
+ dest_path=dest_path,
+ locale_prefix=file_config["locale_prefix"],
+ filename=file_config.get("pretty_name", filename),
+ )
+ for dest_path, bucket_path in itertools.product(
+ file_config["destinations"], map_config["s3_bucket_paths"]
+ )
+ ]
+ # Creating map entries
+ # Key must be artifact path, to avoid trampling duplicates, such
+ # as public/build/target.apk and public/build/en-US/target.apk
+ key = os.path.join(
+ base_artifact_prefix,
+ file_config["source_path_modifier"],
+ filename,
+ )
+
+ paths[key] = {
+ "destinations": destinations,
+ }
+ if file_config.get("checksums_path"):
+ paths[key]["checksums_path"] = file_config["checksums_path"]
+
+ # optional flag: balrog manifest
+ if file_config.get("update_balrog_manifest"):
+ paths[key]["update_balrog_manifest"] = True
+ if file_config.get("balrog_format"):
+ paths[key]["balrog_format"] = file_config["balrog_format"]
+
+ if not paths:
+ # No files for this dependency/locale combination.
+ continue
+
+ # Render all variables for the artifact map
+ platforms = copy_task(map_config.get("platform_names", {}))
+ if platform:
+ for key in platforms.keys():
+ resolve_keyed_by(platforms, key, job["label"], platform=platform)
+
+ upload_date = datetime.fromtimestamp(config.params["build_date"])
+
+ kwargs.update(
+ {
+ "locale": locale,
+ "version": config.params["version"],
+ "branch": config.params["project"],
+ "build_number": config.params["build_number"],
+ "year": upload_date.year,
+ "month": upload_date.strftime("%m"), # zero-pad the month
+ "upload_date": upload_date.strftime("%Y-%m-%d-%H-%M-%S"),
+ }
+ )
+ kwargs.update(**platforms)
+ paths = jsone.render(paths, kwargs)
+ artifacts.append(
+ {
+ "taskId": {"task-reference": f"<{dep}>"},
+ "locale": locale,
+ "paths": paths,
+ }
+ )
+
+ return artifacts
+
+
+# generate_beetmover_partials_artifact_map {{{1
+def generate_beetmover_partials_artifact_map(config, job, partials_info, **kwargs):
+ """Generate the beetmover partials artifact map.
+
+ Currently only applies to beetmover tasks.
+
+ Args:
+ config (): Current taskgraph configuration.
+ job (dict): The current job being generated
+ partials_info (dict): Current partials and information about them in a dict
+ Common kwargs:
+ platform (str): The current build platform
+ locale (str): The current locale being beetmoved.
+
+ Returns:
+ list: A list of dictionaries containing source->destination
+ maps for beetmover.
+ """
+ platform = kwargs.get("platform", "")
+ resolve_keyed_by(
+ job,
+ "attributes.artifact_map",
+ "artifact map",
+ **{
+ "release-type": config.params["release_type"],
+ "platform": platform,
+ },
+ )
+ map_config = copy_task(cached_load_yaml(job["attributes"]["artifact_map"]))
+ base_artifact_prefix = map_config.get(
+ "base_artifact_prefix", get_artifact_prefix(job)
+ )
+
+ artifacts = list()
+ dependencies = job["dependencies"].keys()
+
+ if kwargs.get("locale"):
+ locales = [kwargs["locale"]]
+ else:
+ locales = map_config["default_locales"]
+
+ resolve_keyed_by(
+ map_config, "s3_bucket_paths", "s3_bucket_paths", platform=platform
+ )
+
+ platforms = copy_task(map_config.get("platform_names", {}))
+ if platform:
+ for key in platforms.keys():
+ resolve_keyed_by(platforms, key, key, platform=platform)
+ upload_date = datetime.fromtimestamp(config.params["build_date"])
+
+ for locale, dep in itertools.product(locales, dependencies):
+ paths = dict()
+ for filename in map_config["mapping"]:
+ # Relevancy checks
+ if dep not in map_config["mapping"][filename]["from"]:
+ # We don't get this file from this dependency.
+ continue
+ if locale != "en-US" and not map_config["mapping"][filename]["all_locales"]:
+ # This locale either doesn't produce or shouldn't upload this file.
+ continue
+ if "partials_only" not in map_config["mapping"][filename]:
+ continue
+ # copy_task because the next time we look at this file the locale will differ.
+ file_config = copy_task(map_config["mapping"][filename])
+
+ for field in [
+ "destinations",
+ "locale_prefix",
+ "source_path_modifier",
+ "update_balrog_manifest",
+ "from_buildid",
+ "pretty_name",
+ "checksums_path",
+ ]:
+ resolve_keyed_by(
+ file_config, field, field, locale=locale, platform=platform
+ )
+
+ # This format string should ideally be in the configuration file,
+ # but this would mean keeping variable names in sync between code + config.
+ destinations = [
+ "{s3_bucket_path}/{dest_path}/{locale_prefix}{filename}".format(
+ s3_bucket_path=bucket_path,
+ dest_path=dest_path,
+ locale_prefix=file_config["locale_prefix"],
+ filename=file_config.get("pretty_name", filename),
+ )
+ for dest_path, bucket_path in itertools.product(
+ file_config["destinations"], map_config["s3_bucket_paths"]
+ )
+ ]
+ # Creating map entries
+ # Key must be artifact path, to avoid trampling duplicates, such
+ # as public/build/target.apk and public/build/en-US/target.apk
+ key = os.path.join(
+ base_artifact_prefix,
+ file_config["source_path_modifier"],
+ filename,
+ )
+ partials_paths = {}
+ for pname, info in partials_info.items():
+ partials_paths[key] = {
+ "destinations": destinations,
+ }
+ if file_config.get("checksums_path"):
+ partials_paths[key]["checksums_path"] = file_config[
+ "checksums_path"
+ ]
+
+ # optional flag: balrog manifest
+ if file_config.get("update_balrog_manifest"):
+ partials_paths[key]["update_balrog_manifest"] = True
+ if file_config.get("balrog_format"):
+ partials_paths[key]["balrog_format"] = file_config[
+ "balrog_format"
+ ]
+ # optional flag: from_buildid
+ if file_config.get("from_buildid"):
+ partials_paths[key]["from_buildid"] = file_config["from_buildid"]
+
+ # render buildid
+ kwargs.update(
+ {
+ "partial": pname,
+ "from_buildid": info["buildid"],
+ "previous_version": info.get("previousVersion"),
+ "buildid": str(config.params["moz_build_date"]),
+ "locale": locale,
+ "version": config.params["version"],
+ "branch": config.params["project"],
+ "build_number": config.params["build_number"],
+ "year": upload_date.year,
+ "month": upload_date.strftime("%m"), # zero-pad the month
+ "upload_date": upload_date.strftime("%Y-%m-%d-%H-%M-%S"),
+ }
+ )
+ kwargs.update(**platforms)
+ paths.update(jsone.render(partials_paths, kwargs))
+
+ if not paths:
+ continue
+
+ artifacts.append(
+ {
+ "taskId": {"task-reference": f"<{dep}>"},
+ "locale": locale,
+ "paths": paths,
+ }
+ )
+
+ artifacts.sort(key=lambda a: sorted(a["paths"].items()))
+ return artifacts
diff --git a/taskcluster/gecko_taskgraph/util/signed_artifacts.py b/taskcluster/gecko_taskgraph/util/signed_artifacts.py
new file mode 100644
index 0000000000..2467ff8046
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/signed_artifacts.py
@@ -0,0 +1,198 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Defines artifacts to sign before repackage.
+"""
+
+from taskgraph.util.taskcluster import get_artifact_path
+
+from gecko_taskgraph.util.declarative_artifacts import get_geckoview_upstream_artifacts
+
+LANGPACK_SIGN_PLATFORMS = { # set
+ "linux64-shippable",
+ "linux64-devedition",
+ "macosx64-shippable",
+ "macosx64-devedition",
+}
+
+
+def is_partner_kind(kind):
+ if kind and kind.startswith(("release-partner", "release-eme-free")):
+ return True
+
+
+def is_notarization_kind(kind):
+ if kind and "notarization" in kind:
+ return True
+
+
+def is_mac_signing_king(kind):
+ return kind and "mac-signing" in kind
+
+
+def generate_specifications_of_artifacts_to_sign(
+ config, job, keep_locale_template=True, kind=None, dep_kind=None
+):
+ build_platform = job["attributes"].get("build_platform")
+ use_stub = job["attributes"].get("stub-installer")
+ # Get locales to know if we want to sign ja-JP-mac langpack
+ locales = job["attributes"].get("chunk_locales", [])
+ if kind == "release-source-signing":
+ artifacts_specifications = [
+ {
+ "artifacts": [get_artifact_path(job, "source.tar.xz")],
+ "formats": ["autograph_gpg"],
+ }
+ ]
+ elif "android" in build_platform:
+ artifacts_specifications = [
+ {
+ "artifacts": get_geckoview_artifacts_to_sign(config, job),
+ "formats": ["autograph_gpg"],
+ }
+ ]
+    # XXX: MARs aren't signed here (on any platform) because internals will be
+    # signed after this stage of the release
+ elif "macosx" in build_platform:
+ langpack_formats = []
+ if is_notarization_kind(config.kind):
+ formats = ["apple_notarization"]
+ artifacts_specifications = [
+ {
+ "artifacts": [
+ get_artifact_path(job, "{locale}/target.tar.gz"),
+ get_artifact_path(job, "{locale}/target.pkg"),
+ ],
+ "formats": formats,
+ }
+ ]
+ else:
+ # This task is mac-signing
+ if is_partner_kind(kind):
+ extension = "tar.gz"
+ else:
+ extension = "dmg"
+ artifacts_specifications = [
+ {
+ "artifacts": [
+ get_artifact_path(job, f"{{locale}}/target.{extension}")
+ ],
+ "formats": ["macapp", "autograph_widevine", "autograph_omnija"],
+ }
+ ]
+ langpack_formats = ["autograph_langpack"]
+
+ if "ja-JP-mac" in locales and build_platform in LANGPACK_SIGN_PLATFORMS:
+ artifacts_specifications += [
+ {
+ "artifacts": [
+ get_artifact_path(job, "ja-JP-mac/target.langpack.xpi")
+ ],
+ "formats": langpack_formats,
+ }
+ ]
+ elif "win" in build_platform:
+ artifacts_specifications = [
+ {
+ "artifacts": [
+ get_artifact_path(job, "{locale}/setup.exe"),
+ ],
+ "formats": ["autograph_authenticode_sha2"],
+ },
+ {
+ "artifacts": [
+ get_artifact_path(job, "{locale}/target.zip"),
+ ],
+ "formats": [
+ "autograph_authenticode_sha2",
+ "autograph_widevine",
+ "autograph_omnija",
+ ],
+ },
+ ]
+
+ if use_stub:
+ artifacts_specifications[0]["artifacts"] += [
+ get_artifact_path(job, "{locale}/setup-stub.exe")
+ ]
+ elif "linux" in build_platform:
+ artifacts_specifications = [
+ {
+ "artifacts": [get_artifact_path(job, "{locale}/target.tar.bz2")],
+ "formats": ["autograph_gpg", "autograph_widevine", "autograph_omnija"],
+ }
+ ]
+ if build_platform in LANGPACK_SIGN_PLATFORMS:
+ artifacts_specifications += [
+ {
+ "artifacts": [
+ get_artifact_path(job, "{locale}/target.langpack.xpi")
+ ],
+ "formats": ["autograph_langpack"],
+ }
+ ]
+ else:
+ raise Exception("Platform not implemented for signing")
+
+ if not keep_locale_template:
+ artifacts_specifications = _strip_locale_template(artifacts_specifications)
+
+ if is_partner_kind(kind):
+ artifacts_specifications = _strip_widevine_for_partners(
+ artifacts_specifications
+ )
+
+ return artifacts_specifications
+
+
+def _strip_locale_template(artifacts_without_locales):
+ for spec in artifacts_without_locales:
+ for index, artifact in enumerate(spec["artifacts"]):
+ stripped_artifact = artifact.format(locale="")
+ stripped_artifact = stripped_artifact.replace("//", "/")
+ spec["artifacts"][index] = stripped_artifact
+
+ return artifacts_without_locales
+
+
+def _strip_widevine_for_partners(artifacts_specifications):
+ """Partner repacks should not resign that's previously signed for fear of breaking partial
+ updates
+ """
+ for spec in artifacts_specifications:
+ if "autograph_widevine" in spec["formats"]:
+ spec["formats"].remove("autograph_widevine")
+ if "autograph_omnija" in spec["formats"]:
+ spec["formats"].remove("autograph_omnija")
+
+ return artifacts_specifications
+
+
+def get_signed_artifacts(input, formats, behavior=None):
+ """
+ Get the list of signed artifacts for the given input and formats.
+ """
+ artifacts = set()
+ if input.endswith(".dmg"):
+ artifacts.add(input.replace(".dmg", ".tar.gz"))
+ if behavior and behavior != "mac_sign":
+ artifacts.add(input.replace(".dmg", ".pkg"))
+ else:
+ artifacts.add(input)
+ if "autograph_gpg" in formats:
+ artifacts.add(f"{input}.asc")
+
+ return artifacts
+
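+# Illustrative example (not part of the original module): a GPG-signed DMG input
+# yields the repackaged tarball plus a detached signature named after the input:
+#
+#     >>> sorted(get_signed_artifacts("target.dmg", ["autograph_gpg"]))
+#     ['target.dmg.asc', 'target.tar.gz']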
+
+def get_geckoview_artifacts_to_sign(config, job):
+ upstream_artifacts = []
+ for package in job["attributes"]["maven_packages"]:
+ upstream_artifacts += get_geckoview_upstream_artifacts(config, job, package)
+ return [
+ path
+ for upstream_artifact in upstream_artifacts
+ for path in upstream_artifact["paths"]
+ if not path.endswith(".md5") and not path.endswith(".sha1")
+ ]
diff --git a/taskcluster/gecko_taskgraph/util/taskcluster.py b/taskcluster/gecko_taskgraph/util/taskcluster.py
new file mode 100644
index 0000000000..cddb01fd37
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/taskcluster.py
@@ -0,0 +1,128 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import logging
+import os
+
+import taskcluster_urls as liburls
+from taskcluster import Hooks
+from taskgraph.util import taskcluster as tc_util
+from taskgraph.util.taskcluster import (
+ _do_request,
+ get_index_url,
+ get_root_url,
+ get_task_definition,
+ get_task_url,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def insert_index(index_path, task_id, data=None, use_proxy=False):
+ index_url = get_index_url(index_path, use_proxy=use_proxy)
+
+ # Find task expiry.
+ expires = get_task_definition(task_id, use_proxy=use_proxy)["expires"]
+
+ response = _do_request(
+ index_url,
+ method="put",
+ json={
+ "taskId": task_id,
+ "rank": 0,
+ "data": data or {},
+ "expires": expires,
+ },
+ )
+ return response
+
+
+def status_task(task_id, use_proxy=False):
+ """Gets the status of a task given a task_id.
+
+ In testing mode, just logs that it would have retrieved status.
+
+ Args:
+ task_id (str): A task id.
+ use_proxy (bool): Whether to use taskcluster-proxy (default: False)
+
+ Returns:
+ dict: A dictionary object as defined here:
+ https://docs.taskcluster.net/docs/reference/platform/queue/api#status
+ """
+ if tc_util.testing:
+ logger.info(f"Would have gotten status for {task_id}.")
+ else:
+ resp = _do_request(get_task_url(task_id, use_proxy) + "/status")
+ status = resp.json().get("status", {})
+ return status
+
+
+def state_task(task_id, use_proxy=False):
+ """Gets the state of a task given a task_id.
+
+ In testing mode, just logs that it would have retrieved the state. The state is a
+ subset of the data returned by :func:`status_task`.
+
+ Args:
+ task_id (str): A task id.
+ use_proxy (bool): Whether to use taskcluster-proxy (default: False)
+
+ Returns:
+ str: The state of the task, one of
+ ``pending, running, completed, failed, exception, unknown``.
+ """
+ if tc_util.testing:
+ logger.info(f"Would have gotten state for {task_id}.")
+ else:
+ status = status_task(task_id, use_proxy=use_proxy).get("state") or "unknown"
+ return status
+
+
+def trigger_hook(hook_group_id, hook_id, hook_payload):
+ hooks = Hooks({"rootUrl": get_root_url(True)})
+ response = hooks.triggerHook(hook_group_id, hook_id, hook_payload)
+
+ logger.info(
+ "Task seen here: {}/tasks/{}".format(
+ get_root_url(os.environ.get("TASKCLUSTER_PROXY_URL")),
+ response["status"]["taskId"],
+ )
+ )
+
+
+def list_task_group_tasks(task_group_id):
+ """Generate the tasks in a task group"""
+ params = {}
+ while True:
+ url = liburls.api(
+ get_root_url(False),
+ "queue",
+ "v1",
+ f"task-group/{task_group_id}/list",
+ )
+ resp = _do_request(url, method="get", params=params).json()
+ yield from resp["tasks"]
+ if resp.get("continuationToken"):
+ params = {"continuationToken": resp.get("continuationToken")}
+ else:
+ break
+
+
+def list_task_group_incomplete_task_ids(task_group_id):
+ states = ("running", "pending", "unscheduled")
+ for task in [t["status"] for t in list_task_group_tasks(task_group_id)]:
+ if task["state"] in states:
+ yield task["taskId"]
+
+
+def list_task_group_complete_tasks(task_group_id):
+ tasks = {}
+ for task in list_task_group_tasks(task_group_id):
+ if task.get("status", {}).get("state", "") == "completed":
+ tasks[task.get("task", {}).get("metadata", {}).get("name", "")] = task.get(
+ "status", {}
+ ).get("taskId", "")
+ return tasks
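+
+# Illustrative usage sketch (not part of the original module), assuming a valid
+# task group id is at hand:
+#
+#     group_id = "abc123"  # hypothetical task group id
+#     still_running = list(list_task_group_incomplete_task_ids(group_id))
+#     finished = list_task_group_complete_tasks(group_id)  # {task name: task id}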
diff --git a/taskcluster/gecko_taskgraph/util/taskgraph.py b/taskcluster/gecko_taskgraph/util/taskgraph.py
new file mode 100644
index 0000000000..bac7b3fbb8
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/taskgraph.py
@@ -0,0 +1,49 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Tools for interacting with existing taskgraphs.
+"""
+
+from taskgraph.util.taskcluster import find_task_id, get_artifact
+
+
+def find_decision_task(parameters, graph_config):
+ """Given the parameters for this action, find the taskId of the decision
+ task"""
+ head_rev_param = "{}head_rev".format(graph_config["project-repo-param-prefix"])
+ return find_task_id(
+ "{}.v2.{}.revision.{}.taskgraph.decision".format(
+ graph_config["trust-domain"],
+ parameters["project"],
+ parameters[head_rev_param],
+ )
+ )
+
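+# For example (illustrative only): with trust-domain "gecko" and project
+# "mozilla-central", the index path queried above takes the form
+# "gecko.v2.mozilla-central.revision.<head_rev>.taskgraph.decision".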
+
+def find_existing_tasks(previous_graph_ids):
+ existing_tasks = {}
+ for previous_graph_id in previous_graph_ids:
+ label_to_taskid = get_artifact(previous_graph_id, "public/label-to-taskid.json")
+ existing_tasks.update(label_to_taskid)
+ return existing_tasks
+
+
+def find_existing_tasks_from_previous_kinds(
+ full_task_graph, previous_graph_ids, rebuild_kinds
+):
+ """Given a list of previous decision/action taskIds and kinds to ignore
+ from the previous graphs, return a dictionary of labels-to-taskids to use
+ as ``existing_tasks`` in the optimization step."""
+ existing_tasks = find_existing_tasks(previous_graph_ids)
+ kind_labels = {
+ t.label
+ for t in full_task_graph.tasks.values()
+ if t.attributes["kind"] not in rebuild_kinds
+ }
+ return {
+ label: taskid
+ for (label, taskid) in existing_tasks.items()
+ if label in kind_labels
+ }
diff --git a/taskcluster/gecko_taskgraph/util/templates.py b/taskcluster/gecko_taskgraph/util/templates.py
new file mode 100644
index 0000000000..e6640a7edd
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/templates.py
@@ -0,0 +1,59 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from gecko_taskgraph.util.copy_task import copy_task
+
+
+def merge_to(source, dest):
+ """
+ Merge dict and arrays (override scalar values)
+
+ Keys from source override keys from dest, and elements from lists in source
+ are appended to lists in dest.
+
+ :param dict source: to copy from
+ :param dict dest: to copy to (modified in place)
+ """
+
+ for key, value in source.items():
+ if (
+ isinstance(value, dict)
+ and len(value) == 1
+ and list(value)[0].startswith("by-")
+ ):
+ # Do not merge by-* values as this is likely to confuse someone
+ dest[key] = value
+ continue
+
+ # Override mismatching or empty types
+ if type(value) != type(dest.get(key)): # noqa
+ dest[key] = value
+ continue
+
+ # Merge dict
+ if isinstance(value, dict):
+ merge_to(value, dest[key])
+ continue
+
+ if isinstance(value, list):
+ dest[key] = dest[key] + value
+ continue
+
+ dest[key] = value
+
+ return dest
+
+
+def merge(*objects):
+ """
+ Merge the given objects, using the semantics described for merge_to, with
+ objects later in the list taking precedence. From an inheritance
+ perspective, "parents" should be listed before "children".
+
+ Returns the result without modifying any arguments.
+ """
+ if len(objects) == 1:
+ return copy_task(objects[0])
+ return merge_to(objects[-1], merge(*objects[:-1]))
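+
+# Illustrative example (not part of the original module): scalars are overridden
+# by later objects, lists are concatenated, and `by-*` dicts are replaced
+# wholesale rather than merged:
+#
+#     >>> merge({"opt": 1, "paths": ["a"]}, {"opt": 2, "paths": ["b"]})
+#     {'opt': 2, 'paths': ['a', 'b']}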
diff --git a/taskcluster/gecko_taskgraph/util/verify.py b/taskcluster/gecko_taskgraph/util/verify.py
new file mode 100644
index 0000000000..037d985cca
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/verify.py
@@ -0,0 +1,454 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import logging
+import os
+import re
+import sys
+
+import attr
+from taskgraph.util.treeherder import join_symbol
+from taskgraph.util.verify import VerificationSequence
+
+from gecko_taskgraph import GECKO
+from gecko_taskgraph.util.attributes import (
+ ALL_PROJECTS,
+ RELEASE_PROJECTS,
+ RUN_ON_PROJECT_ALIASES,
+)
+
+logger = logging.getLogger(__name__)
+doc_base_path = os.path.join(GECKO, "taskcluster", "docs")
+
+
+verifications = VerificationSequence()
+
+
+@attr.s(frozen=True)
+class DocPaths:
+ _paths = attr.ib(factory=list)
+
+ def get_files(self, filename):
+ rv = []
+ for p in self._paths:
+ doc_path = os.path.join(p, filename)
+ if os.path.exists(doc_path):
+ rv.append(doc_path)
+ return rv
+
+ def add(self, path):
+ """
+ Projects that make use of Firefox's taskgraph can extend it with
+ their own task kinds by registering additional paths for documentation.
+ documentation_paths.add() needs to be called by the project's Taskgraph
+ registration function. See taskgraph.config.
+ """
+ self._paths.append(path)
+
+
+documentation_paths = DocPaths()
+documentation_paths.add(doc_base_path)
+
+
+def verify_docs(filename, identifiers, appearing_as):
+ """
+ Look for identifiers of the type appearing_as in the files
+ returned by documentation_paths.get_files(). Firefox will have
+ a single file in a list, but projects such as Thunderbird can have
+ documentation in another location and may return multiple files.
+ """
+ # Identifiers starting with "_" are ignored for doc verification, so tests
+ # can use them for fake values.
+ doc_files = documentation_paths.get_files(filename)
+ doctext = "".join([open(d).read() for d in doc_files])
+
+ if appearing_as == "inline-literal":
+ expression_list = [
+ "``" + identifier + "``"
+ for identifier in identifiers
+ if not identifier.startswith("_")
+ ]
+ elif appearing_as == "heading":
+ expression_list = [
+ "\n" + identifier + "\n(?:(?:(?:-+\n)+)|(?:(?:.+\n)+))"
+ for identifier in identifiers
+ if not identifier.startswith("_")
+ ]
+ else:
+ raise Exception(f"appearing_as = `{appearing_as}` not defined")
+
+ for expression, identifier in zip(expression_list, identifiers):
+ match_group = re.search(expression, doctext)
+ if not match_group:
+ raise Exception(
+ "{}: `{}` missing from doc file: `{}`".format(
+ appearing_as, identifier, filename
+ )
+ )
+
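+# As an illustration (not part of the original module): with appearing_as set to
+# "heading", an identifier such as "build" is expected to appear as an RST
+# heading of the form
+#
+#     build
+#     -----
+#
+# while "inline-literal" expects it to appear as ``build`` somewhere in the
+# documentation text.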
+
+@verifications.add("initial")
+def verify_run_using():
+ from gecko_taskgraph.transforms.job import registry
+
+ verify_docs(
+ filename="transforms/job.rst",
+ identifiers=registry.keys(),
+ appearing_as="inline-literal",
+ )
+
+
+@verifications.add("parameters")
+def verify_parameters_docs(parameters):
+ if not parameters.strict:
+ return
+
+ parameters_dict = dict(**parameters)
+ verify_docs(
+ filename="parameters.rst",
+ identifiers=list(parameters_dict),
+ appearing_as="inline-literal",
+ )
+
+
+@verifications.add("kinds")
+def verify_kinds_docs(kinds):
+ verify_docs(filename="kinds.rst", identifiers=kinds.keys(), appearing_as="heading")
+
+
+@verifications.add("full_task_set")
+def verify_attributes(task, taskgraph, scratch_pad, graph_config, parameters):
+ if task is None:
+ verify_docs(
+ filename="attributes.rst",
+ identifiers=list(scratch_pad["attribute_set"]),
+ appearing_as="heading",
+ )
+ return
+ scratch_pad.setdefault("attribute_set", set()).update(task.attributes.keys())
+
+
+@verifications.add("full_task_graph")
+def verify_task_graph_symbol(task, taskgraph, scratch_pad, graph_config, parameters):
+ """
+ This function verifies that the tuple
+ (collection.keys(), machine.platform, groupSymbol, symbol) is unique
+ for a target task graph.
+ """
+ if task is None:
+ return
+ task_dict = task.task
+ if "extra" in task_dict:
+ extra = task_dict["extra"]
+ if "treeherder" in extra:
+ treeherder = extra["treeherder"]
+
+ collection_keys = tuple(sorted(treeherder.get("collection", {}).keys()))
+ if len(collection_keys) != 1:
+ raise Exception(
+ "Task {} can't be in multiple treeherder collections "
+ "(the part of the platform after `/`): {}".format(
+ task.label, collection_keys
+ )
+ )
+ platform = treeherder.get("machine", {}).get("platform")
+ group_symbol = treeherder.get("groupSymbol")
+ symbol = treeherder.get("symbol")
+
+ key = (platform, collection_keys[0], group_symbol, symbol)
+ if key in scratch_pad:
+ raise Exception(
+ "Duplicate treeherder platform and symbol in tasks "
+ "`{}`and `{}`: {} {}".format(
+ task.label,
+ scratch_pad[key],
+ f"{platform}/{collection_keys[0]}",
+ join_symbol(group_symbol, symbol),
+ )
+ )
+ else:
+ scratch_pad[key] = task.label
+
+
+@verifications.add("full_task_graph")
+def verify_trust_domain_v2_routes(
+ task, taskgraph, scratch_pad, graph_config, parameters
+):
+ """
+ This function ensures that any two tasks have distinct ``index.{trust-domain}.v2`` routes.
+ """
+ if task is None:
+ return
+ route_prefix = "index.{}.v2".format(graph_config["trust-domain"])
+ task_dict = task.task
+ routes = task_dict.get("routes", [])
+
+ for route in routes:
+ if route.startswith(route_prefix):
+ if route in scratch_pad:
+ raise Exception(
+ "conflict between {}:{} for route: {}".format(
+ task.label, scratch_pad[route], route
+ )
+ )
+ else:
+ scratch_pad[route] = task.label
+
+
+@verifications.add("full_task_graph")
+def verify_routes_notification_filters(
+ task, taskgraph, scratch_pad, graph_config, parameters
+):
+ """
+ This function ensures that only understood filters for notifications are
+ specified.
+
+ See: https://firefox-ci-tc.services.mozilla.com/docs/manual/using/task-notifications
+ """
+ if task is None:
+ return
+ route_prefix = "notify."
+ valid_filters = ("on-any", "on-completed", "on-failed", "on-exception")
+ task_dict = task.task
+ routes = task_dict.get("routes", [])
+
+ for route in routes:
+ if route.startswith(route_prefix):
+ # Get the filter of the route
+ route_filter = route.split(".")[-1]
+ if route_filter not in valid_filters:
+ raise Exception(
+ "{} has invalid notification filter ({})".format(
+ task.label, route_filter
+ )
+ )
+
+
+@verifications.add("full_task_graph")
+def verify_dependency_tiers(task, taskgraph, scratch_pad, graph_config, parameters):
+ tiers = scratch_pad
+ if task is not None:
+ tiers[task.label] = (
+ task.task.get("extra", {}).get("treeherder", {}).get("tier", sys.maxsize)
+ )
+ else:
+
+ def printable_tier(tier):
+ if tier == sys.maxsize:
+ return "unknown"
+ return tier
+
+ for task in taskgraph.tasks.values():
+ tier = tiers[task.label]
+ for d in task.dependencies.values():
+ if taskgraph[d].task.get("workerType") == "always-optimized":
+ continue
+ if "dummy" in taskgraph[d].kind:
+ continue
+ if tier < tiers[d]:
+ raise Exception(
+ "{} (tier {}) cannot depend on {} (tier {})".format(
+ task.label,
+ printable_tier(tier),
+ d,
+ printable_tier(tiers[d]),
+ )
+ )
+
+
+@verifications.add("full_task_graph")
+def verify_required_signoffs(task, taskgraph, scratch_pad, graph_config, parameters):
+ """
+ Tasks with required signoffs can't be dependencies of tasks with fewer
+ required signoffs.
+ """
+ all_required_signoffs = scratch_pad
+ if task is not None:
+ all_required_signoffs[task.label] = set(
+ task.attributes.get("required_signoffs", [])
+ )
+ else:
+
+ def printable_signoff(signoffs):
+ if len(signoffs) == 1:
+ return "required signoff {}".format(*signoffs)
+ if signoffs:
+ return "required signoffs {}".format(", ".join(signoffs))
+ return "no required signoffs"
+
+ for task in taskgraph.tasks.values():
+ required_signoffs = all_required_signoffs[task.label]
+ for d in task.dependencies.values():
+ if required_signoffs < all_required_signoffs[d]:
+ raise Exception(
+ "{} ({}) cannot depend on {} ({})".format(
+ task.label,
+ printable_signoff(required_signoffs),
+ d,
+ printable_signoff(all_required_signoffs[d]),
+ )
+ )
+
+
+@verifications.add("full_task_graph")
+def verify_aliases(task, taskgraph, scratch_pad, graph_config, parameters):
+ """
+ This function verifies that aliases are not reused.
+ """
+ if task is None:
+ return
+ if task.kind not in ("toolchain", "fetch"):
+ return
+ for_kind = scratch_pad.setdefault(task.kind, {})
+ aliases = for_kind.setdefault("aliases", {})
+ alias_attribute = f"{task.kind}-alias"
+ if task.label in aliases:
+ raise Exception(
+ "Task `{}` has a {} of `{}`, masking a task of that name.".format(
+ aliases[task.label],
+ alias_attribute,
+ task.label[len(task.kind) + 1 :],
+ )
+ )
+ labels = for_kind.setdefault("labels", set())
+ labels.add(task.label)
+ attributes = task.attributes
+ if alias_attribute in attributes:
+ keys = attributes[alias_attribute]
+ if not keys:
+ keys = []
+ elif isinstance(keys, str):
+ keys = [keys]
+ for key in keys:
+ full_key = f"{task.kind}-{key}"
+ if full_key in labels:
+ raise Exception(
+ "Task `{}` has a {} of `{}`,"
+ " masking a task of that name.".format(
+ task.label,
+ alias_attribute,
+ key,
+ )
+ )
+ if full_key in aliases:
+ raise Exception(
+ "Duplicate {} in tasks `{}`and `{}`: {}".format(
+ alias_attribute,
+ task.label,
+ aliases[full_key],
+ key,
+ )
+ )
+ else:
+ aliases[full_key] = task.label
+
+
+@verifications.add("optimized_task_graph")
+def verify_always_optimized(task, taskgraph, scratch_pad, graph_config, parameters):
+ """
+ This function ensures that always-optimized tasks have been optimized.
+ """
+ if task is None:
+ return
+ if task.task.get("workerType") == "always-optimized":
+ raise Exception(f"Could not optimize the task {task.label!r}")
+
+
+@verifications.add("full_task_graph", run_on_projects=RELEASE_PROJECTS)
+def verify_shippable_no_sccache(task, taskgraph, scratch_pad, graph_config, parameters):
+ if task and task.attributes.get("shippable"):
+ if task.task.get("payload", {}).get("env", {}).get("USE_SCCACHE"):
+ raise Exception(f"Shippable job {task.label} cannot use sccache")
+
+
+@verifications.add("full_task_graph")
+def verify_test_packaging(task, taskgraph, scratch_pad, graph_config, parameters):
+ if task is None:
+ # In certain cases there are valid reasons for tests to be missing;
+ # don't error out when that happens.
+ missing_tests_allowed = any(
+ (
+ # user specified `--target-kind`
+ bool(parameters.get("target-kinds")),
+ # manifest scheduling is enabled
+ parameters["test_manifest_loader"] != "default",
+ )
+ )
+
+ exceptions = []
+ for task in taskgraph.tasks.values():
+ if task.kind == "build" and not task.attributes.get(
+ "skip-verify-test-packaging"
+ ):
+ build_env = task.task.get("payload", {}).get("env", {})
+ package_tests = build_env.get("MOZ_AUTOMATION_PACKAGE_TESTS")
+ shippable = task.attributes.get("shippable", False)
+ build_has_tests = scratch_pad.get(task.label)
+
+ if package_tests != "1":
+ # Shippable builds should always package tests.
+ if shippable:
+ exceptions.append(
+ "Build job {} is shippable and does not specify "
+ "MOZ_AUTOMATION_PACKAGE_TESTS=1 in the "
+ "environment.".format(task.label)
+ )
+
+ # Build tasks in the scratch pad have tests dependent on
+ # them, so we need to package tests during build.
+ if build_has_tests:
+ exceptions.append(
+ "Build job {} has tests dependent on it and does not specify "
+ "MOZ_AUTOMATION_PACKAGE_TESTS=1 in the environment".format(
+ task.label
+ )
+ )
+ else:
+ # Build tasks that aren't in the scratch pad have no
+ # dependent tests, so we shouldn't package tests.
+ # With the caveat that we expect shippable jobs to always
+ # produce tests.
+ if not build_has_tests and not shippable:
+ # If we have not generated all task kinds, we can't verify that
+ # there are no dependent tests.
+ if not missing_tests_allowed:
+ exceptions.append(
+ "Build job {} has no tests, but specifies "
+ "MOZ_AUTOMATION_PACKAGE_TESTS={} in the environment. "
+ "Unset MOZ_AUTOMATION_PACKAGE_TESTS in the task definition "
+ "to fix.".format(task.label, package_tests)
+ )
+ if exceptions:
+ raise Exception("\n".join(exceptions))
+ return
+ if task.kind == "test":
+ build_task = taskgraph[task.dependencies["build"]]
+ scratch_pad[build_task.label] = 1
+
+
+@verifications.add("full_task_graph")
+def verify_run_known_projects(task, taskgraph, scratch_pad, graph_config, parameters):
+ """Validates the inputs in run-on-projects.
+
+ We should never let 'try' (or 'try-comm-central') appear in run-on-projects: even
+ though it is a valid value, it is not considered for try pushes. While here, we also
+ validate against other unknown projects and typos.
+ """
+ if task and task.attributes.get("run_on_projects"):
+ projects = set(task.attributes["run_on_projects"])
+ if {"try", "try-comm-central"} & set(projects):
+ raise Exception(
+ "In task {}: using try in run-on-projects is invalid; use try "
+ "selectors to select this task on try".format(task.label)
+ )
+ # try isn't valid, but by the time we get here it's not an available project anyway.
+ valid_projects = ALL_PROJECTS | set(RUN_ON_PROJECT_ALIASES.keys())
+ invalid_projects = projects - valid_projects
+ if invalid_projects:
+ raise Exception(
+ "Task '{}' has an invalid run-on-projects value: "
+ "{}".format(task.label, invalid_projects)
+ )
diff --git a/taskcluster/gecko_taskgraph/util/workertypes.py b/taskcluster/gecko_taskgraph/util/workertypes.py
new file mode 100644
index 0000000000..b9617993dd
--- /dev/null
+++ b/taskcluster/gecko_taskgraph/util/workertypes.py
@@ -0,0 +1,103 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from mozbuild.util import memoize
+from taskgraph.util.attributes import keymatch
+from taskgraph.util.keyed_by import evaluate_keyed_by
+
+from gecko_taskgraph.util.attributes import release_level as _release_level
+
+WORKER_TYPES = {
+ "gce/gecko-1-b-linux": ("docker-worker", "linux"),
+ "gce/gecko-2-b-linux": ("docker-worker", "linux"),
+ "gce/gecko-3-b-linux": ("docker-worker", "linux"),
+ "invalid/invalid": ("invalid", None),
+ "invalid/always-optimized": ("always-optimized", None),
+ "scriptworker-prov-v1/signing-linux-v1": ("scriptworker-signing", None),
+}
+
+
+@memoize
+def _get(graph_config, alias, level, release_level, project):
+ """Get the configuration for this worker_type alias: {provisioner,
+ worker-type, implementation, os}"""
+ level = str(level)
+
+ # handle the legacy (non-alias) format
+ if "/" in alias:
+ alias = alias.format(level=level)
+ provisioner, worker_type = alias.split("/", 1)
+ try:
+ implementation, os = WORKER_TYPES[alias]
+ return {
+ "provisioner": provisioner,
+ "worker-type": worker_type,
+ "implementation": implementation,
+ "os": os,
+ }
+ except KeyError:
+ return {
+ "provisioner": provisioner,
+ "worker-type": worker_type,
+ }
+
+ matches = keymatch(graph_config["workers"]["aliases"], alias)
+ if len(matches) > 1:
+ raise KeyError("Multiple matches for worker-type alias " + alias)
+ elif not matches:
+ raise KeyError("No matches for worker-type alias " + alias)
+ worker_config = matches[0].copy()
+
+ worker_config["provisioner"] = evaluate_keyed_by(
+ worker_config["provisioner"],
+ f"worker-type alias {alias} field provisioner",
+ {"level": level},
+ ).format(
+ **{
+ "trust-domain": graph_config["trust-domain"],
+ "level": level,
+ "alias": alias,
+ }
+ )
+ attrs = {"level": level, "release-level": release_level}
+ if project:
+ attrs["project"] = project
+ worker_config["worker-type"] = evaluate_keyed_by(
+ worker_config["worker-type"],
+ f"worker-type alias {alias} field worker-type",
+ attrs,
+ ).format(
+ **{
+ "trust-domain": graph_config["trust-domain"],
+ "level": level,
+ "alias": alias,
+ }
+ )
+
+ return worker_config
+
+
+def worker_type_implementation(graph_config, parameters, worker_type):
+ """Get the worker implementation and OS for the given workerType, where the
+ OS represents the host system, not the target OS, in the case of
+ cross-compiles."""
+ worker_config = _get(
+ graph_config, worker_type, "1", "staging", parameters["project"]
+ )
+ return worker_config["implementation"], worker_config.get("os")
+
+
+def get_worker_type(graph_config, parameters, worker_type):
+ """
+ Get the worker type provisioner and worker-type, optionally evaluating
+ aliases from the graph config.
+ """
+ worker_config = _get(
+ graph_config,
+ worker_type,
+ parameters["level"],
+ _release_level(parameters.get("project")),
+ parameters.get("project"),
+ )
+ return worker_config["provisioner"], worker_config["worker-type"]