Diffstat (limited to 'third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py')
-rw-r--r--  third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py  |  61
 1 file changed, 37 insertions(+), 24 deletions(-)
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py
index c55de78513..168b8c00c9 100644
--- a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py
@@ -110,7 +110,7 @@ task_description_schema = Schema(
# section of the kind (delimited by "-") all smooshed together.
# Eg: "test" becomes "T", "docker-image" becomes "DI", etc.
"symbol": Optional(str),
- # the job kind
+ # the task kind
# If "build" or "test" is found in the kind name, this defaults
# to the appropriate value. Otherwise, defaults to "other"
"kind": Optional(Any("build", "test", "other")),
@@ -129,7 +129,7 @@ task_description_schema = Schema(
Optional("index"): {
# the name of the product this build produces
"product": str,
- # the names to use for this job in the TaskCluster index
+ # the names to use for this task in the TaskCluster index
"job-name": str,
# Type of gecko v2 index to use
"type": str,
@@ -179,7 +179,7 @@ task_description_schema = Schema(
# be substituted in this string:
# {level} -- the scm level of this push
"worker-type": str,
- # Whether the job should use sccache compiler caching.
+ # Whether the task should use sccache compiler caching.
Required("needs-sccache"): bool,
# information specific to the worker implementation that will run this task
Optional("worker"): {
@@ -196,7 +196,7 @@ TC_TREEHERDER_SCHEMA_URL = (
UNKNOWN_GROUP_NAME = (
- "Treeherder group {} (from {}) has no name; " "add it to taskcluster/ci/config.yml"
+ "Treeherder group {} (from {}) has no name; " "add it to taskcluster/config.yml"
)
V2_ROUTE_TEMPLATES = [
@@ -266,7 +266,7 @@ def index_builder(name):
UNSUPPORTED_INDEX_PRODUCT_ERROR = """\
The index product {product} is not in the list of configured products in
-`taskcluster/ci/config.yml'.
+`taskcluster/config.yml'.
"""
@@ -317,7 +317,7 @@ def verify_index(config, index):
{
# only one type is supported by any of the workers right now
"type": "persistent",
- # name of the cache, allowing re-use by subsequent tasks naming the
+ # name of the cache, allowing reuse by subsequent tasks naming the
# same cache
"name": str,
# location in the task image where the cache will be mounted
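[Editor's note: a cache entry conforming to this schema might look roughly like the following; the name and mount point are hypothetical, not taken from the patch. The "mount-point" key itself is the one read later via cache["mount-point"].]

# Hypothetical cache entry matching the schema above; only "persistent"
# caches are supported, and the mount point is a path inside the task image.
example_cache = {
    "type": "persistent",
    "name": "checkouts",                        # hypothetical cache name
    "mount-point": "/builds/worker/checkouts",  # hypothetical mount location
}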
@@ -364,6 +364,9 @@ def build_docker_worker_payload(config, task, task_def):
if "in-tree" in image:
name = image["in-tree"]
docker_image_task = "build-docker-image-" + image["in-tree"]
+ assert "docker-image" not in task.get(
+ "dependencies", ()
+ ), "docker-image key in dependencies object is reserved"
task.setdefault("dependencies", {})["docker-image"] = docker_image_task
image = {
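[Editor's note: the effect of the reserved dependency label can be sketched as follows; the image name is made up and the surrounding task structure is simplified.]

# Sketch of the bookkeeping above, with a hypothetical in-tree image named
# "debian12-build": the image-building task is recorded under the reserved
# "docker-image" dependency label, which later <docker-image> references
# resolve against.
task = {}
image = {"in-tree": "debian12-build"}

docker_image_task = "build-docker-image-" + image["in-tree"]
assert "docker-image" not in task.get("dependencies", ())
task.setdefault("dependencies", {})["docker-image"] = docker_image_task

assert task["dependencies"] == {"docker-image": "build-docker-image-debian12-build"}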
@@ -487,19 +490,19 @@ def build_docker_worker_payload(config, task, task_def):
# run-task knows how to validate caches.
#
- # To help ensure new run-task features and bug fixes don't interfere
- # with existing caches, we seed the hash of run-task into cache names.
- # So, any time run-task changes, we should get a fresh set of caches.
- # This means run-task can make changes to cache interaction at any time
- # without regards for backwards or future compatibility.
+ # To help ensure new run-task features and bug fixes, as well as the
+ # versions of tools such as mercurial or git, don't interfere with
+ # existing caches, we seed the underlying docker-image task id into
+ # cache names, for tasks using in-tree Docker images.
#
# But this mechanism only works for in-tree Docker images that are built
# with the current run-task! For out-of-tree Docker images, we have no
# way of knowing their content of run-task. So, in addition to varying
# cache names by the contents of run-task, we also take the Docker image
- # name into consideration. This means that different Docker images will
- # never share the same cache. This is a bit unfortunate. But it is the
- # safest thing to do. Fortunately, most images are defined in-tree.
+ # name into consideration.
+ #
+ # This means that different Docker images will never share the same
+ # cache. This is a bit unfortunate, but is the safest thing to do.
#
# For out-of-tree Docker images, we don't strictly need to incorporate
# the run-task content into the cache name. However, doing so preserves
@@ -520,6 +523,8 @@ def build_docker_worker_payload(config, task, task_def):
out_of_tree_image.encode("utf-8")
).hexdigest()
suffix += name_hash[0:12]
+ else:
+ suffix += "-<docker-image>"
else:
suffix = cache_version
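[Editor's note: putting the two branches together, the suffix construction for run-task tasks can be sketched roughly as below; this is a simplified illustration, not the exact implementation, and the run-task hash is a placeholder. Tasks not driven by run-task fall back to the bare cache version, as the final else branch shows.]

import hashlib

def cache_name_suffix(cache_version, run_task_hash, out_of_tree_image):
    # Simplified sketch: run-task users get a suffix seeded with the run-task
    # hash, then either a hash of the out-of-tree image name or, for in-tree
    # images, a "<docker-image>" placeholder that task-reference later
    # replaces with the docker-image task id.
    suffix = f"{cache_version}-{run_task_hash}"
    if out_of_tree_image:
        suffix += hashlib.sha256(out_of_tree_image.encode("utf-8")).hexdigest()[0:12]
    else:
        suffix += "-<docker-image>"
    return suffix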
@@ -539,13 +544,15 @@ def build_docker_worker_payload(config, task, task_def):
suffix=suffix,
)
caches[name] = cache["mount-point"]
- task_def["scopes"].append("docker-worker:cache:%s" % name)
+ task_def["scopes"].append(
+ {"task-reference": "docker-worker:cache:%s" % name}
+ )
# Assertion: only run-task is interested in this.
if run_task:
payload["env"]["TASKCLUSTER_CACHES"] = ";".join(sorted(caches.values()))
- payload["cache"] = caches
+ payload["cache"] = {"task-reference": caches}
# And send down volumes information to run-task as well.
if run_task and worker.get("volumes"):
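[Editor's note: the {"task-reference": ...} wrapper is what lets the <docker-image> placeholder in cache scopes and cache names be replaced with the actual docker-image task id once dependencies are known. A minimal illustration of that substitution follows; the task id is made up and this is not taskgraph's real resolver.]

# Minimal illustration of how a task-reference wrapped scope is resolved:
# "<docker-image>" is replaced by the id of the dependency registered under
# the "docker-image" label earlier in this transform.
dependencies = {"docker-image": "Abc123DockerImageTaskId"}  # hypothetical id
scope = {"task-reference": "docker-worker:cache:checkouts-<docker-image>"}

def resolve_task_reference(value, deps):
    text = value["task-reference"]
    for label, task_id in deps.items():
        text = text.replace(f"<{label}>", task_id)
    return text

assert (
    resolve_task_reference(scope, dependencies)
    == "docker-worker:cache:checkouts-Abc123DockerImageTaskId"
)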
@@ -752,7 +759,7 @@ def build_generic_worker_payload(config, task, task_def):
schema={
# the maximum time to run, in seconds
Required("max-run-time"): int,
- # locale key, if this is a locale beetmover job
+ # locale key, if this is a locale beetmover task
Optional("locale"): str,
Optional("partner-public"): bool,
Required("release-properties"): {
@@ -1075,7 +1082,11 @@ def build_task(config, tasks):
extra["parent"] = os.environ.get("TASK_ID", "")
if "expires-after" not in task:
- task["expires-after"] = "28 days" if config.params.is_try() else "1 year"
+ task["expires-after"] = (
+ config.graph_config._config.get("task-expires-after", "28 days")
+ if config.params.is_try()
+ else "1 year"
+ )
if "deadline-after" not in task:
if "task-deadline-after" in config.graph_config:
@@ -1142,9 +1153,9 @@ def build_task(config, tasks):
config.params["project"] + th_project_suffix, branch_rev
)
)
- task_def["metadata"]["description"] += " ([Treeherder push]({}))".format(
- th_push_link
- )
+ task_def["metadata"][
+ "description"
+ ] += f" ([Treeherder push]({th_push_link}))"
# add the payload and adjust anything else as required (e.g., scopes)
payload_builders[task["worker"]["implementation"]].builder(
@@ -1288,7 +1299,7 @@ def check_caches_are_volumes(task):
Caches and volumes are the only filesystem locations whose content
isn't defined by the Docker image itself. Some caches are optional
- depending on the job environment. We want paths that are potentially
+ depending on the task environment. We want paths that are potentially
caches to have as similar behavior regardless of whether a cache is
used. To help enforce this, we require that all paths used as caches
to be declared as Docker volumes. This check won't catch all offenders.
@@ -1343,7 +1354,9 @@ def check_run_task_caches(config, tasks):
main_command = command[0] if isinstance(command[0], str) else ""
run_task = main_command.endswith("run-task")
- for cache in payload.get("cache", {}):
+ for cache in payload.get("cache", {}).get(
+ "task-reference", payload.get("cache", {})
+ ):
if not cache.startswith(cache_prefix):
raise Exception(
"{} is using a cache ({}) which is not appropriate "
@@ -1364,7 +1377,7 @@ def check_run_task_caches(config, tasks):
"cache name"
)
- if not cache.endswith(suffix):
+ if suffix not in cache:
raise Exception(
f"{task['label']} is using a cache ({cache}) reserved for run-task "
"but the cache name is not dependent on the contents "