From 26a029d407be480d791972afb5975cf62c9360a6 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Fri, 19 Apr 2024 02:47:55 +0200
Subject: Adding upstream version 124.0.1.
Signed-off-by: Daniel Baumann
---
testing/web-platform/tests/tools/ci/tc/README.md | 243 +++++++++
testing/web-platform/tests/tools/ci/tc/__init__.py | 0
testing/web-platform/tests/tools/ci/tc/decision.py | 414 +++++++++++++++
testing/web-platform/tests/tools/ci/tc/download.py | 111 ++++
.../tests/tools/ci/tc/github_checks_output.py | 34 ++
.../web-platform/tests/tools/ci/tc/sink_task.py | 65 +++
.../web-platform/tests/tools/ci/tc/taskgraph.py | 175 ++++++
.../web-platform/tests/tools/ci/tc/tasks/test.yml | 589 +++++++++++++++++++++
.../ci/tc/testdata/epochs_daily_push_event.json | 460 ++++++++++++++++
.../tools/ci/tc/testdata/master_push_event.json | 214 ++++++++
.../tests/tools/ci/tc/testdata/pr_event.json | 577 ++++++++++++++++++++
.../ci/tc/testdata/pr_event_tests_affected.json | 505 ++++++++++++++++++
.../tests/tools/ci/tc/tests/__init__.py | 0
.../tests/tools/ci/tc/tests/test_decision.py | 56 ++
.../tests/tools/ci/tc/tests/test_taskgraph.py | 148 ++++++
.../tests/tools/ci/tc/tests/test_valid.py | 379 +++++++++++++
16 files changed, 3970 insertions(+)
create mode 100644 testing/web-platform/tests/tools/ci/tc/README.md
create mode 100644 testing/web-platform/tests/tools/ci/tc/__init__.py
create mode 100644 testing/web-platform/tests/tools/ci/tc/decision.py
create mode 100644 testing/web-platform/tests/tools/ci/tc/download.py
create mode 100644 testing/web-platform/tests/tools/ci/tc/github_checks_output.py
create mode 100644 testing/web-platform/tests/tools/ci/tc/sink_task.py
create mode 100644 testing/web-platform/tests/tools/ci/tc/taskgraph.py
create mode 100644 testing/web-platform/tests/tools/ci/tc/tasks/test.yml
create mode 100644 testing/web-platform/tests/tools/ci/tc/testdata/epochs_daily_push_event.json
create mode 100644 testing/web-platform/tests/tools/ci/tc/testdata/master_push_event.json
create mode 100644 testing/web-platform/tests/tools/ci/tc/testdata/pr_event.json
create mode 100644 testing/web-platform/tests/tools/ci/tc/testdata/pr_event_tests_affected.json
create mode 100644 testing/web-platform/tests/tools/ci/tc/tests/__init__.py
create mode 100644 testing/web-platform/tests/tools/ci/tc/tests/test_decision.py
create mode 100644 testing/web-platform/tests/tools/ci/tc/tests/test_taskgraph.py
create mode 100644 testing/web-platform/tests/tools/ci/tc/tests/test_valid.py
(limited to 'testing/web-platform/tests/tools/ci/tc')
diff --git a/testing/web-platform/tests/tools/ci/tc/README.md b/testing/web-platform/tests/tools/ci/tc/README.md
new file mode 100644
index 0000000000..785c82cca3
--- /dev/null
+++ b/testing/web-platform/tests/tools/ci/tc/README.md
@@ -0,0 +1,243 @@
+# Taskgraph Setup
+
+The taskgraph is built from a YAML file. This file has two top-level
+properties: `components` and `tasks`. The full list of tasks is
+defined by the `tasks` object; each task is an object with a single
+property representing the task with the corresponding value an object
+representing the task properties. Each task requires the following
+top-level properties:
+
+* `provisionerId`: String. Name of Taskcluster provisioner
+* `schedulerId`: String. Name of Taskcluster scheduler
+* `deadline`: String. Time until the task expires
+* `image`: String. Name of docker image to use for task
+* `maxRunTime`: Number. Maximum time in seconds for which the task can
+ run.
+* `artifacts`: Object. List of artifacts and directories to upload; see
+ Taskcluster documentation.
+* `command`: String. Command to run. This is automatically wrapped in a
+ run_tc command
+* `options`: Optional Object. Options to pass into run_tc
+ - xvfb: Boolean. Enable Xvfb for run
+  - oom-killer: Boolean. Enable the OOM killer for run
+ - hosts: Boolean. Update hosts file with wpt hosts before run
+ - install-certificates: Boolean. Install wpt certs into OS
+ certificate store for run
+ - browser: List. List of browser names for run
+ - channel: String. Browser channel for run
+* `trigger`: Object. Conditions on which to consider task. One or more
+ of following properties:
+ - branch: List. List of branch names on which to trigger.
+ - pull-request: No value. Trigger for pull request actions
+* `schedule-if`: Optional Object. Conditions on which task should be
+ scheduled given it meets the trigger conditions.
+ - `run-job`: List. Job names for which this task should be considered,
+ matching the output from `./wpt test-jobs`
+* `env`: Optional Object. Environment variables to set when running task.
+* `depends-on`: Optional list. List of task names that must be complete
+ before the current task is scheduled.
+* `description`: String. Task description.
+* `name`: Optional String. Name to use for the task overriding the
+ property name. This is useful in combination with substitutions
+ described below.
+* `download-artifacts`: Optional Object. An artifact to download from
+ a task that this task depends on. This has the following properties:
+ - `task` - Name of the task producing the artifact
+ - `glob` - A glob pattern for the filename of the artifact
+  - `dest` - A directory relative to the home directory in which to place
+ the artifact
+ - `extract` - Optional. A boolean indicating whether an archive artifact
+ should be extracted in-place.
+
+## Task Expansions
+
+Using the above syntax it's possible to describe each task
+directly. But typically in a taskgraph there are many common
+properties between tasks so it's tedious and error prone to repeat
+information that's common to multiple tasks. Therefore the taskgraph
+format provides several mechanisms to reuse partial task definitions
+across multiple tasks.
+
+### Components
+
+The other top-level property in the taskgraph format is
+`components`. The value of this property is an object containing named
+partial task definitions. Each task definition may contain a property called
+`use` which is a list of components to use as the basis for the task
+definition. The components list is evaluated in order. If a property
+is not previously defined in the output it is added to the output. If
+it was previously defined, the value is updated according to the type:
+ * Strings and numbers are replaced with a new value
+ * Lists are extended with the additional values
+ * Objects are updated recursively following the above rules
+This means that types must always match between components and the
+final value.
+
+For example
+```
+components:
+ example-1:
+ list_prop:
+ - first
+ - second
+ object_prop:
+ key1: value1
+ key2: base_value
+ example-2:
+ list_prop:
+ - third
+ - fourth
+ object_prop:
+ key3:
+ - value3-1
+
+tasks:
+ - example-task:
+ use:
+ - example-1
+ - example-2
+ object_prop:
+ key2: value2
+ key3:
+ - value3-2
+```
+
+will evaluate to the following task:
+
+```
+example-task:
+ list_prop:
+ - first
+ - second
+ - third
+ - fourth
+ object_prop:
+ key1: value1
+ key2: value2
+ key3:
+ - value3-1
+ - value3-2
+```
+
+Note that components cannot currently define `use` properties of their own.
+
+## Substitutions
+
+Components and tasks can define a property `vars` that holds variables
+which are later substituted into the task definition using the syntax
+`${vars.property-name}`. For example:
+
+```
+components:
+ generic-component:
+ prop: ${vars.value}
+
+tasks:
+ - first:
+ use:
+ - generic-component
+ vars:
+ value: value1
+ - second:
+ use:
+ - generic-component
+ vars:
+ value: value2
+```
+
+Results in the following tasks:
+
+```
+first:
+ prop: value1
+second:
+ prop: value2
+```
+
+## Maps
+
+Instead of defining a task directly, an item in the tasks property may
+be an object with a single property `$map`. This object itself has two
+child properties; `for` and `do`. The value of `for` is a list of
+objects, and the value of `do` is either an object or a list of
+objects. For each object in the `for` property, a set of tasks is
+created by taking a copy of that object for each task in the `do`
+property, updating the object with the properties from the
+corresponding `do` object, using the same rules as for components
+above, and then processing as for a normal task. `$map` rules can also
+be nested.
+
+Note: Although `$map` shares a name with the `$map` used in json-e
+(used in `.taskcluster.yml`), the semantics are different.
+
+For example
+
+```
+components: {}
+tasks:
+ $map:
+ for:
+ - vars:
+ example: value1
+ - vars:
+ example: value2
+ do:
+      example-${vars.example}:
+ prop: ${vars.example}
+```
+
+Results in the tasks
+
+```
+example-value1:
+ prop: value1
+example-value2:
+ prop: value2
+```
+
+Note that in combination with `$map`, variable substitutions are
+applied *twice*; once after the `$map` is evaluated and once after the
+`use` statements are evaluated.
+
+## Chunks
+
+A common requirement for tasks is that they are "chunked" into N
+partial tasks. This is handled specially in the syntax. A top level
+property `chunks` can be used to define the number of individual
+chunks to create for a specific task. Each chunked task is created
+with a `chunks` property set to an object containing an `id` property
+containing the one-based index of the chunk and a `total` property
+containing the total number of chunks. These can be substituted into
+the task definition using the same syntax as for `vars` above
+e.g. `${chunks.id}`. Note that because task names must be unique, it's
+common to specify a `name` property on the task that will override the
+property name e.g.
+
+```
+components: {}
+tasks:
+ - chunked-task:
+    chunks: 2
+ command: "task-run --chunk=${chunks.id} --totalChunks=${chunks.total}"
+ name: task-chunk-${chunks.id}
+```
+
+creates tasks:
+
+```
+task-chunk-1:
+ command: "task-run --chunk=1 --totalChunks=2"
+task-chunk-2:
+ command: "task-run --chunk=2 --totalChunks=2"
+```
+
+# Overall processing model
+
+The overall processing model for tasks is as follows:
+ * Evaluate maps
+ * Perform substitutions
+ * Evaluate use statements
+ * Expand chunks
+ * Perform substitutions
+
+At each point after maps are evaluated tasks must have a unique name.
diff --git a/testing/web-platform/tests/tools/ci/tc/__init__.py b/testing/web-platform/tests/tools/ci/tc/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/testing/web-platform/tests/tools/ci/tc/decision.py b/testing/web-platform/tests/tools/ci/tc/decision.py
new file mode 100644
index 0000000000..d00ba6ba19
--- /dev/null
+++ b/testing/web-platform/tests/tools/ci/tc/decision.py
@@ -0,0 +1,414 @@
+# mypy: allow-untyped-defs
+
+import argparse
+import json
+import logging
+import os
+import re
+import subprocess
+from collections import OrderedDict
+
+import taskcluster
+
+from . import taskgraph
+
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+
+logging.basicConfig()
+logger = logging.getLogger()
+
+
+def get_triggers(event):
+ # Set some variables that we use to get the commits on the current branch
+ ref_prefix = "refs/heads/"
+ is_pr = "pull_request" in event
+ branch = None
+ if not is_pr and "ref" in event:
+ branch = event["ref"]
+ if branch.startswith(ref_prefix):
+ branch = branch[len(ref_prefix):]
+
+ return is_pr, branch
+
+
+def fetch_event_data(queue):
+ try:
+ task_id = os.environ["TASK_ID"]
+ except KeyError:
+ logger.warning("Missing TASK_ID environment variable")
+ # For example under local testing
+ return None
+
+ task_data = queue.task(task_id)
+
+ return task_data.get("extra", {}).get("github_event")
+
+
+def filter_triggers(event, all_tasks):
+ is_pr, branch = get_triggers(event)
+ triggered = OrderedDict()
+ for name, task in all_tasks.items():
+ if "trigger" in task:
+ if is_pr and "pull-request" in task["trigger"]:
+ triggered[name] = task
+ elif branch is not None and "branch" in task["trigger"]:
+ for trigger_branch in task["trigger"]["branch"]:
+ if (trigger_branch == branch or
+ trigger_branch.endswith("*") and branch.startswith(trigger_branch[:-1])):
+ triggered[name] = task
+ logger.info("Triggers match tasks:\n * %s" % "\n * ".join(triggered.keys()))
+ return triggered
+
+
+def get_run_jobs(event):
+ from tools.ci import jobs
+ revish = "%s..%s" % (event["pull_request"]["base"]["sha"]
+ if "pull_request" in event
+ else event["before"],
+ event["pull_request"]["head"]["sha"]
+ if "pull_request" in event
+ else event["after"])
+ logger.info("Looking for changes in range %s" % revish)
+ paths = jobs.get_paths(revish=revish)
+ logger.info("Found changes in paths:%s" % "\n".join(paths))
+ path_jobs = jobs.get_jobs(paths)
+ all_jobs = path_jobs | get_extra_jobs(event)
+ logger.info("Including jobs:\n * %s" % "\n * ".join(all_jobs))
+ return all_jobs
+
+
+def get_extra_jobs(event):
+ body = None
+ jobs = set()
+ if "commits" in event and event["commits"]:
+ body = event["commits"][0]["message"]
+ elif "pull_request" in event:
+ body = event["pull_request"]["body"]
+
+ if not body:
+ return jobs
+
+ regexp = re.compile(r"\s*tc-jobs:(.*)$")
+
+ for line in body.splitlines():
+ m = regexp.match(line)
+ if m:
+ items = m.group(1)
+ for item in items.split(","):
+ jobs.add(item.strip())
+ break
+ return jobs
+
+
+def filter_excluded_users(tasks, event):
+ # Some users' pull requests are excluded from tasks,
+ # such as pull requests from automated exports.
+ try:
+ submitter = event["pull_request"]["user"]["login"]
+ except KeyError:
+ # Just ignore excluded users if the
+ # username cannot be pulled from the event.
+ logger.debug("Unable to read username from event. Continuing.")
+ return
+
+ excluded_tasks = []
+ # A separate list of items for tasks is needed to iterate over
+ # because removing an item during iteration will raise an error.
+ for name, task in list(tasks.items()):
+ if submitter in task.get("exclude-users", []):
+ excluded_tasks.append(name)
+ tasks.pop(name) # removing excluded task
+ if excluded_tasks:
+ logger.info(
+ f"Tasks excluded for user {submitter}:\n * " +
+ "\n * ".join(excluded_tasks)
+ )
+
+
+def filter_schedule_if(event, tasks):
+ scheduled = OrderedDict()
+ run_jobs = None
+ for name, task in tasks.items():
+ if "schedule-if" in task:
+ if "run-job" in task["schedule-if"]:
+ if run_jobs is None:
+ run_jobs = get_run_jobs(event)
+ if "all" in run_jobs or any(item in run_jobs for item in task["schedule-if"]["run-job"]):
+ scheduled[name] = task
+ else:
+ scheduled[name] = task
+ logger.info("Scheduling rules match tasks:\n * %s" % "\n * ".join(scheduled.keys()))
+ return scheduled
+
+
+def get_fetch_rev(event):
+ is_pr, _ = get_triggers(event)
+ if is_pr:
+ # Try to get the actual rev so that all non-decision tasks are pinned to that
+ rv = ["refs/pull/%s/merge" % event["pull_request"]["number"]]
+ # For every PR GitHub maintains a 'head' branch with commits from the
+ # PR, and a 'merge' branch containing a merge commit between the base
+ # branch and the PR.
+ for ref_type in ["head", "merge"]:
+ ref = "refs/pull/%s/%s" % (event["pull_request"]["number"], ref_type)
+ sha = None
+ try:
+ output = subprocess.check_output(["git", "ls-remote", "origin", ref])
+ except subprocess.CalledProcessError:
+ import traceback
+ logger.error(traceback.format_exc())
+ logger.error("Failed to get commit sha1 for %s" % ref)
+ else:
+ if not output:
+ logger.error("Failed to get commit for %s" % ref)
+ else:
+ sha = output.decode("utf-8").split()[0]
+ rv.append(sha)
+ rv = tuple(rv)
+ else:
+ # For a branch push we have a ref and a head but no merge SHA
+ rv = (event["ref"], event["after"], None)
+ assert len(rv) == 3
+ return rv
+
+
+def build_full_command(event, task):
+ fetch_ref, head_sha, merge_sha = get_fetch_rev(event)
+ cmd_args = {
+ "task_name": task["name"],
+ "repo_url": event["repository"]["clone_url"],
+ "fetch_ref": fetch_ref,
+ "task_cmd": task["command"],
+ "install_str": "",
+ }
+
+ options = task.get("options", {})
+ options_args = []
+ options_args.append("--ref=%s" % fetch_ref)
+ if head_sha is not None:
+ options_args.append("--head-rev=%s" % head_sha)
+ if merge_sha is not None:
+ options_args.append("--merge-rev=%s" % merge_sha)
+ if options.get("oom-killer"):
+ options_args.append("--oom-killer")
+ if options.get("xvfb"):
+ options_args.append("--xvfb")
+ if not options.get("hosts"):
+ options_args.append("--no-hosts")
+ else:
+ options_args.append("--hosts")
+ # Check out the expected SHA unless it is overridden (e.g. to base_head).
+ if options.get("checkout"):
+ options_args.append("--checkout=%s" % options["checkout"])
+ for browser in options.get("browser", []):
+ options_args.append("--browser=%s" % browser)
+ if options.get("channel"):
+ options_args.append("--channel=%s" % options["channel"])
+ if options.get("install-certificates"):
+ options_args.append("--install-certificates")
+
+ cmd_args["options_str"] = " ".join(str(item) for item in options_args)
+
+ install_packages = task.get("install")
+ if install_packages:
+ install_items = ["apt update -qqy"]
+ install_items.extend("apt install -qqy %s" % item
+ for item in install_packages)
+ cmd_args["install_str"] = "\n".join("sudo %s;" % item for item in install_items)
+
+ return ["/bin/bash",
+ "--login",
+ "-xc",
+ """
+~/start.sh \
+ %(repo_url)s \
+ %(fetch_ref)s;
+%(install_str)s
+cd web-platform-tests;
+./tools/ci/run_tc.py %(options_str)s -- %(task_cmd)s;
+""" % cmd_args]
+
+
+def get_owner(event):
+ if "pusher" in event:
+ pusher = event.get("pusher", {}).get("email", "")
+ if pusher and "@" in pusher:
+ return pusher
+ return "web-platform-tests@users.noreply.github.com"
+
+
+def create_tc_task(event, task, taskgroup_id, depends_on_ids, env_extra=None):
+ command = build_full_command(event, task)
+ task_id = taskcluster.slugId()
+ task_data = {
+ "taskGroupId": taskgroup_id,
+ "created": taskcluster.fromNowJSON(""),
+ "deadline": taskcluster.fromNowJSON(task["deadline"]),
+ "provisionerId": task["provisionerId"],
+ "schedulerId": task["schedulerId"],
+ "workerType": task["workerType"],
+ "scopes": task.get("scopes", []),
+ "metadata": {
+ "name": task["name"],
+ "description": task.get("description", ""),
+ "owner": get_owner(event),
+ "source": event["repository"]["clone_url"]
+ },
+ "payload": {
+ "artifacts": task.get("artifacts"),
+ "command": command,
+ "image": task.get("image"),
+ "maxRunTime": task.get("maxRunTime"),
+ "env": task.get("env", {}),
+ },
+ "extra": {
+ "github_event": json.dumps(event)
+ },
+ "routes": ["checks"]
+ }
+ if "extra" in task:
+ task_data["extra"].update(task["extra"])
+ if task.get("privileged"):
+ if "capabilities" not in task_data["payload"]:
+ task_data["payload"]["capabilities"] = {}
+ task_data["payload"]["capabilities"]["privileged"] = True
+ if env_extra:
+ task_data["payload"]["env"].update(env_extra)
+ if depends_on_ids:
+ task_data["dependencies"] = depends_on_ids
+ task_data["requires"] = task.get("requires", "all-completed")
+ return task_id, task_data
+
+
+def get_artifact_data(artifact, task_id_map):
+ task_id, data = task_id_map[artifact["task"]]
+ return {
+ "task": task_id,
+ "glob": artifact["glob"],
+ "dest": artifact["dest"],
+ "extract": artifact.get("extract", False)
+ }
+
+
+def build_task_graph(event, all_tasks, tasks):
+ task_id_map = OrderedDict()
+ taskgroup_id = os.environ.get("TASK_ID", taskcluster.slugId())
+ sink_task_depends_on = []
+
+ def add_task(task_name, task):
+ depends_on_ids = []
+ if "depends-on" in task:
+ for depends_name in task["depends-on"]:
+ if depends_name not in task_id_map:
+ add_task(depends_name,
+ all_tasks[depends_name])
+ depends_on_ids.append(task_id_map[depends_name][0])
+ env_extra = {}
+ if "download-artifacts" in task:
+ env_extra["TASK_ARTIFACTS"] = json.dumps(
+ [get_artifact_data(artifact, task_id_map)
+ for artifact in task["download-artifacts"]])
+
+ task_id, task_data = create_tc_task(event, task, taskgroup_id, depends_on_ids,
+ env_extra=env_extra)
+ task_id_map[task_name] = (task_id, task_data)
+
+ # The string conversion here is because if we use variables they are
+ # converted to a string, so it's easier to use a string always
+ if str(task.get("required", "True")) != "False" and task_name != "sink-task":
+ sink_task_depends_on.append(task_id)
+
+ for task_name, task in tasks.items():
+ if task_name == "sink-task":
+ # sink-task will be created below at the end of the ordered dict,
+ # so that it can depend on all other tasks.
+ continue
+ add_task(task_name, task)
+
+ # GitHub branch protection for pull requests needs us to name explicit
+ # required tasks - which doesn't suffice when using a dynamic task graph.
+ # To work around this we declare a sink task that depends on all the other
+ # tasks completing, and checks if they have succeeded. We can then
+ # make the sink task the sole required task for pull requests.
+ sink_task = tasks.get("sink-task")
+ if sink_task:
+ logger.info("Scheduling sink-task")
+ sink_task["command"] += " {}".format(" ".join(sink_task_depends_on))
+ task_id_map["sink-task"] = create_tc_task(
+ event, sink_task, taskgroup_id, sink_task_depends_on)
+ else:
+ logger.info("sink-task is not scheduled")
+
+ return task_id_map
+
+
+def create_tasks(queue, task_id_map):
+ for (task_id, task_data) in task_id_map.values():
+ queue.createTask(task_id, task_data)
+
+
+def get_event(queue, event_path):
+ if event_path is not None:
+ try:
+ with open(event_path) as f:
+ event_str = f.read()
+ except OSError:
+ logger.error("Missing event file at path %s" % event_path)
+ raise
+ elif "TASK_EVENT" in os.environ:
+ event_str = os.environ["TASK_EVENT"]
+ else:
+ event_str = fetch_event_data(queue)
+ if not event_str:
+ raise ValueError("Can't find GitHub event definition; for local testing pass --event-path")
+ try:
+ return json.loads(event_str)
+ except ValueError:
+ logger.error("Event was not valid JSON")
+ raise
+
+
+def decide(event):
+ all_tasks = taskgraph.load_tasks_from_path(os.path.join(here, "tasks", "test.yml"))
+
+ triggered_tasks = filter_triggers(event, all_tasks)
+ scheduled_tasks = filter_schedule_if(event, triggered_tasks)
+ filter_excluded_users(scheduled_tasks, event)
+
+ logger.info("UNSCHEDULED TASKS:\n %s" % "\n ".join(sorted(set(all_tasks.keys()) -
+ set(scheduled_tasks.keys()))))
+ logger.info("SCHEDULED TASKS:\n %s" % "\n ".join(sorted(scheduled_tasks.keys())))
+
+ task_id_map = build_task_graph(event, all_tasks, scheduled_tasks)
+ return task_id_map
+
+
+def get_parser():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--event-path",
+ help="Path to file containing serialized GitHub event")
+ parser.add_argument("--dry-run", action="store_true",
+ help="Don't actually create the tasks, just output the tasks that "
+ "would be created")
+ parser.add_argument("--tasks-path",
+ help="Path to file in which to write payload for all scheduled tasks")
+ return parser
+
+
+def run(venv, **kwargs):
+ queue = taskcluster.Queue({'rootUrl': os.environ['TASKCLUSTER_PROXY_URL']})
+ event = get_event(queue, event_path=kwargs["event_path"])
+
+ task_id_map = decide(event)
+
+ try:
+ if not kwargs["dry_run"]:
+ create_tasks(queue, task_id_map)
+ else:
+ print(json.dumps(task_id_map, indent=2))
+ finally:
+ if kwargs["tasks_path"]:
+ with open(kwargs["tasks_path"], "w") as f:
+ json.dump(task_id_map, f, indent=2)
diff --git a/testing/web-platform/tests/tools/ci/tc/download.py b/testing/web-platform/tests/tools/ci/tc/download.py
new file mode 100644
index 0000000000..6a78935be4
--- /dev/null
+++ b/testing/web-platform/tests/tools/ci/tc/download.py
@@ -0,0 +1,111 @@
+# mypy: allow-untyped-defs
+
+import argparse
+import os
+import logging
+
+import requests
+
+import github
+
+
+logging.basicConfig()
+logger = logging.getLogger("tc-download")
+
+# The root URL of the Taskcluster deployment from which to download wpt reports
+# (after https://bugzilla.mozilla.org/show_bug.cgi?id=1574668 lands, this will
+# be https://community-tc.services.mozilla.com)
+TASKCLUSTER_ROOT_URL = 'https://taskcluster.net'
+
+
+def get_parser():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--ref", action="store", default="master",
+ help="Branch (in the GitHub repository) or commit to fetch logs for")
+ parser.add_argument("--artifact-name", action="store", default="wpt_report.json.gz",
+ help="Log type to fetch")
+ parser.add_argument("--repo-name", action="store", default="web-platform-tests/wpt",
+ help="GitHub repo name in the format owner/repo. "
+ "This must be the repo from which the Taskcluster run was scheduled "
+ "(for PRs this is the repo into which the PR would merge)")
+ parser.add_argument("--token-file", action="store",
+ help="File containing GitHub token")
+ parser.add_argument("--out-dir", action="store", default=".",
+ help="Path to save the logfiles")
+ return parser
+
+
+def get_json(url, key=None):
+ resp = requests.get(url)
+ resp.raise_for_status()
+ data = resp.json()
+ if key:
+ data = data[key]
+ return data
+
+
+def get(url, dest, name):
+ resp = requests.get(url)
+ resp.raise_for_status()
+ path = os.path.join(dest, name)
+ with open(path, "w") as f:
+ f.write(resp.content)
+ return path
+
+
+def run(*args, **kwargs):
+ if not os.path.exists(kwargs["out_dir"]):
+ os.mkdir(kwargs["out_dir"])
+
+ if kwargs["token_file"]:
+ with open(kwargs["token_file"]) as f:
+ gh = github.Github(f.read().strip())
+ else:
+ gh = github.Github()
+
+ repo = gh.get_repo(kwargs["repo_name"])
+ commit = repo.get_commit(kwargs["ref"])
+ statuses = commit.get_statuses()
+ taskgroups = set()
+
+ for status in statuses:
+ if not status.context.startswith("Taskcluster "):
+ continue
+ if status.state == "pending":
+ continue
+ taskgroup_id = status.target_url.rsplit("/", 1)[1]
+ taskgroups.add(taskgroup_id)
+
+ if not taskgroups:
+ logger.error("No complete Taskcluster runs found for ref %s" % kwargs["ref"])
+ return 1
+
+ for taskgroup in taskgroups:
+ if TASKCLUSTER_ROOT_URL == 'https://taskcluster.net':
+ # NOTE: this condition can be removed after November 9, 2019
+ taskgroup_url = "https://queue.taskcluster.net/v1/task-group/%s/list"
+ artifacts_list_url = "https://queue.taskcluster.net/v1/task/%s/artifacts"
+ else:
+ taskgroup_url = TASKCLUSTER_ROOT_URL + "/api/queue/v1/task-group/%s/list"
+ artifacts_list_url = TASKCLUSTER_ROOT_URL + "/api/queue/v1/task/%s/artifacts"
+ tasks = get_json(taskgroup_url % taskgroup, "tasks")
+ for task in tasks:
+ task_id = task["status"]["taskId"]
+ url = artifacts_list_url % (task_id,)
+ for artifact in get_json(url, "artifacts"):
+ if artifact["name"].endswith(kwargs["artifact_name"]):
+ filename = "%s-%s-%s" % (task["task"]["metadata"]["name"],
+ task_id,
+ kwargs["artifact_name"])
+ path = get("%s/%s" % (url, artifact["name"]), kwargs["out_dir"], filename)
+ logger.info(path)
+
+
+def main():
+ kwargs = get_parser().parse_args()
+
+ run(None, vars(kwargs))
+
+
+if __name__ == "__main__":
+ main() # type: ignore
diff --git a/testing/web-platform/tests/tools/ci/tc/github_checks_output.py b/testing/web-platform/tests/tools/ci/tc/github_checks_output.py
new file mode 100644
index 0000000000..a334d39eec
--- /dev/null
+++ b/testing/web-platform/tests/tools/ci/tc/github_checks_output.py
@@ -0,0 +1,34 @@
+from typing import Optional, Text
+
+
+class GitHubChecksOutputter:
+ """Provides a method to output data to be shown in the GitHub Checks UI.
+
+ This can be useful to provide a summary of a given check (e.g. the lint)
+ to enable developers to quickly understand what has gone wrong. The output
+ supports markdown format.
+
+ https://docs.taskcluster.net/docs/reference/integrations/github/checks#custom-text-output-in-checks
+ """
+ def __init__(self, path: Text) -> None:
+ self.path = path
+
+ def output(self, line: Text) -> None:
+ with open(self.path, mode="a") as f:
+ f.write(line)
+ f.write("\n")
+
+
+__outputter = None
+
+
+def get_gh_checks_outputter(filepath: Optional[Text]) -> Optional[GitHubChecksOutputter]:
+ """Return the outputter for GitHub Checks output, if enabled.
+
+ :param filepath: The filepath to write GitHub Check output information to,
+ or None if not enabled.
+ """
+ global __outputter
+ if filepath and __outputter is None:
+ __outputter = GitHubChecksOutputter(filepath)
+ return __outputter
diff --git a/testing/web-platform/tests/tools/ci/tc/sink_task.py b/testing/web-platform/tests/tools/ci/tc/sink_task.py
new file mode 100644
index 0000000000..ec3d5a47ca
--- /dev/null
+++ b/testing/web-platform/tests/tools/ci/tc/sink_task.py
@@ -0,0 +1,65 @@
+# mypy: allow-untyped-defs
+
+import argparse
+import logging
+import os
+
+import taskcluster
+
+from .github_checks_output import get_gh_checks_outputter
+
+
+logging.basicConfig()
+logger = logging.getLogger()
+
+
+def check_task_statuses(task_ids, github_checks_outputter):
+ """Verifies whether a set of Taskcluster tasks completed successfully or not.
+
+ Returns 0 if all tasks passed completed successfully, 1 otherwise."""
+
+ queue = taskcluster.Queue({'rootUrl': os.environ['TASKCLUSTER_ROOT_URL']})
+ failed_tasks = []
+ for task in task_ids:
+ status = queue.status(task)
+ state = status['status']['state']
+ if state == 'failed' or state == 'exception':
+ logger.error(f'Task {task} failed with state "{state}"')
+ failed_tasks.append(status)
+ elif state != 'completed':
+ logger.error(f'Task {task} had unexpected state "{state}"')
+ failed_tasks.append(status)
+
+ if failed_tasks and github_checks_outputter:
+ github_checks_outputter.output('Failed tasks:')
+ for task in failed_tasks:
+ # We need to make an additional call to get the task name.
+ task_id = task['status']['taskId']
+ task_name = queue.task(task_id)['metadata']['name']
+ github_checks_outputter.output('* `{}` failed with status `{}`'.format(task_name, task['status']['state']))
+ else:
+ logger.info('All tasks completed successfully')
+ if github_checks_outputter:
+ github_checks_outputter.output('All tasks completed successfully')
+ return 1 if failed_tasks else 0
+
+
+def get_parser():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--github-checks-text-file", type=str,
+ help="Path to GitHub checks output file for Taskcluster runs")
+ parser.add_argument("tasks", nargs="+",
+ help="A set of Taskcluster task ids to verify the state of.")
+ return parser
+
+
+def run(venv, **kwargs):
+ github_checks_outputter = get_gh_checks_outputter(kwargs["github_checks_text_file"])
+
+ if github_checks_outputter:
+ github_checks_outputter.output(
+ "This check acts as a 'sink' for all other Taskcluster-based checks. "
+ "A failure here means that some other check has failed, which is the "
+ "real blocker.\n"
+ )
+ return check_task_statuses(kwargs['tasks'], github_checks_outputter)
diff --git a/testing/web-platform/tests/tools/ci/tc/taskgraph.py b/testing/web-platform/tests/tools/ci/tc/taskgraph.py
new file mode 100644
index 0000000000..54542ef0f8
--- /dev/null
+++ b/testing/web-platform/tests/tools/ci/tc/taskgraph.py
@@ -0,0 +1,175 @@
+# mypy: allow-untyped-defs
+
+import json
+import os
+import re
+from collections import OrderedDict
+from copy import deepcopy
+
+import yaml
+
+here = os.path.dirname(__file__)
+
+
def first(iterable):
    """Return the first item of *iterable*.

    Works with both iterators and iterables: anything ``iter()`` accepts.

    :raises ValueError: if the object cannot be iterated.
    :raises StopIteration: if the iterable is empty.
    """
    # The old implementation special-cased a ``next`` attribute, which is a
    # Python 2 remnant (Python 3 iterators define ``__next__``), and it
    # rejected objects implementing only the sequence protocol. Deferring
    # to iter() handles every iterable correctly.
    try:
        iterator = iter(iterable)
    except TypeError:
        raise ValueError("Object isn't iterable") from None
    return next(iterator)
+
+
def load_task_file(path):
    """Read and parse the YAML task-definition file at *path*."""
    with open(path) as fh:
        contents = fh.read()
    return yaml.safe_load(contents)
+
+
def update_recursive(data, update_data):
    """Merge *update_data* into *data* in place.

    Nested dicts are merged recursively, lists are concatenated
    (existing items first), and scalar values from *update_data*
    overwrite those in *data*.

    :raises ValueError: if a key holds a dict or list in *update_data*
                        but a different type in *data*.
    """
    for key, new_value in update_data.items():
        if key not in data:
            data[key] = new_value
            continue
        existing = data[key]
        if isinstance(new_value, dict):
            if not isinstance(existing, dict):
                raise ValueError("Variable %s has inconsistent types "
                                 "(expected object)" % key)
            update_recursive(existing, new_value)
        elif isinstance(new_value, list):
            if not isinstance(existing, list):
                raise ValueError("Variable %s has inconsistent types "
                                 "(expected list)" % key)
            existing.extend(new_value)
        else:
            data[key] = new_value
+
+
def resolve_use(task_data, templates):
    """Expand the "use" key of *task_data* by merging in templates.

    Each named template from *templates* is deep-copied and merged in
    listed order, then the task's own data is merged on top so it wins.
    The "use" key itself is dropped from the result.
    """
    resolved = {}
    for template_name in task_data.get("use", []):
        update_recursive(resolved, deepcopy(templates[template_name]))
    update_recursive(resolved, task_data)
    resolved.pop("use", None)
    return resolved
+
+
def resolve_name(task_data, default_name):
    """Ensure *task_data* has a "name" key, defaulting to *default_name*.

    Mutates and returns *task_data*; an existing name is left untouched.
    """
    task_data.setdefault("name", default_name)
    return task_data
+
+
def resolve_chunks(task_data):
    """Expand a task with a "chunks" count into one task per chunk.

    A task without "chunks" is returned unchanged inside a single-item
    list. Otherwise each returned task is a deep copy whose "chunks"
    key is replaced by ``{"id": <1-based index>, "total": <count>}``.
    A "chunks-override" mapping, keyed by the task's "test-type" var,
    may substitute a different total.
    """
    if "chunks" not in task_data:
        return [task_data]
    total_chunks = task_data["chunks"]
    if "chunks-override" in task_data:
        test_type = task_data["vars"]["test-type"]
        override = task_data["chunks-override"].get(test_type)
        if override is not None:
            total_chunks = override
    chunked_tasks = []
    for chunk_id in range(1, total_chunks + 1):
        chunk = deepcopy(task_data)
        chunk["chunks"] = {"id": chunk_id, "total": total_chunks}
        chunked_tasks.append(chunk)
    return chunked_tasks
+
+
+def replace_vars(input_string, variables):
+ # TODO: support replacing as a non-string type?
+ variable_re = re.compile(r"(?-
+ ./tools/ci/taskcluster-run.py
+ ${vars.browser}
+ ${vars.channel}
+ --
+ --channel=${vars.channel}
+ --log-wptreport=../artifacts/wpt_report.json
+ --log-wptscreenshot=../artifacts/wpt_screenshot.txt
+ --no-fail-on-unexpected
+ --this-chunk=${chunks.id}
+ --total-chunks=${chunks.total}
+ --test-type=${vars.suite}
+
+ trigger-master:
+ trigger:
+ branch:
+ - master
+
+ trigger-push:
+ trigger:
+ branch:
+ - triggers/${vars.browser}_${vars.channel}
+
+ trigger-daily:
+ trigger:
+ branch:
+ - epochs/daily
+
+ trigger-weekly:
+ trigger:
+ branch:
+ - epochs/weekly
+
+ trigger-pr:
+ trigger:
+ pull-request:
+
+ browser-firefox:
+ depends-on:
+ - download-firefox-${vars.channel}
+ download-artifacts:
+ - task: download-firefox-${vars.channel}
+ glob: public/results/firefox-${vars.channel}.*
+ dest: build/
+ extract: true
+
+ browser-webkitgtk_minibrowser: {}
+
+ browser-chrome: {}
+
+ browser-chromium: {}
+
+ browser-servo: {}
+
+ browser-firefox_android:
+ privileged: true
+ scopes:
+ - "docker-worker:capability:privileged"
+ chunks-override:
+ testharness: 24
+
+ tox-python3_7:
+ env:
+ TOXENV: py37
+ PY_COLORS: "0"
+ install:
+ - python3.7
+ - python3.7-distutils
+ - python3.7-dev
+ - python3.7-venv
+
+ tox-python3_11:
+ env:
+ TOXENV: py311
+ PY_COLORS: "0"
+ install:
+ - python3.11
+ - python3.11-distutils
+ - python3.11-dev
+ - python3.11-venv
+ tests-affected:
+ options:
+ browser:
+ - ${vars.browser}
+ channel: ${vars.channel}
+ schedule-if:
+ run-job:
+ - affected_tests
+
+tasks:
+ # The scheduling order of tasks is NOT determined by the order in which they
+ # are defined, but by their dependencies (depends-on).
+
+ # Run full suites on push
+ - $map:
+ for:
+ - vars:
+ suite: testharness
+ - vars:
+ suite: reftest
+ - vars:
+ suite: wdspec
+ - vars:
+ suite: crashtest
+ do:
+ $map:
+ for:
+ - vars:
+ browser: firefox
+ channel: nightly
+ use:
+ - trigger-master
+ - trigger-push
+ - vars:
+ browser: firefox
+ channel: beta
+ use:
+ - trigger-weekly
+ - trigger-push
+ - vars:
+ browser: firefox
+ channel: stable
+ use:
+ - trigger-daily
+ - trigger-push
+ - vars:
+ # Chromium ToT
+ browser: chromium
+ channel: nightly
+ use:
+ - trigger-daily
+ - trigger-push
+ - vars:
+ browser: chrome
+ channel: canary
+ use:
+ - trigger-master
+ - trigger-push
+ - vars:
+ browser: chrome
+ channel: dev
+ use:
+ - trigger-master
+ - trigger-push
+ - vars:
+ browser: chrome
+ channel: beta
+ use:
+ - trigger-weekly
+ - trigger-push
+ - vars:
+ browser: chrome
+ channel: stable
+ use:
+ - trigger-daily
+ - trigger-push
+ - vars:
+ browser: webkitgtk_minibrowser
+ channel: nightly
+ use:
+ - trigger-daily
+ - trigger-push
+ - vars:
+ browser: webkitgtk_minibrowser
+ channel: stable
+ use:
+ - trigger-weekly
+ - trigger-push
+ - vars:
+ browser: webkitgtk_minibrowser
+ channel: beta
+ use:
+ - trigger-weekly
+ - trigger-push
+ - vars:
+ browser: servo
+ channel: nightly
+ use:
+ - trigger-weekly
+ - trigger-push
+ - vars:
+ browser: firefox_android
+ channel: nightly
+ use:
+ - trigger-daily
+ - trigger-push
+ do:
+ - ${vars.browser}-${vars.channel}-${vars.suite}:
+ use:
+ - wpt-base
+ - run-options
+ - wpt-run
+ - browser-${vars.browser}
+ - wpt-${vars.suite}
+ description: >-
+ A subset of WPT's "${vars.suite}" tests (chunk number ${chunks.id}
+ of ${chunks.total}), run in the ${vars.channel} release of
+ ${vars.browser}.
+
+ # print-reftest are currently only supported by Chrome and Firefox.
+ - $map:
+ for:
+ - vars:
+ suite: print-reftest
+ do:
+ $map:
+ for:
+ - vars:
+ browser: firefox
+ channel: nightly
+ use:
+ - trigger-master
+ - trigger-push
+ - vars:
+ browser: firefox
+ channel: beta
+ use:
+ - trigger-weekly
+ - trigger-push
+ - vars:
+ browser: firefox
+ channel: stable
+ use:
+ - trigger-daily
+ - trigger-push
+ - vars:
+ # Chromium ToT
+ browser: chromium
+ channel: nightly
+ use:
+ - trigger-daily
+ - trigger-push
+ - vars:
+ browser: chrome
+ channel: canary
+ use:
+ - trigger-master
+ - trigger-push
+ - vars:
+ browser: chrome
+ channel: dev
+ use:
+ - trigger-master
+ - trigger-push
+ - vars:
+ browser: chrome
+ channel: beta
+ use:
+ - trigger-weekly
+ - trigger-push
+ - vars:
+ browser: chrome
+ channel: stable
+ use:
+ - trigger-daily
+ - trigger-push
+ do:
+ - ${vars.browser}-${vars.channel}-${vars.suite}:
+ use:
+ - wpt-base
+ - run-options
+ - wpt-run
+ - browser-${vars.browser}
+ - wpt-${vars.suite}
+ description: >-
+ A subset of WPT's "${vars.suite}" tests (chunk number ${chunks.id}
+ of ${chunks.total}), run in the ${vars.channel} release of
+ ${vars.browser}.
+
+ - $map:
+ for:
+ - vars:
+ browser: firefox
+ channel: nightly
+ stability-exclude-users:
+ - moz-wptsync-bot
+ required: true
+ - vars:
+ browser: chrome
+ channel: dev
+ stability-exclude-users:
+ - chromium-wpt-export-bot
+ required: false
+ do:
+ - wpt-${vars.browser}-${vars.channel}-stability:
+ use:
+ - wpt-base
+ - run-options
+ - browser-${vars.browser}
+ - trigger-pr
+ - tests-affected
+ description: >-
+ Verify that all tests affected by a pull request are stable
+ when executed in ${vars.browser}.
+ command: >-
+ ./tools/ci/taskcluster-run.py
+ --commit-range base_head
+ ${vars.browser}
+ ${vars.channel}
+ --
+ --channel=${vars.channel}
+ --verify
+ --verify-no-chaos-mode
+ --verify-repeat-loop=0
+ --verify-repeat-restart=10
+ --github-checks-text-file="/home/test/artifacts/checkrun.md"
+ exclude-users: ${vars.stability-exclude-users}
+ required: ${vars.required}
+
+ - wpt-${vars.browser}-${vars.channel}-results:
+ use:
+ - wpt-base
+ - run-options
+ - browser-${vars.browser}
+ - trigger-pr
+ - tests-affected
+ description: >-
+ Collect results for all tests affected by a pull request in
+ ${vars.browser}.
+ command: >-
+ ./tools/ci/taskcluster-run.py
+ --commit-range base_head
+ ${vars.browser}
+ ${vars.channel}
+ --
+ --channel=${vars.channel}
+ --no-fail-on-unexpected
+ --log-wptreport=../artifacts/wpt_report.json
+ --log-wptscreenshot=../artifacts/wpt_screenshot.txt
+
+ - wpt-${vars.browser}-${vars.channel}-results-without-changes:
+ use:
+ - wpt-base
+ - run-options
+ - browser-${vars.browser}
+ - trigger-pr
+ - tests-affected
+ options:
+ checkout: base_head
+ description: >-
+ Collect results for all tests affected by a pull request in
+ ${vars.browser} but without the changes in the PR.
+ command: >-
+ ./tools/ci/taskcluster-run.py
+ --commit-range task_head
+ ${vars.browser}
+ ${vars.channel}
+ --
+ --channel=${vars.channel}
+ --no-fail-on-unexpected
+ --log-wptreport=../artifacts/wpt_report.json
+ --log-wptscreenshot=../artifacts/wpt_screenshot.txt
+ - $map:
+ for:
+ - vars:
+ channel: nightly
+ - vars:
+ channel: beta
+ - vars:
+ channel: stable
+ do:
+ download-firefox-${vars.channel}:
+ use:
+ - wpt-base
+ command: "./wpt install --download-only --destination /home/test/artifacts/ --channel=${vars.channel} --rename=firefox-${vars.channel} firefox browser"
+
+ - lint:
+ use:
+ - wpt-base
+ - trigger-master
+ - trigger-pr
+ description: >-
+ Lint for wpt-specific requirements
+ command: "./wpt lint --all --github-checks-text-file=/home/test/artifacts/checkrun.md"
+
+ - update-built:
+ use:
+ - wpt-base
+ - trigger-pr
+ schedule-if:
+ run-job:
+ - update_built
+ command: "./tools/ci/ci_built_diff.sh"
+
+ - tools/ unittests (Python 3.7):
+ description: >-
+ Unit tests for tools running under Python 3.7, excluding wptrunner
+ use:
+ - wpt-base
+ - trigger-pr
+ - tox-python3_7
+ command: ./tools/ci/ci_tools_unittest.sh
+ env:
+ HYPOTHESIS_PROFILE: ci
+ schedule-if:
+ run-job:
+ - tools_unittest
+
+ - tools/ unittests (Python 3.11):
+ description: >-
+ Unit tests for tools running under Python 3.11, excluding wptrunner
+ use:
+ - wpt-base
+ - trigger-pr
+ - tox-python3_11
+ command: ./tools/ci/ci_tools_unittest.sh
+ env:
+ HYPOTHESIS_PROFILE: ci
+ schedule-if:
+ run-job:
+ - tools_unittest
+
+ - tools/ integration tests (Python 3.7):
+ description: >-
+ Integration tests for tools running under Python 3.7
+ use:
+ - wpt-base
+ - trigger-pr
+ - tox-python3_7
+ command: ./tools/ci/ci_tools_integration_test.sh
+ install:
+ - libnss3-tools
+ options:
+ oom-killer: true
+ browser:
+ - firefox
+ - chrome
+ channel: experimental
+ xvfb: true
+ hosts: true
+ schedule-if:
+ run-job:
+ - wpt_integration
+
+ - tools/ integration tests (Python 3.11):
+ description: >-
+ Integration tests for tools running under Python 3.11
+ use:
+ - wpt-base
+ - trigger-pr
+ - tox-python3_11
+ command: ./tools/ci/ci_tools_integration_test.sh
+ install:
+ - libnss3-tools
+ options:
+ oom-killer: true
+ browser:
+ - firefox
+ - chrome
+ channel: experimental
+ xvfb: true
+ hosts: true
+ schedule-if:
+ run-job:
+ - wpt_integration
+
+ - resources/ tests (Python 3.7):
+ description: >-
+ Tests for testharness.js and other files in resources/ under Python 3.7
+ use:
+ - wpt-base
+ - trigger-pr
+ - tox-python3_7
+ command: ./tools/ci/ci_resources_unittest.sh
+ install:
+ - libnss3-tools
+ options:
+ browser:
+ - firefox
+ xvfb: true
+ hosts: true
+ schedule-if:
+ run-job:
+ - resources_unittest
+
+ - resources/ tests (Python 3.11):
+ description: >-
+ Tests for testharness.js and other files in resources/ under Python 3.11
+ use:
+ - wpt-base
+ - trigger-pr
+ - tox-python3_11
+ command: ./tools/ci/ci_resources_unittest.sh
+ install:
+ - libnss3-tools
+ options:
+ browser:
+ - firefox
+ xvfb: true
+ hosts: true
+ schedule-if:
+ run-job:
+ - resources_unittest
+
+ - infrastructure/ tests:
+ description: >-
+ Smoketests for wptrunner
+ vars:
+ channel: nightly
+ use:
+ - wpt-base
+ - trigger-pr
+ - browser-firefox
+ - browser-firefox_android
+ command: ./tools/ci/ci_wptrunner_infrastructure.sh
+ install:
+ - python3-pip
+ - libnss3-tools
+ - libappindicator1
+ - fonts-liberation
+ options:
+ oom-killer: true
+ browser:
+ - firefox
+ - chrome
+ - firefox_android
+ channel: experimental
+ xvfb: true
+ hosts: false
+ schedule-if:
+ run-job:
+ - wptrunner_infrastructure
+
+ # Note: even though sink-task does not have `depends-on`, it depends on all
+ # other tasks (dynamically added by tools/ci/tc/decision.py).
+ - sink-task:
+ description: >-
+ Sink task for all other tasks; indicates success
+ use:
+ - wpt-base
+ - trigger-pr
+ command: "./wpt tc-sink-task --github-checks-text-file=/home/test/artifacts/checkrun.md"
+ requires: all-resolved
diff --git a/testing/web-platform/tests/tools/ci/tc/testdata/epochs_daily_push_event.json b/testing/web-platform/tests/tools/ci/tc/testdata/epochs_daily_push_event.json
new file mode 100644
index 0000000000..0f74c315d2
--- /dev/null
+++ b/testing/web-platform/tests/tools/ci/tc/testdata/epochs_daily_push_event.json
@@ -0,0 +1,460 @@
+{
+ "ref": "refs/heads/epochs/daily",
+ "before": "20bb1ca5db519ee5d37ece6492868f8a6b65a2e7",
+ "after": "5df56b25e1cb81f81fe16c88be839f9fd538b41e",
+ "repository": {
+ "id": 3618133,
+ "node_id": "MDEwOlJlcG9zaXRvcnkzNjE4MTMz",
+ "name": "wpt",
+ "full_name": "web-platform-tests/wpt",
+ "private": false,
+ "owner": {
+ "name": "web-platform-tests",
+ "email": null,
+ "login": "web-platform-tests",
+ "id": 37226233,
+ "node_id": "MDEyOk9yZ2FuaXphdGlvbjM3MjI2MjMz",
+ "avatar_url": "https://avatars0.githubusercontent.com/u/37226233?v=4",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/web-platform-tests",
+ "html_url": "https://github.com/web-platform-tests",
+ "followers_url": "https://api.github.com/users/web-platform-tests/followers",
+ "following_url": "https://api.github.com/users/web-platform-tests/following{/other_user}",
+ "gists_url": "https://api.github.com/users/web-platform-tests/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/web-platform-tests/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/web-platform-tests/subscriptions",
+ "organizations_url": "https://api.github.com/users/web-platform-tests/orgs",
+ "repos_url": "https://api.github.com/users/web-platform-tests/repos",
+ "events_url": "https://api.github.com/users/web-platform-tests/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/web-platform-tests/received_events",
+ "type": "Organization",
+ "site_admin": false
+ },
+ "html_url": "https://github.com/web-platform-tests/wpt",
+ "description": "Test suites for Web platform specs — including WHATWG, W3C, and others",
+ "fork": false,
+ "url": "https://github.com/web-platform-tests/wpt",
+ "forks_url": "https://api.github.com/repos/web-platform-tests/wpt/forks",
+ "keys_url": "https://api.github.com/repos/web-platform-tests/wpt/keys{/key_id}",
+ "collaborators_url": "https://api.github.com/repos/web-platform-tests/wpt/collaborators{/collaborator}",
+ "teams_url": "https://api.github.com/repos/web-platform-tests/wpt/teams",
+ "hooks_url": "https://api.github.com/repos/web-platform-tests/wpt/hooks",
+ "issue_events_url": "https://api.github.com/repos/web-platform-tests/wpt/issues/events{/number}",
+ "events_url": "https://api.github.com/repos/web-platform-tests/wpt/events",
+ "assignees_url": "https://api.github.com/repos/web-platform-tests/wpt/assignees{/user}",
+ "branches_url": "https://api.github.com/repos/web-platform-tests/wpt/branches{/branch}",
+ "tags_url": "https://api.github.com/repos/web-platform-tests/wpt/tags",
+ "blobs_url": "https://api.github.com/repos/web-platform-tests/wpt/git/blobs{/sha}",
+ "git_tags_url": "https://api.github.com/repos/web-platform-tests/wpt/git/tags{/sha}",
+ "git_refs_url": "https://api.github.com/repos/web-platform-tests/wpt/git/refs{/sha}",
+ "trees_url": "https://api.github.com/repos/web-platform-tests/wpt/git/trees{/sha}",
+ "statuses_url": "https://api.github.com/repos/web-platform-tests/wpt/statuses/{sha}",
+ "languages_url": "https://api.github.com/repos/web-platform-tests/wpt/languages",
+ "stargazers_url": "https://api.github.com/repos/web-platform-tests/wpt/stargazers",
+ "contributors_url": "https://api.github.com/repos/web-platform-tests/wpt/contributors",
+ "subscribers_url": "https://api.github.com/repos/web-platform-tests/wpt/subscribers",
+ "subscription_url": "https://api.github.com/repos/web-platform-tests/wpt/subscription",
+ "commits_url": "https://api.github.com/repos/web-platform-tests/wpt/commits{/sha}",
+ "git_commits_url": "https://api.github.com/repos/web-platform-tests/wpt/git/commits{/sha}",
+ "comments_url": "https://api.github.com/repos/web-platform-tests/wpt/comments{/number}",
+ "issue_comment_url": "https://api.github.com/repos/web-platform-tests/wpt/issues/comments{/number}",
+ "contents_url": "https://api.github.com/repos/web-platform-tests/wpt/contents/{+path}",
+ "compare_url": "https://api.github.com/repos/web-platform-tests/wpt/compare/{base}...{head}",
+ "merges_url": "https://api.github.com/repos/web-platform-tests/wpt/merges",
+ "archive_url": "https://api.github.com/repos/web-platform-tests/wpt/{archive_format}{/ref}",
+ "downloads_url": "https://api.github.com/repos/web-platform-tests/wpt/downloads",
+ "issues_url": "https://api.github.com/repos/web-platform-tests/wpt/issues{/number}",
+ "pulls_url": "https://api.github.com/repos/web-platform-tests/wpt/pulls{/number}",
+ "milestones_url": "https://api.github.com/repos/web-platform-tests/wpt/milestones{/number}",
+ "notifications_url": "https://api.github.com/repos/web-platform-tests/wpt/notifications{?since,all,participating}",
+ "labels_url": "https://api.github.com/repos/web-platform-tests/wpt/labels{/name}",
+ "releases_url": "https://api.github.com/repos/web-platform-tests/wpt/releases{/id}",
+ "deployments_url": "https://api.github.com/repos/web-platform-tests/wpt/deployments",
+ "created_at": 1330865891,
+ "updated_at": "2019-11-30T21:34:30Z",
+ "pushed_at": 1575160610,
+ "git_url": "git://github.com/web-platform-tests/wpt.git",
+ "ssh_url": "git@github.com:web-platform-tests/wpt.git",
+ "clone_url": "https://github.com/web-platform-tests/wpt.git",
+ "svn_url": "https://github.com/web-platform-tests/wpt",
+ "homepage": "https://web-platform-tests.org/",
+ "size": 329465,
+ "stargazers_count": 2543,
+ "watchers_count": 2543,
+ "language": "HTML",
+ "has_issues": true,
+ "has_projects": true,
+ "has_downloads": true,
+ "has_wiki": true,
+ "has_pages": true,
+ "forks_count": 1838,
+ "mirror_url": null,
+ "archived": false,
+ "disabled": false,
+ "open_issues_count": 1590,
+ "license": {
+ "key": "other",
+ "name": "Other",
+ "spdx_id": "NOASSERTION",
+ "url": null,
+ "node_id": "MDc6TGljZW5zZTA="
+ },
+ "forks": 1838,
+ "open_issues": 1590,
+ "watchers": 2543,
+ "default_branch": "master",
+ "stargazers": 2543,
+ "master_branch": "master",
+ "organization": "web-platform-tests"
+ },
+ "pusher": {
+ "name": "github-actions[bot]",
+ "email": null
+ },
+ "organization": {
+ "login": "web-platform-tests",
+ "id": 37226233,
+ "node_id": "MDEyOk9yZ2FuaXphdGlvbjM3MjI2MjMz",
+ "url": "https://api.github.com/orgs/web-platform-tests",
+ "repos_url": "https://api.github.com/orgs/web-platform-tests/repos",
+ "events_url": "https://api.github.com/orgs/web-platform-tests/events",
+ "hooks_url": "https://api.github.com/orgs/web-platform-tests/hooks",
+ "issues_url": "https://api.github.com/orgs/web-platform-tests/issues",
+ "members_url": "https://api.github.com/orgs/web-platform-tests/members{/member}",
+ "public_members_url": "https://api.github.com/orgs/web-platform-tests/public_members{/member}",
+ "avatar_url": "https://avatars0.githubusercontent.com/u/37226233?v=4",
+ "description": ""
+ },
+ "sender": {
+ "login": "github-actions[bot]",
+ "id": 41898282,
+ "node_id": "MDM6Qm90NDE4OTgyODI=",
+ "avatar_url": "https://avatars2.githubusercontent.com/in/15368?v=4",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/github-actions%5Bbot%5D",
+ "html_url": "https://github.com/apps/github-actions",
+ "followers_url": "https://api.github.com/users/github-actions%5Bbot%5D/followers",
+ "following_url": "https://api.github.com/users/github-actions%5Bbot%5D/following{/other_user}",
+ "gists_url": "https://api.github.com/users/github-actions%5Bbot%5D/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/github-actions%5Bbot%5D/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/github-actions%5Bbot%5D/subscriptions",
+ "organizations_url": "https://api.github.com/users/github-actions%5Bbot%5D/orgs",
+ "repos_url": "https://api.github.com/users/github-actions%5Bbot%5D/repos",
+ "events_url": "https://api.github.com/users/github-actions%5Bbot%5D/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/github-actions%5Bbot%5D/received_events",
+ "type": "Bot",
+ "site_admin": false
+ },
+ "created": false,
+ "deleted": false,
+ "forced": false,
+ "base_ref": "refs/heads/epochs/six_hourly",
+ "compare": "https://github.com/web-platform-tests/wpt/compare/20bb1ca5db51...5df56b25e1cb",
+ "commits": [
+ {
+ "id": "3503c50a6452e153bde906a9c6644cb6237224fc",
+ "tree_id": "b735fa0ae88ebe0abd6764a1afd63aea815ac18e",
+ "distinct": false,
+ "message": "[LayoutNG] Pixel-snap column rules.\n\nBug: 829028\nChange-Id: I252901109502256f14bc68e64d4303006db50a13\nReviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1944350\nCommit-Queue: Xianzhu Wang \nReviewed-by: Xianzhu Wang \nCr-Commit-Position: refs/heads/master@{#720302}",
+ "timestamp": "2019-11-29T16:25:44-08:00",
+ "url": "https://github.com/web-platform-tests/wpt/commit/3503c50a6452e153bde906a9c6644cb6237224fc",
+ "author": {
+ "name": "Morten Stenshorne",
+ "email": "mstensho@chromium.org",
+ "username": "mstensho"
+ },
+ "committer": {
+ "name": "Blink WPT Bot",
+ "email": "blink-w3c-test-autoroller@chromium.org",
+ "username": "chromium-wpt-export-bot"
+ },
+ "added": [
+ "css/css-multicol/equal-gap-and-rule.html"
+ ],
+ "removed": [
+
+ ],
+ "modified": [
+
+ ]
+ },
+ {
+ "id": "561b765308e6d188618f3ba73091bb598d8357ce",
+ "tree_id": "775ac4481c03e020819910d03019f0ec93def868",
+ "distinct": false,
+ "message": "Fix parser mXSS sanitizer bypass for and
within foreign context\n\nPrior to this CL, the following code:\n
\nparsed to this innerHTML: \n\nThis is in contrast to this code:\n \nwhich parses to \n\nThe fact that the
is left inside the