Diffstat (limited to 'python/mozperftest/mozperftest/metrics')
-rw-r--r-- python/mozperftest/mozperftest/metrics/__init__.py | 23
-rw-r--r-- python/mozperftest/mozperftest/metrics/common.py | 356
-rw-r--r-- python/mozperftest/mozperftest/metrics/consoleoutput.py | 59
-rw-r--r-- python/mozperftest/mozperftest/metrics/exceptions.py | 53
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebook/__init__.py | 7
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebook/constant.py | 31
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebook/notebook-sections/compare | 85
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebook/notebook-sections/header | 12
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebook/notebook-sections/scatterplot | 15
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebook/perftestetl.py | 167
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebook/perftestnotebook.py | 79
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebook/template_upload_file.html | 39
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebook/transformer.py | 228
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebook/transforms/__init__.py | 0
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebook/transforms/logcattime.py | 121
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebook/transforms/single_json.py | 56
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebook/utilities.py | 63
-rw-r--r-- python/mozperftest/mozperftest/metrics/notebookupload.py | 115
-rw-r--r-- python/mozperftest/mozperftest/metrics/perfboard/__init__.py | 3
-rw-r--r-- python/mozperftest/mozperftest/metrics/perfboard/dashboard.json | 56
-rw-r--r-- python/mozperftest/mozperftest/metrics/perfboard/grafana.py | 87
-rw-r--r-- python/mozperftest/mozperftest/metrics/perfboard/influx.py | 188
-rw-r--r-- python/mozperftest/mozperftest/metrics/perfboard/panel.json | 81
-rw-r--r-- python/mozperftest/mozperftest/metrics/perfboard/target.json | 20
-rw-r--r-- python/mozperftest/mozperftest/metrics/perfherder.py | 374
-rw-r--r-- python/mozperftest/mozperftest/metrics/utils.py | 149
-rw-r--r-- python/mozperftest/mozperftest/metrics/visualmetrics.py | 221
27 files changed, 2688 insertions(+), 0 deletions(-)
diff --git a/python/mozperftest/mozperftest/metrics/__init__.py b/python/mozperftest/mozperftest/metrics/__init__.py
new file mode 100644
index 0000000000..1ca5f7e408
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/__init__.py
@@ -0,0 +1,23 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+from mozperftest.layers import Layers
+from mozperftest.metrics.consoleoutput import ConsoleOutput
+from mozperftest.metrics.notebookupload import Notebook
+from mozperftest.metrics.perfboard.influx import Influx
+from mozperftest.metrics.perfherder import Perfherder
+from mozperftest.metrics.visualmetrics import VisualMetrics
+
+
+def get_layers():
+ return VisualMetrics, Perfherder, ConsoleOutput, Notebook, Influx
+
+
+def pick_metrics(env, flavor, mach_cmd):
+ if flavor in ("desktop-browser", "mobile-browser"):
+ layers = get_layers()
+ else:
+ # we don't need VisualMetrics for xpcshell
+ layers = Perfherder, ConsoleOutput, Notebook, Influx
+
+ return Layers(env, mach_cmd, layers)
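
As a rough standalone sketch (the layer names below are plain strings used for illustration, not the real Layer classes), the flavor check in pick_metrics() boils down to:

# Illustrative sketch of the flavor-based selection done by pick_metrics();
# the layer names are plain strings here, not the actual Layer classes.
BROWSER_FLAVORS = ("desktop-browser", "mobile-browser")


def choose_layer_names(flavor):
    if flavor in BROWSER_FLAVORS:
        return ["visualmetrics", "perfherder", "console", "notebook", "perfboard"]
    # xpcshell and other flavors skip visual metrics
    return ["perfherder", "console", "notebook", "perfboard"]


print(choose_layer_names("desktop-browser"))
print(choose_layer_names("xpcshell"))
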
diff --git a/python/mozperftest/mozperftest/metrics/common.py b/python/mozperftest/mozperftest/metrics/common.py
new file mode 100644
index 0000000000..3598cd378a
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/common.py
@@ -0,0 +1,356 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+from collections import defaultdict
+from pathlib import Path
+
+from mozperftest.metrics.exceptions import (
+ MetricsMissingResultsError,
+ MetricsMultipleTransformsError,
+)
+from mozperftest.metrics.notebook import PerftestETL
+from mozperftest.metrics.utils import metric_fields, validate_intermediate_results
+
+COMMON_ARGS = {
+ "metrics": {
+ "type": metric_fields,
+ "nargs": "*",
+ "default": [],
+ "help": "The metrics that should be retrieved from the data.",
+ },
+ "prefix": {"type": str, "default": "", "help": "Prefix used by the output files."},
+ "split-by": {
+ "type": str,
+ "default": None,
+ "help": "A metric name to use for splitting the data. For instance, "
+ "using browserScripts.pageinfo.url will split the data by the unique "
+ "URLs that are found.",
+ },
+ "simplify-names": {
+ "action": "store_true",
+ "default": False,
+ "help": "If set, metric names will be simplified to a single word. The PerftestETL "
+ "combines dictionary keys by `.`, and the final key contains that value of the data. "
+ "That final key becomes the new name of the metric.",
+ },
+ "simplify-exclude": {
+ "nargs": "*",
+ "default": ["statistics"],
+ "help": "When renaming/simplifying metric names, entries with these strings "
+ "will be ignored and won't get simplified. These options are only used when "
+ "--simplify-names is set.",
+ },
+ "transformer": {
+ "type": str,
+ "default": None,
+ "help": "The path to the file containing the custom transformer, "
+ "or the module to import along with the class name, "
+ "e.g. mozperftest.test.xpcshell:XpcShellTransformer",
+ },
+}
+
+
+class MetricsStorage(object):
+ """Holds data that is commonly used across all metrics layers.
+
+    An instance of this class represents data for a given output
+    path and prefix.
+ """
+
+ def __init__(self, output_path, prefix, logger):
+ self.prefix = prefix
+ self.output_path = output_path
+ self.stddata = {}
+ self.ptnb_config = {}
+ self.results = []
+ self.logger = logger
+
+ p = Path(output_path)
+ p.mkdir(parents=True, exist_ok=True)
+
+ def _parse_results(self, results):
+ if isinstance(results, dict):
+ return [results]
+ res = []
+ # XXX we need to embrace pathlib everywhere.
+ if isinstance(results, (str, Path)):
+ # Expecting a single path or a directory
+ p = Path(results)
+ if not p.exists():
+ self.logger.warning("Given path does not exist: {}".format(results))
+ elif p.is_dir():
+ files = [f for f in p.glob("**/*.json") if not f.is_dir()]
+ res.extend(self._parse_results(files))
+ else:
+ res.append(p.as_posix())
+ if isinstance(results, list):
+ # Expecting a list of paths
+ for path in results:
+ res.extend(self._parse_results(path))
+ return res
+
+ def set_results(self, results):
+ """Processes and sets results provided by the metadata.
+
+ `results` can be a path to a file or a directory. Every
+ file is scanned and we build a list. Alternatively, it
+ can be a mapping containing the results, in that case
+        we just use it directly, but keep it in a list.
+
+        :param results list/dict/str: Path or list of paths to the data
+        to be processed (or the data itself in a dict).
+ """
+ # Parse the results into files (for now) and the settings
+ self.results = defaultdict(lambda: defaultdict(list))
+ self.settings = defaultdict(dict)
+ for res in results:
+ # Ensure that the results are valid before continuing
+ validate_intermediate_results(res)
+
+ name = res["name"]
+ if isinstance(res["results"], dict):
+ # XXX Implement subtest based parsing
+ raise NotImplementedError(
+ "Subtest-based processing is not implemented yet"
+ )
+
+ # Merge all entries with the same name into one
+ # result, if separation is needed use unique names
+ self.results[name]["files"].extend(self._parse_results(res["results"]))
+
+ suite_settings = self.settings[name]
+ for key, val in res.items():
+ if key == "results":
+ continue
+ suite_settings[key] = val
+
+ # Check the transform definitions
+ currtrfm = self.results[name]["transformer"]
+ if not currtrfm:
+ self.results[name]["transformer"] = res.get(
+ "transformer", "SingleJsonRetriever"
+ )
+ elif currtrfm != res.get("transformer", "SingleJsonRetriever"):
+ raise MetricsMultipleTransformsError(
+ f"Only one transformer allowed per data name! Found multiple for {name}: "
+ f"{[currtrfm, res['transformer']]}"
+ )
+
+ # Get the transform options if available
+ self.results[name]["options"] = res.get("transformer-options", {})
+
+ if not self.results:
+ self.return_code = 1
+ raise MetricsMissingResultsError("Could not find any results to process.")
+
+ def get_standardized_data(self, group_name="firefox", transformer=None):
+ """Returns a parsed, standardized results data set.
+
+ The dataset is computed once then cached unless overwrite is used.
+ The transformer dictates how the data will be parsed, by default it uses
+ a JSON transformer that flattens the dictionary while merging all the
+ common metrics together.
+
+ :param group_name str: The name for this results group.
+ :param transformer str: The name of the transformer to use
+ when parsing the data. Currently, only SingleJsonRetriever
+ is available.
+ :param overwrite str: if True, we recompute the results
+        :return dict: Standardized notebook data containing the
+ requested metrics.
+ """
+ if self.stddata:
+ return self.stddata
+
+ for data_type, data_info in self.results.items():
+ tfm = transformer if transformer is not None else data_info["transformer"]
+ prefix = data_type
+ if self.prefix:
+ prefix = "{}-{}".format(self.prefix, data_type)
+
+ # Primarily used to store the transformer used on the data
+ # so that it can also be used for generating things
+ # like summary values for suites, and subtests.
+ self.ptnb_config[data_type] = {
+ "output": self.output_path,
+ "prefix": prefix,
+ "custom_transformer": tfm,
+ "file_groups": {data_type: data_info["files"]},
+ }
+
+ ptnb = PerftestETL(
+ file_groups=self.ptnb_config[data_type]["file_groups"],
+ config=self.ptnb_config[data_type],
+ prefix=self.prefix,
+ logger=self.logger,
+ custom_transform=tfm,
+ )
+ r = ptnb.process(**data_info["options"])
+ self.stddata[data_type] = r["data"]
+
+ return self.stddata
+
+ def filtered_metrics(
+ self,
+ group_name="firefox",
+ transformer=None,
+ metrics=None,
+ exclude=None,
+ split_by=None,
+ simplify_names=False,
+ simplify_exclude=["statistics"],
+ ):
+ """Filters the metrics to only those that were requested by `metrics`.
+
+ If metrics is Falsey (None, empty list, etc.) then no metrics
+ will be filtered. The entries in metrics are pattern matched with
+ the subtests in the standardized data (not a regular expression).
+ For example, if "firstPaint" is in metrics, then all subtests which
+ contain this string in their name will be kept.
+
+ :param metrics list: List of metrics to keep.
+ :param exclude list: List of string matchers to exclude from the metrics
+ gathered/reported.
+        :param split_by str: The name of a metric used to split up the data.
+ :param simplify_exclude list: List of string matchers to exclude
+ from the naming simplification process.
+ :return dict: Standardized notebook data containing the
+ requested metrics.
+ """
+ results = self.get_standardized_data(
+ group_name=group_name, transformer=transformer
+ )
+ if not metrics:
+ return results
+ if not exclude:
+ exclude = []
+ if not simplify_exclude:
+ simplify_exclude = []
+
+ # Get the field to split the results by (if any)
+ if split_by is not None:
+ splitting_entry = None
+ for data_type, data_info in results.items():
+ for res in data_info:
+ if split_by in res["subtest"]:
+ splitting_entry = res
+ break
+ if splitting_entry is not None:
+ split_by = defaultdict(list)
+ for c, entry in enumerate(splitting_entry["data"]):
+ split_by[entry["value"]].append(c)
+
+ # Filter metrics
+ filtered = {}
+ for data_type, data_info in results.items():
+ newresults = []
+ for res in data_info:
+ if any([met["name"] in res["subtest"] for met in metrics]) and not any(
+ [met in res["subtest"] for met in exclude]
+ ):
+ res["transformer"] = self.ptnb_config[data_type][
+ "custom_transformer"
+ ]
+ newresults.append(res)
+ filtered[data_type] = newresults
+
+ # Simplify the filtered metric names
+ if simplify_names:
+
+ def _simplify(name):
+ if any([met in name for met in simplify_exclude]):
+ return None
+ return name.split(".")[-1]
+
+ self._alter_name(filtered, res, filter=_simplify)
+
+ # Split the filtered results
+ if split_by is not None:
+ newfilt = {}
+ total_iterations = sum([len(inds) for _, inds in split_by.items()])
+ for data_type in filtered:
+ if not filtered[data_type]:
+ # Ignore empty data types
+ continue
+
+ newresults = []
+ newfilt[data_type] = newresults
+ for split, indices in split_by.items():
+ for res in filtered[data_type]:
+ if len(res["data"]) != total_iterations:
+ # Skip data that cannot be split
+ continue
+ splitres = {key: val for key, val in res.items()}
+ splitres["subtest"] += " " + split
+ splitres["data"] = [res["data"][i] for i in indices]
+ splitres["transformer"] = self.ptnb_config[data_type][
+ "custom_transformer"
+ ]
+
+ newresults.append(splitres)
+
+ filtered = newfilt
+
+ return filtered
+
+ def _alter_name(self, filtered, res, filter):
+ previous = []
+ for data_type, data_info in filtered.items():
+ for res in data_info:
+ new = filter(res["subtest"])
+ if new is None:
+ continue
+ if new in previous:
+ self.logger.warning(
+ f"Another metric which ends with `{new}` was already found. "
+ f"{res['subtest']} will not be simplified."
+ )
+ continue
+ res["subtest"] = new
+ previous.append(new)
+
+
+_metrics = {}
+
+
+def filtered_metrics(
+ metadata,
+ path,
+ prefix,
+ group_name="firefox",
+ transformer=None,
+ metrics=None,
+ settings=False,
+ exclude=None,
+ split_by=None,
+ simplify_names=False,
+ simplify_exclude=["statistics"],
+):
+ """Returns standardized data extracted from the metadata instance.
+
+ We're caching an instance of MetricsStorage per metrics/storage
+ combination and compute the data only once when this function is called.
+ """
+ key = path, prefix
+ if key not in _metrics:
+ storage = _metrics[key] = MetricsStorage(path, prefix, metadata)
+ storage.set_results(metadata.get_results())
+ else:
+ storage = _metrics[key]
+
+ results = storage.filtered_metrics(
+ group_name=group_name,
+ transformer=transformer,
+ metrics=metrics,
+ exclude=exclude,
+ split_by=split_by,
+ simplify_names=simplify_names,
+ simplify_exclude=simplify_exclude,
+ )
+
+ # XXX returning two different types is a problem
+ # in case settings is false, we should return None for it
+ # and always return a 2-tuple
+ if settings:
+ return results, storage.settings
+ return results
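
The module-level `_metrics` cache above keys storage instances on (path, prefix); a minimal standalone sketch of that memoization, with a hypothetical stand-in class in place of MetricsStorage, looks like:

# Sketch of the (path, prefix) memoization used by filtered_metrics();
# StorageStandIn is a hypothetical placeholder for MetricsStorage.
_cache = {}


class StorageStandIn:
    def __init__(self, path, prefix):
        self.path = path
        self.prefix = prefix


def get_storage(path, prefix):
    key = (path, prefix)
    if key not in _cache:
        _cache[key] = StorageStandIn(path, prefix)
    return _cache[key]


# The same (path, prefix) pair always returns the same cached instance.
assert get_storage("artifacts", "warm") is get_storage("artifacts", "warm")
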
diff --git a/python/mozperftest/mozperftest/metrics/consoleoutput.py b/python/mozperftest/mozperftest/metrics/consoleoutput.py
new file mode 100644
index 0000000000..a4d544f3ef
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/consoleoutput.py
@@ -0,0 +1,59 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import os
+
+from mozperftest.layers import Layer
+from mozperftest.metrics.common import COMMON_ARGS, filtered_metrics
+
+RESULTS_TEMPLATE = """\
+
+==========================================================
+ Results ({})
+==========================================================
+
+{}
+
+"""
+
+
+class ConsoleOutput(Layer):
+ """Output metrics in the console."""
+
+ name = "console"
+ # By default activate the console layer when running locally.
+ activated = "MOZ_AUTOMATION" not in os.environ
+ arguments = COMMON_ARGS
+
+ def run(self, metadata):
+ # Get filtered metrics
+ results = filtered_metrics(
+ metadata,
+ self.get_arg("output"),
+ self.get_arg("prefix"),
+ metrics=self.get_arg("metrics"),
+ transformer=self.get_arg("transformer"),
+ split_by=self.get_arg("split-by"),
+ simplify_names=self.get_arg("simplify-names"),
+ simplify_exclude=self.get_arg("simplify-exclude"),
+ )
+
+ if not results:
+ self.warning("No results left after filtering")
+ return metadata
+
+ for name, res in results.items():
+ # Make a nicer view of the data
+ subtests = [
+ "{}: {}".format(r["subtest"], [v["value"] for v in r["data"]])
+ for r in res
+ ]
+
+ # Output the data to console
+ self.info(
+ "\n==========================================================\n"
+ "= Results =\n"
+ "=========================================================="
+ "\n" + "\n".join(subtests) + "\n"
+ )
+ return metadata
diff --git a/python/mozperftest/mozperftest/metrics/exceptions.py b/python/mozperftest/mozperftest/metrics/exceptions.py
new file mode 100644
index 0000000000..dcac64ded9
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/exceptions.py
@@ -0,0 +1,53 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+class MetricsMultipleTransformsError(Exception):
+ """Raised when more than one transformer was specified.
+
+ This is because intermediate results with the same data
+ name are merged when being processed.
+ """
+
+ pass
+
+
+class MetricsMissingResultsError(Exception):
+ """Raised when no results could be found after parsing the intermediate results."""
+
+ pass
+
+
+class PerfherderValidDataError(Exception):
+ """Raised when no valid data (int/float) can be found to build perfherder blob."""
+
+ pass
+
+
+class NotebookInvalidTransformError(Exception):
+ """Raised when an invalid custom transformer is set."""
+
+ pass
+
+
+class NotebookTransformOptionsError(Exception):
+ """Raised when an invalid option is given to a transformer."""
+
+ pass
+
+
+class NotebookTransformError(Exception):
+ """Raised on generic errors within the transformers."""
+
+
+class NotebookDuplicateTransformsError(Exception):
+ """Raised when a directory contains more than one transformers have the same class name."""
+
+ pass
+
+
+class NotebookInvalidPathError(Exception):
+ """Raised when an invalid path is given."""
+
+ pass
diff --git a/python/mozperftest/mozperftest/metrics/notebook/__init__.py b/python/mozperftest/mozperftest/metrics/notebook/__init__.py
new file mode 100644
index 0000000000..8d69182664
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebook/__init__.py
@@ -0,0 +1,7 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+from .perftestetl import PerftestETL
+from .perftestnotebook import PerftestNotebook
+
+__all__ = ["PerftestETL", "PerftestNotebook"]
diff --git a/python/mozperftest/mozperftest/metrics/notebook/constant.py b/python/mozperftest/mozperftest/metrics/notebook/constant.py
new file mode 100644
index 0000000000..ca40d289d4
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebook/constant.py
@@ -0,0 +1,31 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import os
+import pathlib
+from types import MappingProxyType
+
+from .transformer import get_transformers
+
+
+class Constant(object):
+ """A singleton class to store all constants."""
+
+ __instance = None
+
+ def __new__(cls, *args, **kw):
+ if cls.__instance is None:
+ cls.__instance = object.__new__(cls, *args, **kw)
+ return cls.__instance
+
+ def __init__(self):
+ self.__here = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
+ self.__predefined_transformers = get_transformers(self.__here / "transforms")
+
+ @property
+ def predefined_transformers(self):
+ return MappingProxyType(self.__predefined_transformers).copy()
+
+ @property
+ def here(self):
+ return self.__here
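
Constant relies on the usual __new__-based singleton pattern; a generic, self-contained version of that pattern (not the class above, which additionally loads the predefined transformers) is:

# Generic __new__-based singleton, the same pattern Constant uses above.
class Singleton:
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance


# Every call returns the one cached instance.
assert Singleton() is Singleton()
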
diff --git a/python/mozperftest/mozperftest/metrics/notebook/notebook-sections/compare b/python/mozperftest/mozperftest/metrics/notebook/notebook-sections/compare
new file mode 100644
index 0000000000..f6870f0246
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebook/notebook-sections/compare
@@ -0,0 +1,85 @@
+%% md
+<div id="table-wrapper">
+ <table id="compareTable" border="1"></table>
+</div>
+
+%% py
+from js import document, data_object
+import json
+import numpy as np
+
+split_data = {}
+dir_names = set()
+subtests = set()
+newest_run_name = ""
+for element in data_object:
+ name = element["name"]
+ if "- newest run" in name:
+ newest_run_name = name
+ subtest = element["subtest"]
+ dir_names.add(name)
+ subtests.add(subtest)
+
+ data = [p["value"] for p in element["data"]]
+ split_data.setdefault(name, {}).update({
+ subtest:{
+ "data":data,
+ "stats":{
+ "Mean": np.round(np.mean(data),2),
+ "Median": np.median(data),
+ "Std. Dev.": np.round(np.std(data),2)
+ }
+ }
+ })
+
+table = document.getElementById("compareTable")
+table.innerHTML=''
+
+# build table head
+thead = table.createTHead()
+throw = thead.insertRow()
+for name in ["Metrics", "Statistics"] + list(dir_names):
+ th = document.createElement("th")
+ th.appendChild(document.createTextNode(name))
+ throw.appendChild(th)
+
+def fillRow(row, subtest, stat):
+ row.insertCell().appendChild(document.createTextNode(stat))
+ newest_run_val = split_data[newest_run_name][subtest]["stats"][stat]
+ for name in dir_names:
+ cell_val = split_data[name][subtest]["stats"][stat]
+ diff = np.round((cell_val - newest_run_val * 1.0)/newest_run_val * 100, 2)
+ color = "red" if diff>0 else "green"
+ row.insertCell().innerHTML = f"{cell_val}\n(<span style=\"color:{color}\">{diff}</span>%)"
+
+# build table body
+tbody = document.createElement("tbody")
+for subtest in subtests:
+ row1 = tbody.insertRow()
+ cell0 = row1.insertCell()
+ cell0.appendChild(document.createTextNode(subtest))
+ cell0.rowSpan = 3;
+ a = split_data
+ fillRow(row1, subtest, "Mean")
+
+ row2 = tbody.insertRow()
+ fillRow(row2, subtest, "Median")
+
+ row3 = tbody.insertRow()
+ fillRow(row3, subtest, "Std. Dev.")
+
+table.appendChild(tbody)
+
+%% css
+#table-wrapper {
+ height: 600px;
+ overflow: auto;
+}
+
+#table {
+ display: table;
+}
+
+td {
+ white-space:pre-line;
+}
diff --git a/python/mozperftest/mozperftest/metrics/notebook/notebook-sections/header b/python/mozperftest/mozperftest/metrics/notebook/notebook-sections/header
new file mode 100644
index 0000000000..1a0f659e54
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebook/notebook-sections/header
@@ -0,0 +1,12 @@
+%% md
+# Welcome to PerftestNotebook
+
+Press the :fast_forward: button in the top left corner to run the whole notebook.
+
+%% fetch
+
+text: data_string = http://127.0.0.1:5000/data
+
+%% js
+
+var data_object = JSON.parse(data_string);
diff --git a/python/mozperftest/mozperftest/metrics/notebook/notebook-sections/scatterplot b/python/mozperftest/mozperftest/metrics/notebook/notebook-sections/scatterplot
new file mode 100644
index 0000000000..f68b540236
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebook/notebook-sections/scatterplot
@@ -0,0 +1,15 @@
+%% py
+from js import data_object
+import matplotlib.pyplot as plt
+
+plt.figure()
+
+for element in data_object:
+ data_array = element["data"]
+ x = [x["xaxis"] for x in data_array]
+ y = [x["value"] for x in data_array]
+ label = element["name"]+"\n"+element["subtest"]
+ plt.scatter(x,y,label=label)
+
+plt.legend()
+plt.show()
diff --git a/python/mozperftest/mozperftest/metrics/notebook/perftestetl.py b/python/mozperftest/mozperftest/metrics/notebook/perftestetl.py
new file mode 100644
index 0000000000..bd28d9be6d
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebook/perftestetl.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python3
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import json
+import os
+import pathlib
+from collections import OrderedDict
+
+from .constant import Constant
+from .transformer import SimplePerfherderTransformer, Transformer, get_transformer
+
+
+class PerftestETL(object):
+ """Controller class for the PerftestETL."""
+
+ def __init__(
+ self,
+ file_groups,
+ config,
+ prefix,
+ logger,
+ custom_transform=None,
+ sort_files=False,
+ ):
+ """Initializes PerftestETL.
+
+        :param dict file_groups: A dict of file groupings. The key
+            of each entry is the name of the data that will be
+            produced, and the value is the group of files to process.
+ :param str custom_transform: The class name of a custom transformer.
+ """
+ self.fmt_data = {}
+ self.file_groups = file_groups
+ self.config = config
+ self.sort_files = sort_files
+ self.const = Constant()
+ self.prefix = prefix
+ self.logger = logger
+
+ # Gather the available transformers
+ tfms_dict = self.const.predefined_transformers
+
+ # XXX NOTEBOOK_PLUGIN functionality is broken at the moment.
+ # This code block will raise an exception if it detects it in
+ # the environment.
+ plugin_path = os.getenv("NOTEBOOK_PLUGIN")
+ if plugin_path:
+ raise Exception("NOTEBOOK_PLUGIN is currently broken.")
+
+ # Initialize the requested transformer
+ if custom_transform:
+ # try to load it directly, and fallback to registry
+ try:
+ tfm_cls = get_transformer(custom_transform)
+ except ImportError:
+ tfm_cls = tfms_dict.get(custom_transform)
+
+ if tfm_cls:
+ self.transformer = Transformer(
+ files=[],
+ custom_transformer=tfm_cls(),
+ logger=self.logger,
+ prefix=self.prefix,
+ )
+ self.logger.info(f"Found {custom_transform} transformer", self.prefix)
+ else:
+ raise Exception(f"Could not get a {custom_transform} transformer.")
+ else:
+ self.transformer = Transformer(
+ files=[],
+ custom_transformer=SimplePerfherderTransformer(),
+ logger=self.logger,
+ prefix=self.prefix,
+ )
+
+ def parse_file_grouping(self, file_grouping):
+ """Handles differences in the file_grouping definitions.
+
+ It can either be a path to a folder containing the files, a list of files,
+ or it can contain settings from an artifact_downloader instance.
+
+ :param file_grouping: A file grouping entry.
+ :return: A list of files to process.
+ """
+ files = []
+ if isinstance(file_grouping, list):
+ # A list of files was provided
+ files = file_grouping
+ elif isinstance(file_grouping, dict):
+ # A dictionary of settings from an artifact_downloader instance
+ # was provided here
+ raise Exception(
+ "Artifact downloader tooling is disabled for the time being."
+ )
+ elif isinstance(file_grouping, str):
+ # Assume a path to files was given
+ filepath = file_grouping
+ newf = [f.resolve().as_posix() for f in pathlib.Path(filepath).rglob("*")]
+ files = newf
+ else:
+ raise Exception(
+ "Unknown file grouping type provided here: %s" % file_grouping
+ )
+
+ if self.sort_files:
+ if isinstance(files, list):
+ files.sort()
+ else:
+ for _, file_list in files.items():
+ file_list.sort()
+ files = OrderedDict(sorted(files.items(), key=lambda entry: entry[0]))
+
+ if not files:
+ raise Exception(
+ "Could not find any files in this configuration: %s" % file_grouping
+ )
+
+ return files
+
+ def parse_output(self):
+ # XXX Fix up this function, it should only return a directory for output
+ # not a directory or a file. Or remove it completely, it's not very useful.
+ prefix = "" if "prefix" not in self.config else self.config["prefix"]
+ filepath = f"{prefix}std-output.json"
+
+ if "output" in self.config:
+ filepath = self.config["output"]
+ if os.path.isdir(filepath):
+ filepath = os.path.join(filepath, f"{prefix}std-output.json")
+
+ return filepath
+
+ def process(self, **kwargs):
+ """Process the file groups and return the results of the requested analyses.
+
+ :return: All the results in a dictionary. The field names are the Analyzer
+        functions that were called.
+ """
+ fmt_data = []
+
+ for name, files in self.file_groups.items():
+ files = self.parse_file_grouping(files)
+ if isinstance(files, dict):
+ raise Exception(
+ "Artifact downloader tooling is disabled for the time being."
+ )
+ else:
+ # Transform the data
+ self.transformer.files = files
+ trfm_data = self.transformer.process(name, **kwargs)
+
+ if isinstance(trfm_data, list):
+ fmt_data.extend(trfm_data)
+ else:
+ fmt_data.append(trfm_data)
+
+ self.fmt_data = fmt_data
+
+ # Write formatted data output to filepath
+ output_data_filepath = self.parse_output()
+
+ print("Writing results to %s" % output_data_filepath)
+ with open(output_data_filepath, "w") as f:
+ json.dump(self.fmt_data, f, indent=4, sort_keys=True)
+
+ return {"data": self.fmt_data, "file-output": output_data_filepath}
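
A hypothetical example of the inputs PerftestETL expects; every path, name, and value below is invented for illustration, only the dictionary keys mirror what the class reads above:

# Invented file_groups/config for PerftestETL; only the dictionary keys
# mirror what the class reads above.
file_groups = {"browsertime": ["/tmp/results/run1.json", "/tmp/results/run2.json"]}
config = {
    "output": "/tmp/output",
    "prefix": "cold",
    "custom_transformer": "SingleJsonRetriever",
    "file_groups": file_groups,
}
# etl = PerftestETL(file_groups, config, prefix="cold", logger=logger)
# result = etl.process()
# result -> {"data": [...], "file-output": "/tmp/output/coldstd-output.json"}
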
diff --git a/python/mozperftest/mozperftest/metrics/notebook/perftestnotebook.py b/python/mozperftest/mozperftest/metrics/notebook/perftestnotebook.py
new file mode 100644
index 0000000000..99c3766b42
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebook/perftestnotebook.py
@@ -0,0 +1,79 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import json
+import webbrowser
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+from .constant import Constant
+
+
+class PerftestNotebook(object):
+ """Controller class for PerftestNotebook."""
+
+ def __init__(self, data, logger, prefix):
+ """Initialize the PerftestNotebook.
+
+ :param dict data: Standardized data, post-transformation.
+ """
+ self.data = data
+ self.logger = logger
+ self.prefix = prefix
+ self.const = Constant()
+
+ def get_notebook_section(self, func):
+ """Fetch notebook content based on analysis name.
+
+ :param str func: analysis or notebook section name
+ """
+ template_path = self.const.here / "notebook-sections" / func
+ if not template_path.exists():
+ self.logger.warning(
+ f"Could not find the notebook-section called {func}", self.prefix
+ )
+ return ""
+ with template_path.open() as f:
+ return f.read()
+
+ def post_to_iodide(self, analysis=None, start_local_server=True):
+ """Build notebook and post it to iodide.
+
+ :param list analysis: notebook section names, analysis to perform in iodide
+ """
+ data = self.data
+ notebook_sections = ""
+
+ template_header_path = self.const.here / "notebook-sections" / "header"
+ with template_header_path.open() as f:
+ notebook_sections += f.read()
+
+ if analysis:
+ for func in analysis:
+ notebook_sections += self.get_notebook_section(func)
+
+ template_upload_file_path = self.const.here / "template_upload_file.html"
+ with template_upload_file_path.open() as f:
+ html = f.read().replace("replace_me", repr(notebook_sections))
+
+ upload_file_path = self.const.here / "upload_file.html"
+ with upload_file_path.open("w") as f:
+ f.write(html)
+
+ # set up local server. Iodide will fetch data from localhost:5000/data
+ class DataRequestHandler(BaseHTTPRequestHandler):
+ def do_GET(self):
+ if self.path == "/data":
+ self.send_response(200)
+ self.send_header("Content-type", "application/json")
+ self.send_header("Access-Control-Allow-Origin", "*")
+ self.end_headers()
+ self.wfile.write(bytes(json.dumps(data).encode("utf-8")))
+
+ PORT_NUMBER = 5000
+ server = HTTPServer(("", PORT_NUMBER), DataRequestHandler)
+ if start_local_server:
+ webbrowser.open_new_tab(str(upload_file_path))
+ try:
+ server.serve_forever()
+ finally:
+ server.server_close()
diff --git a/python/mozperftest/mozperftest/metrics/notebook/template_upload_file.html b/python/mozperftest/mozperftest/metrics/notebook/template_upload_file.html
new file mode 100644
index 0000000000..2400be4e87
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebook/template_upload_file.html
@@ -0,0 +1,39 @@
+<!DOCTYPE html>
+<!-- This Source Code Form is subject to the terms of the Mozilla Public
+ - License, v. 2.0. If a copy of the MPL was not distributed with this
+ - file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
+<html>
+ <body>
+ Redirecting to Iodide...
+ <script>
+ function post(path, params, method='post') {
+ const form = document.createElement('form');
+ form.method = method;
+ form.action = path;
+ form.id = 'uploadform';
+
+ for (const key in params) {
+ if (params.hasOwnProperty(key)) {
+ const textarea = document.createElement('textarea');
+ textarea.name = key;
+ textarea.value = params[key];
+ textarea.style.display = "none";
+ form.appendChild(textarea);
+ }
+ }
+
+
+ document.body.appendChild(form);
+ form.submit();
+ }
+
+ // TODO Need to escape all `'`,
+      // Otherwise, this will result in JavaScript failures.
+ var template = replace_me
+
+ // Create a form object, and send it
+ // after release, change back to https://alpha.iodide.io/from-template/
+ post("https://alpha.iodide.io/from-template/", {"iomd": template})
+ </script>
+ </body>
+</html>
diff --git a/python/mozperftest/mozperftest/metrics/notebook/transformer.py b/python/mozperftest/mozperftest/metrics/notebook/transformer.py
new file mode 100644
index 0000000000..7ecbc40d89
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebook/transformer.py
@@ -0,0 +1,228 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import importlib.util
+import inspect
+import json
+import pathlib
+
+from jsonschema import validate
+
+from mozperftest.metrics.exceptions import (
+ NotebookDuplicateTransformsError,
+ NotebookInvalidPathError,
+ NotebookInvalidTransformError,
+)
+from mozperftest.runner import HERE
+from mozperftest.utils import load_class
+
+
+class Transformer(object):
+ """Abstract class for data transformers."""
+
+ def __init__(self, files=None, custom_transformer=None, logger=None, prefix=None):
+ """Initialize the transformer with files.
+
+ :param list files: A list of files containing data to transform.
+ :param object custom_transformer: A custom transformer instance.
+ Must implement `transform` and `merge` methods.
+ """
+ self._files = files
+ self.logger = logger
+ self.prefix = prefix
+
+ if custom_transformer:
+ valid = (
+ hasattr(custom_transformer, "transform")
+ and hasattr(custom_transformer, "merge")
+ and callable(custom_transformer.transform)
+ and callable(custom_transformer.merge)
+ )
+
+ if not valid:
+ raise NotebookInvalidTransformError(
+ "The custom transformer must contain `transform` and `merge` methods."
+ )
+
+ self._custom_transformer = custom_transformer
+
+ with pathlib.Path(HERE, "schemas", "transformer_schema.json").open() as f:
+ self.schema = json.load(f)
+
+ @property
+ def files(self):
+ return self._files
+
+ @files.setter
+ def files(self, val):
+ if not isinstance(val, list):
+ self.logger.warning(
+ "`files` must be a list, got %s" % type(val), self.prefix
+ )
+ return
+ self._files = val
+
+ @property
+ def custom_transformer(self):
+ return self._custom_transformer
+
+ def open_data(self, file):
+ """Opens a file of data.
+
+ If it's not a JSON file, then the data
+ will be opened as a text file.
+
+ :param str file: Path to the data file.
+ :return: Data contained in the file.
+ """
+ with open(file) as f:
+ if file.endswith(".json"):
+ return json.load(f)
+ return f.readlines()
+
+ def process(self, name, **kwargs):
+ """Process all the known data into a merged, and standardized data format.
+
+ :param str name: Name of the merged data.
+ :return dict: Merged data.
+ """
+ trfmdata = []
+
+ for file in self.files:
+ data = {}
+
+ # Open data
+ try:
+ if hasattr(self._custom_transformer, "open_data"):
+ data = self._custom_transformer.open_data(file)
+ else:
+ data = self.open_data(file)
+ except Exception as e:
+ self.logger.warning(
+ "Failed to open file %s, skipping" % file, self.prefix
+ )
+ self.logger.warning("%s %s" % (e.__class__.__name__, e), self.prefix)
+
+ # Transform data
+ try:
+ data = self._custom_transformer.transform(data, **kwargs)
+ if not isinstance(data, list):
+ data = [data]
+ for entry in data:
+ for ele in entry["data"]:
+ if "file" not in ele:
+ ele.update({"file": file})
+ trfmdata.extend(data)
+ except Exception as e:
+ self.logger.warning(
+ "Failed to transform file %s, skipping" % file, self.prefix
+ )
+ self.logger.warning("%s %s" % (e.__class__.__name__, e), self.prefix)
+
+ merged = self._custom_transformer.merge(trfmdata)
+
+ if isinstance(merged, dict):
+ merged["name"] = name
+ else:
+ for e in merged:
+ e["name"] = name
+
+ validate(instance=merged, schema=self.schema)
+ return merged
+
+
+class SimplePerfherderTransformer:
+ """Transforms perfherder data into the standardized data format."""
+
+ entry_number = 0
+
+ def transform(self, data):
+ self.entry_number += 1
+ return {
+ "data": [{"value": data["suites"][0]["value"], "xaxis": self.entry_number}]
+ }
+
+ def merge(self, sde):
+ merged = {"data": []}
+ for entry in sde:
+ if isinstance(entry["data"], list):
+ merged["data"].extend(entry["data"])
+ else:
+ merged["data"].append(entry["data"])
+
+ self.entry_number = 0
+ return merged
+
+
+def get_transformer(path, ret_members=False):
+ """This function returns a Transformer class with the given path.
+
+ :param str path: The path points to the custom transformer.
+ :param bool ret_members: If true then return inspect.getmembers().
+ :return Transformer if not ret_members else inspect.getmembers().
+ """
+ file = pathlib.Path(path)
+
+ if file.suffix != ".py":
+ return load_class(path)
+
+ if not file.exists():
+ raise NotebookInvalidPathError(f"The path {path} does not exist.")
+
+ # Importing a source file directly
+ spec = importlib.util.spec_from_file_location(name=file.name, location=path)
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+
+ members = inspect.getmembers(
+ module,
+ lambda c: inspect.isclass(c)
+ and hasattr(c, "transform")
+ and hasattr(c, "merge")
+ and callable(c.transform)
+ and callable(c.merge),
+ )
+
+ if not members and not ret_members:
+ raise NotebookInvalidTransformError(
+ f"The path {path} was found but it was not a valid transformer."
+ )
+
+ return members if ret_members else members[0][-1]
+
+
+def get_transformers(dirpath=None):
+ """This function returns a dict of transformers under the given path.
+
+    If more than one transformer has the same class name, an exception will be raised.
+
+ :param pathlib.Path dirpath: Path to a directory containing the transformers.
+ :return dict: {"Transformer class name": Transformer class}.
+ """
+
+ ret = {}
+
+ if not dirpath.exists():
+ raise NotebookInvalidPathError(f"The path {dirpath.as_posix()} does not exist.")
+
+ if not dirpath.is_dir():
+ raise NotebookInvalidPathError(
+ f"Path given is not a directory: {dirpath.as_posix()}"
+ )
+
+ tfm_files = list(dirpath.glob("*.py"))
+ importlib.machinery.SOURCE_SUFFIXES.append("")
+
+ for file in tfm_files:
+ members = get_transformer(file.resolve().as_posix(), True)
+
+ for (name, tfm_class) in members:
+ if name in ret:
+ raise NotebookDuplicateTransformsError(
+ f"Duplicated transformer {name} "
+ + f"is found in the directory {dirpath.as_posix()}."
+ + "Please define each transformer class with a unique class name.",
+ )
+ ret.update({name: tfm_class})
+
+ return ret
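
A minimal custom transformer satisfying the transform/merge contract validated above might look like the following sketch; the class name, the "values" key, and the subtest label are all invented for illustration:

# Illustrative custom transformer; ExampleTransformer and the "values" key
# are invented, but transform() and merge() follow the contract above.
class ExampleTransformer:
    def transform(self, data):
        # `data` is whatever open_data() returned for a single file.
        return [
            {
                "data": [
                    {"value": v, "xaxis": i} for i, v in enumerate(data["values"])
                ],
                "subtest": "example-metric",
            }
        ]

    def merge(self, sde):
        grouped = {}
        for entry in sde:
            grouped.setdefault(entry["subtest"], []).extend(entry["data"])
        return [{"data": v, "subtest": k} for k, v in grouped.items()]
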
diff --git a/python/mozperftest/mozperftest/metrics/notebook/transforms/__init__.py b/python/mozperftest/mozperftest/metrics/notebook/transforms/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebook/transforms/__init__.py
diff --git a/python/mozperftest/mozperftest/metrics/notebook/transforms/logcattime.py b/python/mozperftest/mozperftest/metrics/notebook/transforms/logcattime.py
new file mode 100644
index 0000000000..184b327540
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebook/transforms/logcattime.py
@@ -0,0 +1,121 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import re
+from datetime import datetime, timedelta
+
+from mozperftest.metrics.exceptions import (
+ NotebookTransformError,
+ NotebookTransformOptionsError,
+)
+
+TIME_MATCHER = re.compile(r"(\s+[\d.:]+\s+)")
+
+
+class LogCatTimeTransformer:
+ """Used for parsing times/durations from logcat logs."""
+
+ def open_data(self, file):
+ with open(file) as f:
+ return f.read()
+
+ def _get_duration(self, startline, endline):
+ """Parse duration between two logcat lines.
+
+ Expecting lines with a prefix like:
+ 05-26 11:45:41.226 ...
+
+ We only parse the hours, minutes, seconds, and milliseconds here
+ because we have no use for the days and other times.
+ """
+ match = TIME_MATCHER.search(startline)
+ if not match:
+ return None
+ start = match.group(1).strip()
+
+ match = TIME_MATCHER.search(endline)
+ if not match:
+ return None
+ end = match.group(1).strip()
+
+ sdt = datetime.strptime(start, "%H:%M:%S.%f")
+ edt = datetime.strptime(end, "%H:%M:%S.%f")
+
+ # If the ending is less than the start, we rolled into a new
+ # day, so we add 1 day to the end time to handle this
+ if sdt > edt:
+ edt += timedelta(1)
+
+ return (edt - sdt).total_seconds() * 1000
+
+ def _parse_logcat(self, logcat, first_ts, second_ts=None, processor=None):
+ """Parse data from logcat lines.
+
+ If two regexes are provided (first_ts, and second_ts), then the elapsed
+ time between those lines will be measured. Otherwise, if only `first_ts`
+ is defined then, we expect a number as the first group from the
+ match. Optionally, a `processor` function can be provided to process
+ all the groups that were obtained from the match, allowing users to
+ customize what the result is.
+
+ :param list logcat: The logcat lines to parse.
+ :param str first_ts: Regular expression for the first matching line.
+ :param str second_ts: Regular expression for the second matching line.
+ :param func processor: Function to process the groups from the first_ts
+ regular expression.
+ :return list: Returns a list of durations/times parsed.
+ """
+ full_re = r"(" + first_ts + r"\n)"
+ if second_ts:
+ full_re += r".+(?:\n.+)+?(\n" + second_ts + r"\n)"
+
+ durations = []
+ for match in re.findall(full_re, logcat, re.MULTILINE):
+ if isinstance(match, str):
+ raise NotebookTransformOptionsError(
+ "Only one regex was provided, and it has no groups to process."
+ )
+
+ if second_ts is not None:
+ if len(match) != 2:
+ raise NotebookTransformError(
+ "More than 2 groups found. It's unclear which "
+ "to use for calculating the durations."
+ )
+ val = self._get_duration(match[0], match[1])
+ elif processor is not None:
+ # Ignore the first match (that is the full line)
+ val = processor(match[1:])
+ else:
+ val = match[1]
+
+ if val is not None:
+ durations.append(float(val))
+
+ return durations
+
+ def transform(self, data, **kwargs):
+ alltimes = self._parse_logcat(
+ data,
+ kwargs.get("first-timestamp"),
+ second_ts=kwargs.get("second-timestamp"),
+ processor=kwargs.get("processor"),
+ )
+ subtest = kwargs.get("transform-subtest-name")
+ return [
+ {
+ "data": [{"value": val, "xaxis": c} for c, val in enumerate(alltimes)],
+ "subtest": subtest if subtest else "logcat-metric",
+ }
+ ]
+
+ def merge(self, sde):
+ grouped_data = {}
+
+ for entry in sde:
+ subtest = entry["subtest"]
+ data = grouped_data.get(subtest, [])
+ data.extend(entry["data"])
+ grouped_data.update({subtest: data})
+
+ return [{"data": v, "subtest": k} for k, v in grouped_data.items()]
diff --git a/python/mozperftest/mozperftest/metrics/notebook/transforms/single_json.py b/python/mozperftest/mozperftest/metrics/notebook/transforms/single_json.py
new file mode 100644
index 0000000000..375615fb23
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebook/transforms/single_json.py
@@ -0,0 +1,56 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+from mozperftest.metrics.notebook.utilities import flat
+
+
+class SingleJsonRetriever:
+ """Transforms perfherder data into the standardized data format."""
+
+ entry_number = 0
+
+ def transform(self, data):
+ self.entry_number += 1
+
+ # flat(data, ()) returns a dict that have one key per dictionary path
+ # in the original data.
+ return [
+ {
+ "data": [{"value": i, "xaxis": self.entry_number} for i in v],
+ "subtest": k,
+ }
+ for k, v in flat(data, ()).items()
+ ]
+
+ def merge(self, sde):
+ grouped_data = {}
+ for entry in sde:
+ subtest = entry["subtest"]
+ data = grouped_data.get(subtest, [])
+ data.extend(entry["data"])
+ grouped_data.update({subtest: data})
+
+ merged_data = [{"data": v, "subtest": k} for k, v in grouped_data.items()]
+
+ self.entry_number = 0
+ return merged_data
+
+ def summary(self, suite):
+ """Summarize a suite of perfherder data into a single value.
+
+ Returning None means that there's no summary. Otherwise, an integer
+ or float must be returned.
+
+ Only available in the Perfherder layer.
+ """
+ return None
+
+ def subtest_summary(self, subtest):
+ """Summarize a set of replicates for a given subtest.
+
+        By default, it returns None, so we fall back to using the
+        average of the replicates.
+
+ Only available in the Perfherder layer.
+ """
+ return None
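
A worked, hypothetical input/output pair for SingleJsonRetriever.transform(); the nested keys and values are invented, only the output shape follows the code above:

# Invented nested JSON and the standardized entries transform() would
# produce from it on the first file (entry_number == 1).
nested = {"browserScripts": {"timings": {"firstPaint": 101, "loadEventEnd": 230}}}
expected = [
    {
        "subtest": "browserScripts.timings.firstPaint",
        "data": [{"value": 101, "xaxis": 1}],
    },
    {
        "subtest": "browserScripts.timings.loadEventEnd",
        "data": [{"value": 230, "xaxis": 1}],
    },
]
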
diff --git a/python/mozperftest/mozperftest/metrics/notebook/utilities.py b/python/mozperftest/mozperftest/metrics/notebook/utilities.py
new file mode 100644
index 0000000000..7fd97fa3fa
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebook/utilities.py
@@ -0,0 +1,63 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+from collections.abc import Iterable
+
+
+def flat(data, parent_dir):
+ """
+ Converts a dictionary with nested entries like this
+ {
+ "dict1": {
+ "dict2": {
+ "key1": value1,
+ "key2": value2,
+ ...
+ },
+ ...
+ },
+ ...
+ "dict3": {
+ "key3": value3,
+ "key4": value4,
+ ...
+ }
+ ...
+ }
+
+ to a "flattened" dictionary like this that has no nested entries:
+ {
+ "dict1.dict2.key1": value1,
+ "dict1.dict2.key2": value2,
+ ...
+ "dict3.key3": value3,
+ "dict3.key4": value4,
+ ...
+ }
+
+    :param Iterable data: JSON data.
+    :param tuple parent_dir: JSON fields.
+
+ :return dict: {subtest: value}
+ """
+ result = {}
+
+ if not data:
+ return result
+
+ if isinstance(data, list):
+ for item in data:
+ for k, v in flat(item, parent_dir).items():
+ result.setdefault(k, []).extend(v)
+
+ if isinstance(data, dict):
+ for k, v in data.items():
+ current_dir = parent_dir + (k,)
+ subtest = ".".join(current_dir)
+ if isinstance(v, Iterable) and not isinstance(v, str):
+ for x, y in flat(v, current_dir).items():
+ result.setdefault(x, []).extend(y)
+ elif v or v == 0:
+ result.setdefault(subtest, []).append(v)
+
+ return result
diff --git a/python/mozperftest/mozperftest/metrics/notebookupload.py b/python/mozperftest/mozperftest/metrics/notebookupload.py
new file mode 100644
index 0000000000..ec53af2b7f
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/notebookupload.py
@@ -0,0 +1,115 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import pathlib
+
+from mozperftest.layers import Layer
+from mozperftest.metrics.common import COMMON_ARGS, filtered_metrics
+from mozperftest.metrics.notebook import PerftestNotebook
+from mozperftest.metrics.utils import is_number
+
+
+class Notebook(Layer):
+ """Post standarized data to iodide and run analysis."""
+
+ name = "notebook"
+ activated = False
+
+ arguments = COMMON_ARGS
+ arguments.update(
+ {
+ "analysis": {
+ "nargs": "*",
+ "default": [],
+ "help": "List of analyses to run in Iodide.",
+ },
+ "analyze-strings": {
+ "action": "store_true",
+ "default": False,
+ "help": (
+ "If set, strings won't be filtered out of the results to analyze in Iodide."
+ ),
+ },
+ "no-server": {
+ "action": "store_true",
+ "default": False,
+ "help": "If set, the data won't be opened in Iodide.",
+ },
+ "compare-to": {
+ "nargs": "*",
+ "default": [],
+ "help": (
+ "Compare the results from this test to the historical data in the folder(s) "
+ "specified through this option. Only JSON data can be processed for the "
+ "moment. Each folder containing those JSONs is considered as a distinct "
+ "data point to compare with the newest run."
+ ),
+ },
+ "stats": {
+ "action": "store_true",
+ "default": False,
+ "help": "If set, browsertime statistics will be reported.",
+ },
+ }
+ )
+
+ def run(self, metadata):
+ exclusions = None
+ if not self.get_arg("stats"):
+ exclusions = ["statistics."]
+
+ for result in metadata.get_results():
+ result["name"] += "- newest run"
+
+ analysis = self.get_arg("analysis")
+ dir_list = self.get_arg("compare-to")
+ if dir_list:
+ analysis.append("compare")
+ for directory in dir_list:
+ dirpath = pathlib.Path(directory)
+ if not dirpath.exists():
+ raise Exception(f"{dirpath} does not exist.")
+ if not dirpath.is_dir():
+ raise Exception(f"{dirpath} is not a directory")
+ # TODO: Handle more than just JSON data.
+ for jsonfile in dirpath.rglob("*.json"):
+ metadata.add_result(
+ {
+ "results": str(jsonfile.resolve()),
+ "name": jsonfile.parent.name,
+ }
+ )
+
+ results = filtered_metrics(
+ metadata,
+ self.get_arg("output"),
+ self.get_arg("prefix"),
+ metrics=self.get_arg("metrics"),
+ transformer=self.get_arg("transformer"),
+ exclude=exclusions,
+ split_by=self.get_arg("split-by"),
+ simplify_names=self.get_arg("simplify-names"),
+ simplify_exclude=self.get_arg("simplify-exclude"),
+ )
+
+ if not results:
+ self.warning("No results left after filtering")
+ return metadata
+
+ data_to_post = []
+ for name, res in results.items():
+ for r in res:
+ val = r["data"][0]["value"]
+ if is_number(val):
+ data_to_post.append(r)
+ elif self.get_arg("analyze-strings"):
+ data_to_post.append(r)
+
+ self.ptnb = PerftestNotebook(
+ data=data_to_post, logger=metadata, prefix=self.get_arg("prefix")
+ )
+ self.ptnb.post_to_iodide(
+ analysis, start_local_server=not self.get_arg("no-server")
+ )
+
+ return metadata
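
A hypothetical on-disk layout for the compare-to option (presumably exposed as --notebook-compare-to, following the --perfboard-* pattern used elsewhere in these layers); the paths are invented. Each folder passed in becomes one comparison data point, named after the parent directory of each JSON file found under it:

# Invented layout for the compare-to folders; each JSON is added as a result
# named after its parent directory.
# /tmp/perf-history/2023-05-01/results.json  -> name "2023-05-01"
# /tmp/perf-history/2023-05-02/results.json  -> name "2023-05-02"
compare_to = ["/tmp/perf-history/2023-05-01", "/tmp/perf-history/2023-05-02"]
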
diff --git a/python/mozperftest/mozperftest/metrics/perfboard/__init__.py b/python/mozperftest/mozperftest/metrics/perfboard/__init__.py
new file mode 100644
index 0000000000..6fbe8159b2
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/perfboard/__init__.py
@@ -0,0 +1,3 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
diff --git a/python/mozperftest/mozperftest/metrics/perfboard/dashboard.json b/python/mozperftest/mozperftest/metrics/perfboard/dashboard.json
new file mode 100644
index 0000000000..804b880b55
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/perfboard/dashboard.json
@@ -0,0 +1,56 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": 1,
+ "links": [],
+ "panels": [],
+ "refresh": false,
+ "schemaVersion": 22,
+ "style": "dark",
+ "tags": [
+ "component"
+ ],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-30d",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ]
+ },
+ "timezone": "",
+ "title": "?",
+ "uid": null,
+ "id": null,
+ "variables": {
+ "list": []
+ },
+ "version": 13
+}
diff --git a/python/mozperftest/mozperftest/metrics/perfboard/grafana.py b/python/mozperftest/mozperftest/metrics/perfboard/grafana.py
new file mode 100644
index 0000000000..1fa76ea991
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/perfboard/grafana.py
@@ -0,0 +1,87 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import json
+import os
+
+from grafana_api.grafana_face import GrafanaFace
+
+HERE = os.path.dirname(__file__)
+
+
+with open(os.path.join(HERE, "dashboard.json")) as f:
+ template = json.loads(f.read())
+
+with open(os.path.join(HERE, "panel.json")) as f:
+ panel_template = json.loads(f.read())
+
+with open(os.path.join(HERE, "target.json")) as f:
+ target_template = json.loads(f.read())
+
+
+class Grafana:
+ def __init__(self, layer, key, host="perfboard.dev.mozaws.net", port=3000):
+ self.client = GrafanaFace(host=host, port=port, auth=key)
+ self.layer = layer
+
+ def get_dashboard(self, title):
+ existing = self.client.search.search_dashboards(tag="component")
+ existing = dict(
+ [(dashboard["title"].lower(), dashboard["uid"]) for dashboard in existing]
+ )
+ if title in existing:
+ return self.client.dashboard.get_dashboard(existing[title])
+ self.layer.debug(f"Creating dashboard {title}")
+ d = dict(template)
+ d["title"] = title.capitalize()
+ res = self.client.dashboard.update_dashboard(
+ dashboard={"dashboard": d, "folderId": 0, "overwrite": False}
+ )
+
+ return self.client.dashboard.get_dashboard(res["uid"])
+
+ def _add_panel(self, dashboard, panel_title, metrics):
+ found = None
+ ids = []
+ for panel in dashboard["dashboard"]["panels"]:
+ ids.append(panel["id"])
+
+ if panel["title"] == panel_title:
+ found = panel
+
+ ids.sort()
+
+ need_update = False
+ if found is None:
+ # create the panel
+ panel = panel_template
+ panel["title"] = panel_title
+ if ids != []:
+ panel["id"] = ids[-1] + 1
+ else:
+ panel["id"] = 1
+ self.layer.debug("Creating panel")
+ dashboard["dashboard"]["panels"].append(panel)
+ need_update = True
+ else:
+ self.layer.debug("Panel exists")
+ panel = found
+
+ # check the metrics
+ existing = [target["measurement"] for target in panel["targets"]]
+
+ for metric in metrics:
+ if metric in existing:
+ continue
+ m = dict(target_template)
+ m["measurement"] = metric
+ panel["targets"].append(m)
+ need_update = True
+
+ if need_update:
+ self.layer.debug("Updating dashboard")
+ self.client.dashboard.update_dashboard(dashboard=dashboard)
+
+ def add_panel(self, dashboard, panel, metrics):
+ dashboard = self.get_dashboard(dashboard)
+ self._add_panel(dashboard, panel, metrics)
diff --git a/python/mozperftest/mozperftest/metrics/perfboard/influx.py b/python/mozperftest/mozperftest/metrics/perfboard/influx.py
new file mode 100644
index 0000000000..4f7e27072c
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/perfboard/influx.py
@@ -0,0 +1,188 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import datetime
+import statistics
+from collections import defaultdict
+
+from mozperftest import utils
+from mozperftest.layers import Layer
+from mozperftest.metrics.common import COMMON_ARGS, filtered_metrics
+from mozperftest.utils import get_tc_secret, install_package
+
+
+class Influx(Layer):
+ """Sends the metrics to an InfluxDB server"""
+
+ name = "perfboard"
+ activated = False
+ arguments = COMMON_ARGS
+ arguments.update(
+ {
+ "dashboard": {
+ "type": str,
+ "default": None,
+ "help": "Name of the dashboard - defaults to the script"
+ " `component` metadata. When not set, falls back to"
+ " `perftest`",
+ },
+ "influx-host": {
+ "type": str,
+ "default": "perfboard.dev.mozaws.net",
+ },
+ "influx-user": {
+ "type": str,
+ "default": "admin",
+ },
+ "influx-port": {
+ "type": int,
+ "default": 8086,
+ },
+ "influx-password": {
+ "type": str,
+ "default": None,
+ },
+ "influx-db": {
+ "type": str,
+ "default": "perf",
+ },
+ "grafana-host": {
+ "type": str,
+ "default": "perfboard.dev.mozaws.net",
+ },
+ "grafana-key": {
+ "type": str,
+ "default": None,
+ },
+ "grafana-port": {
+ "type": int,
+ "default": 3000,
+ },
+ }
+ )
+
+ def _setup(self):
+ venv = self.mach_cmd.virtualenv_manager
+ try:
+ from influxdb import InfluxDBClient
+ except ImportError:
+ install_package(venv, "influxdb", ignore_failure=False)
+ from influxdb import InfluxDBClient
+
+ try:
+ from mozperftest.metrics.perfboard.grafana import Grafana
+ except ImportError:
+ install_package(venv, "grafana_api", ignore_failure=False)
+ from mozperftest.metrics.perfboard.grafana import Grafana
+
+ if utils.ON_TRY:
+ secret = get_tc_secret()
+ i_host = secret["influx_host"]
+ i_port = secret["influx_port"]
+ i_user = secret["influx_user"]
+ i_password = secret["influx_password"]
+ i_dbname = secret["influx_db"]
+ g_key = secret["grafana_key"]
+ g_host = secret["grafana_host"]
+ g_port = secret["grafana_port"]
+ else:
+ i_host = self.get_arg("influx-host")
+ i_port = self.get_arg("influx-port")
+ i_user = self.get_arg("influx-user")
+ i_password = self.get_arg("influx-password")
+ if i_password is None:
+ raise Exception("You need to set --perfboard-influx-password")
+ i_dbname = self.get_arg("influx-db")
+ g_key = self.get_arg("grafana-key")
+ if g_key is None:
+ raise Exception("You need to set --perfboard-grafana-key")
+ g_host = self.get_arg("grafana-host")
+ g_port = self.get_arg("grafana-port")
+
+ self.client = InfluxDBClient(i_host, i_port, i_user, i_password, i_dbname)
+ # this will error out if the server is unreachable
+ self.client.ping()
+ self.grafana = Grafana(self, g_key, g_host, g_port)
+
+ def _build_point(self, name, component, values, date):
+ value = statistics.mean(values)
+ return {
+ "measurement": name,
+ "tags": {
+ "component": component,
+ },
+ "time": date,
+ "fields": {"Float_value": float(value)},
+ }
+
+ def run(self, metadata):
+ when = datetime.datetime.utcnow()
+ date = when.isoformat()
+ metrics = self.get_arg("metrics")
+
+ # Get filtered metrics
+ results = filtered_metrics(
+ metadata,
+ self.get_arg("output"),
+ self.get_arg("prefix"),
+ metrics=metrics,
+ transformer=self.get_arg("transformer"),
+ split_by=self.get_arg("split-by"),
+ simplify_names=self.get_arg("simplify-names"),
+ simplify_exclude=self.get_arg("simplify-exclude"),
+ )
+
+ if not results:
+ self.warning("No results left after filtering")
+ return metadata
+
+ # One thing we don't do yet is get a timestamp for each measure
+ # taken in browsertime or xpcshell. If we had it, we could send
+ # all 13/25 samples, each with its own timestamp, to InfluxDB,
+ # and let Grafana handle the mean() or median() part.
+ #
+ # Until we have this, we convert the series to a single value
+ # and timestamp here.
+ self._setup()
+ component = self.get_arg("dashboard")
+ if component is None:
+ component = metadata.script.get("component", "perftest")
+
+ data = defaultdict(list)
+ for name, res in results.items():
+ for line in res:
+ if "subtest" not in line:
+ continue
+ metric_name = line["subtest"]
+ short_name = metric_name.split(".")[-1]
+ short_name = short_name.lower()
+ if metrics and not any(
+ [m.lower().startswith(short_name.lower()) for m in metrics]
+ ):
+ continue
+ values = [v["value"] for v in line["data"]]
+ data[short_name].extend(values)
+
+ if not data:
+ self.warning("No results left after filtering")
+ return metadata
+
+ points = []
+ for metric_name, values in data.items():
+ try:
+ point = self._build_point(metric_name, component, values, date)
+ except TypeError:
+ continue
+ points.append(point)
+
+ self.info("Sending data to InfluxDB")
+ self.client.write_points(points)
+
+ # making sure we expose it in Grafana
+ test_name = self.get_arg("tests")[0]
+ test_name = test_name.split("/")[-1]
+ for metric_name in data:
+ self.grafana.add_panel(component, test_name, metric_name)
+
+ return metadata
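For illustration, this is the shape of a single point the layer writes; the metric name and replicate values below are invented, and `client` stands for the `influxdb.InfluxDBClient` instance created in `_setup` (a sketch, not part of the patch):

import datetime
import statistics


def build_point(name, component, values, date):
    # same shape as _build_point above: one mean value per metric,
    # tagged with the dashboard component
    return {
        "measurement": name,
        "tags": {"component": component},
        "time": date,
        "fields": {"Float_value": float(statistics.mean(values))},
    }


date = datetime.datetime.utcnow().isoformat()
point = build_point("speedindex", "perftest", [1110, 1180, 1143], date)
print(point["fields"])  # {'Float_value': 1144.333...}
# sending is then a single call on the client:
#   client.write_points([point])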
diff --git a/python/mozperftest/mozperftest/metrics/perfboard/panel.json b/python/mozperftest/mozperftest/metrics/perfboard/panel.json
new file mode 100644
index 0000000000..61deeaad8f
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/perfboard/panel.json
@@ -0,0 +1,81 @@
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 10,
+ "w": 11,
+ "x": 0,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "BBC Link perftest",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
diff --git a/python/mozperftest/mozperftest/metrics/perfboard/target.json b/python/mozperftest/mozperftest/metrics/perfboard/target.json
new file mode 100644
index 0000000000..ad96488840
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/perfboard/target.json
@@ -0,0 +1,20 @@
+
+{
+ "groupBy": [],
+ "measurement": "rumSpeedIndex",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "Float_value"
+ ],
+ "type": "field"
+ }
+ ]
+ ],
+ "tags": []
+ }
diff --git a/python/mozperftest/mozperftest/metrics/perfherder.py b/python/mozperftest/mozperftest/metrics/perfherder.py
new file mode 100644
index 0000000000..0521e2a205
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/perfherder.py
@@ -0,0 +1,374 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import json
+import os
+import pathlib
+import statistics
+import sys
+
+import jsonschema
+
+from mozperftest.layers import Layer
+from mozperftest.metrics.common import COMMON_ARGS, filtered_metrics
+from mozperftest.metrics.exceptions import PerfherderValidDataError
+from mozperftest.metrics.notebook.constant import Constant
+from mozperftest.metrics.notebook.transformer import get_transformer
+from mozperftest.metrics.utils import has_callable_method, is_number, write_json
+from mozperftest.utils import strtobool
+
+PERFHERDER_SCHEMA = pathlib.Path(
+ "testing", "mozharness", "external_tools", "performance-artifact-schema.json"
+)
+
+
+class Perfherder(Layer):
+ """Output data in the perfherder format."""
+
+ name = "perfherder"
+ activated = False
+
+ arguments = COMMON_ARGS
+ arguments.update(
+ {
+ "app": {
+ "type": str,
+ "default": "firefox",
+ "choices": [
+ "firefox",
+ "chrome-m",
+ "chrome",
+ "chromium",
+ "fennec",
+ "geckoview",
+ "fenix",
+ "refbrow",
+ ],
+ "help": (
+ "Shorthand name of application that is "
+ "being tested (used in perfherder data)."
+ ),
+ },
+ "stats": {
+ "action": "store_true",
+ "default": False,
+ "help": "If set, browsertime statistics will be reported.",
+ },
+ "timestamp": {
+ "type": float,
+ "default": None,
+ "help": (
+ "Timestamp to use for the perfherder data. Can be the "
+ "current date or a past date if needed."
+ ),
+ },
+ }
+ )
+
+ def run(self, metadata):
+ """Processes the given results into a perfherder-formatted data blob.
+
+ If the `--perfherder` flag isn't provided, then the
+ results won't be processed into a perfherder-data blob. If the
+ flavor is unknown to us, then we assume that it comes from
+ browsertime.
+
+ XXX If needed, make a way to do flavor-specific processing
+
+ :param metadata: The metadata object holding the results to process.
+ """
+ prefix = self.get_arg("prefix")
+ output = self.get_arg("output")
+
+ # XXX Make an argument for exclusions from metrics
+ # (or go directly to regexes for metrics)
+ exclusions = None
+ if not self.get_arg("stats"):
+ exclusions = ["statistics."]
+
+ # Get filtered metrics
+ metrics = self.get_arg("metrics")
+ results, fullsettings = filtered_metrics(
+ metadata,
+ output,
+ prefix,
+ metrics=metrics,
+ transformer=self.get_arg("transformer"),
+ settings=True,
+ exclude=exclusions,
+ split_by=self.get_arg("split-by"),
+ simplify_names=self.get_arg("simplify-names"),
+ simplify_exclude=self.get_arg("simplify-exclude"),
+ )
+
+ if not any([results[name] for name in results]):
+ self.warning("No results left after filtering")
+ return metadata
+
+ # XXX Add version info into this data
+ app_info = {"name": self.get_arg("app", default="firefox")}
+
+ # converting the metrics list into a mapping where
+ # keys are the metric names
+ if metrics is not None:
+ metrics = dict([(m["name"], m) for m in metrics])
+ else:
+ metrics = {}
+
+ all_perfherder_data = None
+ for name, res in results.items():
+ settings = dict(fullsettings[name])
+ # updating the settings with values provided in metrics, if any
+ if name in metrics:
+ settings.update(metrics[name])
+
+ # XXX Instead of just passing replicates here, we should build
+ # up a partial perfherder data blob (with options) and subtest
+ # overall values.
+ subtests = {}
+ for r in res:
+ vals = [v["value"] for v in r["data"] if is_number(v["value"])]
+ if vals:
+ subtests[r["subtest"]] = vals
+
+ perfherder_data = self._build_blob(
+ subtests,
+ name=name,
+ extra_options=settings.get("extraOptions"),
+ should_alert=strtobool(settings.get("shouldAlert", False)),
+ application=app_info,
+ alert_threshold=float(settings.get("alertThreshold", 2.0)),
+ lower_is_better=strtobool(settings.get("lowerIsBetter", True)),
+ unit=settings.get("unit", "ms"),
+ summary=settings.get("value"),
+ framework=settings.get("framework"),
+ metrics_info=metrics,
+ transformer=res[0].get("transformer", None),
+ )
+
+ if all_perfherder_data is None:
+ all_perfherder_data = perfherder_data
+ else:
+ all_perfherder_data["suites"].extend(perfherder_data["suites"])
+
+ if prefix:
+ # If a prefix was given, store it in the perfherder data as well
+ all_perfherder_data["prefix"] = prefix
+
+ timestamp = self.get_arg("timestamp")
+ if timestamp is not None:
+ all_perfherder_data["pushTimestamp"] = timestamp
+
+ # Validate the final perfherder data blob
+ with pathlib.Path(metadata._mach_cmd.topsrcdir, PERFHERDER_SCHEMA).open() as f:
+ schema = json.load(f)
+ jsonschema.validate(all_perfherder_data, schema)
+
+ file = "perfherder-data.json"
+ if prefix:
+ file = "{}-{}".format(prefix, file)
+ self.info("Writing perfherder results to {}".format(os.path.join(output, file)))
+
+ # XXX "suites" key error occurs when using self.info so a print
+ # is being done for now.
+
+ # print() will produce a BlockingIOError on large outputs, so we use
+ # sys.stdout
+ sys.stdout.write("PERFHERDER_DATA: ")
+ json.dump(all_perfherder_data, sys.stdout)
+ sys.stdout.write("\n")
+ sys.stdout.flush()
+
+ metadata.set_output(write_json(all_perfherder_data, output, file))
+ return metadata
+
+ def _build_blob(
+ self,
+ subtests,
+ name="browsertime",
+ test_type="pageload",
+ extra_options=None,
+ should_alert=False,
+ subtest_should_alert=None,
+ suiteshould_alert=False,
+ framework=None,
+ application=None,
+ alert_threshold=2.0,
+ lower_is_better=True,
+ unit="ms",
+ summary=None,
+ metrics_info=None,
+ transformer=None,
+ ):
+ """Build a PerfHerder data blob from the given subtests.
+
+ NOTE: This is a WIP, see the many TODOs across this file.
+
+ Given a dictionary of subtests and their values, build up a
+ perfherder data blob. Note that the naming convention for
+ these arguments differs from the rest of the scripts to make
+ it easier to see where they end up in the perfherder data.
+
+ For the `should_alert` field, if should_alert is True but `subtest_should_alert`
+ is empty, then all subtests along with the suite will generate alerts.
+ Otherwise, if the subtest_should_alert contains subtests to alert on, then
+ only those will alert and nothing else (including the suite). If the
+ suite value should alert, then set `suiteshould_alert` to True.
+
+ :param subtests dict: A dictionary of subtests and the values.
+ XXX TODO items for subtests:
+ (1) Allow it to contain replicates and individual settings
+ for each of the subtests.
+ (2) The geomean of the replicates will be taken for now,
+ but it should be made more flexible in some way.
+ (3) We need some way to handle making multiple suites.
+ :param name str: Name to give to the suite.
+ :param test_type str: The type of test that was run.
+ :param extra_options list: A list of extra options to store.
+ :param should_alert bool: Whether all values in the suite should
+ generate alerts or not.
+ :param subtest_should_alert list: A list of subtests to alert on. If this
+ is not empty, then it will disable the suite-level alerts.
+ :param suiteshould_alert bool: Used if `subtest_should_alert` is not
+ empty, and if True, then the suite-level value will generate
+ alerts.
+ :param framework dict: Information about the framework that
+ is being tested.
+ :param application dict: Information about the application that
+ is being tested. Must include name, and optionally a version.
+ :param alert_threshold float: The change in percentage this
+ metric must undergo to generate an alert.
+ :param lower_is_better bool: If True, then lower values are better
+ than higher ones.
+ :param unit str: The unit of the data.
+ :param summary float: The summary value to use in the perfherder
+ data blob. By default, the mean of all the subtests will be
+ used.
+ :param metrics_info dict: Contains a mapping of metric names to the
+ options that are used on the metric.
+ :param transformer str: The name of a predefined transformer, a module
+ path to a transform, or a path to the file containing the transformer.
+
+ :return dict: The PerfHerder data blob.
+ """
+ if extra_options is None:
+ extra_options = []
+ if subtest_should_alert is None:
+ subtest_should_alert = []
+ if framework is None:
+ framework = {"name": "mozperftest"}
+ if application is None:
+ application = {"name": "firefox", "version": "9000"}
+ if metrics_info is None:
+ metrics_info = {}
+
+ # Use the transform to produce a suite value
+ const = Constant()
+ tfm_cls = None
+ transformer_obj = None
+ if transformer and transformer in const.predefined_transformers:
+ # A pre-built transformer name was given
+ tfm_cls = const.predefined_transformers[transformer]
+ transformer_obj = tfm_cls()
+ elif transformer is not None:
+ tfm_cls = get_transformer(transformer)
+ transformer_obj = tfm_cls()
+ else:
+ self.warning(
+ "No transformer found for this suite. Cannot produce a summary value."
+ )
+
+ perf_subtests = []
+ suite = {
+ "name": name,
+ "type": test_type,
+ "unit": unit,
+ "extraOptions": extra_options,
+ "lowerIsBetter": lower_is_better,
+ "alertThreshold": alert_threshold,
+ "shouldAlert": (should_alert and not subtest_should_alert)
+ or suiteshould_alert,
+ "subtests": perf_subtests,
+ }
+
+ perfherder = {
+ "suites": [suite],
+ "framework": framework,
+ "application": application,
+ }
+
+ allvals = []
+ alert_thresholds = []
+ for measurement in subtests:
+ reps = subtests[measurement]
+ allvals.extend(reps)
+
+ if len(reps) == 0:
+ self.warning("No replicates found for {}, skipping".format(measurement))
+ continue
+
+ # Gather extra settings specified from within a metric specification
+ subtest_lower_is_better = lower_is_better
+ subtest_unit = unit
+ for met in metrics_info:
+ if met not in measurement:
+ continue
+
+ extra_options.extend(metrics_info[met].get("extraOptions", []))
+ alert_thresholds.append(
+ metrics_info[met].get("alertThreshold", alert_threshold)
+ )
+
+ subtest_unit = metrics_info[met].get("unit", unit)
+ subtest_lower_is_better = metrics_info[met].get(
+ "lowerIsBetter", lower_is_better
+ )
+
+ if metrics_info[met].get("shouldAlert", should_alert):
+ subtest_should_alert.append(measurement)
+
+ break
+
+ subtest = {
+ "name": measurement,
+ "replicates": reps,
+ "lowerIsBetter": subtest_lower_is_better,
+ "value": None,
+ "unit": subtest_unit,
+ "shouldAlert": should_alert or measurement in subtest_should_alert,
+ }
+
+ if has_callable_method(transformer_obj, "subtest_summary"):
+ subtest["value"] = transformer_obj.subtest_summary(subtest)
+ if subtest["value"] is None:
+ subtest["value"] = statistics.mean(reps)
+
+ perf_subtests.append(subtest)
+
+ if len(allvals) == 0:
+ raise PerfherderValidDataError(
+ "Could not build perfherder data blob because no valid data was provided, "
+ + "only int/float data is accepted."
+ )
+
+ alert_thresholds = list(set(alert_thresholds))
+ if len(alert_thresholds) > 1:
+ raise PerfherderValidDataError(
+ "Too many alertThreshold's were specified, expecting 1 but found "
+ + f"{len(alert_thresholds)}"
+ )
+ elif len(alert_thresholds) == 1:
+ suite["alertThreshold"] = alert_thresholds[0]
+
+ suite["extraOptions"] = list(set(suite["extraOptions"]))
+
+ if has_callable_method(transformer_obj, "summary"):
+ val = transformer_obj.summary(suite)
+ if val is not None:
+ suite["value"] = val
+
+ return perfherder
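As a rough illustration of what `_build_blob` emits, here is a hand-built blob with the same suite/subtest structure; the names and numbers are invented, and the summary values are plain means rather than output from a transformer's `summary`/`subtest_summary` methods:

import statistics

subtests = {"firstPaint": [120.0, 118.0, 131.0], "loadtime": [850.0, 903.0]}

suite = {
    "name": "example-test",
    "type": "pageload",
    "unit": "ms",
    "extraOptions": [],
    "lowerIsBetter": True,
    "alertThreshold": 2.0,
    "shouldAlert": False,
    "subtests": [
        {
            "name": name,
            "replicates": reps,
            "value": statistics.mean(reps),
            "unit": "ms",
            "lowerIsBetter": True,
            "shouldAlert": False,
        }
        for name, reps in subtests.items()
    ],
}

perfherder_data = {
    "framework": {"name": "mozperftest"},
    "application": {"name": "firefox"},
    "suites": [suite],
}

print(perfherder_data["suites"][0]["subtests"][0]["value"])  # 123.0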
diff --git a/python/mozperftest/mozperftest/metrics/utils.py b/python/mozperftest/mozperftest/metrics/utils.py
new file mode 100644
index 0000000000..a947434684
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/utils.py
@@ -0,0 +1,149 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import ast
+import json
+import os
+import pathlib
+import re
+
+from jsonschema import validate
+from jsonschema.exceptions import ValidationError
+
+# Get the jsonschema for intermediate results
+PARENT = pathlib.Path(__file__).parent.parent
+with pathlib.Path(PARENT, "schemas", "intermediate-results-schema.json").open() as f:
+ IR_SCHEMA = json.load(f)
+
+
+# These are the properties we know about in the schema.
+# If anything other than these is present, then we will
+# fail validation.
+KNOWN_PERFHERDER_PROPS = set(
+ ["name", "value", "unit", "lowerIsBetter", "shouldAlert", "alertThreshold"]
+)
+KNOWN_SUITE_PROPS = set(
+ set(["results", "transformer", "transformer-options", "extraOptions", "framework"])
+ | KNOWN_PERFHERDER_PROPS
+)
+KNOWN_SINGLE_MEASURE_PROPS = set(set(["values"]) | KNOWN_PERFHERDER_PROPS)
+
+
+# Regex splitter for the metric fields - used to handle
+# the case when `,` is found within the options values.
+METRIC_SPLITTER = re.compile(r",\s*(?![^\[\]]*\])")
+
+
+def is_number(value):
+ """Determines if the value is an int/float."""
+ return isinstance(value, (int, float)) and not isinstance(value, bool)
+
+
+def has_callable_method(obj, method_name):
+ """Determines if an object/class has a callable method."""
+ if obj and hasattr(obj, method_name) and callable(getattr(obj, method_name)):
+ return True
+ return False
+
+
+def open_file(path):
+ """Opens a file and returns its contents.
+
+ :param path str: Path to the file. If it is a JSON
+ file, a dict is returned; otherwise the raw
+ contents (not split by line) are returned.
+ :return dict/str: Returns a dict for JSON data, and
+ a str for any other type.
+ """
+ print("Reading %s" % path)
+ with open(path) as f:
+ if os.path.splitext(path)[-1] == ".json":
+ return json.load(f)
+ return f.read()
+
+
+def write_json(data, path, file):
+ """Writes data to a JSON file.
+
+ :param data dict: Data to write.
+ :param path str: Directory of where the data will be stored.
+ :param file str: Name of the JSON file.
+ :return str: Path to the output.
+ """
+ path = os.path.join(path, file)
+ with open(path, "w+") as f:
+ json.dump(data, f)
+ return path
+
+
+def validate_intermediate_results(results):
+ """Validates intermediate results coming from the browser layer.
+
+ This function exists because there is no reasonable way to implement
+ inheritance with `jsonschema` until the `unevaluatedProperties` keyword
+ is supported by the validation module. Until then, this function
+ checks that only known properties are present in the results and
+ raises a jsonschema.ValidationError for any unknown property.
+
+ :param results dict: The intermediate results to validate.
+ :raises ValidationError: Raised when validation fails.
+ """
+ # Start with the standard validation
+ validate(results, IR_SCHEMA)
+
+ # Now ensure that we have no extra keys
+ suite_keys = set(list(results.keys()))
+ unknown_keys = suite_keys - KNOWN_SUITE_PROPS
+ if unknown_keys:
+ raise ValidationError(f"Found unknown suite-level keys: {list(unknown_keys)}")
+ if isinstance(results["results"], str):
+ # Nothing left to verify
+ return
+
+ # The results are split by measurement so we need to
+ # check that each of those entries have no extra keys
+ for entry in results["results"]:
+ measurement_keys = set(list(entry.keys()))
+ unknown_keys = measurement_keys - KNOWN_SINGLE_MEASURE_PROPS
+ if unknown_keys:
+ raise ValidationError(
+ "Found unknown single-measure-level keys for "
+ f"{entry['name']}: {list(unknown_keys)}"
+ )
+
+
+def metric_fields(value):
+ # old form: just the name
+ if "," not in value and ":" not in value:
+ return {"name": value}
+
+ def _check(field):
+ sfield = field.strip().partition(":")
+ if len(sfield) != 3 or not (sfield[1] and sfield[2]):
+ raise ValueError(f"Unexpected metrics definition {field}")
+ if sfield[0] not in KNOWN_SUITE_PROPS:
+ raise ValueError(
+ f"Unknown field '{sfield[0]}', should be in " f"{KNOWN_SUITE_PROPS}"
+ )
+
+ sfield = [sfield[0], sfield[2]]
+
+ try:
+ # This handles dealing with parsing lists
+ # from a string
+ sfield[1] = ast.literal_eval(sfield[1])
+ except (ValueError, SyntaxError):
+ # Ignore failures, those are from instances
+ # which don't need to be converted from a python
+ # representation
+ pass
+
+ return sfield
+
+ fields = [field.strip() for field in METRIC_SPLITTER.split(value)]
+ res = dict([_check(field) for field in fields])
+ if "name" not in res:
+ raise ValueError(f"{value} misses the 'name' field")
+ return res
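To make the metric-definition parsing concrete, a couple of hypothetical `metric_fields` calls (the field names are just examples drawn from KNOWN_SUITE_PROPS):

from mozperftest.metrics.utils import metric_fields

# old form: just the metric name
print(metric_fields("firstPaint"))
# {'name': 'firstPaint'}

# new form: comma-separated key:value pairs; METRIC_SPLITTER leaves commas
# inside [...] alone, so list values such as extraOptions survive intact
print(metric_fields("name:firstPaint,extraOptions:['cold','e10s'],unit:ms"))
# {'name': 'firstPaint', 'extraOptions': ['cold', 'e10s'], 'unit': 'ms'}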
diff --git a/python/mozperftest/mozperftest/metrics/visualmetrics.py b/python/mozperftest/mozperftest/metrics/visualmetrics.py
new file mode 100644
index 0000000000..068440d6f2
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/visualmetrics.py
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import errno
+import json
+import os
+import sys
+from pathlib import Path
+
+from mozfile import which
+
+from mozperftest.layers import Layer
+from mozperftest.utils import run_script, silence
+
+METRICS_FIELDS = (
+ "SpeedIndex",
+ "FirstVisualChange",
+ "LastVisualChange",
+ "VisualProgress",
+ "videoRecordingStart",
+)
+
+
+class VisualData:
+ def open_data(self, data):
+ res = {
+ "name": "visualmetrics",
+ "subtest": data["name"],
+ "data": [
+ {"file": "visualmetrics", "value": value, "xaxis": xaxis}
+ for xaxis, value in enumerate(data["values"])
+ ],
+ }
+ return res
+
+ def transform(self, data):
+ return data
+
+ def merge(self, data):
+ return data
+
+
+class VisualMetrics(Layer):
+ """Wrapper around Browsertime's visualmetrics.py script"""
+
+ name = "visualmetrics"
+ activated = False
+ arguments = {}
+
+ def setup(self):
+ self.metrics = {}
+ self.metrics_fields = []
+
+ # making sure we have ffmpeg and imagemagick available
+ for tool in ("ffmpeg", "convert"):
+ if sys.platform in ("win32", "msys"):
+ tool += ".exe"
+ path = which(tool)
+ if not path:
+ raise OSError(errno.ENOENT, f"Could not find {tool}")
+
+ def run(self, metadata):
+ if "VISUALMETRICS_PY" not in os.environ:
+ raise OSError(
+ "The VISUALMETRICS_PY environment variable is not set."
+ "Make sure you run the browsertime layer"
+ )
+ path = Path(os.environ["VISUALMETRICS_PY"])
+ if not path.exists():
+ raise FileNotFoundError(str(path))
+
+ self.visualmetrics = path
+ treated = 0
+
+ for result in metadata.get_results():
+ result_dir = result.get("results")
+ if result_dir is None:
+ continue
+ result_dir = Path(result_dir)
+ if not result_dir.is_dir():
+ continue
+ browsertime_json = Path(result_dir, "browsertime.json")
+ if not browsertime_json.exists():
+ continue
+ treated += self.run_visual_metrics(browsertime_json)
+
+ self.info(f"Treated {treated} videos.")
+
+ if len(self.metrics) > 0:
+ metadata.add_result(
+ {
+ "name": metadata.script["name"] + "-vm",
+ "framework": {"name": "mozperftest"},
+ "transformer": "mozperftest.metrics.visualmetrics:VisualData",
+ "results": list(self.metrics.values()),
+ }
+ )
+
+ # we also extend --perfherder-metrics and --console-metrics if they
+ # are activated
+ def add_to_option(name):
+ existing = self.get_arg(name, [])
+ for field in self.metrics_fields:
+ existing.append({"name": field, "unit": "ms"})
+ self.env.set_arg(name, existing)
+
+ if self.get_arg("perfherder"):
+ add_to_option("perfherder-metrics")
+
+ if self.get_arg("console"):
+ add_to_option("console-metrics")
+
+ else:
+ self.warning("No video was treated.")
+ return metadata
+
+ def run_visual_metrics(self, browsertime_json):
+ verbose = self.get_arg("verbose")
+ self.info(f"Looking at {browsertime_json}")
+ venv = self.mach_cmd.virtualenv_manager
+
+ class _display:
+ def __enter__(self, *args, **kw):
+ return self
+
+ __exit__ = __enter__
+
+ may_silence = silence if not verbose else _display
+
+ with browsertime_json.open() as f:
+ browsertime_json_data = json.loads(f.read())
+
+ videos = 0
+ global_options = [
+ str(self.visualmetrics),
+ "--orange",
+ "--perceptual",
+ "--contentful",
+ "--force",
+ "--renderignore",
+ "5",
+ "--viewport",
+ ]
+ if verbose:
+ global_options += ["-vvv"]
+
+ for site in browsertime_json_data:
+ # collecting metrics from browserScripts
+ # because it can be used in splitting
+ for index, bs in enumerate(site["browserScripts"]):
+ for name, val in bs.items():
+ if not isinstance(val, (str, int)):
+ continue
+ self.append_metrics(index, name, val)
+
+ extra = {"lowerIsBetter": True, "unit": "ms"}
+
+ for index, video in enumerate(site["files"]["video"]):
+ videos += 1
+ video_path = browsertime_json.parent / video
+ output = "[]"
+ with may_silence():
+ res, output = run_script(
+ venv.python_path,
+ global_options + ["--video", str(video_path), "--json"],
+ verbose=verbose,
+ label="visual metrics",
+ display=False,
+ )
+ if not res:
+ self.error(f"Failed {res}")
+ continue
+
+ output = output.strip()
+ if verbose:
+ self.info(str(output))
+ try:
+ output = json.loads(output)
+ except json.JSONDecodeError:
+ self.error("Could not read the json output from visualmetrics.py")
+ continue
+
+ for name, value in output.items():
+ if name.endswith(
+ "Progress",
+ ):
+ self._expand_visual_progress(index, name, value, **extra)
+ else:
+ self.append_metrics(index, name, value, **extra)
+
+ return videos
+
+ def _expand_visual_progress(self, index, name, value, **fields):
+ def _split_percent(val):
+ # value is of the form "567=94%"
+ val = val.split("=")
+ value, percent = val[0].strip(), val[1].strip()
+ if percent.endswith("%"):
+ percent = percent[:-1]
+ return int(percent), int(value)
+
+ percents = [_split_percent(elmt) for elmt in value.split(",")]
+
+ # we want to keep the first added value for each percent
+ # so the trick here is to create a dict() with the reversed list
+ percents = dict(reversed(percents))
+
+ # keep the first 5 percents after sorting (the lowest percentages)
+ percents = list(percents.items())
+ percents.sort()
+ for percent, value in percents[:5]:
+ self.append_metrics(index, f"{name}{percent}", value, **fields)
+
+ def append_metrics(self, index, name, value, **fields):
+ if name not in self.metrics_fields:
+ self.metrics_fields.append(name)
+ if name not in self.metrics:
+ self.metrics[name] = {"name": name, "values": []}
+
+ self.metrics[name]["values"].append(value)
+ self.metrics[name].update(**fields)
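A standalone sketch of the VisualProgress expansion above, using an invented progress string in the `value=percent%` form; the final slice mirrors the `[:5]` taken in `_expand_visual_progress`:

def split_percent(elmt):
    # "467=53%" -> (53, 467)
    value, _, percent = elmt.partition("=")
    return int(percent.strip().rstrip("%")), int(value)


raw = "0=0%, 467=53%, 700=78%, 933=91%, 1167=95%, 1400=100%"

# reversing before building the dict keeps the first value seen per percent
percents = dict(reversed([split_percent(e) for e in raw.split(",")]))

for percent, value in sorted(percents.items())[:5]:
    print(f"VisualProgress{percent} = {value}")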