summaryrefslogtreecommitdiffstats
path: root/python/mozperftest/mozperftest/metrics/consoleoutput.py
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 19:33:14 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 19:33:14 +0000
commit36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree105e8c98ddea1c1e4784a60a5a6410fa416be2de /python/mozperftest/mozperftest/metrics/consoleoutput.py
parentInitial commit. (diff)
downloadfirefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.tar.xz
firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.zip
Adding upstream version 115.7.0esr.upstream/115.7.0esrupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'python/mozperftest/mozperftest/metrics/consoleoutput.py')
-rw-r--r--python/mozperftest/mozperftest/metrics/consoleoutput.py59
1 file changed, 59 insertions, 0 deletions
diff --git a/python/mozperftest/mozperftest/metrics/consoleoutput.py b/python/mozperftest/mozperftest/metrics/consoleoutput.py
new file mode 100644
index 0000000000..a4d544f3ef
--- /dev/null
+++ b/python/mozperftest/mozperftest/metrics/consoleoutput.py
@@ -0,0 +1,59 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import os
+
+from mozperftest.layers import Layer
+from mozperftest.metrics.common import COMMON_ARGS, filtered_metrics
+
# Console banner for one result set. The first placeholder receives the
# result name, the second the formatted per-subtest lines.
RESULTS_TEMPLATE = """\

==========================================================
 Results ({})
==========================================================

{}

"""
+
+
class ConsoleOutput(Layer):
    """Output metrics in the console.

    Runs the collected results through ``filtered_metrics`` and prints a
    human-readable summary of each result set via the layer's logger.
    """

    name = "console"
    # By default activate the console layer when running locally
    # (MOZ_AUTOMATION is presumably set on CI workers — standard Mozilla
    # convention; confirm against the automation environment).
    activated = "MOZ_AUTOMATION" not in os.environ
    arguments = COMMON_ARGS

    def run(self, metadata):
        """Print filtered metrics to the console.

        Args:
            metadata: run metadata object holding the collected results.

        Returns:
            The same ``metadata`` object, unchanged, so the layer chain
            can continue.
        """
        # Get filtered metrics
        results = filtered_metrics(
            metadata,
            self.get_arg("output"),
            self.get_arg("prefix"),
            metrics=self.get_arg("metrics"),
            transformer=self.get_arg("transformer"),
            split_by=self.get_arg("split-by"),
            simplify_names=self.get_arg("simplify-names"),
            simplify_exclude=self.get_arg("simplify-exclude"),
        )

        if not results:
            self.warning("No results left after filtering")
            return metadata

        for name, res in results.items():
            # Make a nicer view of the data: one "subtest: [values]" line
            # per entry.
            subtests = [
                "{}: {}".format(r["subtest"], [v["value"] for v in r["data"]])
                for r in res
            ]

            # Use the module-level RESULTS_TEMPLATE so the result name
            # appears in the banner. Previously the template (and the
            # `name` loop variable) were defined but unused, and the
            # hand-built banner omitted the name.
            self.info(RESULTS_TEMPLATE.format(name, "\n".join(subtests)))
        return metadata