Diffstat (limited to 'testing/raptor/browsertime')
-rw-r--r--  testing/raptor/browsertime/motionmark-1-3.js                         | 107
-rw-r--r--  testing/raptor/browsertime/support-scripts/motionmark-1-3.py         |  91
-rw-r--r--  testing/raptor/browsertime/support-scripts/sample_python_support.py  |   2
3 files changed, 199 insertions(+), 1 deletion(-)
diff --git a/testing/raptor/browsertime/motionmark-1-3.js b/testing/raptor/browsertime/motionmark-1-3.js
new file mode 100644
index 0000000000..c240b2dddc
--- /dev/null
+++ b/testing/raptor/browsertime/motionmark-1-3.js
@@ -0,0 +1,107 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* eslint-env node */
+
+// index for the CSS selector in developer.html
+const suiteSelectorNumber = {
+  MotionMark: 1,
+  "HTML suite": 2,
+};
+
+module.exports = async function (context, commands) {
+  context.log.info("Starting MotionMark 1.3 test");
+  let url = context.options.browsertime.url;
+  let page_cycles = context.options.browsertime.page_cycles;
+  let suite_name = context.options.browsertime.suite_name;
+  let page_cycle_delay = context.options.browsertime.page_cycle_delay;
+  let post_startup_delay = context.options.browsertime.post_startup_delay;
+  let page_timeout = context.options.timeouts.pageLoad;
+  let expose_profiler = context.options.browsertime.expose_profiler;
+
+  context.log.info(
+    "Waiting for %d ms (post_startup_delay)",
+    post_startup_delay
+  );
+  await commands.wait.byTime(post_startup_delay);
+
+  for (let count = 0; count < page_cycles; count++) {
+    context.log.info("Navigating to about:blank");
+    await commands.navigate("about:blank");
+
+    context.log.info("Cycle %d, waiting for %d ms", count, page_cycle_delay);
+    await commands.wait.byTime(page_cycle_delay);
+
+    context.log.info("Cycle %d, starting the measure", count);
+    if (expose_profiler === "true") {
+      context.log.info("Custom profiler start!");
+      if (context.options.browser === "firefox") {
+        await commands.profiler.start();
+      } else if (context.options.browser === "chrome") {
+        await commands.trace.start();
+      }
+    }
+    await commands.measure.start(url);
+
+    let suite_selector = `#suites > ul > li:nth-child(${suiteSelectorNumber[suite_name]}) > label > input[type="checkbox"]`;
+
+    await commands.mouse.singleClick.bySelector(suite_selector);
+    await commands.js.runAndWait(`
+      this.benchmarkController.startBenchmark()
+    `);
+
+    let data_exists = null;
+    let starttime = await commands.js.run(`return performance.now();`);
+    while (
+      (data_exists == null || !Object.keys(data_exists).length) &&
+      (await commands.js.run(`return performance.now();`)) - starttime <
+        page_timeout
+    ) {
+      let wait_time = 3000;
+      context.log.info(
+        "Waiting %d ms for data from %s...",
+        wait_time,
+        suite_name
+      );
+      await commands.wait.byTime(wait_time);
+
+      data_exists = await commands.js.run(`
+        return window.benchmarkRunnerClient.results.data
+      `);
+    }
+
+    if (expose_profiler === "true") {
+      context.log.info("Custom profiler stop!");
+      if (context.options.browser === "firefox") {
+        await commands.profiler.stop();
+      } else if (context.options.browser === "chrome") {
+        await commands.trace.stop();
+      }
+    }
+    if (
+      !data_exists &&
+      (await commands.js.run(`return performance.now();`)) - starttime >=
+        page_timeout
+    ) {
+      context.log.error("Benchmark timed out. Aborting...");
+      return false;
+    }
+
+    let data = null;
+    data = await commands.js.run(`
+      const score = window.benchmarkRunnerClient.results.score;
+      const results = window.benchmarkRunnerClient.results.results[0].testsResults;
+      return {
+        score,
+        results,
+      };
+    `);
+    data.suite_name = suite_name;
+
+    commands.measure.addObject({ mm_res: data });
+    context.log.info("Value of summarized benchmark data: ", data);
+  }
+
+  return true;
+};
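
The script above hands browsertime one `{ mm_res: ... }` object per page cycle
via `commands.measure.addObject`. A minimal sketch of that payload follows; the
numbers, suite, and test names are invented, and only the shape mirrors what the
JS collects (`score` from `benchmarkRunnerClient.results.score`, `results` from
`results.results[0].testsResults`, plus the injected `suite_name`):

    # Hypothetical extras list as the Python support script would see it,
    # i.e. raw_result["extras"]; one entry per page cycle.
    sample_extras = [
        {
            "mm_res": {
                "score": 431.416,
                "suite_name": "MotionMark",
                "results": {
                    "MotionMark": {
                        "Multiply": {"complexity": {"bootstrap": {"median": 389.2}}},
                        "Canvas Arcs": {"complexity": {"bootstrap": {"median": 512.7}}},
                    }
                },
            }
        }
    ]
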
diff --git a/testing/raptor/browsertime/support-scripts/motionmark-1-3.py b/testing/raptor/browsertime/support-scripts/motionmark-1-3.py
new file mode 100644
index 0000000000..713935fd3f
--- /dev/null
+++ b/testing/raptor/browsertime/support-scripts/motionmark-1-3.py
@@ -0,0 +1,91 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import filters
+from base_python_support import BasePythonSupport
+
+
+class MotionMarkSupport(BasePythonSupport):
+    def handle_result(self, bt_result, raw_result, **kwargs):
+        """Parse a result for the required results.
+
+        See base_python_support.py for what's expected from this method.
+        """
+        suite_name = raw_result["extras"][0]["mm_res"]["suite_name"]
+        score_tracker = {
+            subtest: []
+            for subtest in raw_result["extras"][0]["mm_res"]["results"][
+                suite_name
+            ].keys()
+        }
+
+        motionmark_overall_score = []
+        for res in raw_result["extras"]:
+            motionmark_overall_score.append(round(res["mm_res"]["score"], 3))
+
+            for k, v in res["mm_res"]["results"][suite_name].items():
+                score_tracker[k].append(v["complexity"]["bootstrap"]["median"])
+
+        for k, v in score_tracker.items():
+            bt_result["measurements"][k] = v
+
+        bt_result["measurements"]["score"] = motionmark_overall_score
+
+    def _build_subtest(self, measurement_name, replicates, test):
+        unit = test.get("unit", "ms")
+        if test.get("subtest_unit"):
+            unit = test.get("subtest_unit")
+
+        lower_is_better = test.get(
+            "subtest_lower_is_better", test.get("lower_is_better", True)
+        )
+        if "score" in measurement_name:
+            lower_is_better = False
+            unit = "score"
+
+        subtest = {
+            "unit": unit,
+            "alertThreshold": float(test.get("alert_threshold", 2.0)),
+            "lowerIsBetter": lower_is_better,
+            "name": measurement_name,
+            "replicates": replicates,
+            "value": round(filters.mean(replicates), 3),
+        }
+
+        return subtest
+
+    def summarize_test(self, test, suite, **kwargs):
+        """Summarize the measurements found in the test as a suite with subtests.
+
+        See base_python_support.py for what's expected from this method.
+        """
+        suite["type"] = "benchmark"
+        if suite["subtests"] == {}:
+            suite["subtests"] = []
+        for measurement_name, replicates in test["measurements"].items():
+            if not replicates:
+                continue
+            suite["subtests"].append(
+                self._build_subtest(measurement_name, replicates, test)
+            )
+        suite["subtests"].sort(key=lambda subtest: subtest["name"])
+
+        score = 0
+        for subtest in suite["subtests"]:
+            if subtest["name"] == "score":
+                score = subtest["value"]
+                break
+        suite["value"] = score
+
+    def modify_command(self, cmd, test):
+        """Modify the browsertime command to have the appropriate suite name.
+
+        This is necessary to grab the correct CSS selector in the browsertime
+        script, and later for parsing through the final benchmark data in the
+        support python script (this file).
+
+        Current options are `MotionMark` and `HTML suite`.
+        """
+
+        cmd += ["--browsertime.suite_name", test.get("suite_name")]
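
For reference, handle_result above boils down to the following standalone
aggregation, shown here against the invented payload shape from the earlier
sketch rather than the real harness objects:

    # Collapse per-cycle mm_res payloads into measurement lists: one overall
    # score per cycle, plus one bootstrap-median complexity per subtest per cycle.
    def aggregate(extras):
        suite_name = extras[0]["mm_res"]["suite_name"]
        measurements = {name: [] for name in extras[0]["mm_res"]["results"][suite_name]}
        scores = []
        for res in extras:
            scores.append(round(res["mm_res"]["score"], 3))
            for name, result in res["mm_res"]["results"][suite_name].items():
                measurements[name].append(result["complexity"]["bootstrap"]["median"])
        measurements["score"] = scores
        return measurements

    # e.g. aggregate(sample_extras)
    # -> {"Multiply": [389.2], "Canvas Arcs": [512.7], "score": [431.416]}

Each measurement thus ends up as a list of per-cycle replicates, which
_build_subtest later averages with filters.mean and rounds to three decimals;
summarize_test then promotes the "score" subtest's mean to the suite value.
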
diff --git a/testing/raptor/browsertime/support-scripts/sample_python_support.py b/testing/raptor/browsertime/support-scripts/sample_python_support.py
index a1ec0069a5..b31e890c0a 100644
--- a/testing/raptor/browsertime/support-scripts/sample_python_support.py
+++ b/testing/raptor/browsertime/support-scripts/sample_python_support.py
@@ -6,7 +6,7 @@ from base_python_support import BasePythonSupport
 
 
 class SamplePythonSupport(BasePythonSupport):
-    def modify_command(self, cmd):
+    def modify_command(self, cmd, test):
         for i, entry in enumerate(cmd):
             if "{replace-with-constant-value}" in entry:
                 cmd[i] = "25"
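
To illustrate the modify_command hook end to end: the harness builds the
browsertime command list and passes it to each support script, which may append
options for the JS script to read back. The command and test values below are
hypothetical; only the appended flag comes from the code above:

    # Invented command list and test definition for illustration.
    cmd = ["browsertime", "--browser", "firefox"]
    test = {"suite_name": "HTML suite"}
    cmd += ["--browsertime.suite_name", test.get("suite_name")]
    # The JS side reads this back as context.options.browsertime.suite_name
    # and maps it through suiteSelectorNumber to the nth-child checkbox selector.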