author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit     26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree       f435a8308119effd964b339f76abb83a57c29483  /testing/awsy
parent     Initial commit. (diff)
download   firefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz
           firefox-26a029d407be480d791972afb5975cf62c9360a6.zip
Adding upstream version 124.0.1.  (upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'testing/awsy')
-rw-r--r--  testing/awsy/README.md | 2
-rw-r--r--  testing/awsy/awsy/__init__.py | 24
-rw-r--r--  testing/awsy/awsy/awsy_test_case.py | 398
-rw-r--r--  testing/awsy/awsy/parse_about_memory.py | 173
-rw-r--r--  testing/awsy/awsy/process_perf_data.py | 214
-rw-r--r--  testing/awsy/awsy/test_base_memory_usage.py | 135
-rw-r--r--  testing/awsy/awsy/test_memory_usage.py | 238
-rw-r--r--  testing/awsy/awsy/webservers.py | 96
-rw-r--r--  testing/awsy/conf/base-prefs.json | 15
-rw-r--r--  testing/awsy/conf/base-testvars.json | 5
-rw-r--r--  testing/awsy/conf/prefs.json | 13
-rw-r--r--  testing/awsy/conf/testvars.json | 6
-rw-r--r--  testing/awsy/conf/tp6-pages.yml | 32
-rw-r--r--  testing/awsy/conf/tp6-prefs.json | 15
-rw-r--r--  testing/awsy/conf/tp6-testvars.json | 5
-rw-r--r--  testing/awsy/mach_commands.py | 343
-rw-r--r--  testing/awsy/moz.build | 9
-rw-r--r--  testing/awsy/perfdocs/config.yaml | 16
-rw-r--r--  testing/awsy/perfdocs/index.rst | 168
-rw-r--r--  testing/awsy/requirements.txt | 2
-rw-r--r--  testing/awsy/setup.py | 29
-rw-r--r--  testing/awsy/tp5n-pageset.manifest | 10
-rw-r--r--  testing/awsy/tp6-pageset.manifest | 233
23 files changed, 2181 insertions, 0 deletions
diff --git a/testing/awsy/README.md b/testing/awsy/README.md
new file mode 100644
index 0000000000..ca72bd9ca0
--- /dev/null
+++ b/testing/awsy/README.md
@@ -0,0 +1,2 @@
+# awsy-lite
+Barebones Are We Slim Yet test.
diff --git a/testing/awsy/awsy/__init__.py b/testing/awsy/awsy/__init__.py
new file mode 100644
index 0000000000..ff457dd164
--- /dev/null
+++ b/testing/awsy/awsy/__init__.py
@@ -0,0 +1,24 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Maximum number of tabs to open
+MAX_TABS = 30
+
+# Default number of seconds to wait between opening tabs
+PER_TAB_PAUSE = 10
+
+# Default number of seconds to wait for things to settle down
+SETTLE_WAIT_TIME = 30
+
+# Number of times to run through the test suite
+ITERATIONS = 5
+
+__all__ = [
+ "MAX_TABS",
+ "PER_TAB_PAUSE",
+ "SETTLE_WAIT_TIME",
+ "ITERATIONS",
+ "webservers",
+ "process_perf_data",
+]
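
These module constants are only fallback defaults: AwsyTestCase.setUp() in awsy_test_case.py reads the matching testvars keys and falls back to these values when a key is absent. A minimal sketch of that lookup, where the testvars dict is a hypothetical stand-in for the harness-provided one and testing/awsy is assumed to be on sys.path:

    # Sketch of how the defaults above are consumed (mirrors AwsyTestCase.setUp).
    from awsy import ITERATIONS, MAX_TABS, PER_TAB_PAUSE, SETTLE_WAIT_TIME

    testvars = {"iterations": 3, "perTabPause": 10}  # hypothetical
    iterations = testvars.get("iterations", ITERATIONS)
    per_tab_pause = testvars.get("perTabPause", PER_TAB_PAUSE)
    settle_wait_time = testvars.get("settleWaitTime", SETTLE_WAIT_TIME)
    max_tabs = testvars.get("maxTabs", MAX_TABS)
    print(iterations, per_tab_pause, settle_wait_time, max_tabs)
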
diff --git a/testing/awsy/awsy/awsy_test_case.py b/testing/awsy/awsy/awsy_test_case.py
new file mode 100644
index 0000000000..4a2c2361bd
--- /dev/null
+++ b/testing/awsy/awsy/awsy_test_case.py
@@ -0,0 +1,398 @@
+# -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import fnmatch
+import glob
+import gzip
+import json
+import os
+import shutil
+import sys
+import tempfile
+import time
+
+import mozlog.structured
+from marionette_driver import Wait
+from marionette_driver.errors import JavascriptException, ScriptTimeoutException
+from marionette_driver.keys import Keys
+from marionette_harness import MarionetteTestCase
+
+AWSY_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+if AWSY_PATH not in sys.path:
+ sys.path.append(AWSY_PATH)
+
+from awsy import (
+ ITERATIONS,
+ MAX_TABS,
+ PER_TAB_PAUSE,
+ SETTLE_WAIT_TIME,
+ process_perf_data,
+)
+
+
+class AwsyTestCase(MarionetteTestCase):
+ """
+ Base test case for AWSY tests.
+ """
+
+ def urls(self):
+ raise NotImplementedError()
+
+ def perf_suites(self):
+ raise NotImplementedError()
+
+ def perf_checkpoints(self):
+ raise NotImplementedError()
+
+ def perf_extra_opts(self):
+ return None
+
+ def iterations(self):
+ return self._iterations
+
+ def pages_to_load(self):
+ return self._pages_to_load if self._pages_to_load else len(self.urls())
+
+ def settle(self):
+ """
+ Pauses for the settle time.
+ """
+ time.sleep(self._settleWaitTime)
+
+ def setUp(self):
+ MarionetteTestCase.setUp(self)
+
+ self.logger = mozlog.structured.structuredlog.get_default_logger()
+ self.marionette.set_context("chrome")
+ self._resultsDir = self.testvars["resultsDir"]
+
+ self._binary = self.testvars["bin"]
+ self._run_local = self.testvars.get("run_local", False)
+
+ # Cleanup our files from previous runs.
+ for patt in (
+ "memory-report-*.json.gz",
+ "perfherder-data.json",
+ "dmd-*.json.gz",
+ ):
+ for f in glob.glob(os.path.join(self._resultsDir, patt)):
+ os.unlink(f)
+
+ # Optional testvars.
+ self._pages_to_load = self.testvars.get("entities", 0)
+ self._iterations = self.testvars.get("iterations", ITERATIONS)
+ self._perTabPause = self.testvars.get("perTabPause", PER_TAB_PAUSE)
+ self._settleWaitTime = self.testvars.get("settleWaitTime", SETTLE_WAIT_TIME)
+ self._maxTabs = self.testvars.get("maxTabs", MAX_TABS)
+ self._dmd = self.testvars.get("dmd", False)
+
+ self.logger.info(
+ "areweslimyet run by %d pages, %d iterations,"
+ " %d perTabPause, %d settleWaitTime"
+ % (
+ self._pages_to_load,
+ self._iterations,
+ self._perTabPause,
+ self._settleWaitTime,
+ )
+ )
+ self.reset_state()
+
+ def tearDown(self):
+ MarionetteTestCase.tearDown(self)
+
+ try:
+ self.logger.info("processing data in %s!" % self._resultsDir)
+ perf_blob = process_perf_data.create_perf_data(
+ self._resultsDir,
+ self.perf_suites(),
+ self.perf_checkpoints(),
+ self.perf_extra_opts(),
+ )
+ self.logger.info("PERFHERDER_DATA: %s" % json.dumps(perf_blob))
+
+ perf_file = os.path.join(self._resultsDir, "perfherder-data.json")
+ with open(perf_file, "w") as fp:
+ json.dump(perf_blob, fp, indent=2)
+ self.logger.info("Perfherder data written to %s" % perf_file)
+ except Exception:
+ raise
+ finally:
+ # Make sure we cleanup and upload any existing files even if there
+ # were errors processing the perf data.
+ if self._dmd:
+ self.cleanup_dmd()
+
+ # copy it to moz upload dir if set
+ if "MOZ_UPLOAD_DIR" in os.environ:
+ for file in os.listdir(self._resultsDir):
+ file = os.path.join(self._resultsDir, file)
+ if os.path.isfile(file):
+ shutil.copy2(file, os.environ["MOZ_UPLOAD_DIR"])
+
+ def cleanup_dmd(self):
+ """
+ Handles moving DMD reports from the temp dir to our resultsDir.
+ """
+ from dmd import fixStackTraces
+
+ # Move DMD files from temp dir to resultsDir.
+ tmpdir = tempfile.gettempdir()
+ tmp_files = os.listdir(tmpdir)
+ for f in fnmatch.filter(tmp_files, "dmd-*.json.gz"):
+ f = os.path.join(tmpdir, f)
+ # We don't fix stacks on Windows, even though we could, due to the
+ # tale of woe in bug 1626272.
+ if not sys.platform.startswith("win"):
+ self.logger.info("Fixing stacks for %s, this may take a while" % f)
+ isZipped = True
+ fixStackTraces(f, isZipped, gzip.open)
+ shutil.move(f, self._resultsDir)
+
+ # Also attempt to cleanup the unified memory reports.
+ for f in fnmatch.filter(tmp_files, "unified-memory-report-*.json.gz"):
+ try:
+ os.remove(f)
+ except OSError:
+ self.logger.info("Unable to remove %s" % f)
+
+ def reset_state(self):
+ self._pages_loaded = 0
+
+ # Close all tabs except one
+ for x in self.marionette.window_handles[1:]:
+ self.logger.info("closing window: %s" % x)
+ self.marionette.switch_to_window(x)
+ self.marionette.close()
+
+ self._tabs = self.marionette.window_handles
+ self.marionette.switch_to_window(self._tabs[0])
+
+ def do_memory_report(self, checkpointName, iteration, minimize=False):
+ """Creates a memory report for all processes and returns the
+ checkpoint.
+
+ This will block until all reports are retrieved or a timeout occurs.
+ Returns the checkpoint or None on error.
+
+ :param checkpointName: The name of the checkpoint.
+
+ :param minimize: If true, minimize memory before getting the report.
+ """
+ self.logger.info("starting checkpoint %s..." % checkpointName)
+
+ checkpoint_file = "memory-report-%s-%d.json.gz" % (checkpointName, iteration)
+ checkpoint_path = os.path.join(self._resultsDir, checkpoint_file)
+ # On Windows, replace / with the Windows directory
+ # separator \ and escape it to prevent it from being
+ # interpreted as an escape character.
+ if sys.platform.startswith("win"):
+ checkpoint_path = checkpoint_path.replace("\\", "\\\\").replace("/", "\\\\")
+
+ checkpoint_script = r"""
+ let [resolve] = arguments;
+ let dumper =
+ Cc["@mozilla.org/memory-info-dumper;1"].getService(
+ Ci.nsIMemoryInfoDumper);
+ dumper.dumpMemoryReportsToNamedFile(
+ "%s",
+ () => resolve("memory report done!"),
+ null,
+ /* anonymize */ false,
+ /* minimize memory usage */ %s);
+ """ % (
+ checkpoint_path,
+ "true" if minimize else "false",
+ )
+
+ checkpoint = None
+ try:
+ finished = self.marionette.execute_async_script(
+ checkpoint_script, script_timeout=60000
+ )
+ if finished:
+ checkpoint = checkpoint_path
+ except JavascriptException as e:
+ self.logger.error("Checkpoint JavaScript error: %s" % e)
+ except ScriptTimeoutException:
+ self.logger.error("Memory report timed out")
+ except Exception:
+ self.logger.error("Unexpected error: %s" % sys.exc_info()[0])
+ else:
+ self.logger.info("checkpoint created, stored in %s" % checkpoint_path)
+
+ # Now trigger a DMD report if requested.
+ if self._dmd:
+ self.do_dmd(checkpointName, iteration)
+
+ return checkpoint
+
+ def do_dmd(self, checkpointName, iteration):
+ """
+ Triggers DMD reports that are used to help identify sources of
+ 'heap-unclassified'.
+
+ NB: This will dump DMD reports to the temp dir. Unfortunately it also
+ dumps memory reports, but that's all we have to work with right now.
+ """
+ self.logger.info("Starting %s DMD reports..." % checkpointName)
+
+ ident = "%s-%d" % (checkpointName, iteration)
+
+ # TODO(ER): This actually takes a minimize argument. We could use that
+ # rather than have a separate `do_gc` function. Also it generates a
+ # memory report so we could combine this with `do_checkpoint`. The main
+ # issue would be moving everything out of the temp dir.
+ #
+ # Generated files have the form:
+ # dmd-<checkpoint>-<iteration>-pid.json.gz, ie:
+ # dmd-TabsOpenForceGC-0-10885.json.gz
+ #
+ # and for the memory report:
+ # unified-memory-report-<checkpoint>-<iteration>.json.gz
+ dmd_script = (
+ r"""
+ let dumper =
+ Cc["@mozilla.org/memory-info-dumper;1"].getService(
+ Ci.nsIMemoryInfoDumper);
+ dumper.dumpMemoryInfoToTempDir(
+ "%s",
+ /* anonymize = */ false,
+ /* minimize = */ false);
+ """
+ % ident
+ )
+
+ try:
+ # This is async and there's no callback so we use the existence
+ # of an incomplete memory report to check if it hasn't finished yet.
+ self.marionette.execute_script(dmd_script, script_timeout=60000)
+ tmpdir = tempfile.gettempdir()
+ prefix = "incomplete-unified-memory-report-%s-%d-*" % (
+ checkpointName,
+ iteration,
+ )
+ max_wait = 240
+ elapsed = 0
+ while fnmatch.filter(os.listdir(tmpdir), prefix) and elapsed < max_wait:
+ self.logger.info("Waiting for memory report to finish")
+ time.sleep(1)
+ elapsed += 1
+
+ incomplete = fnmatch.filter(os.listdir(tmpdir), prefix)
+ if incomplete:
+ # The memory reports never finished.
+ self.logger.error("Incomplete memory reports leftover.")
+ for f in incomplete:
+ os.remove(os.path.join(tmpdir, f))
+
+ except JavascriptException as e:
+ self.logger.error("DMD JavaScript error: %s" % e)
+ except ScriptTimeoutException:
+ self.logger.error("DMD timed out")
+ except Exception:
+ self.logger.error("Unexpected error: %s" % sys.exc_info()[0])
+ else:
+ self.logger.info("DMD started, prefixed with %s" % ident)
+
+ def open_and_focus(self):
+ """Opens the next URL in the list and focuses on the tab it is opened in.
+
+ A new tab will be opened if |_maxTabs| has not been exceeded, otherwise
+ the URL will be loaded in the next tab.
+ """
+ page_to_load = self.urls()[self._pages_loaded % len(self.urls())]
+ tabs_loaded = len(self._tabs)
+ open_tab_script = r"""
+ gBrowser.addTab("about:blank", {
+ inBackground: false,
+ triggeringPrincipal: Services.scriptSecurityManager.getSystemPrincipal(),
+ });
+ """
+
+ if tabs_loaded < self._maxTabs and tabs_loaded <= self._pages_loaded:
+ full_tab_list = self.marionette.window_handles
+
+ self.marionette.execute_script(open_tab_script, script_timeout=60000)
+
+ Wait(self.marionette).until(
+ lambda mn: len(mn.window_handles) == tabs_loaded + 1,
+ message="No new tab has been opened",
+ )
+
+ # NB: The tab list isn't sorted, so we do a set diff to determine
+ # which is the new tab
+ new_tab_list = self.marionette.window_handles
+ new_tabs = list(set(new_tab_list) - set(full_tab_list))
+
+ self._tabs.append(new_tabs[0])
+ tabs_loaded += 1
+
+ tab_idx = self._pages_loaded % self._maxTabs
+
+ tab = self._tabs[tab_idx]
+
+ # Tell marionette which tab we're on
+ # NB: As a work-around for an e10s marionette bug, only select the tab
+ # if we're really switching tabs.
+ if tabs_loaded > 1:
+ self.logger.info("switching to tab")
+ self.marionette.switch_to_window(tab)
+ self.logger.info("switched to tab")
+
+ with self.marionette.using_context("content"):
+ self.logger.info("loading %s" % page_to_load)
+ self.marionette.navigate(page_to_load)
+ self.logger.info("loaded!")
+
+ # The tab handle can change after actually loading content
+ # First build a set without the current tab
+ old_tabs = set(self._tabs)
+ old_tabs.remove(tab)
+ # Perform a set diff to get the (possibly) new handle
+ new_tabs = set(self.marionette.window_handles) - old_tabs
+ # Update the tab list at the current index to preserve the tab
+ # ordering
+ if new_tabs:
+ self._tabs[tab_idx] = list(new_tabs)[0]
+
+ # give the page time to settle
+ time.sleep(self._perTabPause)
+
+ self._pages_loaded += 1
+
+ def signal_user_active(self):
+ """Signal to the browser that the user is active.
+
+ Normally when being driven by marionette the browser thinks the
+ user is inactive the whole time because user activity is
+ detected by looking at key and mouse events.
+
+ This would be a problem for this test because user inactivity is
+ used to schedule some GCs (in particular shrinking GCs), so it
+ would make this unrepresentative of real use.
+
+ Instead we manually cause some inconsequential activity (a press
+ and release of the shift key) to make the browser think the user
+ is active. Then when we sleep to allow things to settle the
+ browser will see the user as becoming inactive and trigger
+ appropriate GCs, as would have happened in real use.
+ """
+ try:
+ action = self.marionette.actions.sequence("key", "keyboard_id")
+ action.key_down(Keys.SHIFT)
+ action.key_up(Keys.SHIFT)
+ action.perform()
+ finally:
+ self.marionette.actions.release()
+
+ def open_pages(self):
+ """
+ Opens all pages with our given configuration.
+ """
+ for _ in range(self.pages_to_load()):
+ self.open_and_focus()
+ self.signal_user_active()
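
AwsyTestCase is abstract: urls(), perf_suites() and perf_checkpoints() raise NotImplementedError, so concrete tests subclass it (test_memory_usage.py and test_base_memory_usage.py below do exactly this). A minimal hypothetical subclass, assuming it is executed through the marionette harness with the usual testvars, could look like this:

    # Hypothetical AwsyTestCase subclass; the URL list, suite and checkpoint
    # definitions are illustrative placeholders, not part of the tree.
    from awsy.awsy_test_case import AwsyTestCase


    class TestTwoPages(AwsyTestCase):
        def urls(self):
            return ["about:blank", "https://example.com/"]

        def perf_suites(self):
            return [{"name": "Explicit Memory", "node": "explicit/"}]

        def perf_checkpoints(self):
            # Matches the file name do_memory_report() writes for iteration 0.
            return [
                {"name": "After tabs open", "path": "memory-report-TabsOpen-0.json.gz"}
            ]

        def test_open_tabs(self):
            self.open_pages()
            self.settle()
            self.do_memory_report("TabsOpen", 0)
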
diff --git a/testing/awsy/awsy/parse_about_memory.py b/testing/awsy/awsy/parse_about_memory.py
new file mode 100644
index 0000000000..e98b0e4f9b
--- /dev/null
+++ b/testing/awsy/awsy/parse_about_memory.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+# Firefox about:memory log parser.
+
+import argparse
+import gzip
+import json
+from collections import defaultdict
+
+# This value comes from nsIMemoryReporter.idl.
+KIND_HEAP = 1
+
+
+def path_total(data, path):
+ """
+ Calculates the sum for the given data point path and its children. If
+ path does not end with a '/' then only the value for the exact path is
+ returned.
+ """
+ path_totals = defaultdict(int)
+
+ # Bookkeeping for calculating the heap-unclassified measurement.
+ explicit_heap = defaultdict(int)
+ heap_allocated = defaultdict(int)
+
+ discrete = not path.endswith("/")
+
+ def match(value):
+ """
+ Helper that performs either an explicit match or a prefix match
+ depending on the format of the path passed in.
+ """
+ if discrete:
+ return value == path
+ else:
+ return value.startswith(path)
+
+ def update_bookkeeping(report):
+ """
+ Adds the value to the heap total if this is an explicit entry that is a
+ heap measurement and updates the heap allocated value if necessary.
+ """
+ if report["kind"] == KIND_HEAP and report["path"].startswith("explicit/"):
+ explicit_heap[report["process"]] += report["amount"]
+ elif report["path"] == "heap-allocated":
+ heap_allocated[report["process"]] = report["amount"]
+
+ def heap_unclassified(process):
+ """
+ Calculates the heap-unclassified value for the given process. This is
+ simply the difference between all values reported as heap allocated
+ under the explicit/ tree and the value reported for heap-allocated by
+ the allocator.
+ """
+ # Memory reports should always include heap-allocated. If it's missing
+ # just assert.
+ assert process in heap_allocated
+
+ unclassified = heap_allocated[process] - explicit_heap[process]
+
+ # Make sure the value is sane. A misbehaving reporter could lead to
+ # negative values.
+ # This assertion fails on Beta while running TP6, in the Google Docs process.
+ # Disable this for now, but only on Beta. See bug 1735556.
+ # assert unclassified >= 0, "heap-unclassified was negative: %d" % unclassified
+
+ return unclassified
+
+ needs_bookkeeping = path in ("explicit/", "explicit/heap-unclassified")
+
+ # Process all the reports.
+ for report in data["reports"]:
+ if needs_bookkeeping:
+ update_bookkeeping(report)
+
+ if match(report["path"]):
+ path_totals[report["process"]] += report["amount"]
+
+ # Handle special processing for explicit and heap-unclassified.
+ if path == "explicit/":
+ # If 'explicit/' is requested we need to add the 'explicit/heap-unclassified'
+ # node that is generated by about:memory.
+ for k, v in explicit_heap.items():
+ path_totals[k] += heap_unclassified(k)
+ elif path == "explicit/heap-unclassified":
+ # If 'explicit/heap-unclassified' is requested we need to calculate the
+ # value as it's generated by about:memory, not explicitly reported.
+ for k, v in explicit_heap.items():
+ path_totals[k] = heap_unclassified(k)
+
+ return path_totals
+
+
+def calculate_memory_report_values(
+ memory_report_path, data_point_path, process_names=None
+):
+ """
+ Opens the given memory report file and calculates the value for the given
+ data point.
+
+ :param memory_report_path: Path to the memory report file to parse.
+ :param data_point_path: Path of the data point to calculate in the memory
+ report, ie: 'explicit/heap-unclassified'.
+ :param process_names: Names of processes to limit reports to, ie ['Main']
+ """
+ try:
+ with open(memory_report_path) as f:
+ data = json.load(f)
+ except ValueError:
+ # Check if the file is gzipped.
+ with gzip.open(memory_report_path, "rb") as f:
+ data = json.load(f)
+
+ totals = path_total(data, data_point_path)
+
+ # If process names are provided, restrict output to processes matching
+ # those names.
+ if process_names is not None:
+ for k in list(totals.keys()):
+ if not any([process_name in k for process_name in process_names]):
+ del totals[k]
+
+ return totals
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Extract data points from about:memory reports"
+ )
+ parser.add_argument("report", action="store", help="Path to a memory report file.")
+ parser.add_argument(
+ "prefix",
+ action="store",
+ help="Prefix of data point to measure. "
+ "If the prefix does not end in a '/' "
+ "then an exact match is made.",
+ )
+ parser.add_argument(
+ "--proc-filter",
+ action="store",
+ nargs="*",
+ default=None,
+ help="Process name filter. " "If not provided all processes will be included.",
+ )
+ parser.add_argument(
+ "--mebi",
+ action="store_true",
+ help="Output values as mebibytes (instead of bytes)" " to match about:memory.",
+ )
+
+ args = parser.parse_args()
+ totals = calculate_memory_report_values(args.report, args.prefix, args.proc_filter)
+
+ sorted_totals = sorted(totals.items(), key=lambda item: (-item[1], item[0]))
+ for k, v in sorted_totals:
+ if v:
+ print("{0}\t".format(k), end="")
+ print("")
+
+ bytes_per_mebibyte = 1024.0 * 1024.0
+ for k, v in sorted_totals:
+ if v:
+ if args.mebi:
+ print("{0:.2f} MiB".format(v / bytes_per_mebibyte), end="")
+ else:
+ print("{0} bytes".format(v), end="")
+ print("\t", end="")
+ print("")
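
parse_about_memory.py is also usable as a library. A small sketch that totals the parent-process "explicit/" value from one checkpoint file written by do_memory_report(); the file name is illustrative and testing/awsy is assumed to be on sys.path:

    from awsy.parse_about_memory import calculate_memory_report_values

    # Restrict the totals to processes whose name contains "Main" (the parent).
    totals = calculate_memory_report_values(
        "memory-report-TabsOpenSettled-0.json.gz", "explicit/", ["Main"]
    )
    print("explicit/ (Main): %.2f MiB" % (sum(totals.values()) / (1024.0 * 1024.0)))
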
diff --git a/testing/awsy/awsy/process_perf_data.py b/testing/awsy/awsy/process_perf_data.py
new file mode 100644
index 0000000000..32ec9655b0
--- /dev/null
+++ b/testing/awsy/awsy/process_perf_data.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import glob
+import json
+import math
+import os
+import sys
+
+import six
+
+AWSY_PATH = os.path.dirname(os.path.realpath(__file__))
+if AWSY_PATH not in sys.path:
+ sys.path.append(AWSY_PATH)
+
+import parse_about_memory
+
+# A description of each checkpoint and the root path to it.
+CHECKPOINTS = [
+ {"name": "Fresh start", "path": "memory-report-Start-0.json.gz"},
+ {"name": "Fresh start [+30s]", "path": "memory-report-StartSettled-0.json.gz"},
+ {"name": "After tabs open", "path": "memory-report-TabsOpen-4.json.gz"},
+ {
+ "name": "After tabs open [+30s]",
+ "path": "memory-report-TabsOpenSettled-4.json.gz",
+ },
+ {
+ "name": "After tabs open [+30s, forced GC]",
+ "path": "memory-report-TabsOpenForceGC-4.json.gz",
+ },
+ {
+ "name": "Tabs closed extra processes",
+ "path": "memory-report-TabsClosedExtraProcesses-4.json.gz",
+ },
+ {"name": "Tabs closed", "path": "memory-report-TabsClosed-4.json.gz"},
+ {"name": "Tabs closed [+30s]", "path": "memory-report-TabsClosedSettled-4.json.gz"},
+ {
+ "name": "Tabs closed [+30s, forced GC]",
+ "path": "memory-report-TabsClosedForceGC-4.json.gz",
+ },
+]
+
+# A description of each perfherder suite and the path to its values.
+PERF_SUITES = [
+ {"name": "Resident Memory", "node": "resident"},
+ {"name": "Explicit Memory", "node": "explicit/"},
+ {"name": "Heap Unclassified", "node": "explicit/heap-unclassified"},
+ {"name": "JS", "node": "js-main-runtime/"},
+ {"name": "Images", "node": "explicit/images/"},
+]
+
+
+def median(values):
+ sorted_ = sorted(values)
+ # pylint --py3k W1619
+ med = int(len(sorted_) / 2)
+
+ if len(sorted_) % 2:
+ return sorted_[med]
+ # pylint --py3k W1619
+ return (sorted_[med - 1] + sorted_[med]) / 2
+
+
+def update_checkpoint_paths(checkpoint_files, checkpoints):
+ """
+ Updates checkpoints with the memory report files found in data_path.
+ :param checkpoint_files: list of files in data_path
+ :param checkpoints: The checkpoints to update the path of.
+ """
+ target_path = [
+ ["Start-", 0],
+ ["StartSettled-", 0],
+ ["TabsOpen-", -1],
+ ["TabsOpenSettled-", -1],
+ ["TabsOpenForceGC-", -1],
+ ["TabsClosedExtraProcesses-", -1],
+ ["TabsClosed-", -1],
+ ["TabsClosedSettled-", -1],
+ ["TabsClosedForceGC-", -1],
+ ]
+ for i in range(len(target_path)):
+ (name, idx) = target_path[i]
+ paths = sorted([x for x in checkpoint_files if name in x])
+ if paths:
+ indices = [i for i, x in enumerate(checkpoints) if name in x["path"]]
+ if indices:
+ checkpoints[indices[0]]["path"] = paths[idx]
+ else:
+ print("found files but couldn't find {}".format(name))
+
+
+def create_suite(
+ name, node, data_path, checkpoints=CHECKPOINTS, alertThreshold=None, extra_opts=None
+):
+ """
+ Creates a suite suitable for adding to a perfherder blob. Calculates the
+ geometric mean of the checkpoint values and adds that to the suite as
+ well.
+
+ :param name: The name of the suite.
+ :param node: The path of the data node to extract data from.
+ :param data_path: The directory to retrieve data from.
+ :param checkpoints: Which checkpoints to include.
+ :param alertThreshold: The percentage of change that triggers an alert.
+ """
+ suite = {"name": name, "subtests": [], "lowerIsBetter": True, "unit": "bytes"}
+
+ if alertThreshold:
+ suite["alertThreshold"] = alertThreshold
+
+ opts = []
+ if extra_opts:
+ opts.extend(extra_opts)
+
+ if "DMD" in os.environ and os.environ["DMD"]:
+ opts.append("dmd")
+
+ if len(opts) > 0:
+ suite["extraOptions"] = opts
+
+ update_checkpoint_paths(
+ glob.glob(os.path.join(data_path, "memory-report*")), checkpoints
+ )
+
+ total = 0
+ for checkpoint in checkpoints:
+ memory_report_path = os.path.join(data_path, checkpoint["path"])
+
+ name_filter = checkpoint.get("name_filter", None)
+ if checkpoint.get("median"):
+ process = median
+ else:
+ process = sum
+
+ if node != "resident":
+ totals = parse_about_memory.calculate_memory_report_values(
+ memory_report_path, node, name_filter
+ )
+ value = process(totals.values())
+ else:
+ # For "resident" we really want RSS of the chrome ("Main") process
+ # and USS of the child processes. We'll still call it resident
+ # for simplicity (it's nice to be able to compare RSS of non-e10s
+ # with RSS + USS of e10s).
+ totals_rss = parse_about_memory.calculate_memory_report_values(
+ memory_report_path, node, ["Main"]
+ )
+ totals_uss = parse_about_memory.calculate_memory_report_values(
+ memory_report_path, "resident-unique"
+ )
+ value = list(totals_rss.values())[0] + sum(
+ [v for k, v in six.iteritems(totals_uss) if "Main" not in k]
+ )
+
+ subtest = {
+ "name": checkpoint["name"],
+ "value": value,
+ "lowerIsBetter": True,
+ "unit": "bytes",
+ }
+ suite["subtests"].append(subtest)
+ total += math.log(subtest["value"])
+
+ # Add the geometric mean. For more details on the calculation see:
+ # https://en.wikipedia.org/wiki/Geometric_mean#Relationship_with_arithmetic_mean_of_logarithms
+ # pylint --py3k W1619
+ suite["value"] = math.exp(total / len(checkpoints))
+
+ return suite
+
+
+def create_perf_data(
+ data_path, perf_suites=PERF_SUITES, checkpoints=CHECKPOINTS, extra_opts=None
+):
+ """
+ Builds up a performance data blob suitable for submitting to perfherder.
+ """
+ if ("GCOV_PREFIX" in os.environ) or ("JS_CODE_COVERAGE_OUTPUT_DIR" in os.environ):
+ print(
+ "Code coverage is being collected, performance data will not be gathered."
+ )
+ return {}
+
+ perf_blob = {"framework": {"name": "awsy"}, "suites": []}
+
+ for suite in perf_suites:
+ perf_blob["suites"].append(
+ create_suite(
+ suite["name"],
+ suite["node"],
+ data_path,
+ checkpoints,
+ suite.get("alertThreshold"),
+ extra_opts,
+ )
+ )
+
+ return perf_blob
+
+
+if __name__ == "__main__":
+ args = sys.argv[1:]
+ if not args:
+ print("Usage: process_perf_data.py data_path")
+ sys.exit(1)
+
+ # Process the given results directory.
+ data_path = args[0]
+ perf_blob = create_perf_data(data_path)
+ print("PERFHERDER_DATA: {}".format(json.dumps(perf_blob)))
+
+ sys.exit(0)
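
The suite value produced by create_suite() is the geometric mean of its subtest values, computed as the exponential of the arithmetic mean of logarithms. A small self-contained check with hypothetical byte counts (math.prod needs Python 3.8+):

    import math

    values = [150_000_000, 180_000_000, 210_000_000]  # hypothetical checkpoint values

    # What create_suite() computes: exp of the arithmetic mean of the logs ...
    via_logs = math.exp(sum(math.log(v) for v in values) / len(values))
    # ... which equals the n-th root of the product, i.e. the geometric mean.
    direct = math.prod(values) ** (1.0 / len(values))
    assert abs(via_logs - direct) < 1e-3
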
diff --git a/testing/awsy/awsy/test_base_memory_usage.py b/testing/awsy/awsy/test_base_memory_usage.py
new file mode 100644
index 0000000000..07d65f218f
--- /dev/null
+++ b/testing/awsy/awsy/test_base_memory_usage.py
@@ -0,0 +1,135 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import sys
+
+AWSY_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+if AWSY_PATH not in sys.path:
+ sys.path.append(AWSY_PATH)
+
+from awsy.awsy_test_case import AwsyTestCase
+
+# A description of each checkpoint and the root path to it.
+CHECKPOINTS = [
+ {
+ "name": "After tabs open [+30s, forced GC]",
+ "path": "memory-report-TabsOpenForceGC-4.json.gz",
+ "name_filter": ["web ", "Web Content"], # We only want the content process
+ "median": True, # We want the median from all content processes
+ },
+]
+
+# A description of each perfherder suite and the path to its values.
+PERF_SUITES = [
+ {"name": "Base Content Resident Unique Memory", "node": "resident-unique"},
+ {"name": "Base Content Heap Unclassified", "node": "explicit/heap-unclassified"},
+ {"name": "Base Content JS", "node": "js-main-runtime/", "alertThreshold": 0.25},
+ {"name": "Base Content Explicit", "node": "explicit/"},
+]
+
+
+class TestMemoryUsage(AwsyTestCase):
+ """
+ Provides a base case test that just loads about:memory and reports the
+ memory usage of a single content process.
+ """
+
+ def urls(self):
+ return self._urls
+
+ def perf_suites(self):
+ return PERF_SUITES
+
+ def perf_checkpoints(self):
+ return CHECKPOINTS
+
+ def perf_extra_opts(self):
+ return self._extra_opts
+
+ def setUp(self):
+ AwsyTestCase.setUp(self)
+ self.logger.info("setting up!")
+
+ # Override AwsyTestCase value, this is always going to be 1 iteration.
+ self._iterations = 1
+
+ # Override "entities" from our configuration.
+ #
+ # We aim to load a number of about:blank pages exactly matching the
+ # number of content processes we can have. After this we should no
+ # longer have a preallocated content process (although to be sure we
+ # explicitly drop it at the end of the test).
+ process_count = self.marionette.get_pref("dom.ipc.processCount")
+ self._pages_to_load = process_count
+ self._urls = ["about:blank"] * process_count
+
+ if self.marionette.get_pref("fission.autostart"):
+ self._extra_opts = ["fission"]
+ else:
+ self._extra_opts = None
+
+ self.logger.info(
+ "areweslimyet run by %d pages, "
+ "%d iterations, %d perTabPause, %d settleWaitTime, "
+ "%d content processes"
+ % (
+ self._pages_to_load,
+ self._iterations,
+ self._perTabPause,
+ self._settleWaitTime,
+ process_count,
+ )
+ )
+ self.logger.info("done setting up!")
+
+ def tearDown(self):
+ self.logger.info("tearing down!")
+ AwsyTestCase.tearDown(self)
+ self.logger.info("done tearing down!")
+
+ def set_preallocated_process_enabled_state(self, enabled):
+ """Sets the pref that controls whether we have a preallocated content
+ process to the given value.
+
+ This will cause the preallocated process to be destroyed or created
+ as appropriate.
+ """
+ if enabled:
+ self.logger.info("re-enabling preallocated process")
+ else:
+ self.logger.info("disabling preallocated process")
+ self.marionette.set_pref("dom.ipc.processPrelaunch.enabled", enabled)
+
+ def test_open_tabs(self):
+ """Marionette test entry that returns an array of checkpoint arrays.
+
+ This will generate a set of checkpoints for each iteration requested.
+ Upon successful completion the results will be stored in
+ |self.testvars["results"]| and accessible to the test runner via the
+ |testvars| object it passed in.
+ """
+ # setup the results array
+ results = [[] for _ in range(self.iterations())]
+
+ def create_checkpoint(name, iteration, minimize=False):
+ checkpoint = self.do_memory_report(name, iteration, minimize)
+ self.assertIsNotNone(checkpoint, "Checkpoint was recorded")
+ results[iteration].append(checkpoint)
+
+ # As long as we force the number of iterations to 1 in setUp() above,
+ # we don't need to loop over this work.
+ assert self._iterations == 1
+ self.open_pages()
+ self.set_preallocated_process_enabled_state(False)
+ self.settle()
+ self.settle()
+ create_checkpoint("TabsOpenForceGC", 0, minimize=True)
+ self.set_preallocated_process_enabled_state(True)
+ # (If we wanted to do something after the preallocated process has been
+ # recreated, we should call self.settle() again to wait for it.)
+
+ # TODO(ER): Temporary hack until bug 1121139 lands
+ self.logger.info("setting results")
+ self.testvars["results"] = results
diff --git a/testing/awsy/awsy/test_memory_usage.py b/testing/awsy/awsy/test_memory_usage.py
new file mode 100644
index 0000000000..e3daaf08dc
--- /dev/null
+++ b/testing/awsy/awsy/test_memory_usage.py
@@ -0,0 +1,238 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import sys
+
+import mozinfo
+import yaml
+from marionette_driver.errors import JavascriptException, ScriptTimeoutException
+from mozproxy import get_playback
+
+AWSY_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+if AWSY_PATH not in sys.path:
+ sys.path.append(AWSY_PATH)
+
+from awsy import process_perf_data, webservers
+from awsy.awsy_test_case import AwsyTestCase
+
+
+class TestMemoryUsage(AwsyTestCase):
+ """Provides a test that collects memory usage at various checkpoints:
+ - "Start" - Just after startup
+ - "StartSettled" - After an additional wait time
+ - "TabsOpen" - After opening all provided URLs
+ - "TabsOpenSettled" - After an additional wait time
+ - "TabsOpenForceGC" - After forcibly invoking garbage collection
+ - "TabsClosed" - After closing all tabs
+ - "TabsClosedSettled" - After an additional wait time
+ - "TabsClosedForceGC" - After forcibly invoking garbage collection
+ """
+
+ def urls(self):
+ return self._urls
+
+ def perf_suites(self):
+ return process_perf_data.PERF_SUITES
+
+ def perf_checkpoints(self):
+ return process_perf_data.CHECKPOINTS
+
+ def perf_extra_opts(self):
+ return self._extra_opts
+
+ def setupTp5(self):
+ urls = None
+ default_tp5n_manifest = os.path.join(
+ self._webroot_dir, "page_load_test", "tp5n", "tp5n.manifest"
+ )
+ tp5n_manifest = self.testvars.get("pageManifest", default_tp5n_manifest)
+ with open(tp5n_manifest) as fp:
+ urls = fp.readlines()
+ # pylint --py3k: W1636
+ urls = list(map(lambda x: x.replace("localhost", "localhost:{}"), urls))
+
+ # We haven't set self._urls yet, so this value might be zero if
+ # 'entities' wasn't specified.
+ to_load = self.pages_to_load()
+ if not to_load:
+ to_load = len(urls)
+ self._webservers = webservers.WebServers(
+ "localhost", 8001, self._webroot_dir, to_load
+ )
+ self._webservers.start()
+ for url, server in zip(urls, self._webservers.servers):
+ self._urls.append(url.strip().format(server.port))
+
+ def setupTp6(self):
+ # tp5n stores its manifest in the zip file that gets extracted; tp6
+ # doesn't, so we just keep one in our project dir for now.
+ default_tp6_pages_manifest = os.path.join(AWSY_PATH, "conf", "tp6-pages.yml")
+ tp6_pages_manifest = self.testvars.get(
+ "pageManifest", default_tp6_pages_manifest
+ )
+ urls = []
+ with open(tp6_pages_manifest) as f:
+ d = yaml.safe_load(f)
+ for r in d:
+ url = r["url"]
+ if isinstance(url, list):
+ urls.extend(url)
+ else:
+ urls.append(url)
+
+ self._urls = urls
+
+ # Indicate that we're using tp6 in the perf data.
+ self._extra_opts = ["tp6"]
+
+ if self.marionette.get_pref("fission.autostart"):
+ self._extra_opts.append("fission")
+
+ # Now we setup the mitm proxy with our tp6 pageset.
+ tp6_pageset_manifest = os.path.join(AWSY_PATH, "tp6-pageset.manifest")
+ config = {
+ "playback_tool": "mitmproxy",
+ "playback_version": "5.1.1",
+ "playback_files": [tp6_pageset_manifest],
+ "platform": mozinfo.os,
+ "obj_path": self._webroot_dir,
+ "binary": self._binary,
+ "run_local": self._run_local,
+ "app": "firefox",
+ "host": "127.0.0.1",
+ "ignore_mitmdump_exit_failure": True,
+ }
+
+ self._playback = get_playback(config)
+ self._playback.start()
+
+ # We need to reload after the mitmproxy cert is installed
+ self.marionette.restart(in_app=False, clean=False)
+
+ # Setup WebDriver capabilities that we need
+ self.marionette.delete_session()
+ caps = {
+ "unhandledPromptBehavior": "dismiss", # Ignore page navigation warnings
+ }
+ self.marionette.start_session(caps)
+ self.marionette.set_context("chrome")
+
+ def setUp(self):
+ AwsyTestCase.setUp(self)
+ self.logger.info("setting up")
+ self._webroot_dir = self.testvars["webRootDir"]
+ self._urls = []
+ self._extra_opts = None
+
+ if self.testvars.get("tp6", False):
+ self.setupTp6()
+ else:
+ self.setupTp5()
+
+ self.logger.info(
+ "areweslimyet run by %d pages, %d iterations,"
+ " %d perTabPause, %d settleWaitTime"
+ % (
+ self._pages_to_load,
+ self._iterations,
+ self._perTabPause,
+ self._settleWaitTime,
+ )
+ )
+ self.logger.info("done setting up!")
+
+ def tearDown(self):
+ self.logger.info("tearing down!")
+
+ self.logger.info("tearing down webservers!")
+
+ if self.testvars.get("tp6", False):
+ self._playback.stop()
+ else:
+ self._webservers.stop()
+
+ AwsyTestCase.tearDown(self)
+
+ self.logger.info("done tearing down!")
+
+ def clear_preloaded_browser(self):
+ """
+ Clears out the preloaded browser.
+ """
+ self.logger.info("closing preloaded browser")
+ script = """
+ if (window.NewTabPagePreloading) {
+ return NewTabPagePreloading.removePreloadedBrowser(window);
+ }
+ return "NewTabPagePreloading.removePreloadedBrowser not available";
+ """
+ try:
+ result = self.marionette.execute_script(script, script_timeout=180000)
+ except JavascriptException as e:
+ self.logger.error("removePreloadedBrowser() JavaScript error: %s" % e)
+ except ScriptTimeoutException:
+ self.logger.error("removePreloadedBrowser() timed out")
+ except Exception:
+ self.logger.error(
+ "removePreloadedBrowser() Unexpected error: %s" % sys.exc_info()[0]
+ )
+ else:
+ if result:
+ self.logger.info(result)
+
+ def test_open_tabs(self):
+ """Marionette test entry that returns an array of checkpoint arrays.
+
+ This will generate a set of checkpoints for each iteration requested.
+ Upon successful completion the results will be stored in
+ |self.testvars["results"]| and accessible to the test runner via the
+ |testvars| object it passed in.
+ """
+ # setup the results array
+ results = [[] for _ in range(self.iterations())]
+
+ def create_checkpoint(name, iteration, minimize=False):
+ checkpoint = self.do_memory_report(name, iteration, minimize)
+ self.assertIsNotNone(checkpoint, "Checkpoint was recorded")
+ results[iteration].append(checkpoint)
+
+ # The first iteration gets Start and StartSettled entries before
+ # opening tabs
+ create_checkpoint("Start", 0)
+ self.settle()
+ create_checkpoint("StartSettled", 0)
+
+ for itr in range(self.iterations()):
+ self.open_pages()
+
+ create_checkpoint("TabsOpen", itr)
+ self.settle()
+ create_checkpoint("TabsOpenSettled", itr)
+ create_checkpoint("TabsOpenForceGC", itr, minimize=True)
+
+ # Close all tabs
+ self.reset_state()
+
+ with self.marionette.using_context("content"):
+ self.logger.info("navigating to about:blank")
+ self.marionette.navigate("about:blank")
+ self.logger.info("navigated to about:blank")
+ self.signal_user_active()
+
+ # Create checkpoint that may contain retained processes that will
+ # be reused.
+ create_checkpoint("TabsClosedExtraProcesses", itr)
+
+ # Clear out the retained processes and measure again.
+ self.clear_preloaded_browser()
+
+ create_checkpoint("TabsClosed", itr)
+ self.settle()
+ create_checkpoint("TabsClosedSettled", itr)
+ create_checkpoint("TabsClosedForceGC", itr, minimize=True)
+
+ # TODO(ER): Temporary hack until bug 1121139 lands
+ self.logger.info("setting results")
+ self.testvars["results"] = results
diff --git a/testing/awsy/awsy/webservers.py b/testing/awsy/awsy/webservers.py
new file mode 100644
index 0000000000..ca278db706
--- /dev/null
+++ b/testing/awsy/awsy/webservers.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+# mozhttpd web server.
+
+import argparse
+import os
+import socket
+
+import mozhttpd
+
+# directory of this file
+here = os.path.dirname(os.path.realpath(__file__))
+
+
+class WebServers(object):
+ def __init__(self, host, port, docroot, count):
+ self.host = host
+ self.port = port
+ self.docroot = docroot
+ self.count = count
+ self.servers = []
+
+ def start(self):
+ self.stop()
+ self.servers = []
+ port = self.port
+ num_errors = 0
+ while len(self.servers) < self.count:
+ self.servers.append(
+ mozhttpd.MozHttpd(host=self.host, port=port, docroot=self.docroot)
+ )
+ try:
+ self.servers[-1].start()
+ except socket.error as error:
+ if isinstance(error, socket.error):
+ if error.errno == 98:
+ print("port {} is in use.".format(port))
+ else:
+ print("port {} error {}".format(port, error))
+ elif isinstance(error, str):
+ print("port {} error {}".format(port, error))
+ self.servers.pop()
+ num_errors += 1
+ except Exception as error:
+ print("port {} error {}".format(port, error))
+ self.servers.pop()
+ num_errors += 1
+
+ if num_errors > 15:
+ raise Exception("Too many errors in webservers.py")
+ port += 1
+
+ def stop(self):
+ while len(self.servers) > 0:
+ server = self.servers.pop()
+ server.stop()
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Start mozhttpd servers for use by areweslimyet."
+ )
+
+ parser.add_argument(
+ "--port",
+ type=int,
+ default=8001,
+ help="Starting port. Defaults to 8001. Web servers will be "
+ "created for each port from the starting port to starting port "
+ "+ count - 1.",
+ )
+ parser.add_argument(
+ "--count",
+ type=int,
+ default=100,
+ help="Number of web servers to start. Defaults to 100.",
+ )
+ parser.add_argument(
+ "--host",
+ type=str,
+ default="localhost",
+ help="Name of webserver host. Defaults to localhost.",
+ )
+
+ args = parser.parse_args()
+ web_servers = WebServers(args.host, args.port, "%s/html" % here, args.count)
+ web_servers.start()
+
+
+if __name__ == "__main__":
+ main()
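
WebServers can also be driven directly, much as TestMemoryUsage.setupTp5() does. A short sketch with a hypothetical docroot, assuming testing/awsy is on sys.path:

    from awsy.webservers import WebServers

    # Start 5 mozhttpd instances beginning at port 8001, skipping ports in use.
    servers = WebServers("localhost", 8001, "/path/to/webroot", 5)
    servers.start()
    try:
        for server in servers.servers:
            print("serving docroot on port %d" % server.port)
    finally:
        servers.stop()
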
diff --git a/testing/awsy/conf/base-prefs.json b/testing/awsy/conf/base-prefs.json
new file mode 100644
index 0000000000..365dcaa410
--- /dev/null
+++ b/testing/awsy/conf/base-prefs.json
@@ -0,0 +1,15 @@
+{
+ "browser.tabs.remote.separatePrivilegedContentProcess": true,
+ "fission.bfcacheInParent": false,
+ "javascript.options.asyncstack": false,
+ "image.mem.surfacecache.min_expiration_ms": 10000,
+ "network.proxy.socks": "localhost",
+ "network.proxy.socks_port": 90000,
+ "network.proxy.socks_remote_dns": true,
+ "network.proxy.type": 1,
+ "plugin.disable": true,
+ "startup.homepage_override_url": "",
+ "startup.homepage_welcome_url": "",
+ "browser.startup.homepage": "about:blank",
+ "browser.newtabpage.enabled": false
+}
diff --git a/testing/awsy/conf/base-testvars.json b/testing/awsy/conf/base-testvars.json
new file mode 100644
index 0000000000..de6c6f9be8
--- /dev/null
+++ b/testing/awsy/conf/base-testvars.json
@@ -0,0 +1,5 @@
+{
+ "iterations": 1,
+ "perTabPause": 10,
+ "settleWaitTime": 60
+}
diff --git a/testing/awsy/conf/prefs.json b/testing/awsy/conf/prefs.json
new file mode 100644
index 0000000000..7d92790300
--- /dev/null
+++ b/testing/awsy/conf/prefs.json
@@ -0,0 +1,13 @@
+{
+ "browser.newtabpage.enabled": true,
+ "browser.tabs.remote.separatePrivilegedContentProcess": true,
+ "javascript.options.asyncstack": false,
+ "image.mem.surfacecache.min_expiration_ms": 10000,
+ "network.proxy.socks": "localhost",
+ "network.proxy.socks_port": 90000,
+ "network.proxy.socks_remote_dns": true,
+ "network.proxy.type": 1,
+ "plugin.disable": true,
+ "startup.homepage_override_url": "",
+ "startup.homepage_welcome_url": ""
+}
diff --git a/testing/awsy/conf/testvars.json b/testing/awsy/conf/testvars.json
new file mode 100644
index 0000000000..454f583340
--- /dev/null
+++ b/testing/awsy/conf/testvars.json
@@ -0,0 +1,6 @@
+{
+ "entities": 100,
+ "iterations": 3,
+ "perTabPause": 10,
+ "settleWaitTime": 30
+}
diff --git a/testing/awsy/conf/tp6-pages.yml b/testing/awsy/conf/tp6-pages.yml
new file mode 100644
index 0000000000..8a20689e86
--- /dev/null
+++ b/testing/awsy/conf/tp6-pages.yml
@@ -0,0 +1,32 @@
+- url: https://www.amazon.com/s?k=laptop&ref=nb_sb_noss_1
+- url: https://www.bing.com/search?q=barack+obama
+- url: https://www.buzzfeed.com/
+- url: https://www.cnn.com/2021/03/22/weather/climate-change-warm-waters-lake-michigan/index.html
+- url: https://www.ebay.com/
+- url: http://www.espn.com/nba/story/_/page/allstarweekend25788027/the-comparison-lebron-james-michael-jordan-their-own-words
+- url: https://expedia.com/Hotel-Search?destination=New+York%2C+New+York&latLong=40.756680%2C-73.986470&regionId=178293&startDate=&endDate=&rooms=1&_xpid=11905%7C1&adults=2
+- url: https://www.facebook.com
+- url: https://www.fandom.com/articles/fallout-76-will-live-and-die-on-the-creativity-of-its-playerbase
+- url: https://docs.google.com/document/d/1US-07msg12slQtI_xchzYxcKlTs6Fp7WqIc6W5GK5M8/edit?usp=sharing
+- url: https://mail.google.com/
+- url: https://www.google.com/search?hl=en&q=barack+obama&cad=h
+- url: https://docs.google.com/presentation/d/1Ici0ceWwpFvmIb3EmKeWSq_vAQdmmdFcWqaiLqUkJng/edit?usp=sharing
+- url: https://www.imdb.com/title/tt0084967/?ref_=nv_sr_2
+- url: https://imgur.com/gallery/m5tYJL6
+- url: https://www.instagram.com/
+- url: https://www.linkedin.com/in/thommy-harris-hk-385723106/
+- url: https://www.microsoft.com/en-us/
+- url: https://www.netflix.com/title/80117263
+- url: https://www.nytimes.com/2020/02/19/opinion/surprise-medical-bill.html
+- url: https://office.live.com/start/Word.aspx?omkt=en-US
+- url: https://outlook.live.com/mail/inbox
+- url: https://www.paypal.com/myaccount/summary/
+- url: https://pinterest.com/
+- url: https://www.reddit.com/r/technology/comments/9sqwyh/we_posed_as_100_senators_to_run_ads_on_facebook/
+- url: https://www.tumblr.com/dashboard
+- url: https://www.twitch.tv/videos/326804629
+- url: https://twitter.com/BarackObama
+- url: https://marvel.fandom.com/wiki/Black_Panther
+- url: https://en.wikipedia.org/wiki/Barack_Obama
+- url: https://mail.yahoo.com/
+- url: https://www.youtube.com
diff --git a/testing/awsy/conf/tp6-prefs.json b/testing/awsy/conf/tp6-prefs.json
new file mode 100644
index 0000000000..7b260f565f
--- /dev/null
+++ b/testing/awsy/conf/tp6-prefs.json
@@ -0,0 +1,15 @@
+{
+ "browser.newtabpage.enabled": true,
+ "browser.tabs.remote.separatePrivilegedContentProcess": true,
+ "javascript.options.asyncstack": false,
+ "image.mem.surfacecache.min_expiration_ms": 10000,
+ "network.proxy.http": "localhost",
+ "network.proxy.http_port": 8080,
+ "network.proxy.ssl": "localhost",
+ "network.proxy.ssl_port": 8080,
+ "network.proxy.no_proxies_on": "localhost",
+ "network.proxy.type": 1,
+ "plugin.disable": true,
+ "startup.homepage_override_url": "",
+ "startup.homepage_welcome_url": ""
+}
diff --git a/testing/awsy/conf/tp6-testvars.json b/testing/awsy/conf/tp6-testvars.json
new file mode 100644
index 0000000000..218f5aefea
--- /dev/null
+++ b/testing/awsy/conf/tp6-testvars.json
@@ -0,0 +1,5 @@
+{
+ "iterations": 1,
+ "perTabPause": 15,
+ "settleWaitTime": 30
+}
diff --git a/testing/awsy/mach_commands.py b/testing/awsy/mach_commands.py
new file mode 100644
index 0000000000..bfd8eaf216
--- /dev/null
+++ b/testing/awsy/mach_commands.py
@@ -0,0 +1,343 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import logging
+import os
+import sys
+
+import mozinfo
+import six
+from mach.decorators import Command, CommandArgument, CommandArgumentGroup
+from mozbuild.base import BinaryNotFoundException
+from mozbuild.base import MachCommandConditions as conditions
+
+
+def setup_awsy_argument_parser():
+ from marionette_harness.runtests import MarionetteArguments
+ from mozlog.structured import commandline
+
+ parser = MarionetteArguments()
+ commandline.add_logging_group(parser)
+
+ return parser
+
+
+from awsy import ITERATIONS, MAX_TABS, PER_TAB_PAUSE, SETTLE_WAIT_TIME
+
+
+def run_awsy(command_context, tests, binary=None, **kwargs):
+ import json
+
+ from marionette_harness.runtests import MarionetteHarness, MarionetteTestRunner
+ from mozlog.structured import commandline
+
+ parser = setup_awsy_argument_parser()
+
+ awsy_source_dir = os.path.join(command_context.topsrcdir, "testing", "awsy")
+ if not tests:
+ if kwargs["base"]:
+ filename = "test_base_memory_usage.py"
+ else:
+ filename = "test_memory_usage.py"
+ tests = [os.path.join(awsy_source_dir, "awsy", filename)]
+
+ args = argparse.Namespace(tests=tests)
+
+ args.binary = binary
+
+ if kwargs["quick"]:
+ kwargs["entities"] = 3
+ kwargs["iterations"] = 1
+ kwargs["perTabPause"] = 1
+ kwargs["settleWaitTime"] = 1
+
+ runtime_testvars = {}
+ for arg in (
+ "webRootDir",
+ "pageManifest",
+ "resultsDir",
+ "entities",
+ "iterations",
+ "perTabPause",
+ "settleWaitTime",
+ "maxTabs",
+ "dmd",
+ "tp6",
+ ):
+ if arg in kwargs and kwargs[arg] is not None:
+ runtime_testvars[arg] = kwargs[arg]
+
+ if "webRootDir" not in runtime_testvars:
+ awsy_tests_dir = os.path.join(command_context.topobjdir, "_tests", "awsy")
+ web_root_dir = os.path.join(awsy_tests_dir, "html")
+ runtime_testvars["webRootDir"] = web_root_dir
+ else:
+ web_root_dir = runtime_testvars["webRootDir"]
+ awsy_tests_dir = os.path.dirname(web_root_dir)
+
+ if "resultsDir" not in runtime_testvars:
+ runtime_testvars["resultsDir"] = os.path.join(awsy_tests_dir, "results")
+
+ runtime_testvars["bin"] = binary
+ runtime_testvars["run_local"] = True
+
+ page_load_test_dir = os.path.join(web_root_dir, "page_load_test")
+ if not os.path.isdir(page_load_test_dir):
+ os.makedirs(page_load_test_dir)
+
+ if not os.path.isdir(runtime_testvars["resultsDir"]):
+ os.makedirs(runtime_testvars["resultsDir"])
+
+ runtime_testvars_path = os.path.join(awsy_tests_dir, "runtime-testvars.json")
+ if kwargs["testvars"]:
+ kwargs["testvars"].append(runtime_testvars_path)
+ else:
+ kwargs["testvars"] = [runtime_testvars_path]
+
+ runtime_testvars_file = open(runtime_testvars_path, "wb" if six.PY2 else "w")
+ runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2))
+ runtime_testvars_file.close()
+
+ manifest_file = os.path.join(awsy_source_dir, "tp5n-pageset.manifest")
+ tooltool_args = {
+ "args": [
+ sys.executable,
+ os.path.join(command_context.topsrcdir, "mach"),
+ "artifact",
+ "toolchain",
+ "-v",
+ "--tooltool-manifest=%s" % manifest_file,
+ "--cache-dir=%s"
+ % os.path.join(command_context.topsrcdir, "tooltool-cache"),
+ ]
+ }
+ command_context.run_process(cwd=page_load_test_dir, **tooltool_args)
+ tp5nzip = os.path.join(page_load_test_dir, "tp5n.zip")
+ tp5nmanifest = os.path.join(page_load_test_dir, "tp5n", "tp5n.manifest")
+ if not os.path.exists(tp5nmanifest):
+ unzip_args = {"args": ["unzip", "-q", "-o", tp5nzip, "-d", page_load_test_dir]}
+ try:
+ command_context.run_process(**unzip_args)
+ except Exception as exc:
+ troubleshoot = ""
+ if mozinfo.os == "win":
+ troubleshoot = (
+ " Try using --web-root to specify a "
+ "directory closer to the drive root."
+ )
+
+ command_context.log(
+ logging.ERROR,
+ "awsy",
+ {"directory": page_load_test_dir, "exception": exc},
+ "Failed to unzip `tp5n.zip` into "
+ "`{directory}` with `{exception}`." + troubleshoot,
+ )
+ raise exc
+
+ # If '--preferences' was not specified supply our default set.
+ if not kwargs["prefs_files"]:
+ kwargs["prefs_files"] = [os.path.join(awsy_source_dir, "conf", "prefs.json")]
+
+ # Setup DMD env vars if necessary.
+ if kwargs["dmd"]:
+ bin_dir = os.path.dirname(binary)
+
+ if "DMD" not in os.environ:
+ os.environ["DMD"] = "1"
+
+ # Work around a startup crash with DMD on windows
+ if mozinfo.os == "win":
+ kwargs["pref"] = "security.sandbox.content.level:0"
+ command_context.log(
+ logging.WARNING,
+ "awsy",
+ {},
+ "Forcing 'security.sandbox.content.level' = 0 because DMD is enabled.",
+ )
+ elif mozinfo.os == "mac":
+ # On mac binary is in MacOS and dmd.py is in Resources, ie:
+ # Name.app/Contents/MacOS/libdmd.dylib
+ # Name.app/Contents/Resources/dmd.py
+ bin_dir = os.path.join(bin_dir, "../Resources/")
+
+ # Also add the bin dir to the python path so we can use dmd.py
+ if bin_dir not in sys.path:
+ sys.path.append(bin_dir)
+
+ for k, v in six.iteritems(kwargs):
+ setattr(args, k, v)
+
+ parser.verify_usage(args)
+
+ args.logger = commandline.setup_logging(
+ "Are We Slim Yet Tests", args, {"mach": sys.stdout}
+ )
+ failed = MarionetteHarness(MarionetteTestRunner, args=vars(args)).run()
+ if failed > 0:
+ return 1
+ else:
+ return 0
+
+
+@Command(
+ "awsy-test",
+ category="testing",
+ description="Run Are We Slim Yet (AWSY) memory usage testing using marionette.",
+ parser=setup_awsy_argument_parser,
+)
+@CommandArgumentGroup("AWSY")
+@CommandArgument(
+ "--web-root",
+ group="AWSY",
+ action="store",
+ type=str,
+ dest="webRootDir",
+ help="Path to web server root directory. If not specified, "
+ "defaults to topobjdir/_tests/awsy/html.",
+)
+@CommandArgument(
+ "--page-manifest",
+ group="AWSY",
+ action="store",
+ type=str,
+ dest="pageManifest",
+ help="Path to page manifest text file containing a list "
+ "of urls to test. The urls must be served from localhost. If not "
+ "specified, defaults to page_load_test/tp5n/tp5n.manifest under "
+ "the web root.",
+)
+@CommandArgument(
+ "--results",
+ group="AWSY",
+ action="store",
+ type=str,
+ dest="resultsDir",
+ help="Path to results directory. If not specified, defaults "
+ "to the parent directory of the web root.",
+)
+@CommandArgument(
+ "--quick",
+ group="AWSY",
+ action="store_true",
+ dest="quick",
+ default=False,
+ help="Set --entities=3, --iterations=1, --per-tab-pause=1, "
+ "--settle-wait-time=1 for a quick test. Overrides any explicit "
+ "argument settings.",
+)
+@CommandArgument(
+ "--entities",
+ group="AWSY",
+ action="store",
+ type=int,
+ dest="entities",
+ help="Number of urls to load. Defaults to the total number of urls.",
+)
+@CommandArgument(
+ "--max-tabs",
+ group="AWSY",
+ action="store",
+ type=int,
+ dest="maxTabs",
+ help="Maximum number of tabs to open. Defaults to %s." % MAX_TABS,
+)
+@CommandArgument(
+ "--iterations",
+ group="AWSY",
+ action="store",
+ type=int,
+ dest="iterations",
+ help="Number of times to run through the test suite. "
+ "Defaults to %s." % ITERATIONS,
+)
+@CommandArgument(
+ "--per-tab-pause",
+ group="AWSY",
+ action="store",
+ type=int,
+ dest="perTabPause",
+ help="Seconds to wait in between opening tabs. Defaults to %s." % PER_TAB_PAUSE,
+)
+@CommandArgument(
+ "--settle-wait-time",
+ group="AWSY",
+ action="store",
+ type=int,
+ dest="settleWaitTime",
+ help="Seconds to wait for things to settle down. "
+ "Defaults to %s." % SETTLE_WAIT_TIME,
+)
+@CommandArgument(
+ "--dmd",
+ group="AWSY",
+ action="store_true",
+ dest="dmd",
+ default=False,
+ help="Enable DMD during testing. Requires a DMD-enabled build.",
+)
+@CommandArgument(
+ "--tp6",
+ group="AWSY",
+ action="store_true",
+ dest="tp6",
+ default=False,
+ help="Use the tp6 pageset during testing.",
+)
+@CommandArgument(
+ "--base",
+ group="AWSY",
+ action="store_true",
+ dest="base",
+ default=False,
+ help="Run base memory usage tests.",
+)
+def run_awsy_test(command_context, tests, **kwargs):
+ """mach awsy-test runs the in-tree version of the Are We Slim Yet
+ (AWSY) tests.
+
+ awsy-test is implemented as a marionette test and marionette
+ test arguments also apply although they are not necessary
+ since reasonable defaults will be chosen.
+
+ The AWSY specific arguments can be found in the Command
+ Arguments for AWSY section below.
+
+ awsy-test will automatically download the tp5n.zip talos
+ pageset from tooltool and install it under
+ topobjdir/_tests/awsy/html. You can specify your own page set
+ by specifying --web-root and --page-manifest.
+
+ The results of the test will be placed in the results
+ directory specified by the --results argument.
+
+ On Windows, you may experience problems due to path length
+ errors when extracting the tp5n.zip file containing the
+ test pages or when attempting to write checkpoints to the
+ results directory. In that case, you should specify both
+ the --web-root and --results arguments pointing to a location
+ with a short path. For example:
+
+ --web-root=c:\\\\tmp\\\\html --results=c:\\\\tmp\\\\results
+
+ Note that the double backslashes are required.
+ """
+ kwargs["logger_name"] = "Awsy Tests"
+ if "test_objects" in kwargs:
+ tests = []
+ for obj in kwargs["test_objects"]:
+ tests.append(obj["file_relpath"])
+ del kwargs["test_objects"]
+
+ if not kwargs.get("binary") and conditions.is_firefox(command_context):
+ try:
+ kwargs["binary"] = command_context.get_binary_path("app")
+ except BinaryNotFoundException as e:
+ command_context.log(
+ logging.ERROR, "awsy", {"error": str(e)}, "ERROR: {error}"
+ )
+ command_context.log(logging.INFO, "awsy", {"help": e.help()}, "{help}")
+ return 1
+ return run_awsy(command_context, tests, **kwargs)
diff --git a/testing/awsy/moz.build b/testing/awsy/moz.build
new file mode 100644
index 0000000000..b3360f0f4b
--- /dev/null
+++ b/testing/awsy/moz.build
@@ -0,0 +1,9 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+with Files("awsy/**"):
+ BUG_COMPONENT = ("Testing", "AWSY")
+ SCHEDULES.exclusive = ["awsy"]
diff --git a/testing/awsy/perfdocs/config.yaml b/testing/awsy/perfdocs/config.yaml
new file mode 100644
index 0000000000..1d8b439769
--- /dev/null
+++ b/testing/awsy/perfdocs/config.yaml
@@ -0,0 +1,16 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+---
+name: awsy
+manifest: None
+static-only: False
+suites:
+ Awsy tests:
+ description: ""
+ tests:
+ base: "FF"
+ dmd: "FF"
+ tp5: "FF"
+ tp6: "FF"
+ owner: ":mccr8 and Perftest Team"
diff --git a/testing/awsy/perfdocs/index.rst b/testing/awsy/perfdocs/index.rst
new file mode 100644
index 0000000000..bbd0fd826c
--- /dev/null
+++ b/testing/awsy/perfdocs/index.rst
@@ -0,0 +1,168 @@
+====
+AWSY
+====
+
+The Are We Slim Yet project (commonly known as AWSY) tracks memory usage across builds.
+
+On treeherder, the AWSY builds are listed in subgroups of `SY`.
+
+AWSY tests consist of three types: TP5*, TP6, and Base Memory Usage.
+
+*\*TP5 tests are out of date and no longer maintained. These tests are scheduled to be removed:* https://bugzilla.mozilla.org/show_bug.cgi?id=1712406
+
+{documentation}
+
+Running AWSY Locally
+*********************
+
+Running tests locally is most likely only useful for debugging what is going on in a test,
+as the test output is only reported as raw JSON. The CLI is documented via:
+
+.. code-block:: bash
+
+ ./mach awsy-test --help
+
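+For a quicker but less representative local run, the iteration and pause
+arguments listed by `--help` can be dialed down; the values below are
+illustrative only:
+
+.. code-block:: bash
+
+ ./mach awsy-test --entities=3 --iterations=1 --per-tab-pause=1 --settle-wait-time=1
+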
+*Currently all tests will download TP5 even if it is not used, see:* https://bugzilla.mozilla.org/show_bug.cgi?id=1683920
+
+TP5 tests
+=========
+
+.. code-block:: bash
+
+ ./mach awsy-test
+
+TP6 tests
+=========
+
+.. code-block:: bash
+
+ ./mach awsy-test --tp6
+
+Base Memory Usage tests
+========================
+
+.. code-block:: bash
+
+ ./mach awsy-test --base
+
+Running AWSY on Try
+********************
+
+AWSY runs can be generated through the mach try fuzzy finder:
+
+.. code-block:: bash
+
+ ./mach try fuzzy
+
+A query for "awsy" will return all AWSY tests. The default test is TP5; TP6 and Base test names contain `tp6` and `base`, respectively.
+
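+The query can also be passed non-interactively (a minimal sketch; the exact
+set of selected tasks depends on the current taskgraph):
+
+.. code-block:: bash
+
+ ./mach try fuzzy -q "awsy"
+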
+The following documents all tests we currently run for AWSY.
+*The following content was migrated from* https://wiki.mozilla.org/AWSY/Tests *and will be updated to clarify TP5/TP6 tests vs Base tests:* https://bugzilla.mozilla.org/show_bug.cgi?id=1714600
+
+
+TP5/TP6 Tests
+**************
+
+The following tests exist for both TP5 and TP6. Running `./mach awsy-test` by default will run TP5 tests*.
+To run TP6 tests, add the `--tp6` flag: `./mach awsy-test --tp6`
+
+*\*TP5 tests are out of date and no longer maintained. These tests are scheduled to be removed:* https://bugzilla.mozilla.org/show_bug.cgi?id=1712406
+
+
+Explicit Memory
+================
+
+* This is memory explicitly reported by a memory reporter. It includes all the memory allocated via explicit calls to heap allocation functions (such as malloc and new), and some (only that covered by memory reporters) of the memory allocated via explicit calls to non-heap allocation functions (such as mmap and VirtualAlloc).
+
+**Possible regression causes**
+
+* A regression in this usually means a new feature is using or retaining more memory and should be looked at. These are easier to diagnose as we can compare memory reports.
+
+See the `about:memory` `mdn page <https://developer.mozilla.org/docs/Mozilla/Performance/about:memory#Explicit_Allocations>`__ for more details.
+
+
+Heap Unclassified
+==================
+
+*to do: add test definition*
+
+
+Images
+=======
+
+* This is a subset of the "explicit" measurement that focuses on memory used to render images.
+
+**Possible regression causes**
+
+* A regression in this can indicate leaks or poor memory usage in the image subsystem. In the past this was a persistent problem.
+
+
+JS
+====
+
+*to do: add test definition*
+
+
+Resident Memory
+================
+
+* This is a higher-level measurement provided by the operating system. We sum the "resident" memory (`RSS <https://en.wikipedia.org/wiki/Resident_set_size>`_) with the `resident-unique <https://en.wikipedia.org/wiki/Unique_set_size>`_ memory of the content processes. It's pretty noisy and large, so it's not very useful for detecting smaller regressions.
+
+**Possible regression causes**
+
+* Regressions in this often track regressions in explicit and heap unclassified. If we see a regression in resident but not in other reports, this can indicate we are leaking untracked memory (perhaps through shared memory, graphics allocations, file handles, etc.).
+
+
+Base Content Tests
+*******************
+
+* An updated test focused on supporting Fission. This measures the base overhead of an empty content process. It tracks resident unique, heap unclassified, JS, and explicit memory metrics, as well as storing full memory reports as artifacts. For each metric, the median value across all content processes is used. It has much lower thresholds for alerting and is recorded in `Perfherder <https://wiki.mozilla.org/EngineeringProductivity/Projects/Perfherder>`_.
+
+
+Base Content Explicit
+======================
+
+**Possible regression causes**
+
+* A change has caused more JavaScript to load at startup or into blank pages. **Common solutions**: lazily load any new modules you rely on, or split your code out to only load what is minimally needed initially.
+* You modified the JS engine and it's using more memory. **Common solution**: attempt to reduce your object size for the common case; these tend to add up!
+* You implemented a new feature in JavaScript. **Common solution**: write the majority (or all of it) in compiled code (C++/Rust). This will reduce overhead and generally improve performance.
+
+
+Base Content Heap Unclassified
+===============================
+
+* The "heap-unclassified" value represents heap-allocated memory that is not measured by any memory reporter. This is typically 10--20% of "explicit".
+
+
+**Possible regression causes**
+
+* A regression in this can indicate that we're leaking memory or that additional memory reporters should be added.
+* An improvement can indicate that leaks have been fixed or that we added new memory reporters.
+
+See the `about:memory` `mdn page <https://developer.mozilla.org/docs/Mozilla/Performance/about:memory#Explicit_Allocations>`__ for more details.
+
+
+Base Content JS
+================
+
+* This is the "js-main-runtime/" value in `about:memory`, which is all the memory attributed to the JavaScript engine.
+
+**Possible regression causes**
+
+* A regression in this number can indicate leaks in the JS engine, optimizations that trade additional memory for performance, or problems with the garbage collector.
+
+
+Base Content Resident Unique Memory
+====================================
+
+*to do: add test definition*
+
+
+Other references
+-----------------
+
+`Are We Slim Yet MDN web docs <https://developer.mozilla.org/en-US/docs/Mozilla/Performance/AWSY>`_
diff --git a/testing/awsy/requirements.txt b/testing/awsy/requirements.txt
new file mode 100644
index 0000000000..4ab234e85f
--- /dev/null
+++ b/testing/awsy/requirements.txt
@@ -0,0 +1,2 @@
+marionette-harness >= 4.0.0
+PyYaml >= 5.1
diff --git a/testing/awsy/setup.py b/testing/awsy/setup.py
new file mode 100644
index 0000000000..1c3f4f0db0
--- /dev/null
+++ b/testing/awsy/setup.py
@@ -0,0 +1,29 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from setuptools import find_packages, setup
+
+PACKAGE_NAME = "awsy"
+PACKAGE_VERSION = "0.0.1"
+
+setup(
+ name=PACKAGE_NAME,
+ version=PACKAGE_VERSION,
+ description="AreWeSlimYet",
+ long_description="A memory testing framework for Firefox.",
+ author="Mozilla Automation and Testing Team",
+ author_email="tools@lists.mozilla.org",
+ license="MPL 1.1/GPL 2.0/LGPL 2.1",
+ packages=find_packages(),
+ zip_safe=False,
+ install_requires=["marionette_harness", "PyYaml"],
+ classifiers=[
+ "Development Status :: 4 - Beta",
+ "Environment :: Console",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)",
+ "Operating System :: OS Independent",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+)
diff --git a/testing/awsy/tp5n-pageset.manifest b/testing/awsy/tp5n-pageset.manifest
new file mode 100644
index 0000000000..c785df7a45
--- /dev/null
+++ b/testing/awsy/tp5n-pageset.manifest
@@ -0,0 +1,10 @@
+[
+ {
+ "filename": "tp5n.zip",
+ "size": 81753769,
+ "digest": "7e74bc532d220fa2484f84bd7c2659da7d2ae3aa0bc225ba63e3db70dc0c0697503427209098afa85e235397c4ec58cd488cab7b3435e8079583d3994fff8326",
+ "algorithm": "sha512",
+ "unpack": false
+ }
+]
+
diff --git a/testing/awsy/tp6-pageset.manifest b/testing/awsy/tp6-pageset.manifest
new file mode 100644
index 0000000000..cddc30f57d
--- /dev/null
+++ b/testing/awsy/tp6-pageset.manifest
@@ -0,0 +1,233 @@
+[
+ {
+ "algorithm": "sha512",
+ "digest": "d801dc23873ef5fac668aa58fa948f5de0d9f3ccc53d6773fb5a137515bd04e72cc8c0c7975c6e1fc19c72b3d721effb5432fce78b0ca6f3a90f2d6467ee5b68",
+ "filename": "mitm5-linux-firefox-amazon.zip",
+ "size": 6588776,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "5ab1a9e1c7c4d5925bdac9e87123ec0aaeee477d0b2e910d37ba0df70f6c8278688910584b2874d19d8f360af14b6b37ac707012d6b8969a1877c64fed233489",
+ "filename": "mitm5-linux-firefox-bing-search.zip",
+ "size": 2413805,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "f73b07ae7df78a54cb660ea6e1e6375ee49c24bcef687b90d5356cf3bd6e74635bfb8a26160459ac326466443a62b41daecf5edb31dfefff2f0adc7b9fe0768f",
+ "filename": "mitm5-linux-firefox-buzzfeed.zip",
+ "size": 9558243,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "033d5b87d18a299dacc441f007febb5d104f5da39a8004a116904dea309a8351efedf604014196839107ad93abd264b02a70b8bea06a46ff9ae467d312b2efb2",
+ "filename": "mitm5-linux-firefox-cnn.zip",
+ "size": 39806225,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "f41a3be375159e94933d958abea4154db9586325d72b10cea3d0aec17a7e934c73c64a4fd3f5ff4d322489cea094e2f6ae4c88f88cbe0cefbdc88c9451c3de11",
+ "filename": "mitm5-linux-firefox-ebay.zip",
+ "size": 3351856,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "ca0a6d9561bbac193162f797ac3e6226446408ca0d8268313956414db9170be6d7c0d7dc316df4de15a3ed4e0e8c948b60472511339313e58cfc331b556db522",
+ "filename": "mitm5-linux-firefox-espn.zip",
+ "size": 7665306,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "55c8d2d1e1c4a7417e80cd9f3ae250fd871b1ba583feca0a2bb29861bba4d39b9d75c2ca5b1d32c133a64b528cd8787c1af37c6308df8941b12494ddc63bcd17",
+ "filename": "mitm5-linux-firefox-expedia.zip",
+ "size": 9707769,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "398e9f8cedb2887da29f5a7bc761330c21635bd484e4fa17178a9f53eb3de2ab7a167bb9c11f888c0799f8993fce44471c1947111fa24890289333579c6bd194",
+ "filename": "mitm5-linux-firefox-facebook-redesign.zip",
+ "size": 14553670,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "3faf88b190c58b5988d1b7ecbfd7cac7d9cabbed7b07badb732e88d320ac8b9262a1aec67022cc0c5dd8ccfbb6a3f8d0e208a1e5d4426ec143b2dafc0b616346",
+ "filename": "mitm5-linux-firefox-fandom.zip",
+ "size": 2875881,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "996586f9e20cface8337be4b8c6d4fdd6515c25efb58b5dfeb52c73d4d748b4580de58d8119f60fde393c59a53b6e8dded024d67e4b73f60aa71937a6b12aafb",
+ "filename": "mitm5-linux-firefox-google-docs.zip",
+ "size": 51574956,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "22b289c5a054413baa8fe5df047abd430b82a4275882c6247822fa437d3c59516e66dea1deaa01f2568adccf83b3975d75f34a3081dfc92a3d9a4ab54530f1c9",
+ "filename": "mitm5-linux-firefox-google-mail.zip",
+ "size": 15505653,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "ad5e9b0a201eded3fa06ce2d646e9cbb774fbe29c236766aaad6d60fe588cade6a02a393493fd9c953910da4a511dd992d18f432ccba55a89f1a5f2d3b10cb3a",
+ "filename": "mitm5-linux-firefox-google-search.zip",
+ "size": 8938916,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "f6b099a40b6db5a3fb44c468ce763b294920c6e72473b1c566290bee204c3cc4ffdd8a1ba7f4f2119acef27e406f316c121ffbc33012645b1e47e51d933d1760",
+ "filename": "mitm5-linux-firefox-google-slides.zip",
+ "size": 15782166,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "2ea8757271b162713fdd400f68858dc4e9c03a6572edda591af6dbc2927a8efa47c0b9a86de51dd4d19b0bc0d3b7c14ddbe55e37a9e6cdc45b12b3f6479f43bb",
+ "filename": "mitm5-linux-firefox-imdb.zip",
+ "size": 2904905,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "58e44e939734bb987fc222431021084b6f65887c46b32b668342acdb0b7ceec9f3f64411c529b89c7c27dcc5fbfa27ee37ef2726994ec05f922310f341c54a6c",
+ "filename": "mitm5-linux-firefox-imgur.zip",
+ "size": 8018854,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "2be95e5707cba6c40c46546a4d3dadaf3d69780d5aa1c457948d85973a65da729b4aceb6bfcce26e74ba7a513e00404b345375889cc21a0f89d42a47ad977c1b",
+ "filename": "mitm5-linux-firefox-instagram.zip",
+ "size": 6135175,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "f007a12f22b2927c9d5a2a4820cbd37ac62c36a6225d11297b4078265d43ed4c2ca8318a3ae8b40452be6c713f6afb0cdadae522723289d4e88d8c51170cdaa6",
+ "filename": "mitm5-linux-firefox-linkedin.zip",
+ "size": 5058731,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "36cd7c235b116a39a11082fe14b89434aace368cc7304654852e2f077ac9b024f7c663ff6f81c5012aca4fcf04d4b0f96e5abd1d5bb3f3a1f7ae0113ddf75c01",
+ "filename": "mitm5-linux-firefox-live-office.zip",
+ "size": 11077445,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "5c892f7b1630a9d34a682ebee3d7564bbf9ce2295a290af8e423187b0f0e2c52dabf9b640114f7920367fbab9ed0bb92fc30f3c2b43df63ce08e63dae9b23a63",
+ "filename": "mitm5-linux-firefox-live.zip",
+ "size": 12907525,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "235d264832d0ad181a59b6c39db8dc401fd1d287fcc0cd677f38a9e404da3d4ecbe1bfea8a01c6bf653402976b86037a04514e2a5a5b11e4370feecd147994f2",
+ "filename": "mitm5-linux-firefox-microsoft.zip",
+ "size": 1181178,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "0c28c65094e784840ca258e07a49001370fcf5cb1fad65e054e9e795ad852648379d5a73a7a0142f1a9e10c30b615fed788dea411bf79c4926dd039dade33212",
+ "filename": "mitm5-linux-firefox-netflix.zip",
+ "size": 122430671,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "037dafd334046bdf9837ad5a321eb03e7c043f0c9f145c2086d88ca345d9385ea2239109e0a3119976fe8cf44bdbf8ac2ade2411d0ff92ded8dba300ce2f9e81",
+ "filename": "mitm5-linux-firefox-nytimes.zip",
+ "size": 6117949,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "531f4ec6c3c475c4d14d198abc3bd8a02181a4759477714b3f30d4b97cf25065ccb6e373d78ec7b9549d1cdd410a94af7112701c2a6cdcb53bf8a24be2996144",
+ "filename": "mitm5-linux-firefox-paypal.zip",
+ "size": 3606449,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "3b4bf399eca881bd586e1d8ce49758f1b8033d0fdbcb360663c0855ed9b2926020cecb9cb53ad2f141022dbbd73be4a229e2a3048461aefaca9d9be117b1705a",
+ "filename": "mitm5-linux-firefox-pettay.zip",
+ "size": 162774,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "b63c6a28c715599630bc8786a382837c88b20621cc86a31cb8d28b1ef414f7d3492ae291b0b45165febfee43f487e9ef0be39d5cd24ab91f153839a065c0ba43",
+ "filename": "mitm5-linux-firefox-pinterest.zip",
+ "size": 20950617,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "05056a163e3d4d444e398d817de4d722e0c01b7e7da41ab73d6079bfddef5b000613c7e95af737286f20df63eb38f3a94ed6ff171e288cdaf5d156955eeb57cb",
+ "filename": "mitm5-linux-firefox-reddit.zip",
+ "size": 5162729,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "56f094f91bf239ed1555b9c7aafcbe553ea524fa8c6eef5ee3030cf77949c13bab19e9cc002621c50a10d5405c2340eb9f37309ff7f1599da6bc15114d526808",
+ "filename": "mitm5-linux-firefox-tumblr.zip",
+ "size": 17684438,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "aae14f5e85217e14dd407cb93ab1eee160d67d406895edd639c6a9be860789f8d141408305456409dcadf7b927cb5b133e70504294f0a6cd90a6984e19c79be1",
+ "filename": "mitm5-linux-firefox-twitch.zip",
+ "size": 30949276,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "af6ad361d2c027b48d72048d43b8fe7bed1efc307c9fdbb596b4e8e4f4c3247c1161bb21e888d2a4ca75f1a3ee47219c8289070f96ea7ab6e626d1d31f39bc75",
+ "filename": "mitm5-linux-firefox-twitter.zip",
+ "size": 7176713,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "c53c45c374cd924d03972f56b6a7416451861147f0a14b8c51c2c722a82582ae7ce8927a500981152aa6f4d3617b5ce455b85910981a9aabbd672e1ea47c0ff7",
+ "filename": "mitm5-linux-firefox-wikia.zip",
+ "size": 4847101,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "f9cbdb3b217556ab1ef5ad44fc0d7f8b3239a15b79ad42dc3756603465bcc4d0e7390b16159246554c8c552b52fa4b94a23d2b127d692b66aa885c2328e25803",
+ "filename": "mitm5-linux-firefox-wikipedia.zip",
+ "size": 3876066,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "e690faac0d78b330b8c2f22fe23920fcf101c2775beae9575c191477023b2cb4f8c12b4948ece4e04ccbc65b20f68aa6c8acedfb2e5144089e192a7047de76b9",
+ "filename": "mitm5-linux-firefox-yahoo-mail.zip",
+ "size": 11922543,
+ "visibility": "public"
+ },
+ {
+ "algorithm": "sha512",
+ "digest": "bbbf25606130d97a61aad5e48e80ee0f9743cfb8481cd6ece7fcadc370b1f9737ba1dc37dc8d889efdc0128a3ee9bbc8a061bc4ad5a65b8efcccaf6c3b1f223e",
+ "filename": "mitm5-linux-firefox-youtube.zip",
+ "size": 7078105,
+ "visibility": "public"
+ }
+] \ No newline at end of file