Diffstat (limited to 'testing/mozharness/mozharness/mozilla/testing')
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/__init__.py          0
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/android.py         725
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/codecoverage.py    679
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/errors.py          177
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/per_test_base.py   540
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/raptor.py         1478
-rwxr-xr-x  testing/mozharness/mozharness/mozilla/testing/talos.py           893
-rwxr-xr-x  testing/mozharness/mozharness/mozilla/testing/testbase.py        767
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/try_tools.py       246
-rwxr-xr-x  testing/mozharness/mozharness/mozilla/testing/unittest.py        255
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/verify_tools.py     69
11 files changed, 5829 insertions, 0 deletions
diff --git a/testing/mozharness/mozharness/mozilla/testing/__init__.py b/testing/mozharness/mozharness/mozilla/testing/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/__init__.py
diff --git a/testing/mozharness/mozharness/mozilla/testing/android.py b/testing/mozharness/mozharness/mozilla/testing/android.py
new file mode 100644
index 0000000000..7e17707552
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/android.py
@@ -0,0 +1,725 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import datetime
+import errno
+import functools
+import glob
+import os
+import posixpath
+import re
+import signal
+import subprocess
+import tempfile
+import time
+from threading import Timer
+
+import six
+
+from mozharness.base.script import PostScriptAction, PreScriptAction
+from mozharness.mozilla.automation import EXIT_STATUS_DICT, TBPL_RETRY
+
+
+def ensure_dir(dir):
+ """Ensures the given directory exists"""
+ if dir and not os.path.exists(dir):
+ try:
+ os.makedirs(dir)
+ except OSError as error:
+ if error.errno != errno.EEXIST:
+ raise
+
+
+class AndroidMixin(object):
+ """
+ Mixin class used by Android test scripts.
+ """
+
+ def __init__(self, **kwargs):
+ self._adb_path = None
+ self._device = None
+ self.app_name = None
+ self.device_name = os.environ.get("DEVICE_NAME", None)
+ self.device_serial = os.environ.get("DEVICE_SERIAL", None)
+ self.device_ip = os.environ.get("DEVICE_IP", None)
+ self.logcat_proc = None
+ self.logcat_file = None
+ self.use_gles3 = False
+ self.use_root = True
+ self.xre_path = None
+ super(AndroidMixin, self).__init__(**kwargs)
+
+ @property
+ def adb_path(self):
+ """Get the path to the adb executable."""
+ self.activate_virtualenv()
+ if not self._adb_path:
+ self._adb_path = self.query_exe("adb")
+ return self._adb_path
+
+ @property
+ def device(self):
+ if not self._device:
+ # We must access the adb_path property to activate the
+ # virtualenv before importing mozdevice in order to
+ # import the mozdevice installed into the virtualenv and
+ # not any system-wide installation of mozdevice.
+ adb = self.adb_path
+ import mozdevice
+
+ self._device = mozdevice.ADBDeviceFactory(
+ adb=adb, device=self.device_serial, use_root=self.use_root
+ )
+ return self._device
+
+ @property
+ def is_android(self):
+ c = self.config
+ installer_url = c.get("installer_url", None)
+ return (
+ self.device_serial is not None
+ or self.is_emulator
+ or (
+ installer_url is not None
+ and (installer_url.endswith(".apk") or installer_url.endswith(".aab"))
+ )
+ )
+
+ @property
+ def is_emulator(self):
+ c = self.config
+        return bool(c.get("emulator_avd_name"))
+
+ def _get_repo_url(self, path):
+ """
+ Return a url for a file (typically a tooltool manifest) in this hg repo
+ and using this revision (or mozilla-central/default if repo/rev cannot
+ be determined).
+
+ :param path specifies the directory path to the file of interest.
+ """
+ if "GECKO_HEAD_REPOSITORY" in os.environ and "GECKO_HEAD_REV" in os.environ:
+ # probably taskcluster
+ repo = os.environ["GECKO_HEAD_REPOSITORY"]
+ revision = os.environ["GECKO_HEAD_REV"]
+ else:
+ # something unexpected!
+ repo = "https://hg.mozilla.org/mozilla-central"
+ revision = "default"
+ self.warning(
+ "Unable to find repo/revision for manifest; "
+ "using mozilla-central/default"
+ )
+ url = "%s/raw-file/%s/%s" % (repo, revision, path)
+ return url
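+
+    # For illustration (hypothetical values): with
+    # GECKO_HEAD_REPOSITORY=https://hg.mozilla.org/mozilla-central and
+    # GECKO_HEAD_REV=abcdef012345, _get_repo_url("testing/foo.manifest") returns
+    # https://hg.mozilla.org/mozilla-central/raw-file/abcdef012345/testing/foo.manifest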
+
+ def _tooltool_fetch(self, url, dir):
+ c = self.config
+ manifest_path = self.download_file(
+ url, file_name="releng.manifest", parent_dir=dir
+ )
+ if not os.path.exists(manifest_path):
+ self.fatal(
+ "Could not retrieve manifest needed to retrieve "
+ "artifacts from %s" % manifest_path
+ )
+ # from TooltoolMixin, included in TestingMixin
+ self.tooltool_fetch(
+ manifest_path, output_dir=dir, cache=c.get("tooltool_cache", None)
+ )
+
+ def _launch_emulator(self):
+ env = self.query_env()
+
+ # Write a default ddms.cfg to avoid unwanted prompts
+ avd_home_dir = self.abs_dirs["abs_avds_dir"]
+ DDMS_FILE = os.path.join(avd_home_dir, "ddms.cfg")
+ with open(DDMS_FILE, "w") as f:
+ f.write("pingOptIn=false\npingId=0\n")
+ self.info("wrote dummy %s" % DDMS_FILE)
+
+ # Delete emulator auth file, so it doesn't prompt
+ AUTH_FILE = os.path.join(
+ os.path.expanduser("~"), ".emulator_console_auth_token"
+ )
+ if os.path.exists(AUTH_FILE):
+ try:
+ os.remove(AUTH_FILE)
+ self.info("deleted %s" % AUTH_FILE)
+ except Exception:
+ self.warning("failed to remove %s" % AUTH_FILE)
+
+ env["ANDROID_EMULATOR_HOME"] = avd_home_dir
+ avd_path = os.path.join(avd_home_dir, "avd")
+ if os.path.exists(avd_path):
+ env["ANDROID_AVD_HOME"] = avd_path
+ self.info("Found avds at %s" % avd_path)
+ else:
+ self.warning("AVDs missing? Not found at %s" % avd_path)
+
+ if "deprecated_sdk_path" in self.config:
+ sdk_path = os.path.abspath(os.path.join(avd_home_dir, ".."))
+ else:
+ sdk_path = self.abs_dirs["abs_sdk_dir"]
+ if os.path.exists(sdk_path):
+ env["ANDROID_SDK_HOME"] = sdk_path
+ env["ANDROID_SDK_ROOT"] = sdk_path
+ self.info("Found sdk at %s" % sdk_path)
+ else:
+ self.warning("Android sdk missing? Not found at %s" % sdk_path)
+
+ avd_config_path = os.path.join(
+ avd_path, "%s.ini" % self.config["emulator_avd_name"]
+ )
+ avd_folder = os.path.join(avd_path, "%s.avd" % self.config["emulator_avd_name"])
+ if os.path.isfile(avd_config_path):
+ # The ini file points to the absolute path to the emulator folder,
+ # which might be different, so we need to update it.
+ old_config = ""
+ with open(avd_config_path, "r") as config_file:
+ old_config = config_file.readlines()
+ self.info("Old Config: %s" % old_config)
+ with open(avd_config_path, "w") as config_file:
+ for line in old_config:
+ if line.startswith("path="):
+ config_file.write("path=%s\n" % avd_folder)
+ self.info("Updating path from: %s" % line)
+ else:
+ config_file.write("%s\n" % line)
+ else:
+ self.warning("Could not find config path at %s" % avd_config_path)
+
+ # enable EGL 3.0 in advancedFeatures.ini
+ AF_FILE = os.path.join(avd_home_dir, "advancedFeatures.ini")
+ with open(AF_FILE, "w") as f:
+ if self.use_gles3:
+ f.write("GLESDynamicVersion=on\n")
+ else:
+ f.write("GLESDynamicVersion=off\n")
+
+ # extra diagnostics for kvm acceleration
+ emu = self.config.get("emulator_process_name")
+ if os.path.exists("/dev/kvm") and emu and "x86" in emu:
+ try:
+ self.run_command(["ls", "-l", "/dev/kvm"])
+ self.run_command(["kvm-ok"])
+ self.run_command(["emulator", "-accel-check"], env=env)
+ except Exception as e:
+ self.warning("Extra kvm diagnostics failed: %s" % str(e))
+
+ self.info("emulator env: %s" % str(env))
+ command = ["emulator", "-avd", self.config["emulator_avd_name"]]
+ if "emulator_extra_args" in self.config:
+ command += self.config["emulator_extra_args"]
+
+ dir = self.query_abs_dirs()["abs_blob_upload_dir"]
+ tmp_file = tempfile.NamedTemporaryFile(
+ mode="w", prefix="emulator-", suffix=".log", dir=dir, delete=False
+ )
+ self.info("Launching the emulator with: %s" % " ".join(command))
+ self.info("Writing log to %s" % tmp_file.name)
+ proc = subprocess.Popen(
+ command, stdout=tmp_file, stderr=tmp_file, env=env, bufsize=0
+ )
+ return proc
+
+ def _verify_emulator(self):
+ boot_ok = self._retry(
+ 30,
+ 10,
+ self.is_boot_completed,
+ "Verify Android boot completed",
+ max_time=330,
+ )
+ if not boot_ok:
+ self.warning("Unable to verify Android boot completion")
+ return False
+ return True
+
+ def _verify_emulator_and_restart_on_fail(self):
+ emulator_ok = self._verify_emulator()
+ if not emulator_ok:
+ self.device_screenshot("screenshot-emulator-start")
+ self.kill_processes(self.config["emulator_process_name"])
+ subprocess.check_call(["ps", "-ef"])
+ # remove emulator tmp files
+ for dir in glob.glob("/tmp/android-*"):
+ self.rmtree(dir)
+ time.sleep(5)
+ self.emulator_proc = self._launch_emulator()
+ return emulator_ok
+
+ def _retry(self, max_attempts, interval, func, description, max_time=0):
+ """
+ Execute func until it returns True, up to max_attempts times, waiting for
+ interval seconds between each attempt. description is logged on each attempt.
+ If max_time is specified, no further attempts will be made once max_time
+ seconds have elapsed; this provides some protection for the case where
+ the run-time for func is long or highly variable.
+ """
+ status = False
+ attempts = 0
+ if max_time > 0:
+ end_time = datetime.datetime.now() + datetime.timedelta(seconds=max_time)
+ else:
+ end_time = None
+ while attempts < max_attempts and not status:
+ if (end_time is not None) and (datetime.datetime.now() > end_time):
+ self.info(
+ "Maximum retry run-time of %d seconds exceeded; "
+ "remaining attempts abandoned" % max_time
+ )
+ break
+ if attempts != 0:
+ self.info("Sleeping %d seconds" % interval)
+ time.sleep(interval)
+ attempts += 1
+ self.info(
+ ">> %s: Attempt #%d of %d" % (description, attempts, max_attempts)
+ )
+ status = func()
+ return status
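+
+    # Usage sketch: given some zero-argument callable `check` (hypothetical)
+    # that returns True on success,
+    #
+    #     ok = self._retry(30, 10, check, "Verify device ready", max_time=330)
+    #
+    # polls check() up to 30 times at 10-second intervals and abandons any
+    # remaining attempts once roughly 330 seconds have elapsed.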
+
+ def dump_perf_info(self):
+ """
+ Dump some host and android device performance-related information
+ to an artifact file, to help understand task performance.
+ """
+ dir = self.query_abs_dirs()["abs_blob_upload_dir"]
+ perf_path = os.path.join(dir, "android-performance.log")
+ with open(perf_path, "w") as f:
+
+ f.write("\n\nHost cpufreq/scaling_governor:\n")
+ cpus = glob.glob("/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor")
+ for cpu in cpus:
+ out = subprocess.check_output(["cat", cpu], universal_newlines=True)
+ f.write("%s: %s" % (cpu, out))
+
+ f.write("\n\nHost /proc/cpuinfo:\n")
+ out = subprocess.check_output(
+ ["cat", "/proc/cpuinfo"], universal_newlines=True
+ )
+ f.write(out)
+
+ f.write("\n\nHost /proc/meminfo:\n")
+ out = subprocess.check_output(
+ ["cat", "/proc/meminfo"], universal_newlines=True
+ )
+ f.write(out)
+
+ f.write("\n\nHost process list:\n")
+ out = subprocess.check_output(["ps", "-ef"], universal_newlines=True)
+ f.write(out)
+
+ f.write("\n\nDevice /proc/cpuinfo:\n")
+ cmd = "cat /proc/cpuinfo"
+ out = self.shell_output(cmd)
+ f.write(out)
+ cpuinfo = out
+
+ f.write("\n\nDevice /proc/meminfo:\n")
+ cmd = "cat /proc/meminfo"
+ out = self.shell_output(cmd)
+ f.write(out)
+
+ f.write("\n\nDevice process list:\n")
+ cmd = "ps"
+ out = self.shell_output(cmd)
+ f.write(out)
+
+        # Search android cpuinfo for "BogoMIPS"; if found and below the
+        # configured minimum, retry this task in hopes of getting a
+        # higher-powered environment.
+ # (Carry on silently if BogoMIPS is not found -- this may vary by
+ # Android implementation -- no big deal.)
+ # See bug 1321605: Sometimes the emulator is really slow, and
+ # low bogomips can be a good predictor of that condition.
+ bogomips_minimum = int(self.config.get("bogomips_minimum") or 0)
+ for line in cpuinfo.split("\n"):
+            m = re.match(r"BogoMIPS.*: (\d*)", line, re.IGNORECASE)
+ if m:
+ bogomips = int(m.group(1))
+ if bogomips_minimum > 0 and bogomips < bogomips_minimum:
+ self.fatal(
+ "INFRA-ERROR: insufficient Android bogomips (%d < %d)"
+ % (bogomips, bogomips_minimum),
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+ self.info("Found Android bogomips: %d" % bogomips)
+ break
+
+ def logcat_path(self):
+ logcat_filename = "logcat-%s.log" % self.device_serial
+ return os.path.join(
+ self.query_abs_dirs()["abs_blob_upload_dir"], logcat_filename
+ )
+
+ def logcat_start(self):
+ """
+ Start recording logcat. Writes logcat to the upload directory.
+ """
+ # Start logcat for the device. The adb process runs until the
+ # corresponding device is stopped. Output is written directly to
+ # the blobber upload directory so that it is uploaded automatically
+ # at the end of the job.
+ self.logcat_file = open(self.logcat_path(), "w")
+ logcat_cmd = [
+ self.adb_path,
+ "-s",
+ self.device_serial,
+ "logcat",
+ "-v",
+ "threadtime",
+ "Trace:S",
+ "StrictMode:S",
+ "ExchangeService:S",
+ ]
+ self.info(" ".join(logcat_cmd))
+ self.logcat_proc = subprocess.Popen(
+ logcat_cmd, stdout=self.logcat_file, stdin=subprocess.PIPE
+ )
+
+ def logcat_stop(self):
+ """
+ Stop logcat process started by logcat_start.
+ """
+ if self.logcat_proc:
+ self.info("Killing logcat pid %d." % self.logcat_proc.pid)
+ self.logcat_proc.kill()
+ self.logcat_file.close()
+
+ def _install_android_app_retry(self, app_path, replace):
+ import mozdevice
+
+ try:
+ if app_path.endswith(".aab"):
+ self.device.install_app_bundle(
+ self.query_abs_dirs()["abs_bundletool_path"], app_path, timeout=120
+ )
+ self.device.run_as_package = self.query_package_name()
+ else:
+ self.device.run_as_package = self.device.install_app(
+ app_path, replace=replace, timeout=120
+ )
+ return True
+ except (
+ mozdevice.ADBError,
+ mozdevice.ADBProcessError,
+ mozdevice.ADBTimeoutError,
+ ) as e:
+ self.info(
+ "Failed to install %s on %s: %s %s"
+ % (app_path, self.device_name, type(e).__name__, e)
+ )
+ return False
+
+ def install_android_app(self, app_path, replace=False):
+ """
+ Install the specified app.
+ """
+ app_installed = self._retry(
+ 5,
+ 10,
+ functools.partial(self._install_android_app_retry, app_path, replace),
+ "Install app",
+ )
+
+ if not app_installed:
+ self.fatal(
+ "INFRA-ERROR: Failed to install %s" % os.path.basename(app_path),
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+
+ def uninstall_android_app(self):
+ """
+        Uninstall the app associated with the configured package name, if it
+        is installed.
+ """
+ import mozdevice
+
+ try:
+ package_name = self.query_package_name()
+ self.device.uninstall_app(package_name)
+ except (
+ mozdevice.ADBError,
+ mozdevice.ADBProcessError,
+ mozdevice.ADBTimeoutError,
+ ) as e:
+ self.info(
+ "Failed to uninstall %s from %s: %s %s"
+ % (package_name, self.device_name, type(e).__name__, e)
+ )
+ self.fatal(
+ "INFRA-ERROR: %s Failed to uninstall %s"
+ % (type(e).__name__, package_name),
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+
+ def is_boot_completed(self):
+ import mozdevice
+
+ try:
+ return self.device.is_device_ready(timeout=30)
+ except (ValueError, mozdevice.ADBError, mozdevice.ADBTimeoutError):
+ pass
+ return False
+
+ def shell_output(self, cmd, enable_run_as=False):
+ import mozdevice
+
+ try:
+ return self.device.shell_output(
+ cmd, timeout=30, enable_run_as=enable_run_as
+ )
+ except (mozdevice.ADBTimeoutError) as e:
+ self.info(
+ "Failed to run shell command %s from %s: %s %s"
+ % (cmd, self.device_name, type(e).__name__, e)
+ )
+ self.fatal(
+ "INFRA-ERROR: %s Failed to run shell command %s"
+ % (type(e).__name__, cmd),
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+
+ def device_screenshot(self, prefix):
+ """
+ On emulator, save a screenshot of the entire screen to the upload directory;
+ otherwise, save a screenshot of the device to the upload directory.
+
+ :param prefix specifies a filename prefix for the screenshot
+ """
+ from mozscreenshot import dump_device_screen, dump_screen
+
+ reset_dir = False
+ if not os.environ.get("MOZ_UPLOAD_DIR", None):
+ dirs = self.query_abs_dirs()
+ os.environ["MOZ_UPLOAD_DIR"] = dirs["abs_blob_upload_dir"]
+ reset_dir = True
+ if self.is_emulator:
+ if self.xre_path:
+ dump_screen(self.xre_path, self, prefix=prefix)
+ else:
+ self.info("Not saving screenshot: no XRE configured")
+ else:
+ dump_device_screen(self.device, self, prefix=prefix)
+ if reset_dir:
+ del os.environ["MOZ_UPLOAD_DIR"]
+
+ def download_hostutils(self, xre_dir):
+ """
+ Download and install hostutils from tooltool.
+ """
+ xre_path = None
+ self.rmtree(xre_dir)
+ self.mkdir_p(xre_dir)
+ if self.config["hostutils_manifest_path"]:
+ url = self._get_repo_url(self.config["hostutils_manifest_path"])
+ self._tooltool_fetch(url, xre_dir)
+ for p in glob.glob(os.path.join(xre_dir, "host-utils-*")):
+ if os.path.isdir(p) and os.path.isfile(os.path.join(p, "xpcshell")):
+ xre_path = p
+ if not xre_path:
+ self.fatal("xre path not found in %s" % xre_dir)
+ else:
+ self.fatal("configure hostutils_manifest_path!")
+ return xre_path
+
+ def query_package_name(self):
+ if self.app_name is None:
+ # For convenience, assume geckoview.test/geckoview_example when install
+ # target looks like geckoview.
+ if "androidTest" in self.installer_path:
+ self.app_name = "org.mozilla.geckoview.test"
+ elif "test_runner" in self.installer_path:
+ self.app_name = "org.mozilla.geckoview.test_runner"
+ elif "geckoview" in self.installer_path:
+ self.app_name = "org.mozilla.geckoview_example"
+ if self.app_name is None:
+ # Find appname from package-name.txt - assumes download-and-extract
+ # has completed successfully.
+ # The app/package name will typically be org.mozilla.fennec,
+ # but org.mozilla.firefox for release builds, and there may be
+ # other variations. 'aapt dump badging <apk>' could be used as an
+ # alternative to package-name.txt, but introduces a dependency
+ # on aapt, found currently in the Android SDK build-tools component.
+ app_dir = self.abs_dirs["abs_work_dir"]
+ self.app_path = os.path.join(app_dir, self.installer_path)
+ unzip = self.query_exe("unzip")
+ package_path = os.path.join(app_dir, "package-name.txt")
+ unzip_cmd = [unzip, "-q", "-o", self.app_path]
+ self.run_command(unzip_cmd, cwd=app_dir, halt_on_failure=True)
+ self.app_name = str(
+ self.read_from_file(package_path, verbose=True)
+ ).rstrip()
+ return self.app_name
+
+ def kill_processes(self, process_name):
+ self.info("Killing every process called %s" % process_name)
+ process_name = six.ensure_binary(process_name)
+ out = subprocess.check_output(["ps", "-A"])
+ for line in out.splitlines():
+ if process_name in line:
+ pid = int(line.split(None, 1)[0])
+ self.info("Killing pid %d." % pid)
+ os.kill(pid, signal.SIGKILL)
+
+ def delete_ANRs(self):
+ remote_dir = self.device.stack_trace_dir
+ try:
+ if not self.device.is_dir(remote_dir):
+ self.device.mkdir(remote_dir)
+ self.info("%s created" % remote_dir)
+ return
+ self.device.chmod(remote_dir, recursive=True)
+ for trace_file in self.device.ls(remote_dir, recursive=True):
+ trace_path = posixpath.join(remote_dir, trace_file)
+ if self.device.is_file(trace_path):
+ self.device.rm(trace_path)
+ self.info("%s deleted" % trace_path)
+ except Exception as e:
+ self.info(
+ "failed to delete %s: %s %s" % (remote_dir, type(e).__name__, str(e))
+ )
+
+ def check_for_ANRs(self):
+ """
+ Copy ANR (stack trace) files from device to upload directory.
+ """
+ dirs = self.query_abs_dirs()
+ remote_dir = self.device.stack_trace_dir
+ try:
+ if not self.device.is_dir(remote_dir):
+ self.info("%s not found; ANR check skipped" % remote_dir)
+ return
+ self.device.chmod(remote_dir, recursive=True)
+ self.device.pull(remote_dir, dirs["abs_blob_upload_dir"])
+ self.delete_ANRs()
+ except Exception as e:
+ self.info(
+ "failed to pull %s: %s %s" % (remote_dir, type(e).__name__, str(e))
+ )
+
+ def delete_tombstones(self):
+ remote_dir = "/data/tombstones"
+ try:
+ if not self.device.is_dir(remote_dir):
+ self.device.mkdir(remote_dir)
+ self.info("%s created" % remote_dir)
+ return
+ self.device.chmod(remote_dir, recursive=True)
+ for trace_file in self.device.ls(remote_dir, recursive=True):
+ trace_path = posixpath.join(remote_dir, trace_file)
+ if self.device.is_file(trace_path):
+ self.device.rm(trace_path)
+ self.info("%s deleted" % trace_path)
+ except Exception as e:
+ self.info(
+ "failed to delete %s: %s %s" % (remote_dir, type(e).__name__, str(e))
+ )
+
+ def check_for_tombstones(self):
+ """
+ Copy tombstone files from device to upload directory.
+ """
+ dirs = self.query_abs_dirs()
+ remote_dir = "/data/tombstones"
+ try:
+ if not self.device.is_dir(remote_dir):
+ self.info("%s not found; tombstone check skipped" % remote_dir)
+ return
+ self.device.chmod(remote_dir, recursive=True)
+ self.device.pull(remote_dir, dirs["abs_blob_upload_dir"])
+ self.delete_tombstones()
+ except Exception as e:
+ self.info(
+ "failed to pull %s: %s %s" % (remote_dir, type(e).__name__, str(e))
+ )
+
+ # Script actions
+
+ def start_emulator(self):
+ """
+ Starts the emulator
+ """
+ if not self.is_emulator:
+ return
+
+ dirs = self.query_abs_dirs()
+ ensure_dir(dirs["abs_work_dir"])
+ ensure_dir(dirs["abs_blob_upload_dir"])
+
+ if not os.path.isfile(self.adb_path):
+ self.fatal("The adb binary '%s' is not a valid file!" % self.adb_path)
+ self.kill_processes("xpcshell")
+ self.emulator_proc = self._launch_emulator()
+
+ def verify_device(self):
+ """
+ Check to see if the emulator can be contacted via adb.
+ If any communication attempt fails, kill the emulator, re-launch, and re-check.
+ """
+ if not self.is_android:
+ return
+
+ if self.is_emulator:
+ max_restarts = 5
+ emulator_ok = self._retry(
+ max_restarts,
+ 10,
+ self._verify_emulator_and_restart_on_fail,
+ "Check emulator",
+ )
+ if not emulator_ok:
+ self.fatal(
+ "INFRA-ERROR: Unable to start emulator after %d attempts"
+ % max_restarts,
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+
+ self.mkdir_p(self.query_abs_dirs()["abs_blob_upload_dir"])
+ self.dump_perf_info()
+ self.logcat_start()
+ self.delete_ANRs()
+ self.delete_tombstones()
+ self.info("verify_device complete")
+
+ @PreScriptAction("run-tests")
+ def timed_screenshots(self, action, success=None):
+ """
+ If configured, start screenshot timers.
+ """
+ if not self.is_android:
+ return
+
+ def take_screenshot(seconds):
+ self.device_screenshot("screenshot-%ss-" % str(seconds))
+ self.info("timed (%ss) screenshot complete" % str(seconds))
+
+ self.timers = []
+ for seconds in self.config.get("screenshot_times", []):
+ self.info("screenshot requested %s seconds from now" % str(seconds))
+ t = Timer(int(seconds), take_screenshot, [seconds])
+ t.start()
+ self.timers.append(t)
+
+ @PostScriptAction("run-tests")
+ def stop_device(self, action, success=None):
+ """
+ Stop logcat and kill the emulator, if necessary.
+ """
+ if not self.is_android:
+ return
+
+ for t in self.timers:
+ t.cancel()
+ if self.worst_status != TBPL_RETRY:
+ self.check_for_ANRs()
+ self.check_for_tombstones()
+ else:
+ self.info("ANR and tombstone checks skipped due to TBPL_RETRY")
+ self.logcat_stop()
+ if self.is_emulator:
+ self.kill_processes(self.config["emulator_process_name"])
diff --git a/testing/mozharness/mozharness/mozilla/testing/codecoverage.py b/testing/mozharness/mozharness/mozilla/testing/codecoverage.py
new file mode 100644
index 0000000000..fd850324ed
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/codecoverage.py
@@ -0,0 +1,679 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import errno
+import json
+import os
+import posixpath
+import shutil
+import sys
+import tempfile
+import uuid
+import zipfile
+
+import mozinfo
+
+from mozharness.base.script import PostScriptAction, PreScriptAction
+from mozharness.mozilla.testing.per_test_base import SingleTestMixin
+
+code_coverage_config_options = [
+ [
+ ["--code-coverage"],
+ {
+ "action": "store_true",
+ "dest": "code_coverage",
+ "default": False,
+ "help": "Whether gcov c++ code coverage should be run.",
+ },
+ ],
+ [
+ ["--per-test-coverage"],
+ {
+ "action": "store_true",
+ "dest": "per_test_coverage",
+ "default": False,
+ "help": "Whether per-test coverage should be collected.",
+ },
+ ],
+ [
+ ["--disable-ccov-upload"],
+ {
+ "action": "store_true",
+ "dest": "disable_ccov_upload",
+ "default": False,
+ "help": "Whether test run should package and upload code coverage data.",
+ },
+ ],
+ [
+ ["--java-code-coverage"],
+ {
+ "action": "store_true",
+ "dest": "java_code_coverage",
+ "default": False,
+ "help": "Whether Java code coverage should be run.",
+ },
+ ],
+]
+
+
+class CodeCoverageMixin(SingleTestMixin):
+ """
+ Mixin for setting GCOV_PREFIX during test execution, packaging up
+ the resulting .gcda files and uploading them to blobber.
+ """
+
+ gcov_dir = None
+ grcov_dir = None
+ grcov_bin = None
+ jsvm_dir = None
+ prefix = None
+ per_test_reports = {}
+
+ def __init__(self, **kwargs):
+ if mozinfo.os == "linux" or mozinfo.os == "mac":
+ self.grcov_bin = "grcov"
+ elif mozinfo.os == "win":
+ self.grcov_bin = "grcov.exe"
+ else:
+ raise Exception("Unexpected OS: {}".format(mozinfo.os))
+
+ super(CodeCoverageMixin, self).__init__(**kwargs)
+
+ @property
+ def code_coverage_enabled(self):
+ try:
+ return bool(self.config.get("code_coverage"))
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ @property
+ def per_test_coverage(self):
+ try:
+ return bool(self.config.get("per_test_coverage"))
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ @property
+ def ccov_upload_disabled(self):
+ try:
+ return bool(self.config.get("disable_ccov_upload"))
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ @property
+ def jsd_code_coverage_enabled(self):
+ try:
+ return bool(self.config.get("jsd_code_coverage"))
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ @property
+ def java_code_coverage_enabled(self):
+ try:
+ return bool(self.config.get("java_code_coverage"))
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ def _setup_cpp_js_coverage_tools(self):
+ fetches_dir = os.environ["MOZ_FETCHES_DIR"]
+ with open(os.path.join(fetches_dir, "target.mozinfo.json"), "r") as f:
+ build_mozinfo = json.load(f)
+
+ self.prefix = build_mozinfo["topsrcdir"]
+
+ strip_count = len(list(filter(None, self.prefix.split("/"))))
+ os.environ["GCOV_PREFIX_STRIP"] = str(strip_count)
+
+ # Download the gcno archive from the build machine.
+ url_to_gcno = self.query_build_dir_url("target.code-coverage-gcno.zip")
+ self.download_file(url_to_gcno, parent_dir=self.grcov_dir)
+
+ # Download the chrome-map.json file from the build machine.
+ url_to_chrome_map = self.query_build_dir_url("chrome-map.json")
+ self.download_file(url_to_chrome_map, parent_dir=self.grcov_dir)
+
+ def _setup_java_coverage_tools(self):
+        # Download jacoco-cli from the build task.
+ url_to_jacoco = self.query_build_dir_url("target.jacoco-cli.jar")
+ self.jacoco_jar = os.path.join(tempfile.mkdtemp(), "target.jacoco-cli.jar")
+ self.download_file(url_to_jacoco, self.jacoco_jar)
+
+ # Download and extract class files from the build task.
+ self.classfiles_dir = tempfile.mkdtemp()
+ for archive in ["target.geckoview_classfiles.zip", "target.app_classfiles.zip"]:
+ url_to_classfiles = self.query_build_dir_url(archive)
+ classfiles_zip_path = os.path.join(self.classfiles_dir, archive)
+ self.download_file(url_to_classfiles, classfiles_zip_path)
+ with zipfile.ZipFile(classfiles_zip_path, "r") as z:
+ z.extractall(self.classfiles_dir)
+ os.remove(classfiles_zip_path)
+
+ # Create the directory where the emulator coverage file will be placed.
+ self.java_coverage_output_dir = tempfile.mkdtemp()
+
+ @PostScriptAction("download-and-extract")
+ def setup_coverage_tools(self, action, success=None):
+ if not self.code_coverage_enabled and not self.java_code_coverage_enabled:
+ return
+
+ self.grcov_dir = os.path.join(os.environ["MOZ_FETCHES_DIR"], "grcov")
+ if not os.path.isfile(os.path.join(self.grcov_dir, self.grcov_bin)):
+ raise Exception(
+ "File not found: {}".format(
+ os.path.join(self.grcov_dir, self.grcov_bin)
+ )
+ )
+
+ if self.code_coverage_enabled:
+ self._setup_cpp_js_coverage_tools()
+
+ if self.java_code_coverage_enabled:
+ self._setup_java_coverage_tools()
+
+ @PostScriptAction("download-and-extract")
+ def find_tests_for_coverage(self, action, success=None):
+ """
+ For each file modified on this push, determine if the modified file
+        is a test, by searching test manifests. Populate self.suites
+ with test files, organized by suite.
+
+ This depends on test manifests, so can only run after test zips have
+ been downloaded and extracted.
+ """
+ if not self.per_test_coverage:
+ return
+
+ self.find_modified_tests()
+
+ # TODO: Add tests that haven't been run for a while (a week? N pushes?)
+
+ # Add baseline code coverage collection tests
+ baseline_tests_by_ext = {
+ ".html": {
+ "test": "testing/mochitest/baselinecoverage/plain/test_baselinecoverage.html",
+ "suite": "mochitest-plain",
+ },
+ ".js": {
+ "test": "testing/mochitest/baselinecoverage/browser_chrome/browser_baselinecoverage.js", # NOQA: E501
+ "suite": "mochitest-browser-chrome",
+ },
+ ".xhtml": {
+ "test": "testing/mochitest/baselinecoverage/chrome/test_baselinecoverage.xhtml",
+ "suite": "mochitest-chrome",
+ },
+ }
+
+ baseline_tests_by_suite = {
+ "mochitest-browser-chrome": "testing/mochitest/baselinecoverage/browser_chrome/"
+ "browser_baselinecoverage_browser-chrome.js"
+ }
+
+ wpt_baseline_test = "tests/web-platform/mozilla/tests/baselinecoverage/wpt_baselinecoverage.html" # NOQA: E501
+ if self.config.get("per_test_category") == "web-platform":
+ if "testharness" not in self.suites:
+ self.suites["testharness"] = []
+ if wpt_baseline_test not in self.suites["testharness"]:
+ self.suites["testharness"].append(wpt_baseline_test)
+ return
+
+ # Go through all the tests and find all
+ # the baseline tests that are needed.
+ tests_to_add = {}
+ for suite in self.suites:
+ if len(self.suites[suite]) == 0:
+ continue
+ if suite in baseline_tests_by_suite:
+ if suite not in tests_to_add:
+ tests_to_add[suite] = []
+ tests_to_add[suite].append(baseline_tests_by_suite[suite])
+ continue
+
+ # Default to file types if the suite has no baseline
+ for test in self.suites[suite]:
+ _, test_ext = os.path.splitext(test)
+
+ if test_ext not in baseline_tests_by_ext:
+ # Add the '.js' test as a default baseline
+ # if none other exists.
+ test_ext = ".js"
+ baseline_test_suite = baseline_tests_by_ext[test_ext]["suite"]
+ baseline_test_name = baseline_tests_by_ext[test_ext]["test"]
+
+ if baseline_test_suite not in tests_to_add:
+ tests_to_add[baseline_test_suite] = []
+ if baseline_test_name not in tests_to_add[baseline_test_suite]:
+ tests_to_add[baseline_test_suite].append(baseline_test_name)
+
+ # Add all baseline tests needed
+ for suite in tests_to_add:
+ for test in tests_to_add[suite]:
+ if suite not in self.suites:
+ self.suites[suite] = []
+ if test not in self.suites[suite]:
+ self.suites[suite].append(test)
+
+ @property
+ def coverage_args(self):
+ return []
+
+ def set_coverage_env(self, env, is_baseline_test=False):
+ # Set the GCOV directory.
+ self.gcov_dir = tempfile.mkdtemp()
+ env["GCOV_PREFIX"] = self.gcov_dir
+
+ # Set the GCOV/JSVM directories where counters will be dumped in per-test mode.
+ if self.per_test_coverage and not is_baseline_test:
+ env["GCOV_RESULTS_DIR"] = tempfile.mkdtemp()
+ env["JSVM_RESULTS_DIR"] = tempfile.mkdtemp()
+
+ # Set JSVM directory.
+ self.jsvm_dir = tempfile.mkdtemp()
+ env["JS_CODE_COVERAGE_OUTPUT_DIR"] = self.jsvm_dir
+
+ @PreScriptAction("run-tests")
+ def _set_gcov_prefix(self, action):
+ if not self.code_coverage_enabled:
+ return
+
+ if self.per_test_coverage:
+ return
+
+ self.set_coverage_env(os.environ)
+
+ def parse_coverage_artifacts(
+ self,
+ gcov_dir,
+ jsvm_dir,
+ merge=False,
+ output_format="lcov",
+ filter_covered=False,
+ ):
+ jsvm_output_file = "jsvm_lcov_output.info"
+ grcov_output_file = "grcov_lcov_output.info"
+
+ dirs = self.query_abs_dirs()
+
+ sys.path.append(dirs["abs_test_install_dir"])
+ sys.path.append(os.path.join(dirs["abs_test_install_dir"], "mozbuild"))
+
+ from codecoverage.lcov_rewriter import LcovFileRewriter
+
+ jsvm_files = [os.path.join(jsvm_dir, e) for e in os.listdir(jsvm_dir)]
+ rewriter = LcovFileRewriter(os.path.join(self.grcov_dir, "chrome-map.json"))
+ rewriter.rewrite_files(jsvm_files, jsvm_output_file, "")
+
+ # Run grcov on the zipped .gcno and .gcda files.
+ grcov_command = [
+ os.path.join(self.grcov_dir, self.grcov_bin),
+ "-t",
+ output_format,
+ "-p",
+ self.prefix,
+ "--ignore",
+ "**/fetches/*",
+ os.path.join(self.grcov_dir, "target.code-coverage-gcno.zip"),
+ gcov_dir,
+ ]
+
+ if "coveralls" in output_format:
+ grcov_command += ["--token", "UNUSED", "--commit-sha", "UNUSED"]
+
+ if merge:
+ grcov_command += [jsvm_output_file]
+
+ if mozinfo.os == "win" or mozinfo.os == "mac":
+ grcov_command += ["--llvm"]
+
+ if filter_covered:
+ grcov_command += ["--filter", "covered"]
+
+ def skip_cannot_normalize(output_to_filter):
+ return "\n".join(
+ line
+ for line in output_to_filter.rstrip().splitlines()
+ if "cannot be normalized because" not in line
+ )
+
+        # With return_type="files", get_output_from_command returns a tuple:
+        # the first element is the path to the lcov output, the second is the
+        # path to the captured standard error.
+ tmp_output_file, _ = self.get_output_from_command(
+ grcov_command,
+ silent=True,
+ save_tmpfiles=True,
+ return_type="files",
+ throw_exception=True,
+ output_filter=skip_cannot_normalize,
+ )
+ shutil.move(tmp_output_file, grcov_output_file)
+
+ shutil.rmtree(gcov_dir)
+ shutil.rmtree(jsvm_dir)
+
+ if merge:
+ os.remove(jsvm_output_file)
+ return grcov_output_file
+ else:
+ return grcov_output_file, jsvm_output_file
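+
+    # Note the asymmetric return shape above: with merge=True the JSVM data is
+    # folded into the grcov output and a single path is returned; otherwise a
+    # (grcov_output_file, jsvm_output_file) pair is returned.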
+
+ def add_per_test_coverage_report(self, env, suite, test):
+ gcov_dir = (
+ env["GCOV_RESULTS_DIR"] if "GCOV_RESULTS_DIR" in env else self.gcov_dir
+ )
+ jsvm_dir = (
+ env["JSVM_RESULTS_DIR"] if "JSVM_RESULTS_DIR" in env else self.jsvm_dir
+ )
+
+ grcov_file = self.parse_coverage_artifacts(
+ gcov_dir,
+ jsvm_dir,
+ merge=True,
+ output_format="coveralls",
+ filter_covered=True,
+ )
+
+ report_file = str(uuid.uuid4()) + ".json"
+ shutil.move(grcov_file, report_file)
+
+ # Get the test path relative to topsrcdir.
+ # This mapping is constructed by self.find_modified_tests().
+ test = self.test_src_path.get(test.replace(os.sep, posixpath.sep), test)
+
+ # Log a warning if the test path is still an absolute path.
+ if os.path.isabs(test):
+            self.warning("Found absolute path for test: {}".format(test))
+
+ if suite not in self.per_test_reports:
+ self.per_test_reports[suite] = {}
+ assert test not in self.per_test_reports[suite]
+ self.per_test_reports[suite][test] = report_file
+
+ if "GCOV_RESULTS_DIR" in env:
+ assert "JSVM_RESULTS_DIR" in env
+            # In this case, parse_coverage_artifacts has already removed the
+            # GCOV_RESULTS_DIR and JSVM_RESULTS_DIR directories, so we still
+            # need to remove the GCOV_PREFIX and JS_CODE_COVERAGE_OUTPUT_DIR
+            # directories.
+ try:
+ shutil.rmtree(self.gcov_dir)
+ except FileNotFoundError:
+ pass
+
+ try:
+ shutil.rmtree(self.jsvm_dir)
+ except FileNotFoundError:
+ pass
+
+ def is_covered(self, sf):
+ # For C/C++ source files, we can consider a file as being uncovered
+ # when all its source lines are uncovered.
+ all_lines_uncovered = all(c is None or c == 0 for c in sf["coverage"])
+ if all_lines_uncovered:
+ return False
+
+ # For JavaScript files, we can't do the same, as the top-level is always
+ # executed, even if it just contains declarations. So, we need to check if
+ # all its functions, except the top-level, are uncovered.
+ functions = sf["functions"] if "functions" in sf else []
+ all_functions_uncovered = all(
+ not f["exec"] or f["name"] == "top-level" for f in functions
+ )
+ if all_functions_uncovered and len(functions) > 1:
+ return False
+
+ return True
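+
+    # Illustrative inputs (coveralls-style source_files entries; shapes assumed):
+    #
+    #     {"coverage": [None, 0, 0]}                       -> not covered
+    #     {"coverage": [None, 3, 0],
+    #      "functions": [{"name": "top-level", "exec": True},
+    #                    {"name": "f", "exec": False}]}     -> not covered (JS)
+    #     {"coverage": [None, 3, 0],
+    #      "functions": [{"name": "f", "exec": True}]}      -> covered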
+
+ @PostScriptAction("run-tests")
+ def _package_coverage_data(self, action, success=None):
+ dirs = self.query_abs_dirs()
+
+ if not self.code_coverage_enabled:
+ return
+
+ if self.per_test_coverage:
+ if not self.per_test_reports:
+ self.info("No tests were found...not saving coverage data.")
+ return
+
+ # Get the baseline tests that were run.
+ baseline_tests_ext_cov = {}
+ baseline_tests_suite_cov = {}
+ for suite, data in self.per_test_reports.items():
+ for test, grcov_file in data.items():
+ if "baselinecoverage" not in test:
+ continue
+
+ # TODO: Optimize this part which loads JSONs
+ # with a size of about 40Mb into memory for diffing later.
+ # Bug 1460064 is filed for this.
+ with open(grcov_file, "r") as f:
+ data = json.load(f)
+
+ if suite in os.path.split(test)[-1]:
+ baseline_tests_suite_cov[suite] = data
+ else:
+ _, baseline_filetype = os.path.splitext(test)
+ baseline_tests_ext_cov[baseline_filetype] = data
+
+ dest = os.path.join(
+ dirs["abs_blob_upload_dir"], "per-test-coverage-reports.zip"
+ )
+ with zipfile.ZipFile(dest, "w", zipfile.ZIP_DEFLATED) as z:
+ for suite, data in self.per_test_reports.items():
+ for test, grcov_file in data.items():
+ if "baselinecoverage" in test:
+ # Don't keep the baseline coverage
+ continue
+ else:
+ # Get test coverage
+ with open(grcov_file, "r") as f:
+ report = json.load(f)
+
+ # Remove uncovered files, as they are unneeded for per-test
+ # coverage purposes.
+ report["source_files"] = [
+ sf
+ for sf in report["source_files"]
+ if self.is_covered(sf)
+ ]
+
+ # Get baseline coverage
+ baseline_coverage = {}
+ if suite in baseline_tests_suite_cov:
+ baseline_coverage = baseline_tests_suite_cov[suite]
+ elif self.config.get("per_test_category") == "web-platform":
+ baseline_coverage = baseline_tests_ext_cov[".html"]
+ else:
+ for file_type in baseline_tests_ext_cov:
+ if not test.endswith(file_type):
+ continue
+ baseline_coverage = baseline_tests_ext_cov[
+ file_type
+ ]
+ break
+
+ if not baseline_coverage:
+ # Default to the '.js' baseline as it is the largest
+ self.info("Did not find a baseline test for: " + test)
+ baseline_coverage = baseline_tests_ext_cov[".js"]
+
+ unique_coverage = rm_baseline_cov(baseline_coverage, report)
+
+ with open(grcov_file, "w") as f:
+ json.dump(
+ {
+ "test": test,
+ "suite": suite,
+ "report": unique_coverage,
+ },
+ f,
+ )
+
+ z.write(grcov_file)
+ return
+
+ del os.environ["GCOV_PREFIX_STRIP"]
+ del os.environ["GCOV_PREFIX"]
+ del os.environ["JS_CODE_COVERAGE_OUTPUT_DIR"]
+
+ if not self.ccov_upload_disabled:
+ grcov_output_file, jsvm_output_file = self.parse_coverage_artifacts(
+ self.gcov_dir, self.jsvm_dir
+ )
+
+ try:
+ os.makedirs(dirs["abs_blob_upload_dir"])
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ # Zip the grcov output and upload it.
+ grcov_zip_path = os.path.join(
+ dirs["abs_blob_upload_dir"], "code-coverage-grcov.zip"
+ )
+ with zipfile.ZipFile(grcov_zip_path, "w", zipfile.ZIP_DEFLATED) as z:
+ z.write(grcov_output_file)
+
+ # Zip the JSVM coverage data and upload it.
+ jsvm_zip_path = os.path.join(
+ dirs["abs_blob_upload_dir"], "code-coverage-jsvm.zip"
+ )
+ with zipfile.ZipFile(jsvm_zip_path, "w", zipfile.ZIP_DEFLATED) as z:
+ z.write(jsvm_output_file)
+
+ shutil.rmtree(self.grcov_dir)
+
+ @PostScriptAction("run-tests")
+ def process_java_coverage_data(self, action, success=None):
+ """
+        Run JaCoCo on the coverage.ec file in order to get an XML report.
+        After that, run grcov on the XML report to get an lcov report.
+        Finally, archive the lcov file and upload it, as process_coverage_data does.
+ """
+ if not self.java_code_coverage_enabled:
+ return
+
+ # If the emulator became unresponsive, the task has failed and we don't
+ # have any coverage report file, so stop running this function and
+ # allow the task to be retried automatically.
+ if not success and not os.listdir(self.java_coverage_output_dir):
+ return
+
+ report_files = [
+ os.path.join(self.java_coverage_output_dir, f)
+ for f in os.listdir(self.java_coverage_output_dir)
+ ]
+ assert len(report_files) > 0, "JaCoCo coverage data files were not found."
+
+ dirs = self.query_abs_dirs()
+ xml_path = tempfile.mkdtemp()
+ jacoco_command = (
+ ["java", "-jar", self.jacoco_jar, "report"]
+ + report_files
+ + [
+ "--classfiles",
+ self.classfiles_dir,
+ "--name",
+ "geckoview-junit",
+ "--xml",
+ os.path.join(xml_path, "geckoview-junit.xml"),
+ ]
+ )
+ self.run_command(jacoco_command, halt_on_failure=True)
+
+ grcov_command = [
+ os.path.join(self.grcov_dir, self.grcov_bin),
+ "-t",
+ "lcov",
+ xml_path,
+ ]
+ tmp_output_file, _ = self.get_output_from_command(
+ grcov_command,
+ silent=True,
+ save_tmpfiles=True,
+ return_type="files",
+ throw_exception=True,
+ )
+
+ if not self.ccov_upload_disabled:
+ grcov_zip_path = os.path.join(
+ dirs["abs_blob_upload_dir"], "code-coverage-grcov.zip"
+ )
+ with zipfile.ZipFile(grcov_zip_path, "w", zipfile.ZIP_DEFLATED) as z:
+ z.write(tmp_output_file, "grcov_lcov_output.info")
+
+
+def rm_baseline_cov(baseline_coverage, test_coverage):
+ """
+ Returns the difference between test_coverage and
+ baseline_coverage, such that what is returned
+ is the unique coverage for the test in question.
+ """
+
+ # Get all files into a quicker search format
+ unique_test_coverage = test_coverage
+ baseline_files = {el["name"]: el for el in baseline_coverage["source_files"]}
+ test_files = {el["name"]: el for el in test_coverage["source_files"]}
+
+ # Perform the difference and find everything
+ # unique to the test.
+ unique_file_coverage = {}
+ for test_file in test_files:
+ if test_file not in baseline_files:
+ unique_file_coverage[test_file] = test_files[test_file]
+ continue
+
+ if len(test_files[test_file]["coverage"]) != len(
+ baseline_files[test_file]["coverage"]
+ ):
+ # File has line number differences due to gcov bug:
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=1410217
+ continue
+
+ # TODO: Attempt to rewrite this section to remove one of the two
+ # iterations over a test's source file's coverage for optimization.
+ # Bug 1460064 was filed for this.
+
+ # Get line numbers and the differences
+ file_coverage = {
+ i
+ for i, cov in enumerate(test_files[test_file]["coverage"])
+ if cov is not None and cov > 0
+ }
+
+ baseline = {
+ i
+ for i, cov in enumerate(baseline_files[test_file]["coverage"])
+ if cov is not None and cov > 0
+ }
+
+ unique_coverage = file_coverage - baseline
+
+ if len(unique_coverage) > 0:
+ unique_file_coverage[test_file] = test_files[test_file]
+
+        # Return the data to its original format, so the coverage is
+        # returned within the test_coverage data object.
+ fmt_unique_coverage = []
+ for i, cov in enumerate(unique_file_coverage[test_file]["coverage"]):
+ if cov is None:
+ fmt_unique_coverage.append(None)
+ continue
+
+ # TODO: Bug 1460061, determine if hit counts
+ # need to be considered.
+ if cov > 0:
+ # If there is a count
+ if i in unique_coverage:
+ # Only add the count if it's unique
+ fmt_unique_coverage.append(
+ unique_file_coverage[test_file]["coverage"][i]
+ )
+ continue
+ # Zero out everything that is not unique
+ fmt_unique_coverage.append(0)
+ unique_file_coverage[test_file]["coverage"] = fmt_unique_coverage
+
+ # Reformat to original test_coverage list structure
+ unique_test_coverage["source_files"] = list(unique_file_coverage.values())
+
+ return unique_test_coverage
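+
+
+# A toy illustration of rm_baseline_cov (hypothetical data, not real reports):
+#
+#     baseline = {"source_files": [{"name": "a.c", "coverage": [1, 0, None]}]}
+#     test = {"source_files": [{"name": "a.c", "coverage": [1, 5, None]}]}
+#     rm_baseline_cov(baseline, test)
+#     # -> {"source_files": [{"name": "a.c", "coverage": [0, 5, None]}]}
+#
+# Line 0 is also hit by the baseline, so its count is zeroed; line 1 is unique
+# to the test, so its count is kept. Note that the test_coverage argument is
+# modified in place.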
diff --git a/testing/mozharness/mozharness/mozilla/testing/errors.py b/testing/mozharness/mozharness/mozilla/testing/errors.py
new file mode 100644
index 0000000000..84c00b0a8b
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/errors.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Mozilla error lists for running tests.
+
+Error lists are used to parse output in mozharness.base.log.OutputParser.
+
+Each line of output is matched against each substring or regular expression
+in the error list. On a match, we determine the 'level' of that line,
+whether IGNORE, DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL.
+
+"""
+
+import re
+
+from mozharness.base.log import ERROR, INFO, WARNING
+
+# ErrorLists {{{1
+_mochitest_summary = {
+ "regex": re.compile(
+ r"""(\d+ INFO (Passed|Failed|Todo):\ +(\d+)|\t(Passed|Failed|Todo): (\d+))"""
+ ), # NOQA: E501
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": "Todo",
+}
+
+_reftest_summary = {
+ "regex": re.compile(
+ r"""REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \("""
+ ), # NOQA: E501
+ "pass_group": "Successful",
+ "fail_group": "Unexpected",
+ "known_fail_group": "Known problems",
+}
+
+TinderBoxPrintRe = {
+ "mochitest-chrome_summary": _mochitest_summary,
+ "mochitest-webgl1-core_summary": _mochitest_summary,
+ "mochitest-webgl1-ext_summary": _mochitest_summary,
+ "mochitest-webgl2-core_summary": _mochitest_summary,
+ "mochitest-webgl2-ext_summary": _mochitest_summary,
+ "mochitest-webgl2-deqp_summary": _mochitest_summary,
+ "mochitest-webgpu_summary": _mochitest_summary,
+ "mochitest-media_summary": _mochitest_summary,
+ "mochitest-plain_summary": _mochitest_summary,
+ "mochitest-plain-gpu_summary": _mochitest_summary,
+ "marionette_summary": {
+ "regex": re.compile(r"""(passed|failed|todo):\ +(\d+)"""),
+ "pass_group": "passed",
+ "fail_group": "failed",
+ "known_fail_group": "todo",
+ },
+ "reftest_summary": _reftest_summary,
+ "reftest-qr_summary": _reftest_summary,
+ "crashtest_summary": _reftest_summary,
+ "crashtest-qr_summary": _reftest_summary,
+ "xpcshell_summary": {
+ "regex": re.compile(r"""INFO \| (Passed|Failed|Todo): (\d+)"""),
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": "Todo",
+ },
+ "jsreftest_summary": _reftest_summary,
+ "instrumentation_summary": _mochitest_summary,
+ "cppunittest_summary": {
+ "regex": re.compile(r"""cppunittests INFO \| (Passed|Failed): (\d+)"""),
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": None,
+ },
+ "gtest_summary": {
+ "regex": re.compile(r"""(Passed|Failed): (\d+)"""),
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": None,
+ },
+ "jittest_summary": {
+ "regex": re.compile(r"""(Passed|Failed): (\d+)"""),
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": None,
+ },
+ "mozbase_summary": {
+ "regex": re.compile(r"""(OK)|(FAILED) \(errors=(\d+)"""),
+ "pass_group": "OK",
+ "fail_group": "FAILED",
+ "known_fail_group": None,
+ },
+ "geckoview_summary": {
+ "regex": re.compile(r"""(Passed|Failed): (\d+)"""),
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": None,
+ },
+ "geckoview-junit_summary": {
+ "regex": re.compile(r"""(Passed|Failed): (\d+)"""),
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": None,
+ },
+ "harness_error": {
+ "full_regex": re.compile(
+ r"(?:TEST-UNEXPECTED-FAIL|PROCESS-CRASH) \| .* \|[^\|]* (application crashed|missing output line for total leaks!|negative leaks caught!|\d+ bytes leaked)" # NOQA: E501
+ ),
+ "minimum_regex": re.compile(r"""(TEST-UNEXPECTED|PROCESS-CRASH)"""),
+ "retry_regex": re.compile(
+ r"""(FAIL-SHOULD-RETRY|No space left on device|ADBError|ADBProcessError|ADBTimeoutError|program finished with exit code 80|INFRA-ERROR)""" # NOQA: E501
+ ),
+ },
+}
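+
+# For example, the mochitest summary line "1559 INFO Passed: 1530" matches
+# _mochitest_summary["regex"]; group 2 names the bucket and group 3 carries
+# the count (a quick illustration, not part of the harness):
+#
+#     m = _mochitest_summary["regex"].search("1559 INFO Passed: 1530")
+#     assert (m.group(2), m.group(3)) == ("Passed", "1530")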
+
+TestPassed = [
+ {
+ "regex": re.compile("""(TEST-INFO|TEST-KNOWN-FAIL|TEST-PASS|INFO \| )"""),
+ "level": INFO,
+ },
+]
+
+BaseHarnessErrorList = [
+ {
+ "substr": "TEST-UNEXPECTED",
+ "level": ERROR,
+ },
+ {
+ "substr": "PROCESS-CRASH",
+ "level": ERROR,
+ },
+ {
+ "regex": re.compile("""ERROR: (Address|Leak)Sanitizer"""),
+ "level": ERROR,
+ },
+ {
+ "regex": re.compile("""thread '([^']+)' panicked"""),
+ "level": ERROR,
+ },
+ {
+ "substr": "pure virtual method called",
+ "level": ERROR,
+ },
+ {
+ "substr": "Pure virtual function called!",
+ "level": ERROR,
+ },
+]
+
+HarnessErrorList = BaseHarnessErrorList + [
+ {
+ "substr": "A content process crashed",
+ "level": ERROR,
+ },
+]
+
+# wpt can have expected crashes so we can't always turn treeherder orange in those cases
+WptHarnessErrorList = BaseHarnessErrorList
+
+LogcatErrorList = [
+ {
+ "substr": "Fatal signal 11 (SIGSEGV)",
+ "level": ERROR,
+ "explanation": "This usually indicates the B2G process has crashed",
+ },
+ {
+ "substr": "Fatal signal 7 (SIGBUS)",
+ "level": ERROR,
+ "explanation": "This usually indicates the B2G process has crashed",
+ },
+ {"substr": "[JavaScript Error:", "level": WARNING},
+ {
+ "substr": "seccomp sandbox violation",
+ "level": ERROR,
+ "explanation": "A content process has violated the system call sandbox (bug 790923)",
+ },
+]
diff --git a/testing/mozharness/mozharness/mozilla/testing/per_test_base.py b/testing/mozharness/mozharness/mozilla/testing/per_test_base.py
new file mode 100644
index 0000000000..8e83643142
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/per_test_base.py
@@ -0,0 +1,540 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import itertools
+import json
+import math
+import os
+import posixpath
+import sys
+
+import mozinfo
+from manifestparser import TestManifest
+
+
+class SingleTestMixin(object):
+ """Utility functions for per-test testing like test verification and per-test coverage."""
+
+ def __init__(self, **kwargs):
+ super(SingleTestMixin, self).__init__(**kwargs)
+
+ self.suites = {}
+ self.tests_downloaded = False
+ self.reftest_test_dir = None
+ self.jsreftest_test_dir = None
+ # Map from full test path on the test machine to a relative path in the source checkout.
+ # Use self._map_test_path_to_source(test_machine_path, source_path) to add a mapping.
+ self.test_src_path = {}
+ self.per_test_log_index = 1
+
+ def _map_test_path_to_source(self, test_machine_path, source_path):
+ test_machine_path = test_machine_path.replace(os.sep, posixpath.sep)
+ source_path = source_path.replace(os.sep, posixpath.sep)
+ self.test_src_path[test_machine_path] = source_path
+
+ def _is_gpu_suite(self, suite):
+ if suite and (suite == "gpu" or suite.startswith("webgl")):
+ return True
+ return False
+
+ def _find_misc_tests(self, dirs, changed_files, gpu=False):
+ manifests = [
+ (
+ os.path.join(dirs["abs_mochitest_dir"], "tests", "mochitest.ini"),
+ "mochitest-plain",
+ ),
+ (
+ os.path.join(dirs["abs_mochitest_dir"], "chrome", "chrome.ini"),
+ "mochitest-chrome",
+ ),
+ (
+ os.path.join(
+ dirs["abs_mochitest_dir"], "browser", "browser-chrome.ini"
+ ),
+ "mochitest-browser-chrome",
+ ),
+ (
+ os.path.join(dirs["abs_mochitest_dir"], "a11y", "a11y.ini"),
+ "mochitest-a11y",
+ ),
+ (
+ os.path.join(dirs["abs_xpcshell_dir"], "tests", "xpcshell.ini"),
+ "xpcshell",
+ ),
+ ]
+ is_fission = "fission.autostart=true" in self.config.get("extra_prefs", [])
+ tests_by_path = {}
+ all_disabled = []
+ for (path, suite) in manifests:
+ if os.path.exists(path):
+ man = TestManifest([path], strict=False)
+ active = man.active_tests(
+ exists=False, disabled=True, filters=[], **mozinfo.info
+ )
+ # Remove disabled tests. Also, remove tests with the same path as
+ # disabled tests, even if they are not disabled, since per-test mode
+ # specifies tests by path (it cannot distinguish between two or more
+ # tests with the same path specified in multiple manifests).
+ disabled = [t["relpath"] for t in active if "disabled" in t]
+ all_disabled += disabled
+ new_by_path = {
+ t["relpath"]: (suite, t.get("subsuite"), None)
+ for t in active
+ if "disabled" not in t and t["relpath"] not in disabled
+ }
+ tests_by_path.update(new_by_path)
+ self.info(
+ "Per-test run updated with manifest %s (%d active, %d skipped)"
+ % (path, len(new_by_path), len(disabled))
+ )
+
+ ref_manifests = [
+ (
+ os.path.join(
+ dirs["abs_reftest_dir"],
+ "tests",
+ "layout",
+ "reftests",
+ "reftest.list",
+ ),
+ "reftest",
+ "gpu",
+ ), # gpu
+ (
+ os.path.join(
+ dirs["abs_reftest_dir"],
+ "tests",
+ "testing",
+ "crashtest",
+ "crashtests.list",
+ ),
+ "crashtest",
+ None,
+ ),
+ ]
+ sys.path.append(dirs["abs_reftest_dir"])
+ import manifest
+
+ self.reftest_test_dir = os.path.join(dirs["abs_reftest_dir"], "tests")
+ for (path, suite, subsuite) in ref_manifests:
+ if os.path.exists(path):
+ man = manifest.ReftestManifest()
+ man.load(path)
+ for t in man.tests:
+ relpath = os.path.relpath(t["path"], self.reftest_test_dir)
+ referenced = (
+ t["referenced-test"] if "referenced-test" in t else None
+ )
+ tests_by_path[relpath] = (suite, subsuite, referenced)
+ self._map_test_path_to_source(t["path"], relpath)
+ self.info(
+ "Per-test run updated with manifest %s (%d tests)"
+ % (path, len(man.tests))
+ )
+
+ suite = "jsreftest"
+ self.jsreftest_test_dir = os.path.join(
+ dirs["abs_test_install_dir"], "jsreftest", "tests"
+ )
+ path = os.path.join(self.jsreftest_test_dir, "jstests.list")
+ if os.path.exists(path):
+ man = manifest.ReftestManifest()
+ man.load(path)
+ for t in man.files:
+ # expect manifest test to look like:
+ # ".../tests/jsreftest/tests/jsreftest.html?test=test262/.../some_test.js"
+ # while the test is in mercurial at:
+ # js/src/tests/test262/.../some_test.js
+ epos = t.find("=")
+ if epos > 0:
+ relpath = t[epos + 1 :]
+ test_path = os.path.join(self.jsreftest_test_dir, relpath)
+ relpath = os.path.join("js", "src", "tests", relpath)
+ self._map_test_path_to_source(test_path, relpath)
+ tests_by_path.update({relpath: (suite, None, None)})
+ else:
+ self.warning("unexpected jsreftest test format: %s" % str(t))
+ self.info(
+ "Per-test run updated with manifest %s (%d tests)"
+ % (path, len(man.files))
+ )
+
+ # for each changed file, determine if it is a test file, and what suite it is in
+ for file in changed_files:
+ # manifest paths use os.sep (like backslash on Windows) but
+ # automation-relevance uses posixpath.sep
+ file = file.replace(posixpath.sep, os.sep)
+ entry = tests_by_path.get(file)
+ if not entry:
+ if file in all_disabled:
+ self.info("'%s' has been skipped on this platform." % file)
+ if os.environ.get("MOZHARNESS_TEST_PATHS", None) is not None:
+ self.info("Per-test run could not find requested test '%s'" % file)
+ continue
+
+ if gpu and not self._is_gpu_suite(entry[1]):
+ self.info(
+ "Per-test run (gpu) discarded non-gpu test %s (%s)"
+ % (file, entry[1])
+ )
+ continue
+ elif not gpu and self._is_gpu_suite(entry[1]):
+ self.info(
+ "Per-test run (non-gpu) discarded gpu test %s (%s)"
+ % (file, entry[1])
+ )
+ continue
+
+ if is_fission and (
+ (entry[0] == "mochitest-a11y") or (entry[0] == "mochitest-chrome")
+ ):
+ self.info(
+ "Per-test run (fission) discarded non-e10s test %s (%s)"
+ % (file, entry[0])
+ )
+ continue
+
+ if entry[2] is not None and "about:" not in entry[2]:
+ # Test name substitution, for reftest reference file handling:
+ # - if both test and reference modified, run the test file
+ # - if only reference modified, run the test file
+ test_file = os.path.join(
+ os.path.dirname(file), os.path.basename(entry[2])
+ )
+ self.info("Per-test run substituting %s for %s" % (test_file, file))
+ file = test_file
+
+ self.info("Per-test run found test %s (%s/%s)" % (file, entry[0], entry[1]))
+ subsuite_mapping = {
+ # Map (<suite>, <subsuite>): <full-suite>
+ # <suite> is associated with a manifest, explicitly in code above
+ # <subsuite> comes from "subsuite" tags in some manifest entries
+ # <full-suite> is a unique id for the suite, matching desktop mozharness configs
+ (
+ "mochitest-browser-chrome",
+ "a11y",
+ None,
+ ): "mochitest-browser-a11y",
+ (
+ "mochitest-browser-chrome",
+ "media-bc",
+ None,
+ ): "mochitest-browser-media",
+ (
+ "mochitest-browser-chrome",
+ "devtools",
+ None,
+ ): "mochitest-devtools-chrome",
+ ("mochitest-browser-chrome", "remote", None): "mochitest-remote",
+ (
+ "mochitest-browser-chrome",
+ "screenshots",
+ None,
+ ): "mochitest-browser-chrome-screenshots", # noqa
+ ("mochitest-plain", "media", None): "mochitest-media",
+ # below should be on test-verify-gpu job
+ ("mochitest-chrome", "gpu", None): "mochitest-chrome-gpu",
+ ("mochitest-plain", "gpu", None): "mochitest-plain-gpu",
+ ("mochitest-plain", "webgl1-core", None): "mochitest-webgl1-core",
+ ("mochitest-plain", "webgl1-ext", None): "mochitest-webgl1-ext",
+ ("mochitest-plain", "webgl2-core", None): "mochitest-webgl2-core",
+ ("mochitest-plain", "webgl2-ext", None): "mochitest-webgl2-ext",
+ ("mochitest-plain", "webgl2-deqp", None): "mochitest-webgl2-deqp",
+ ("mochitest-plain", "webgpu", None): "mochitest-webgpu",
+ }
+ if entry in subsuite_mapping:
+ suite = subsuite_mapping[entry]
+ else:
+ suite = entry[0]
+ suite_files = self.suites.get(suite)
+ if not suite_files:
+ suite_files = []
+ if file not in suite_files:
+ suite_files.append(file)
+ self.suites[suite] = suite_files
+
+ def _find_wpt_tests(self, dirs, changed_files):
+ # Setup sys.path to include all the dependencies required to import
+ # the web-platform-tests manifest parser. web-platform-tests provides
+ # the localpaths.py to do the path manipulation, which we load,
+ # providing the __file__ variable so it can resolve the relative
+ # paths correctly.
+ paths_file = os.path.join(
+ dirs["abs_wpttest_dir"], "tests", "tools", "localpaths.py"
+ )
+ with open(paths_file, "r") as f:
+ exec(f.read(), {"__file__": paths_file})
+ import manifest as wptmanifest
+
+ tests_root = os.path.join(dirs["abs_wpttest_dir"], "tests")
+
+ for extra in ("", "mozilla"):
+ base_path = os.path.join(dirs["abs_wpttest_dir"], extra)
+ man_path = os.path.join(base_path, "meta", "MANIFEST.json")
+ man = wptmanifest.manifest.load(tests_root, man_path)
+ self.info("Per-test run updated with manifest %s" % man_path)
+
+ repo_tests_path = os.path.join("testing", "web-platform", extra, "tests")
+ tests_path = os.path.join("tests", "web-platform", extra, "tests")
+ for (type, path, test) in man:
+ if type not in ["testharness", "reftest", "wdspec"]:
+ continue
+ repo_path = os.path.join(repo_tests_path, path)
+ # manifest paths use os.sep (like backslash on Windows) but
+ # automation-relevance uses posixpath.sep
+ repo_path = repo_path.replace(os.sep, posixpath.sep)
+ if repo_path in changed_files:
+ self.info(
+ "Per-test run found web-platform test '%s', type %s"
+ % (path, type)
+ )
+ suite_files = self.suites.get(type)
+ if not suite_files:
+ suite_files = []
+ test_path = os.path.join(tests_path, path)
+ suite_files.append(test_path)
+ self.suites[type] = suite_files
+ self._map_test_path_to_source(test_path, repo_path)
+ changed_files.remove(repo_path)
+
+ if os.environ.get("MOZHARNESS_TEST_PATHS", None) is not None:
+ for file in changed_files:
+ self.info(
+ "Per-test run could not find requested web-platform test '%s'"
+ % file
+ )
+
+ def find_modified_tests(self):
+ """
+ For each file modified on this push, determine if the modified file
+ is a test, by searching test manifests. Populate self.suites
+ with test files, organized by suite.
+
+ This depends on test manifests, so can only run after test zips have
+ been downloaded and extracted.
+ """
+ repository = os.environ.get("GECKO_HEAD_REPOSITORY")
+ revision = os.environ.get("GECKO_HEAD_REV")
+ if not repository or not revision:
+ self.warning("unable to run tests in per-test mode: no repo or revision!")
+ self.suites = {}
+ self.tests_downloaded = True
+ return
+
+ def get_automationrelevance():
+ response = self.load_json_url(url)
+ return response
+
+ dirs = self.query_abs_dirs()
+ mozinfo.find_and_update_from_json(dirs["abs_test_install_dir"])
+ e10s = self.config.get("e10s", False)
+ mozinfo.update({"e10s": e10s})
+ is_fission = "fission.autostart=true" in self.config.get("extra_prefs", [])
+ mozinfo.update({"fission": is_fission})
+ headless = self.config.get("headless", False)
+ mozinfo.update({"headless": headless})
+ if mozinfo.info["buildapp"] == "mobile/android":
+ # extra android mozinfo normally comes from device queries, but this
+ # code may run before the device is ready, so rely on configuration
+ mozinfo.update(
+ {"android_version": str(self.config.get("android_version", 24))}
+ )
+ mozinfo.update({"is_emulator": self.config.get("is_emulator", True)})
+ mozinfo.update({"verify": True})
+ self.info("Per-test run using mozinfo: %s" % str(mozinfo.info))
+
+ # determine which files were changed on this push
+ changed_files = set()
+ url = "%s/json-automationrelevance/%s" % (repository.rstrip("/"), revision)
+ contents = self.retry(get_automationrelevance, attempts=2, sleeptime=10)
+ for c in contents["changesets"]:
+ self.info(
+ " {cset} {desc}".format(
+ cset=c["node"][0:12],
+ desc=c["desc"].splitlines()[0].encode("ascii", "ignore"),
+ )
+ )
+ changed_files |= set(c["files"])
+ changed_files = list(changed_files)
+
+ # check specified test paths, as from 'mach try ... <path>'
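+        # MOZHARNESS_TEST_PATHS is a JSON mapping of suite to requested paths;
+        # an illustrative (hypothetical) value:
+        #   {"mochitest-plain": ["dom/base/test"], "xpcshell": ["netwerk/test"]}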
+ if os.environ.get("MOZHARNESS_TEST_PATHS", None) is not None:
+ suite_to_paths = json.loads(os.environ["MOZHARNESS_TEST_PATHS"])
+ specified_paths = itertools.chain.from_iterable(suite_to_paths.values())
+ specified_paths = list(specified_paths)
+ # filter the list of changed files to those found under the
+ # specified path(s)
+ changed_and_specified = set()
+ for changed in changed_files:
+ for specified in specified_paths:
+ if changed.startswith(specified):
+ changed_and_specified.add(changed)
+ break
+ if changed_and_specified:
+ changed_files = changed_and_specified
+ else:
+ # if specified paths do not match changed files, assume the
+ # specified paths are explicitly requested tests
+ changed_files = set()
+ changed_files.update(specified_paths)
+ self.info("Per-test run found explicit request in MOZHARNESS_TEST_PATHS:")
+ self.info(str(changed_files))
+
+ if self.config.get("per_test_category") == "web-platform":
+ self._find_wpt_tests(dirs, changed_files)
+ elif self.config.get("gpu_required", False) is not False:
+ self._find_misc_tests(dirs, changed_files, gpu=True)
+ else:
+ self._find_misc_tests(dirs, changed_files)
+
+        # In per-test mode we run specific tests from any given test suite;
+        # _find_*_tests organizes the tests to run into suites so we can
+        # run one suite at a time
+
+ # chunk files
+ total_tests = sum([len(self.suites[x]) for x in self.suites])
+
+ if total_tests == 0:
+ self.warning("No tests to verify.")
+ self.suites = {}
+ self.tests_downloaded = True
+ return
+
+ files_per_chunk = total_tests / float(self.config.get("total_chunks", 1))
+ files_per_chunk = int(math.ceil(files_per_chunk))
+
+ chunk_number = int(self.config.get("this_chunk", 1))
+ suites = {}
+ start = (chunk_number - 1) * files_per_chunk
+ end = chunk_number * files_per_chunk
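+        # Worked example: 10 tests with total_chunks=3 gives
+        # files_per_chunk=ceil(10/3)=4; this_chunk=2 then selects test
+        # indices 4..7 (start=4, end=8).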
+ current = -1
+ for suite in self.suites:
+ for test in self.suites[suite]:
+ current += 1
+ if current >= start and current < end:
+ if suite not in suites:
+ suites[suite] = []
+ suites[suite].append(test)
+ if current >= end:
+ break
+
+ self.suites = suites
+ self.tests_downloaded = True
+
+ def query_args(self, suite):
+ """
+ For the specified suite, return an array of command line arguments to
+ be passed to test harnesses when running in per-test mode.
+
+ Each array element is an array of command line arguments for a modified
+ test in the suite.
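+
+        Illustrative shape of the return value, with a hypothetical path and
+        whatever coverage/verify flags are active:
+            [[<flags...>, "tests/reftest/tests/layout/reftests/a.html"]]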
+ """
+ # not in verify or per-test coverage mode: run once, with no additional args
+ if not self.per_test_coverage and not self.verify_enabled:
+ return [[]]
+
+ files = []
+ jsreftest_extra_dir = os.path.join("js", "src", "tests")
+ # For some suites, the test path needs to be updated before passing to
+ # the test harness.
+ for file in self.suites.get(suite):
+ if self.config.get("per_test_category") != "web-platform" and suite in [
+ "reftest",
+ "crashtest",
+ ]:
+ file = os.path.join(self.reftest_test_dir, file)
+ elif (
+ self.config.get("per_test_category") != "web-platform"
+ and suite == "jsreftest"
+ ):
+ file = os.path.relpath(file, jsreftest_extra_dir)
+ file = os.path.join(self.jsreftest_test_dir, file)
+
+ if file is None:
+ continue
+
+ file = file.replace(os.sep, posixpath.sep)
+ files.append(file)
+
+ self.info("Per-test file(s) for '%s': %s" % (suite, files))
+
+ args = []
+ for file in files:
+ cur = []
+
+ cur.extend(self.coverage_args)
+ cur.extend(self.verify_args)
+
+ cur.append(file)
+ args.append(cur)
+
+ return args
+
+ def query_per_test_category_suites(self, category, all_suites):
+ """
+ In per-test mode, determine which suites are active, for the given
+ suite category.
+ """
+ suites = None
+ if self.verify_enabled or self.per_test_coverage:
+ if self.config.get("per_test_category") == "web-platform":
+ suites = list(self.suites)
+ self.info("Per-test suites: %s" % suites)
+ elif all_suites and self.tests_downloaded:
+ suites = dict(
+ (key, all_suites.get(key))
+ for key in self.suites
+ if key in all_suites.keys()
+ )
+ self.info("Per-test suites: %s" % suites)
+ else:
+ # Until test zips are downloaded, manifests are not available,
+ # so it is not possible to determine which suites are active/
+ # required for per-test mode; assume all suites from supported
+ # suite categories are required.
+ if category in ["mochitest", "xpcshell", "reftest"]:
+ suites = all_suites
+ return suites
+
+ def log_per_test_status(self, test_name, tbpl_status, log_level):
+ """
+ Log status of a single test. This will display in the
+ Job Details pane in treeherder - a convenient summary of per-test mode.
+ Special test name formatting is needed because treeherder truncates
+ lines that are too long, and may remove duplicates after truncation.
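+        For example (hypothetical path), "dom/html/test/forms/test_input_number.html"
+        would be logged as ".../test/forms/test_input_number.html".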
+ """
+ max_test_name_len = 40
+ if len(test_name) > max_test_name_len:
+ head = test_name
+ new = ""
+ previous = None
+ max_test_name_len = max_test_name_len - len(".../")
+ while len(new) < max_test_name_len:
+ head, tail = os.path.split(head)
+ previous = new
+ new = os.path.join(tail, new)
+ test_name = os.path.join("...", previous or new)
+ test_name = test_name.rstrip(os.path.sep)
+ self.log(
+ "TinderboxPrint: Per-test run of %s<br/>: %s" % (test_name, tbpl_status),
+ level=log_level,
+ )
+
+ def get_indexed_logs(self, dir, test_suite):
+ """
+ Per-test tasks need distinct file names for the raw and errorsummary logs
+ on each run.
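+
+        Illustrative (assuming the per-test index starts at 0): for test_suite
+        "mochitest-plain" this yields "mochitest-plain-test0_raw.log" and
+        "mochitest-plain-test0_errorsummary.log".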
+ """
+ index = ""
+ if self.verify_enabled or self.per_test_coverage:
+ index = "-test%d" % self.per_test_log_index
+ self.per_test_log_index += 1
+ raw_log_file = os.path.join(dir, "%s%s_raw.log" % (test_suite, index))
+ error_summary_file = os.path.join(
+ dir, "%s%s_errorsummary.log" % (test_suite, index)
+ )
+ return raw_log_file, error_summary_file
diff --git a/testing/mozharness/mozharness/mozilla/testing/raptor.py b/testing/mozharness/mozharness/mozilla/testing/raptor.py
new file mode 100644
index 0000000000..ceb97da963
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/raptor.py
@@ -0,0 +1,1478 @@
+#!/usr/bin/env python
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import copy
+import glob
+import multiprocessing
+import os
+import pathlib
+import re
+import subprocess
+import sys
+import tempfile
+from shutil import copyfile, rmtree
+
+from six import string_types
+
+import mozharness
+from mozharness.base.errors import PythonErrorList
+from mozharness.base.log import CRITICAL, DEBUG, ERROR, INFO, OutputParser
+from mozharness.base.python import Python3Virtualenv
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.automation import (
+ EXIT_STATUS_DICT,
+ TBPL_RETRY,
+ TBPL_SUCCESS,
+ TBPL_WORST_LEVEL_TUPLE,
+)
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.errors import HarnessErrorList, TinderBoxPrintRe
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+
+scripts_path = os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__)))
+external_tools_path = os.path.join(scripts_path, "external_tools")
+here = os.path.abspath(os.path.dirname(__file__))
+
+RaptorErrorList = (
+ PythonErrorList
+ + HarnessErrorList
+ + [
+ {"regex": re.compile(r"""run-as: Package '.*' is unknown"""), "level": DEBUG},
+ {"substr": r"""raptorDebug""", "level": DEBUG},
+ {
+ "regex": re.compile(r"""^raptor[a-zA-Z-]*( - )?( )?(?i)error(:)?"""),
+ "level": ERROR,
+ },
+ {
+ "regex": re.compile(r"""^raptor[a-zA-Z-]*( - )?( )?(?i)critical(:)?"""),
+ "level": CRITICAL,
+ },
+ {
+ "regex": re.compile(r"""No machine_name called '.*' can be found"""),
+ "level": CRITICAL,
+ },
+ {
+ "substr": r"""No such file or directory: 'browser_output.txt'""",
+ "level": CRITICAL,
+ "explanation": "Most likely the browser failed to launch, or the test otherwise "
+ "failed to start.",
+ },
+ ]
+)
+
+# When running raptor locally, we can attempt to make use of
+# the user's locally cached ffmpeg binary from when the user
+# ran `./mach browsertime --setup`
+FFMPEG_LOCAL_CACHE = {
+ "mac": "ffmpeg-macos",
+ "linux": "ffmpeg-4.4.1-i686-static",
+ "win": "ffmpeg-4.4.1-full_build",
+}
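+# For example, on macOS the cached binary directory would resolve to
+# (illustrative) <mozbuild_path>/browsertime/ffmpeg-macos.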
+
+
+class Raptor(
+ TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Python3Virtualenv
+):
+ """
+ Install and run Raptor tests
+ """
+
+ # Options to Browsertime. Paths are expected to be absolute.
+ browsertime_options = [
+ [
+ ["--browsertime-node"],
+ {"dest": "browsertime_node", "default": None, "help": argparse.SUPPRESS},
+ ],
+ [
+ ["--browsertime-browsertimejs"],
+ {
+ "dest": "browsertime_browsertimejs",
+ "default": None,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime-vismet-script"],
+ {
+ "dest": "browsertime_vismet_script",
+ "default": None,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime-chromedriver"],
+ {
+ "dest": "browsertime_chromedriver",
+ "default": None,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime-ffmpeg"],
+ {"dest": "browsertime_ffmpeg", "default": None, "help": argparse.SUPPRESS},
+ ],
+ [
+ ["--browsertime-geckodriver"],
+ {
+ "dest": "browsertime_geckodriver",
+ "default": None,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime-video"],
+ {
+ "dest": "browsertime_video",
+ "action": "store_true",
+ "default": False,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime-visualmetrics"],
+ {
+ "dest": "browsertime_visualmetrics",
+ "action": "store_true",
+ "default": False,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime-no-ffwindowrecorder"],
+ {
+ "dest": "browsertime_no_ffwindowrecorder",
+ "action": "store_true",
+ "default": False,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime-arg"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "browsertime_user_args",
+ "default": [],
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime"],
+ {
+ "dest": "browsertime",
+ "action": "store_true",
+ "default": True,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ ]
+
+ config_options = (
+ [
+ [
+ ["--test"],
+ {"action": "store", "dest": "test", "help": "Raptor test to run"},
+ ],
+ [
+ ["--app"],
+ {
+ "default": "firefox",
+ "choices": [
+ "firefox",
+ "chrome",
+ "chrome-m",
+ "chromium",
+ "fennec",
+ "geckoview",
+ "refbrow",
+ "fenix",
+ "safari",
+ "custom-car",
+ ],
+ "dest": "app",
+ "help": "Name of the application we are testing (default: firefox).",
+ },
+ ],
+ [
+ ["--activity"],
+ {
+ "dest": "activity",
+ "help": "The Android activity used to launch the Android app. "
+ "e.g.: org.mozilla.fenix.browser.BrowserPerformanceTestActivity",
+ },
+ ],
+ [
+ ["--intent"],
+ {
+ "dest": "intent",
+ "help": "Name of the Android intent action used to launch the Android app",
+ },
+ ],
+ [
+ ["--is-release-build"],
+ {
+ "action": "store_true",
+ "dest": "is_release_build",
+ "help": "Whether the build is a release build which requires work arounds "
+ "using MOZ_DISABLE_NONLOCAL_CONNECTIONS to support installing unsigned "
+ "webextensions. Defaults to False.",
+ },
+ ],
+ [
+ ["--add-option"],
+ {
+ "action": "extend",
+ "dest": "raptor_cmd_line_args",
+ "default": None,
+ "help": "Extra options to Raptor.",
+ },
+ ],
+ [
+ ["--device-name"],
+ {
+ "dest": "device_name",
+ "default": None,
+ "help": "Device name of mobile device.",
+ },
+ ],
+ [
+ ["--geckoProfile"],
+ {
+ "dest": "gecko_profile",
+ "action": "store_true",
+ "default": False,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--geckoProfileInterval"],
+ {
+ "dest": "gecko_profile_interval",
+ "type": "int",
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--geckoProfileEntries"],
+ {
+ "dest": "gecko_profile_entries",
+ "type": "int",
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--geckoProfileFeatures"],
+ {
+ "dest": "gecko_profile_features",
+ "type": "str",
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--gecko-profile"],
+ {
+ "dest": "gecko_profile",
+ "action": "store_true",
+ "default": False,
+ "help": "Whether to profile the test run and save the profile results.",
+ },
+ ],
+ [
+ ["--gecko-profile-interval"],
+ {
+ "dest": "gecko_profile_interval",
+ "type": "int",
+ "help": "The interval between samples taken by the profiler (ms).",
+ },
+ ],
+ [
+ ["--gecko-profile-entries"],
+ {
+ "dest": "gecko_profile_entries",
+ "type": "int",
+ "help": "How many samples to take with the profiler.",
+ },
+ ],
+ [
+ ["--gecko-profile-threads"],
+ {
+ "dest": "gecko_profile_threads",
+ "type": "str",
+ "help": "Comma-separated list of threads to sample.",
+ },
+ ],
+ [
+ ["--gecko-profile-features"],
+ {
+ "dest": "gecko_profile_features",
+ "type": "str",
+ "help": "Features to enable in the profiler.",
+ },
+ ],
+ [
+ ["--extra-profiler-run"],
+ {
+ "dest": "extra_profiler_run",
+ "action": "store_true",
+ "default": False,
+ "help": "Run the tests again with profiler enabled after the main run.",
+ },
+ ],
+ [
+ ["--page-cycles"],
+ {
+ "dest": "page_cycles",
+ "type": "int",
+ "help": (
+ "How many times to repeat loading the test page (for page load "
+ "tests); for benchmark tests this is how many times the benchmark test "
+ "will be run."
+ ),
+ },
+ ],
+ [
+ ["--page-timeout"],
+ {
+ "dest": "page_timeout",
+ "type": "int",
+ "help": "How long to wait (ms) for one page_cycle to complete, before timing out.", # NOQA: E501
+ },
+ ],
+ [
+ ["--browser-cycles"],
+ {
+ "dest": "browser_cycles",
+ "type": "int",
+ "help": (
+ "The number of times a cold load test is repeated (for cold load tests "
+ "only, where the browser is shutdown and restarted between test "
+ "iterations)."
+ ),
+ },
+ ],
+ [
+ ["--project"],
+ {
+ "action": "store",
+ "dest": "project",
+ "default": "mozilla-central",
+ "type": "str",
+ "help": "Name of the project (try, mozilla-central, etc.)",
+ },
+ ],
+ [
+ ["--test-url-params"],
+ {
+ "action": "store",
+ "dest": "test_url_params",
+ "help": "Parameters to add to the test_url query string.",
+ },
+ ],
+ [
+ ["--host"],
+ {
+ "dest": "host",
+ "type": "str",
+ "default": "127.0.0.1",
+ "help": "Hostname from which to serve urls (default: 127.0.0.1). "
+ "The value HOST_IP will cause the value of host to be "
+ "to be loaded from the environment variable HOST_IP.",
+ },
+ ],
+ [
+ ["--power-test"],
+ {
+ "dest": "power_test",
+ "action": "store_true",
+ "default": False,
+ "help": (
+ "Use Raptor to measure power usage on Android browsers (Geckoview "
+ "Example, Fenix, Refbrow, and Fennec) as well as on Intel-based MacOS "
+ "machines that have Intel Power Gadget installed."
+ ),
+ },
+ ],
+ [
+ ["--memory-test"],
+ {
+ "dest": "memory_test",
+ "action": "store_true",
+ "default": False,
+ "help": "Use Raptor to measure memory usage.",
+ },
+ ],
+ [
+ ["--cpu-test"],
+ {
+ "dest": "cpu_test",
+ "action": "store_true",
+ "default": False,
+ "help": "Use Raptor to measure CPU usage.",
+ },
+ ],
+ [
+ ["--disable-perf-tuning"],
+ {
+ "action": "store_true",
+ "dest": "disable_perf_tuning",
+ "default": False,
+ "help": "Disable performance tuning on android.",
+ },
+ ],
+ [
+ ["--conditioned-profile"],
+ {
+ "dest": "conditioned_profile",
+ "type": "str",
+ "default": None,
+ "help": (
+ "Name of conditioned profile to use. Prefix with `artifact:` "
+ "if we should obtain the profile from CI.",
+ ),
+ },
+ ],
+ [
+ ["--live-sites"],
+ {
+ "dest": "live_sites",
+ "action": "store_true",
+ "default": False,
+ "help": "Run tests using live sites instead of recorded sites.",
+ },
+ ],
+ [
+ ["--test-bytecode-cache"],
+ {
+ "dest": "test_bytecode_cache",
+ "action": "store_true",
+ "default": False,
+ "help": (
+ "If set, the pageload test will set the preference "
+ "`dom.script_loader.bytecode_cache.strategy=-1` and wait 20 seconds "
+ "after the first cold pageload to populate the bytecode cache before "
+ "running a warm pageload test. Only available if `--chimera` "
+ "is also provided."
+ ),
+ },
+ ],
+ [
+ ["--chimera"],
+ {
+ "dest": "chimera",
+ "action": "store_true",
+ "default": False,
+ "help": "Run tests in chimera mode. Each browser cycle will run a cold and warm test.", # NOQA: E501
+ },
+ ],
+ [
+ ["--debug-mode"],
+ {
+ "dest": "debug_mode",
+ "action": "store_true",
+ "default": False,
+ "help": "Run Raptor in debug mode (open browser console, limited page-cycles, etc.)", # NOQA: E501
+ },
+ ],
+ [
+ ["--noinstall"],
+ {
+ "dest": "noinstall",
+ "action": "store_true",
+ "default": False,
+ "help": "Do not offer to install Android APK.",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "dest": "e10s",
+ "action": "store_false",
+ "default": True,
+ "help": "Run without multiple processes (e10s).",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_false",
+ "dest": "fission",
+ "default": True,
+ "help": "Disable Fission (site isolation) in Gecko.",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Set a browser preference. May be used multiple times.",
+ },
+ ],
+ [
+ ["--setenv"],
+ {
+ "action": "append",
+ "metavar": "NAME=VALUE",
+ "dest": "environment",
+ "default": [],
+ "help": "Set a variable in the test environment. May be used multiple times.",
+ },
+ ],
+ [
+ ["--skip-preflight"],
+ {
+ "action": "store_true",
+ "dest": "skip_preflight",
+ "default": False,
+ "help": "skip preflight commands to prepare machine.",
+ },
+ ],
+ [
+ ["--cold"],
+ {
+ "action": "store_true",
+ "dest": "cold",
+ "default": False,
+ "help": "Enable cold page-load for browsertime tp6",
+ },
+ ],
+ [
+ ["--verbose"],
+ {
+ "action": "store_true",
+ "dest": "verbose",
+ "default": False,
+ "help": "Verbose output",
+ },
+ ],
+ [
+ ["--enable-marionette-trace"],
+ {
+ "action": "store_true",
+ "dest": "enable_marionette_trace",
+ "default": False,
+ "help": "Enable marionette tracing",
+ },
+ ],
+ [
+ ["--clean"],
+ {
+ "action": "store_true",
+ "dest": "clean",
+ "default": False,
+ "help": (
+ "Clean the python virtualenv (remove, and rebuild) for "
+ "Raptor before running tests."
+ ),
+ },
+ ],
+ [
+ ["--webext"],
+ {
+ "action": "store_true",
+ "dest": "webext",
+ "default": False,
+ "help": (
+ "Whether to use webextension to execute pageload tests "
+ "(WebExtension is being deprecated).",
+ ),
+ },
+ ],
+ [
+ ["--collect-perfstats"],
+ {
+ "action": "store_true",
+ "dest": "collect_perfstats",
+ "default": False,
+ "help": (
+ "If set, the test will collect perfstats in addition to "
+ "the regular metrics it gathers."
+ ),
+ },
+ ],
+ [
+ ["--extra-summary-methods"],
+ {
+ "action": "append",
+ "metavar": "OPTION",
+ "dest": "extra_summary_methods",
+ "default": [],
+ "help": (
+ "Alternative methods for summarizing technical and visual"
+ "pageload metrics."
+ "Options: geomean, mean."
+ ),
+ },
+ ],
+ [
+ ["--benchmark-repository"],
+ {
+ "dest": "benchmark_repository",
+ "type": "str",
+ "default": None,
+ "help": (
+ "Repository that should be used for a particular benchmark test. "
+ "e.g. https://github.com/mozilla-mobile/firefox-android"
+ ),
+ },
+ ],
+ [
+ ["--benchmark-revision"],
+ {
+ "dest": "benchmark_revision",
+ "type": "str",
+ "default": None,
+ "help": (
+ "Repository revision that should be used for a particular "
+ "benchmark test."
+ ),
+ },
+ ],
+ [
+ ["--benchmark-branch"],
+ {
+ "dest": "benchmark_branch",
+ "type": "str",
+ "default": None,
+ "help": (
+ "Repository branch that should be used for a particular benchmark test."
+ ),
+ },
+ ],
+ ]
+ + testing_config_options
+ + copy.deepcopy(code_coverage_config_options)
+ + browsertime_options
+ )
+
+ def __init__(self, **kwargs):
+ kwargs.setdefault("config_options", self.config_options)
+ kwargs.setdefault(
+ "all_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install-chrome-android",
+ "install-chromium-distribution",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault(
+ "default_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install-chromium-distribution",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault("config", {})
+ super(Raptor, self).__init__(**kwargs)
+
+ # Convenience
+ self.workdir = self.query_abs_dirs()["abs_work_dir"]
+
+ self.run_local = self.config.get("run_local")
+
+        # App (the browser being tested) defaults to firefox
+ self.app = "firefox"
+
+ if self.run_local:
+ # Get app from command-line args, passed in from mach, inside 'raptor_cmd_line_args'
+ # Command-line args can be in two formats depending on how the user entered them
+ # i.e. "--app=geckoview" or separate as "--app", "geckoview" so we have to
+ # parse carefully. It's simplest to use `argparse` to parse partially.
+ self.app = "firefox"
+ if "raptor_cmd_line_args" in self.config:
+ sub_parser = argparse.ArgumentParser()
+ # It's not necessary to limit the allowed values: each value
+                # will be parsed and verified by raptor/raptor.py.
+ sub_parser.add_argument("--app", default=None, dest="app")
+ sub_parser.add_argument("-i", "--intent", default=None, dest="intent")
+ sub_parser.add_argument(
+ "-a", "--activity", default=None, dest="activity"
+ )
+
+ # We'd prefer to use `parse_known_intermixed_args`, but that's
+ # new in Python 3.7.
+ known, unknown = sub_parser.parse_known_args(
+ self.config["raptor_cmd_line_args"]
+ )
+
+ if known.app:
+ self.app = known.app
+ if known.intent:
+ self.intent = known.intent
+ if known.activity:
+ self.activity = known.activity
+ else:
+ # Raptor initiated in production via mozharness
+ self.test = self.config["test"]
+ self.app = self.config.get("app", "firefox")
+ self.binary_path = self.config.get("binary_path", None)
+
+ if self.app in ("refbrow", "fenix"):
+ self.app_name = self.binary_path
+
+ self.installer_url = self.config.get("installer_url")
+ self.raptor_json_url = self.config.get("raptor_json_url")
+ self.raptor_json = self.config.get("raptor_json")
+ self.raptor_json_config = self.config.get("raptor_json_config")
+ self.repo_path = self.config.get("repo_path")
+ self.obj_path = self.config.get("obj_path")
+ self.mozbuild_path = self.config.get("mozbuild_path")
+ self.test = None
+ self.gecko_profile = self.config.get(
+ "gecko_profile"
+ ) or "--geckoProfile" in self.config.get("raptor_cmd_line_args", [])
+ self.gecko_profile_interval = self.config.get("gecko_profile_interval")
+ self.gecko_profile_entries = self.config.get("gecko_profile_entries")
+ self.gecko_profile_threads = self.config.get("gecko_profile_threads")
+ self.gecko_profile_features = self.config.get("gecko_profile_features")
+ self.extra_profiler_run = self.config.get("extra_profiler_run")
+ self.test_packages_url = self.config.get("test_packages_url")
+ self.test_url_params = self.config.get("test_url_params")
+ self.host = self.config.get("host")
+ if self.host == "HOST_IP":
+ self.host = os.environ["HOST_IP"]
+ self.power_test = self.config.get("power_test")
+ self.memory_test = self.config.get("memory_test")
+ self.cpu_test = self.config.get("cpu_test")
+ self.live_sites = self.config.get("live_sites")
+ self.chimera = self.config.get("chimera")
+ self.disable_perf_tuning = self.config.get("disable_perf_tuning")
+ self.conditioned_profile = self.config.get("conditioned_profile")
+ self.extra_prefs = self.config.get("extra_prefs")
+ self.environment = self.config.get("environment")
+ self.is_release_build = self.config.get("is_release_build")
+ self.debug_mode = self.config.get("debug_mode", False)
+ self.chromium_dist_path = None
+ self.firefox_android_browsers = ["fennec", "geckoview", "refbrow", "fenix"]
+ self.android_browsers = self.firefox_android_browsers + ["chrome-m"]
+ self.browsertime_visualmetrics = self.config.get("browsertime_visualmetrics")
+ self.browsertime_node = self.config.get("browsertime_node")
+ self.browsertime_user_args = self.config.get("browsertime_user_args")
+ self.browsertime_video = False
+ self.enable_marionette_trace = self.config.get("enable_marionette_trace")
+ self.browser_cycles = self.config.get("browser_cycles")
+ self.clean = self.config.get("clean")
+
+ for (arg,), details in Raptor.browsertime_options:
+ # Allow overriding defaults on the `./mach raptor-test ...` command-line.
+ value = self.config.get(details["dest"])
+ if value and arg not in self.config.get("raptor_cmd_line_args", []):
+ setattr(self, details["dest"], value)
+
+    # We accept some configuration options from the try commit message in the
+    # format mozharness: <options>
+    # Example try commit message:
+    # mozharness: --geckoProfile try: <stuff>
+ def query_gecko_profile_options(self):
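+        # Illustrative output when profiling with a 1 ms sampling interval:
+        #   ["--gecko-profile", "--gecko-profile-interval", "1"]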
+ gecko_results = []
+ # If gecko_profile is set, we add that to Raptor's options
+ if self.gecko_profile:
+ gecko_results.append("--gecko-profile")
+ if self.gecko_profile_interval:
+ gecko_results.extend(
+ ["--gecko-profile-interval", str(self.gecko_profile_interval)]
+ )
+ if self.gecko_profile_entries:
+ gecko_results.extend(
+ ["--gecko-profile-entries", str(self.gecko_profile_entries)]
+ )
+ if self.gecko_profile_features:
+ gecko_results.extend(
+ ["--gecko-profile-features", self.gecko_profile_features]
+ )
+ if self.gecko_profile_threads:
+ gecko_results.extend(
+ ["--gecko-profile-threads", self.gecko_profile_threads]
+ )
+ else:
+ if self.extra_profiler_run:
+ gecko_results.append("--extra-profiler-run")
+ return gecko_results
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(Raptor, self).query_abs_dirs()
+ abs_dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ abs_dirs["abs_test_install_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "tests"
+ )
+
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def install_chrome_android(self):
+ """Install Google Chrome for Android in production from tooltool"""
+ if self.app != "chrome-m":
+ self.info("Google Chrome for Android not required")
+ return
+ if self.config.get("run_local"):
+ self.info(
+ "Google Chrome for Android will not be installed "
+ "from tooltool when running locally"
+ )
+ return
+ self.info("Fetching and installing Google Chrome for Android")
+ self.device.shell_output("cmd package install-existing com.android.chrome")
+ self.info("Google Chrome for Android successfully installed")
+
+ def download_chrome_android(self):
+ # Fetch the APK
+ tmpdir = tempfile.mkdtemp()
+ self.tooltool_fetch(
+ os.path.join(
+ self.raptor_path,
+ "raptor",
+ "tooltool-manifests",
+ "chrome-android",
+ "chrome87.manifest",
+ ),
+ output_dir=tmpdir,
+ )
+ files = os.listdir(tmpdir)
+ if len(files) > 1:
+ raise Exception(
+ "Found more than one chrome APK file after tooltool download"
+ )
+ chromeapk = os.path.join(tmpdir, files[0])
+
+ # Disable verification and install the APK
+ self.device.shell_output("settings put global verifier_verify_adb_installs 0")
+ self.install_android_app(chromeapk, replace=True)
+
+ # Re-enable verification and delete the temporary directory
+ self.device.shell_output("settings put global verifier_verify_adb_installs 1")
+ rmtree(tmpdir)
+
+ def install_chromium_distribution(self):
+ """Install Google Chromium distribution in production"""
+ linux, mac, win = "linux", "mac", "win"
+ chrome, chromium, chromium_release = "chrome", "chromium", "custom-car"
+
+ available_chromium_dists = [chrome, chromium, chromium_release]
+ binary_location = {
+ chromium: {
+ linux: ["chrome-linux", "chrome"],
+ mac: ["chrome-mac", "Chromium.app", "Contents", "MacOS", "Chromium"],
+ win: ["chrome-win", "Chrome.exe"],
+ },
+ chromium_release: {
+ linux: ["chromium", "Default", "chrome"],
+ win: ["chromium", "Default", "chrome.exe"],
+ },
+ }
+
+ if self.app not in available_chromium_dists:
+ self.info("Google Chrome or Chromium distributions are not required.")
+ return
+
+ if self.app == "chrome":
+ self.info("Chrome should be preinstalled.")
+ if win in self.platform_name():
+ base_path = "C:\\%s\\Google\\Chrome\\Application\\chrome.exe"
+ self.chromium_dist_path = base_path % "Progra~1"
+ if not os.path.exists(self.chromium_dist_path):
+ self.chromium_dist_path = base_path % "Progra~2"
+ elif linux in self.platform_name():
+ self.chromium_dist_path = "/usr/bin/google-chrome"
+ elif mac in self.platform_name():
+ self.chromium_dist_path = (
+ "/Applications/Google Chrome.app/" "Contents/MacOS/Google Chrome"
+ )
+ else:
+ self.error(
+ "Chrome is not installed on the platform %s yet."
+ % self.platform_name()
+ )
+
+ if os.path.exists(self.chromium_dist_path):
+ self.info(
+ "Google Chrome found in expected location %s"
+ % self.chromium_dist_path
+ )
+ else:
+ self.error("Cannot find Google Chrome at %s" % self.chromium_dist_path)
+
+ return
+
+ chromium_dist = self.app
+
+ if self.config.get("run_local"):
+ self.info("Expecting %s to be pre-installed locally" % chromium_dist)
+ return
+
+ self.info("Getting fetched %s build" % chromium_dist)
+ self.chromium_dist_dest = os.path.normpath(
+ os.path.abspath(os.environ["MOZ_FETCHES_DIR"])
+ )
+
+ if mac in self.platform_name():
+ self.chromium_dist_path = os.path.join(
+ self.chromium_dist_dest, *binary_location[chromium_dist][mac]
+ )
+
+ elif linux in self.platform_name():
+ self.chromium_dist_path = os.path.join(
+ self.chromium_dist_dest, *binary_location[chromium_dist][linux]
+ )
+
+ else:
+ self.chromium_dist_path = os.path.join(
+ self.chromium_dist_dest, *binary_location[chromium_dist][win]
+ )
+
+ self.info("%s dest is: %s" % (chromium_dist, self.chromium_dist_dest))
+ self.info("%s path is: %s" % (chromium_dist, self.chromium_dist_path))
+
+ # Now ensure Chromium binary exists
+ if os.path.exists(self.chromium_dist_path):
+ self.info(
+ "Successfully installed %s to: %s"
+ % (chromium_dist, self.chromium_dist_path)
+ )
+ else:
+ self.info("Abort: failed to install %s" % chromium_dist)
+
+ def raptor_options(self, args=None, **kw):
+ """Return options to Raptor"""
+ options = []
+ kw_options = {}
+
+ # Get the APK location to be able to get the browser version
+ # through mozversion
+ if self.app in self.firefox_android_browsers and not self.run_local:
+ kw_options["installerpath"] = self.installer_path
+
+        # If testing on Firefox, the binary path already came from mozharness
+        # (in production); otherwise the binary path is forwarded from the
+        # command-line arg (raptor_cmd_line_args).
+ kw_options["app"] = self.app
+ if self.app == "firefox" or (
+ self.app in self.firefox_android_browsers and not self.run_local
+ ):
+ binary_path = self.binary_path or self.config.get("binary_path")
+ if not binary_path:
+ self.fatal("Raptor requires a path to the binary.")
+ kw_options["binary"] = binary_path
+ if self.app in self.firefox_android_browsers:
+ # In production ensure we have correct app name,
+ # i.e. fennec_aurora or fennec_release etc.
+ kw_options["binary"] = self.query_package_name()
+ self.info(
+ "Set binary to %s instead of %s"
+ % (kw_options["binary"], binary_path)
+ )
+ elif self.app == "safari" and not self.run_local:
+ binary_path = "/Applications/Safari.app/Contents/MacOS/Safari"
+ kw_options["binary"] = binary_path
+ else: # Running on Chromium
+ if not self.run_local:
+ # When running locally we already set the Chromium binary above, in init.
+ # In production, we already installed Chromium, so set the binary path
+ # to our install.
+ kw_options["binary"] = self.chromium_dist_path or ""
+
+ # Options overwritten from **kw
+ if "test" in self.config:
+ kw_options["test"] = self.config["test"]
+ if "binary" in self.config:
+ kw_options["binary"] = self.config["binary"]
+ if self.symbols_path:
+ kw_options["symbolsPath"] = self.symbols_path
+ if self.config.get("obj_path", None) is not None:
+ kw_options["obj-path"] = self.config["obj_path"]
+ if self.config.get("mozbuild_path", None) is not None:
+ kw_options["mozbuild-path"] = self.config["mozbuild_path"]
+ if self.test_url_params:
+ kw_options["test-url-params"] = self.test_url_params
+ if self.config.get("device_name") is not None:
+ kw_options["device-name"] = self.config["device_name"]
+ if self.config.get("activity") is not None:
+ kw_options["activity"] = self.config["activity"]
+ if self.config.get("conditioned_profile") is not None:
+ kw_options["conditioned-profile"] = self.config["conditioned_profile"]
+ if self.config.get("benchmark_repository"):
+ kw_options["benchmark_repository"] = self.config["benchmark_repository"]
+ if self.config.get("benchmark_revision"):
+ kw_options["benchmark_revision"] = self.config["benchmark_revision"]
+ if self.config.get("benchmark_repository"):
+ kw_options["benchmark_branch"] = self.config["benchmark_branch"]
+
+ kw_options.update(kw)
+ if self.host:
+ kw_options["host"] = self.host
+ # Configure profiling options
+ options.extend(self.query_gecko_profile_options())
+ # Extra arguments
+ if args is not None:
+ options += args
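+        # PERF_FLAGS is a space-separated list of extra flags; an illustrative
+        # (hypothetical) value PERF_FLAGS="page-cycles=5 live-sites" becomes
+        # "--page-cycles 5" and "--live-sites".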
+ if os.getenv("PERF_FLAGS"):
+ for option in os.getenv("PERF_FLAGS").split():
+ if "=" in option:
+ kw_option, value = option.split("=")
+ kw_options[kw_option] = value
+ else:
+ options.extend(["--" + option])
+
+ if self.config.get("run_local", False):
+ options.extend(["--run-local"])
+ if "raptor_cmd_line_args" in self.config:
+ options += self.config["raptor_cmd_line_args"]
+ if self.config.get("code_coverage", False):
+ options.extend(["--code-coverage"])
+ if self.config.get("is_release_build", False):
+ options.extend(["--is-release-build"])
+ if self.config.get("power_test", False):
+ options.extend(["--power-test"])
+ if self.config.get("memory_test", False):
+ options.extend(["--memory-test"])
+ if self.config.get("cpu_test", False):
+ options.extend(["--cpu-test"])
+ if self.config.get("live_sites", False):
+ options.extend(["--live-sites"])
+ if self.config.get("chimera", False):
+ options.extend(["--chimera"])
+ if self.config.get("disable_perf_tuning", False):
+ options.extend(["--disable-perf-tuning"])
+ if self.config.get("cold", False):
+ options.extend(["--cold"])
+ if not self.config.get("fission", True):
+ options.extend(["--disable-fission"])
+ if self.config.get("verbose", False):
+ options.extend(["--verbose"])
+ if self.config.get("extra_prefs"):
+ options.extend(
+ ["--setpref={}".format(i) for i in self.config.get("extra_prefs")]
+ )
+ if self.config.get("environment"):
+ options.extend(
+ ["--setenv={}".format(i) for i in self.config.get("environment")]
+ )
+ if self.config.get("enable_marionette_trace", False):
+ options.extend(["--enable-marionette-trace"])
+ if self.config.get("browser_cycles"):
+ options.extend(
+ ["--browser-cycles={}".format(self.config.get("browser_cycles"))]
+ )
+ if self.config.get("test_bytecode_cache", False):
+ options.extend(["--test-bytecode-cache"])
+ if self.config.get("collect_perfstats", False):
+ options.extend(["--collect-perfstats"])
+ if self.config.get("extra_summary_methods"):
+ options.extend(
+ [
+ "--extra-summary-methods={}".format(method)
+ for method in self.config.get("extra_summary_methods")
+ ]
+ )
+ if self.config.get("webext", False):
+ options.extend(["--webext"])
+ else:
+ for (arg,), details in Raptor.browsertime_options:
+ # Allow overriding defaults on the `./mach raptor-test ...` command-line
+ value = self.config.get(details["dest"])
+ if value is None or value != getattr(self, details["dest"], None):
+ # Check for modifications done to the instance variables
+ value = getattr(self, details["dest"], None)
+ if value and arg not in self.config.get("raptor_cmd_line_args", []):
+ if isinstance(value, string_types):
+ options.extend([arg, os.path.expandvars(value)])
+ elif isinstance(value, (tuple, list)):
+ for val in value:
+ options.extend([arg, val])
+ else:
+ options.extend([arg])
+
+ for key, value in kw_options.items():
+ options.extend(["--%s" % key, value])
+
+ return options
+
+ def populate_webroot(self):
+ """Populate the production test machines' webroots"""
+ self.raptor_path = os.path.join(
+ self.query_abs_dirs()["abs_test_install_dir"], "raptor"
+ )
+ if self.config.get("run_local"):
+ self.raptor_path = os.path.join(self.repo_path, "testing", "raptor")
+
+ def clobber(self):
+ # Recreate the upload directory for storing the logcat collected
+ # during APK installation.
+ super(Raptor, self).clobber()
+ upload_dir = self.query_abs_dirs()["abs_blob_upload_dir"]
+ if not os.path.isdir(upload_dir):
+ self.mkdir_p(upload_dir)
+
+ def install_android_app(self, apk, replace=False):
+ # Override AndroidMixin's install_android_app in order to capture
+ # logcat during the installation. If the installation fails,
+ # the logcat file will be left in the upload directory.
+ self.logcat_start()
+ try:
+ super(Raptor, self).install_android_app(apk, replace=replace)
+ finally:
+ self.logcat_stop()
+
+ def download_and_extract(self, extract_dirs=None, suite_categories=None):
+ # Use in-tree wptserve for Python 3.10 compatibility
+ extract_dirs = [
+ "tools/wptserve/*",
+ "tools/wpt_third_party/pywebsocket3/*",
+ ]
+ return super(Raptor, self).download_and_extract(
+ extract_dirs=extract_dirs, suite_categories=["common", "condprof", "raptor"]
+ )
+
+ def create_virtualenv(self, **kwargs):
+ """VirtualenvMixin.create_virtualenv() assumes we're using
+ self.config['virtualenv_modules']. Since we're installing
+ raptor from its source, we have to wrap that method here."""
+ # If virtualenv already exists, just add to path and don't re-install.
+ # We need it in-path to import jsonschema later when validating output for perfherder.
+ _virtualenv_path = self.config.get("virtualenv_path")
+
+ if self.clean:
+ rmtree(_virtualenv_path, ignore_errors=True)
+
+ _python_interp = self.query_exe("python")
+ if "win" in self.platform_name() and os.path.exists(_python_interp):
+ multiprocessing.set_executable(_python_interp)
+
+ if self.run_local and os.path.exists(_virtualenv_path):
+ self.info("Virtualenv already exists, skipping creation")
+ # ffmpeg exists outside of this virtual environment so
+ # we re-add it to the platform environment on repeated
+ # local runs of browsertime visual metric tests
+ self.setup_local_ffmpeg()
+
+ if "win" in self.platform_name():
+ _path = os.path.join(_virtualenv_path, "Lib", "site-packages")
+ else:
+ _path = os.path.join(
+ _virtualenv_path,
+ "lib",
+ os.path.basename(_python_interp),
+ "site-packages",
+ )
+
+ sys.path.append(_path)
+ return
+
+ # virtualenv doesn't already exist so create it
+ # Install mozbase first, so we use in-tree versions
+ # Additionally, decide where to pull raptor requirements from.
+ if not self.run_local:
+ mozbase_requirements = os.path.join(
+ self.query_abs_dirs()["abs_test_install_dir"],
+ "config",
+ "mozbase_requirements.txt",
+ )
+ raptor_requirements = os.path.join(self.raptor_path, "requirements.txt")
+ else:
+ mozbase_requirements = os.path.join(
+ os.path.dirname(self.raptor_path),
+ "config",
+ "mozbase_source_requirements.txt",
+ )
+ raptor_requirements = os.path.join(
+ self.raptor_path, "source_requirements.txt"
+ )
+ self.register_virtualenv_module(
+ requirements=[mozbase_requirements],
+ two_pass=True,
+ editable=True,
+ )
+
+ modules = ["pip>=1.5"]
+
+ # Add modules required for visual metrics
+ py3_minor = sys.version_info.minor
+ if py3_minor <= 7:
+ modules.extend(
+ [
+ "numpy==1.16.1",
+ "Pillow==6.1.0",
+ "scipy==1.2.3",
+ "pyssim==0.4",
+ "opencv-python==4.5.4.60",
+ ]
+ )
+ else: # python version >= 3.8
+ modules.extend(
+ [
+ "numpy==1.22.0",
+ "Pillow==9.0.0",
+ "scipy==1.7.3",
+ "pyssim==0.4",
+ "opencv-python==4.5.4.60",
+ ]
+ )
+
+ if self.run_local:
+ self.setup_local_ffmpeg()
+
+ # Require pip >= 1.5 so pip will prefer .whl files to install
+ super(Raptor, self).create_virtualenv(modules=modules)
+
+ # Install Raptor dependencies
+ self.install_module(requirements=[raptor_requirements])
+
+ def setup_local_ffmpeg(self):
+ """Make use of the users local ffmpeg when running browsertime visual
+ metrics tests.
+ """
+
+ if "ffmpeg" in os.environ["PATH"]:
+ return
+
+ platform = self.platform_name()
+ btime_cache = os.path.join(self.config["mozbuild_path"], "browsertime")
+ if "mac" in platform:
+ path_to_ffmpeg = os.path.join(
+ btime_cache,
+ FFMPEG_LOCAL_CACHE["mac"],
+ )
+ elif "linux" in platform:
+ path_to_ffmpeg = os.path.join(
+ btime_cache,
+ FFMPEG_LOCAL_CACHE["linux"],
+ )
+ elif "win" in platform:
+ path_to_ffmpeg = os.path.join(
+ btime_cache,
+ FFMPEG_LOCAL_CACHE["win"],
+ "bin",
+ )
+
+ if os.path.exists(path_to_ffmpeg):
+ os.environ["PATH"] += os.pathsep + path_to_ffmpeg
+ self.browsertime_ffmpeg = path_to_ffmpeg
+ self.info(
+ "Added local ffmpeg found at: %s to environment." % path_to_ffmpeg
+ )
+ else:
+ raise Exception(
+ "No local ffmpeg binary found. Expected it to be here: %s"
+ % path_to_ffmpeg
+ )
+
+ def install(self):
+ if not self.config.get("noinstall", False):
+ if self.app in self.firefox_android_browsers:
+ self.device.uninstall_app(self.binary_path)
+
+ # Check if the user supplied their own APK, and install
+ # that instead
+ installer_path = pathlib.Path(
+ self.raptor_path, "raptor", "user_upload.apk"
+ )
+ if not installer_path.exists():
+ installer_path = self.installer_path
+
+ self.info(f"Installing APK from: {installer_path}")
+ self.install_android_app(str(installer_path))
+ else:
+ super(Raptor, self).install()
+
+ def _artifact_perf_data(self, src, dest):
+ if not os.path.isdir(os.path.dirname(dest)):
+ # create upload dir if it doesn't already exist
+ self.info("Creating dir: %s" % os.path.dirname(dest))
+ os.makedirs(os.path.dirname(dest))
+ self.info("Copying raptor results from %s to %s" % (src, dest))
+ try:
+ copyfile(src, dest)
+ except Exception as e:
+ self.critical("Error copying results %s to upload dir %s" % (src, dest))
+ self.info(str(e))
+
+ def run_tests(self, args=None, **kw):
+ """Run raptor tests"""
+
+ # Get Raptor options
+ options = self.raptor_options(args=args, **kw)
+
+ # Python version check
+ python = self.query_python_path()
+ self.run_command([python, "--version"])
+ parser = RaptorOutputParser(
+ config=self.config, log_obj=self.log_obj, error_list=RaptorErrorList
+ )
+ env = {}
+ env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ if not self.run_local:
+ env["MINIDUMP_STACKWALK"] = self.query_minidump_stackwalk()
+ env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "full"
+ if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
+ self.mkdir_p(env["MOZ_UPLOAD_DIR"])
+ env = self.query_env(partial_env=env, log_level=INFO)
+ # adjust PYTHONPATH to be able to use raptor as a python package
+ if "PYTHONPATH" in env:
+ env["PYTHONPATH"] = self.raptor_path + os.pathsep + env["PYTHONPATH"]
+ else:
+ env["PYTHONPATH"] = self.raptor_path
+
+ # mitmproxy needs path to mozharness when installing the cert, and tooltool
+ env["SCRIPTSPATH"] = scripts_path
+ env["EXTERNALTOOLSPATH"] = external_tools_path
+
+ # Needed to load unsigned Raptor WebExt on release builds
+ if self.is_release_build:
+ env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
+
+ if self.repo_path is not None:
+ env["MOZ_DEVELOPER_REPO_DIR"] = self.repo_path
+ if self.obj_path is not None:
+ env["MOZ_DEVELOPER_OBJ_DIR"] = self.obj_path
+ if self.mozbuild_path is not None:
+ env["MOZ_MOZBUILD_DIR"] = self.mozbuild_path
+
+ # Sets a timeout for how long Raptor should run without output
+ output_timeout = self.config.get("raptor_output_timeout", 3600)
+ # Run Raptor tests
+ run_tests = os.path.join(self.raptor_path, "raptor", "raptor.py")
+
+ # Dynamically set the log level based on the raptor config for consistency
+ # throughout the test
+ mozlog_opts = [f"--log-tbpl-level={self.config['log_level']}"]
+
+ if not self.run_local and "suite" in self.config:
+ fname_pattern = "%s_%%s.log" % self.config["test"]
+ mozlog_opts.append(
+ "--log-errorsummary=%s"
+ % os.path.join(env["MOZ_UPLOAD_DIR"], fname_pattern % "errorsummary")
+ )
+
+ def launch_in_debug_mode(cmdline):
+ cmdline = set(cmdline)
+ debug_opts = {"--debug", "--debugger", "--debugger_args"}
+
+ return bool(debug_opts.intersection(cmdline))
+
+ if self.app in self.android_browsers:
+ self.logcat_start()
+
+ command = [python, run_tests] + options + mozlog_opts
+ if launch_in_debug_mode(command):
+ raptor_process = subprocess.Popen(command, cwd=self.workdir, env=env)
+ raptor_process.wait()
+ else:
+ self.return_code = self.run_command(
+ command,
+ cwd=self.workdir,
+ output_timeout=output_timeout,
+ output_parser=parser,
+ env=env,
+ )
+
+ if self.app in self.android_browsers:
+ self.logcat_stop()
+
+ if parser.minidump_output:
+ self.info("Looking at the minidump files for debugging purposes...")
+ for item in parser.minidump_output:
+ self.run_command(["ls", "-l", item])
+
+ elif not self.run_local:
+ # Copy results to upload dir so they are included as an artifact
+ self.info("Copying Raptor results to upload dir:")
+
+ src = os.path.join(self.query_abs_dirs()["abs_work_dir"], "raptor.json")
+ dest = os.path.join(env["MOZ_UPLOAD_DIR"], "perfherder-data.json")
+ self.info(str(dest))
+ self._artifact_perf_data(src, dest)
+
+ # Make individual perfherder data JSON's for each supporting data type
+ for file in glob.glob(
+ os.path.join(self.query_abs_dirs()["abs_work_dir"], "*")
+ ):
+ path, filename = os.path.split(file)
+
+ if not filename.startswith("raptor-"):
+ continue
+
+ # filename is expected to contain a unique data name
+ # i.e. raptor-os-baseline-power.json would result in
+ # the data name os-baseline-power
+ data_name = "-".join(filename.split("-")[1:])
+ data_name = ".".join(data_name.split(".")[:-1])
+
+ src = file
+ dest = os.path.join(
+ env["MOZ_UPLOAD_DIR"], "perfherder-data-%s.json" % data_name
+ )
+ self._artifact_perf_data(src, dest)
+
+ src = os.path.join(
+ self.query_abs_dirs()["abs_work_dir"], "screenshots.html"
+ )
+ if os.path.exists(src):
+ dest = os.path.join(env["MOZ_UPLOAD_DIR"], "screenshots.html")
+ self.info(str(dest))
+ self._artifact_perf_data(src, dest)
+
+ # Allow log failures to over-ride successful runs of the test harness and
+ # give log failures priority, so that, for instance, log failures resulting
+ # in TBPL_RETRY cause a retry rather than simply reporting an error.
+ if parser.tbpl_status != TBPL_SUCCESS:
+ parser_status = EXIT_STATUS_DICT[parser.tbpl_status]
+ self.info(
+ "return code %s changed to %s due to log output"
+ % (str(self.return_code), str(parser_status))
+ )
+ self.return_code = parser_status
+
+
+class RaptorOutputParser(OutputParser):
+ minidump_regex = re.compile(
+ r'''raptorError: "error executing: '(\S+) (\S+) (\S+)'"'''
+ )
+ RE_PERF_DATA = re.compile(r".*PERFHERDER_DATA:\s+(\{.*\})")
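+    # Illustrative line matched by RE_PERF_DATA (payload abbreviated):
+    #   PERFHERDER_DATA: {"framework": {"name": "browsertime"}, ...}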
+
+ def __init__(self, **kwargs):
+ super(RaptorOutputParser, self).__init__(**kwargs)
+ self.minidump_output = None
+ self.found_perf_data = []
+ self.tbpl_status = TBPL_SUCCESS
+ self.worst_log_level = INFO
+ self.harness_retry_re = TinderBoxPrintRe["harness_error"]["retry_regex"]
+
+ def parse_single_line(self, line):
+ m = self.minidump_regex.search(line)
+ if m:
+ self.minidump_output = (m.group(1), m.group(2), m.group(3))
+
+ m = self.RE_PERF_DATA.match(line)
+ if m:
+ self.found_perf_data.append(m.group(1))
+
+ if self.harness_retry_re.search(line):
+ self.critical(" %s" % line)
+ self.worst_log_level = self.worst_level(CRITICAL, self.worst_log_level)
+ self.tbpl_status = self.worst_level(
+ TBPL_RETRY, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+ return # skip base parse_single_line
+ super(RaptorOutputParser, self).parse_single_line(line)
diff --git a/testing/mozharness/mozharness/mozilla/testing/talos.py b/testing/mozharness/mozharness/mozilla/testing/talos.py
new file mode 100755
index 0000000000..b6827cd3d2
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/talos.py
@@ -0,0 +1,893 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""
+run talos tests in a virtualenv
+"""
+
+import copy
+import io
+import json
+import multiprocessing
+import os
+import pprint
+import re
+import shutil
+import subprocess
+import sys
+
+import six
+
+import mozharness
+from mozharness.base.config import parse_config_file
+from mozharness.base.errors import PythonErrorList
+from mozharness.base.log import CRITICAL, DEBUG, ERROR, INFO, WARNING, OutputParser
+from mozharness.base.python import Python3Virtualenv
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.automation import (
+ TBPL_FAILURE,
+ TBPL_RETRY,
+ TBPL_SUCCESS,
+ TBPL_WARNING,
+ TBPL_WORST_LEVEL_TUPLE,
+)
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.errors import TinderBoxPrintRe
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.tooltool import TooltoolMixin
+
+scripts_path = os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__)))
+external_tools_path = os.path.join(scripts_path, "external_tools")
+
+TalosErrorList = PythonErrorList + [
+ {"regex": re.compile(r"""run-as: Package '.*' is unknown"""), "level": DEBUG},
+ {"substr": r"""FAIL: Graph server unreachable""", "level": CRITICAL},
+ {"substr": r"""FAIL: Busted:""", "level": CRITICAL},
+ {"substr": r"""FAIL: failed to cleanup""", "level": ERROR},
+ {"substr": r"""erfConfigurator.py: Unknown error""", "level": CRITICAL},
+ {"substr": r"""talosError""", "level": CRITICAL},
+ {
+ "regex": re.compile(r"""No machine_name called '.*' can be found"""),
+ "level": CRITICAL,
+ },
+ {
+ "substr": r"""No such file or directory: 'browser_output.txt'""",
+ "level": CRITICAL,
+ "explanation": "Most likely the browser failed to launch, or the test was otherwise "
+ "unsuccessful in even starting.",
+ },
+]
+
+GeckoProfilerSettings = (
+ "gecko_profile_interval",
+ "gecko_profile_entries",
+ "gecko_profile_features",
+ "gecko_profile_threads",
+)
+
+# TODO: check for running processes on script invocation
+
+
+class TalosOutputParser(OutputParser):
+ minidump_regex = re.compile(
+ r'''talosError: "error executing: '(\S+) (\S+) (\S+)'"'''
+ )
+ RE_PERF_DATA = re.compile(r".*PERFHERDER_DATA:\s+(\{.*\})")
+ worst_tbpl_status = TBPL_SUCCESS
+
+ def __init__(self, **kwargs):
+ super(TalosOutputParser, self).__init__(**kwargs)
+ self.minidump_output = None
+ self.found_perf_data = []
+
+ def update_worst_log_and_tbpl_levels(self, log_level, tbpl_level):
+ self.worst_log_level = self.worst_level(log_level, self.worst_log_level)
+ self.worst_tbpl_status = self.worst_level(
+ tbpl_level, self.worst_tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+
+ def parse_single_line(self, line):
+ """In Talos land, every line that starts with RETURN: needs to be
+ printed with a TinderboxPrint:"""
+ if line.startswith("RETURN:"):
+ line.replace("RETURN:", "TinderboxPrint:")
+ m = self.minidump_regex.search(line)
+ if m:
+ self.minidump_output = (m.group(1), m.group(2), m.group(3))
+
+ m = self.RE_PERF_DATA.match(line)
+ if m:
+ self.found_perf_data.append(m.group(1))
+
+ # now let's check if we should retry
+ harness_retry_re = TinderBoxPrintRe["harness_error"]["retry_regex"]
+ if harness_retry_re.search(line):
+ self.critical(" %s" % line)
+ self.update_worst_log_and_tbpl_levels(CRITICAL, TBPL_RETRY)
+ return # skip base parse_single_line
+ super(TalosOutputParser, self).parse_single_line(line)
+
+
+class Talos(
+ TestingMixin, MercurialScript, TooltoolMixin, Python3Virtualenv, CodeCoverageMixin
+):
+ """
+ install and run Talos tests
+ """
+
+ config_options = (
+ [
+ [
+ ["--use-talos-json"],
+ {
+ "action": "store_true",
+ "dest": "use_talos_json",
+ "default": False,
+ "help": "Use talos config from talos.json",
+ },
+ ],
+ [
+ ["--suite"],
+ {
+ "action": "store",
+ "dest": "suite",
+ "help": "Talos suite to run (from talos json)",
+ },
+ ],
+ [
+ ["--system-bits"],
+ {
+ "action": "store",
+ "dest": "system_bits",
+ "type": "choice",
+ "default": "32",
+ "choices": ["32", "64"],
+ "help": "Testing 32 or 64 (for talos json plugins)",
+ },
+ ],
+ [
+ ["--add-option"],
+ {
+ "action": "extend",
+ "dest": "talos_extra_options",
+ "default": None,
+ "help": "extra options to talos",
+ },
+ ],
+ [
+ ["--gecko-profile"],
+ {
+ "dest": "gecko_profile",
+ "action": "store_true",
+ "default": False,
+ "help": "Whether or not to profile the test run and save the profile results",
+ },
+ ],
+ [
+ ["--gecko-profile-interval"],
+ {
+ "dest": "gecko_profile_interval",
+ "type": "int",
+ "help": "The interval between samples taken by the profiler (milliseconds)",
+ },
+ ],
+ [
+ ["--gecko-profile-entries"],
+ {
+ "dest": "gecko_profile_entries",
+ "type": "int",
+ "help": "How many samples to take with the profiler",
+ },
+ ],
+ [
+ ["--gecko-profile-features"],
+ {
+ "dest": "gecko_profile_features",
+ "type": "str",
+ "default": None,
+ "help": "The features to enable in the profiler (comma-separated)",
+ },
+ ],
+ [
+ ["--gecko-profile-threads"],
+ {
+ "dest": "gecko_profile_threads",
+ "type": "str",
+ "help": "Comma-separated list of threads to sample.",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "dest": "e10s",
+ "action": "store_false",
+ "default": True,
+ "help": "Run without multiple processes (e10s).",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_false",
+ "dest": "fission",
+ "default": True,
+ "help": "Disable Fission (site isolation) in Gecko.",
+ },
+ ],
+ [
+ ["--project"],
+ {
+ "dest": "project",
+ "type": "str",
+ "help": "The project branch we're running tests on. Used for "
+ "disabling/skipping tests.",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Set a browser preference. May be used multiple times.",
+ },
+ ],
+ [
+ ["--skip-preflight"],
+ {
+ "action": "store_true",
+ "dest": "skip_preflight",
+ "default": False,
+ "help": "skip preflight commands to prepare machine.",
+ },
+ ],
+ ]
+ + testing_config_options
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
+ def __init__(self, **kwargs):
+ kwargs.setdefault("config_options", self.config_options)
+ kwargs.setdefault(
+ "all_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault(
+ "default_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault("config", {})
+ super(Talos, self).__init__(**kwargs)
+
+ self.workdir = self.query_abs_dirs()["abs_work_dir"] # convenience
+
+ self.run_local = self.config.get("run_local")
+ self.installer_url = self.config.get("installer_url")
+ self.test_packages_url = self.config.get("test_packages_url")
+ self.talos_json_url = self.config.get("talos_json_url")
+ self.talos_json = self.config.get("talos_json")
+ self.talos_json_config = self.config.get("talos_json_config")
+ self.repo_path = self.config.get("repo_path")
+ self.obj_path = self.config.get("obj_path")
+ self.tests = None
+ extra_opts = self.config.get("talos_extra_options", [])
+ self.gecko_profile = (
+ self.config.get("gecko_profile") or "--gecko-profile" in extra_opts
+ )
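+        # Pick up profiler settings from the config, falling back to values
+        # passed via --add-option; seeing any of them implies profiling.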
+ for setting in GeckoProfilerSettings:
+ value = self.config.get(setting)
+ arg = "--" + setting.replace("_", "-")
+ if value is None:
+                try:
+                    value = extra_opts[extra_opts.index(arg) + 1]
+                except (ValueError, IndexError):
+                    pass  # flag not present, or given without a value
+ if value is not None:
+ setattr(self, setting, value)
+ if not self.gecko_profile:
+ self.warning("enabling Gecko profiler for %s setting!" % setting)
+ self.gecko_profile = True
+ self.pagesets_name = None
+ self.benchmark_zip = None
+ self.webextensions_zip = None
+
+ # We accept some configuration options from the try commit message in the format
+ # mozharness: <options>
+ # Example try commit message:
+ # mozharness: --gecko-profile try: <stuff>
+ def query_gecko_profile_options(self):
+ gecko_results = []
+ # finally, if gecko_profile is set, we add that to the talos options
+ if self.gecko_profile:
+ gecko_results.append("--gecko-profile")
+ for setting in GeckoProfilerSettings:
+ value = getattr(self, setting, None)
+ if value:
+ arg = "--" + setting.replace("_", "-")
+ gecko_results.extend([arg, str(value)])
+ return gecko_results
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(Talos, self).query_abs_dirs()
+ abs_dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ abs_dirs["abs_test_install_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "tests"
+ )
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def query_talos_json_config(self):
+ """Return the talos json config."""
+ if self.talos_json_config:
+ return self.talos_json_config
+ if not self.talos_json:
+ self.talos_json = os.path.join(self.talos_path, "talos.json")
+ self.talos_json_config = parse_config_file(self.talos_json)
+ self.info(pprint.pformat(self.talos_json_config))
+ return self.talos_json_config
+
+ def make_talos_domain(self, host):
+ return host + "-talos"
+
+ def split_path(self, path):
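+        # Split a path into all of its components,
+        # e.g. "a/b/c" -> ["a", "b", "c"].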
+ result = []
+ while True:
+ path, folder = os.path.split(path)
+ if folder:
+ result.append(folder)
+ continue
+ elif path:
+ result.append(path)
+ break
+
+ result.reverse()
+ return result
+
+ def merge_paths(self, lhs, rhs):
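+        # Resolve the leading ".." segments of rhs against the tail of lhs.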
+ backtracks = 0
+ for subdir in rhs:
+ if subdir == "..":
+ backtracks += 1
+ else:
+ break
+        # lhs[:-0] would drop all of lhs, so only slice when backtracking
+        if backtracks:
+            lhs = lhs[:-backtracks]
+        return lhs + rhs[backtracks:]
+
+ def replace_relative_iframe_paths(self, directory, filename):
+ """This will find iframes with relative paths and replace them with
+ absolute paths containing domains derived from the original source's
+        domain. This helps us better simulate real-world cases for fission.
+ """
+ if not filename.endswith(".html"):
+ return
+
+ directory_pieces = self.split_path(directory)
+ while directory_pieces and directory_pieces[0] != "fis":
+ directory_pieces = directory_pieces[1:]
+ path = os.path.join(directory, filename)
+
+ # XXX: ugh, is there a better way to account for multiple encodings than just
+ # trying each of them?
+ encodings = ["utf-8", "latin-1"]
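+        # match iframe tags whose src is a relative "../*.html" reference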
+ iframe_pattern = re.compile(r'(iframe.*")(\.\./.*\.html)"')
+ for encoding in encodings:
+ try:
+ with io.open(path, "r", encoding=encoding) as f:
+ content = f.read()
+
+ def replace_iframe_src(match):
+ src = match.group(2)
+ split = self.split_path(src)
+ merged = self.merge_paths(directory_pieces, split)
+ host = merged[3]
+ site_origin_hash = self.make_talos_domain(host)
+ new_url = 'http://%s/%s"' % (
+ site_origin_hash,
+ "/".join(merged), # pylint --py3k: W1649
+ )
+ self.info(
+ "Replacing %s with %s in iframe inside %s"
+ % (match.group(2), new_url, path)
+ )
+ return match.group(1) + new_url
+
+ content = re.sub(iframe_pattern, replace_iframe_src, content)
+ with io.open(path, "w", encoding=encoding) as f:
+ f.write(content)
+ break
+ except UnicodeDecodeError:
+ pass
+
+ def query_pagesets_name(self):
+ """Certain suites require external pagesets to be downloaded and
+ extracted.
+ """
+ if self.pagesets_name:
+ return self.pagesets_name
+ if self.query_talos_json_config() and self.suite is not None:
+ self.pagesets_name = self.talos_json_config["suites"][self.suite].get(
+ "pagesets_name"
+ )
+ self.pagesets_name_manifest = "tp5n-pageset.manifest"
+ return self.pagesets_name
+
+ def query_benchmark_zip(self):
+ """Certain suites require external benchmarks to be downloaded and
+ extracted.
+ """
+ if self.benchmark_zip:
+ return self.benchmark_zip
+ if self.query_talos_json_config() and self.suite is not None:
+ self.benchmark_zip = self.talos_json_config["suites"][self.suite].get(
+ "benchmark_zip"
+ )
+ self.benchmark_zip_manifest = "jetstream-benchmark.manifest"
+ return self.benchmark_zip
+
+ def query_webextensions_zip(self):
+ """Certain suites require external WebExtension sets to be downloaded and
+ extracted.
+ """
+ if self.webextensions_zip:
+ return self.webextensions_zip
+ if self.query_talos_json_config() and self.suite is not None:
+ self.webextensions_zip = self.talos_json_config["suites"][self.suite].get(
+ "webextensions_zip"
+ )
+ self.webextensions_zip_manifest = "webextensions.manifest"
+ return self.webextensions_zip
+
+ def get_suite_from_test(self):
+ """Retrieve the talos suite name from a given talos test name."""
+ # running locally, single test name provided instead of suite; go through tests and
+ # find suite name
+ suite_name = None
+ if self.query_talos_json_config():
+ if "-a" in self.config["talos_extra_options"]:
+ test_name_index = self.config["talos_extra_options"].index("-a") + 1
+ if "--activeTests" in self.config["talos_extra_options"]:
+ test_name_index = (
+ self.config["talos_extra_options"].index("--activeTests") + 1
+ )
+ if test_name_index < len(self.config["talos_extra_options"]):
+ test_name = self.config["talos_extra_options"][test_name_index]
+ for talos_suite in self.talos_json_config["suites"]:
+ if test_name in self.talos_json_config["suites"][talos_suite].get(
+ "tests"
+ ):
+ suite_name = talos_suite
+ if not suite_name:
+ # no suite found to contain the specified test, error out
+ self.fatal("Test name is missing or invalid")
+ else:
+ self.fatal("Talos json config not found, cannot verify suite")
+ return suite_name
+
+ def query_suite_extra_prefs(self):
+ if self.query_talos_json_config() and self.suite is not None:
+ return self.talos_json_config["suites"][self.suite].get("extra_prefs", [])
+
+ return []
+
+ def validate_suite(self):
+ """Ensure suite name is a valid talos suite."""
+ if self.query_talos_json_config() and self.suite is not None:
+ if self.suite not in self.talos_json_config.get("suites"):
+ self.fatal(
+ "Suite '%s' is not valid (not found in talos json config)"
+ % self.suite
+ )
+
+ def talos_options(self, args=None, **kw):
+ """return options to talos"""
+ # binary path
+ binary_path = self.binary_path or self.config.get("binary_path")
+ if not binary_path:
+ msg = """Talos requires a path to the binary. You can specify binary_path or add
+ download-and-extract to your action list."""
+ self.fatal(msg)
+
+ # talos options
+ options = []
+ # talos can't gather data if the process name ends with '.exe'
+ if binary_path.endswith(".exe"):
+ binary_path = binary_path[:-4]
+ # options overwritten from **kw
+ kw_options = {"executablePath": binary_path}
+ if "suite" in self.config:
+ kw_options["suite"] = self.config["suite"]
+ if self.config.get("title"):
+ kw_options["title"] = self.config["title"]
+ if self.symbols_path:
+ kw_options["symbolsPath"] = self.symbols_path
+ if self.config.get("project", None):
+ kw_options["project"] = self.config["project"]
+
+ kw_options.update(kw)
+ # talos expects tests to be in the format (e.g.) 'ts:tp5:tsvg'
+ tests = kw_options.get("activeTests")
+ if tests and not isinstance(tests, six.string_types):
+ tests = ":".join(tests) # Talos expects this format
+ kw_options["activeTests"] = tests
+ for key, value in kw_options.items():
+ options.extend(["--%s" % key, value])
+ # configure profiling options
+ options.extend(self.query_gecko_profile_options())
+ # extra arguments
+ if args is not None:
+ options += args
+ if "talos_extra_options" in self.config:
+ options += self.config["talos_extra_options"]
+ if self.config.get("code_coverage", False):
+ options.extend(["--code-coverage"])
+
+ # Add extra_prefs defined by individual test suites in talos.json
+ extra_prefs = self.query_suite_extra_prefs()
+ # Add extra_prefs from the configuration
+ if self.config["extra_prefs"]:
+ extra_prefs.extend(self.config["extra_prefs"])
+
+ options.extend(["--setpref={}".format(p) for p in extra_prefs])
+
+ # disabling fission can come from the --disable-fission cmd line argument; or in CI
+ # it comes from a taskcluster transform which adds a --setpref for fission.autostart
+ if (not self.config["fission"]) or "fission.autostart=false" in self.config[
+ "extra_prefs"
+ ]:
+ options.extend(["--disable-fission"])
+
+ return options
+
+ def populate_webroot(self):
+ """Populate the production test machines' webroots"""
+ self.talos_path = os.path.join(
+ self.query_abs_dirs()["abs_test_install_dir"], "talos"
+ )
+
+        # determine whether the talos pageset needs to be downloaded
+ if self.config.get("run_local") and "talos_extra_options" in self.config:
+ # talos initiated locally, get and verify test/suite from cmd line
+ self.talos_path = os.path.dirname(self.talos_json)
+ if (
+ "-a" in self.config["talos_extra_options"]
+ or "--activeTests" in self.config["talos_extra_options"]
+ ):
+ # test name (-a or --activeTests) specified, find out what suite it is a part of
+ self.suite = self.get_suite_from_test()
+ elif "--suite" in self.config["talos_extra_options"]:
+ # --suite specified, get suite from cmd line and ensure is valid
+ suite_name_index = (
+ self.config["talos_extra_options"].index("--suite") + 1
+ )
+ if suite_name_index < len(self.config["talos_extra_options"]):
+ self.suite = self.config["talos_extra_options"][suite_name_index]
+ self.validate_suite()
+ else:
+ self.fatal("Suite name not provided")
+ else:
+ # talos initiated in production via mozharness
+ self.suite = self.config["suite"]
+
+ tooltool_artifacts = []
+ src_talos_pageset_dest = os.path.join(self.talos_path, "talos", "tests")
+ # unfortunately this path has to be short and can't be descriptive, because
+ # on Windows we tend to already push the boundaries of the max path length
+ # constraint. This will contain the tp5 pageset, but adjusted to have
+ # absolute URLs on iframes for the purposes of better modeling things for
+ # fission.
+ src_talos_pageset_multidomain_dest = os.path.join(
+ self.talos_path, "talos", "fis"
+ )
+ webextension_dest = os.path.join(self.talos_path, "talos", "webextensions")
+
+ if self.query_pagesets_name():
+ tooltool_artifacts.append(
+ {
+ "name": self.pagesets_name,
+ "manifest": self.pagesets_name_manifest,
+ "dest": src_talos_pageset_dest,
+ }
+ )
+ tooltool_artifacts.append(
+ {
+ "name": self.pagesets_name,
+ "manifest": self.pagesets_name_manifest,
+ "dest": src_talos_pageset_multidomain_dest,
+ "postprocess": self.replace_relative_iframe_paths,
+ }
+ )
+
+ if self.query_benchmark_zip():
+ tooltool_artifacts.append(
+ {
+ "name": self.benchmark_zip,
+ "manifest": self.benchmark_zip_manifest,
+ "dest": src_talos_pageset_dest,
+ }
+ )
+
+ if self.query_webextensions_zip():
+ tooltool_artifacts.append(
+ {
+ "name": self.webextensions_zip,
+ "manifest": self.webextensions_zip_manifest,
+ "dest": webextension_dest,
+ }
+ )
+
+        # now that we have the suite name, check whether each artifact is
+        # required and, if so, download it; the --no-download option
+        # overrides this
+ for artifact in tooltool_artifacts:
+ if "--no-download" not in self.config.get("talos_extra_options", []):
+ self.info("Downloading %s with tooltool..." % artifact)
+
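+                # the zip extracts into a directory named after the archive,
+                # which is what the postprocess walk below relies on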
+ archive = os.path.join(artifact["dest"], artifact["name"])
+ output_dir_path = re.sub(r"\.zip$", "", archive)
+ if not os.path.exists(archive):
+ manifest_file = os.path.join(self.talos_path, artifact["manifest"])
+ self.tooltool_fetch(
+ manifest_file,
+ output_dir=artifact["dest"],
+ cache=self.config.get("tooltool_cache"),
+ )
+ unzip = self.query_exe("unzip")
+ unzip_cmd = [unzip, "-q", "-o", archive, "-d", artifact["dest"]]
+ self.run_command(unzip_cmd, halt_on_failure=True)
+
+ if "postprocess" in artifact:
+ for subdir, dirs, files in os.walk(output_dir_path):
+ for file in files:
+ artifact["postprocess"](subdir, file)
+ else:
+ self.info("%s already available" % artifact)
+
+ else:
+ self.info(
+ "Not downloading %s because the no-download option was specified"
+ % artifact
+ )
+
+ # if running webkit tests locally, need to copy webkit source into talos/tests
+ if self.config.get("run_local") and (
+ "stylebench" in self.suite or "motionmark" in self.suite
+ ):
+ self.get_webkit_source()
+
+ def get_webkit_source(self):
+ # in production the build system auto copies webkit source into place;
+ # but when run locally we need to do this manually, so that talos can find it
+ src = os.path.join(self.repo_path, "third_party", "webkit", "PerformanceTests")
+ dest = os.path.join(
+ self.talos_path, "talos", "tests", "webkit", "PerformanceTests"
+ )
+
+ if os.path.exists(dest):
+ shutil.rmtree(dest)
+
+ self.info("Copying webkit benchmarks from %s to %s" % (src, dest))
+ try:
+ shutil.copytree(src, dest)
+ except Exception:
+ self.critical("Error copying webkit benchmarks from %s to %s" % (src, dest))
+
+ # Action methods. {{{1
+ # clobber defined in BaseScript
+
+ def download_and_extract(self, extract_dirs=None, suite_categories=None):
+ # Use in-tree wptserve for Python 3.10 compatibility
+ extract_dirs = [
+ "tools/wptserve/*",
+ "tools/wpt_third_party/pywebsocket3/*",
+ ]
+ return super(Talos, self).download_and_extract(
+ extract_dirs=extract_dirs, suite_categories=["common", "talos"]
+ )
+
+ def create_virtualenv(self, **kwargs):
+ """VirtualenvMixin.create_virtualenv() assuemes we're using
+ self.config['virtualenv_modules']. Since we are installing
+ talos from its source, we have to wrap that method here."""
+ # if virtualenv already exists, just add to path and don't re-install, need it
+ # in path so can import jsonschema later when validating output for perfherder
+ _virtualenv_path = self.config.get("virtualenv_path")
+
+ _python_interp = self.query_exe("python")
+ if "win" in self.platform_name() and os.path.exists(_python_interp):
+ multiprocessing.set_executable(_python_interp)
+
+ if self.run_local and os.path.exists(_virtualenv_path):
+ self.info("Virtualenv already exists, skipping creation")
+
+ if "win" in self.platform_name():
+ _path = os.path.join(_virtualenv_path, "Lib", "site-packages")
+ else:
+ _path = os.path.join(
+ _virtualenv_path,
+ "lib",
+ os.path.basename(_python_interp),
+ "site-packages",
+ )
+
+ sys.path.append(_path)
+ return
+
+ # virtualenv doesn't already exist so create it
+ # install mozbase first, so we use in-tree versions
+ # Additionally, decide where to pull talos requirements from.
+ if not self.run_local:
+ mozbase_requirements = os.path.join(
+ self.query_abs_dirs()["abs_test_install_dir"],
+ "config",
+ "mozbase_requirements.txt",
+ )
+ talos_requirements = os.path.join(self.talos_path, "requirements.txt")
+ else:
+ mozbase_requirements = os.path.join(
+ os.path.dirname(self.talos_path),
+ "config",
+ "mozbase_source_requirements.txt",
+ )
+ talos_requirements = os.path.join(
+ self.talos_path, "source_requirements.txt"
+ )
+ self.register_virtualenv_module(
+ requirements=[mozbase_requirements],
+ two_pass=True,
+ editable=True,
+ )
+ super(Talos, self).create_virtualenv()
+        # the talos harness additionally requires whatever is
+        # listed in its requirements file.
+ self.install_module(requirements=[talos_requirements])
+
+ def _validate_treeherder_data(self, parser):
+        # a late import is required because jsonschema is installed during
+        # create_virtualenv
+ import jsonschema
+
+ if len(parser.found_perf_data) != 1:
+ self.critical(
+ "PERFHERDER_DATA was seen %d times, expected 1."
+ % len(parser.found_perf_data)
+ )
+ parser.update_worst_log_and_tbpl_levels(WARNING, TBPL_WARNING)
+ return
+
+ schema_path = os.path.join(
+ external_tools_path, "performance-artifact-schema.json"
+ )
+ self.info("Validating PERFHERDER_DATA against %s" % schema_path)
+ try:
+ with open(schema_path) as f:
+ schema = json.load(f)
+ data = json.loads(parser.found_perf_data[0])
+ jsonschema.validate(data, schema)
+ except Exception:
+ self.exception("Error while validating PERFHERDER_DATA")
+ parser.update_worst_log_and_tbpl_levels(WARNING, TBPL_WARNING)
+
+ def _artifact_perf_data(self, parser, dest):
+ src = os.path.join(self.query_abs_dirs()["abs_work_dir"], "local.json")
+ try:
+ shutil.copyfile(src, dest)
+ except Exception:
+ self.critical("Error copying results %s to upload dir %s" % (src, dest))
+ parser.update_worst_log_and_tbpl_levels(CRITICAL, TBPL_FAILURE)
+
+ def run_tests(self, args=None, **kw):
+ """run Talos tests"""
+
+ # get talos options
+ options = self.talos_options(args=args, **kw)
+
+ # XXX temporary python version check
+ python = self.query_python_path()
+ self.run_command([python, "--version"])
+ parser = TalosOutputParser(
+ config=self.config, log_obj=self.log_obj, error_list=TalosErrorList
+ )
+ env = {}
+ env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ if not self.run_local:
+ env["MINIDUMP_STACKWALK"] = self.query_minidump_stackwalk()
+ env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "full"
+ if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
+ self.mkdir_p(env["MOZ_UPLOAD_DIR"])
+ env = self.query_env(partial_env=env, log_level=INFO)
+ # adjust PYTHONPATH to be able to use talos as a python package
+ if "PYTHONPATH" in env:
+ env["PYTHONPATH"] = self.talos_path + os.pathsep + env["PYTHONPATH"]
+ else:
+ env["PYTHONPATH"] = self.talos_path
+
+ if self.repo_path is not None:
+ env["MOZ_DEVELOPER_REPO_DIR"] = self.repo_path
+ if self.obj_path is not None:
+ env["MOZ_DEVELOPER_OBJ_DIR"] = self.obj_path
+
+ # sets a timeout for how long talos should run without output
+ output_timeout = self.config.get("talos_output_timeout", 3600)
+ # run talos tests
+ run_tests = os.path.join(self.talos_path, "talos", "run_tests.py")
+
+ # Dynamically set the log level based on the talos config for consistency
+ # throughout the test
+ mozlog_opts = [f"--log-tbpl-level={self.config['log_level']}"]
+
+ if not self.run_local and "suite" in self.config:
+ fname_pattern = "%s_%%s.log" % self.config["suite"]
+ mozlog_opts.append(
+ "--log-errorsummary=%s"
+ % os.path.join(env["MOZ_UPLOAD_DIR"], fname_pattern % "errorsummary")
+ )
+
+ def launch_in_debug_mode(cmdline):
+ cmdline = set(cmdline)
+ debug_opts = {"--debug", "--debugger", "--debugger_args"}
+
+ return bool(debug_opts.intersection(cmdline))
+
+ command = [python, run_tests] + options + mozlog_opts
+ if launch_in_debug_mode(command):
+ talos_process = subprocess.Popen(
+ command, cwd=self.workdir, env=env, bufsize=0
+ )
+ talos_process.wait()
+ else:
+ self.return_code = self.run_command(
+ command,
+ cwd=self.workdir,
+ output_timeout=output_timeout,
+ output_parser=parser,
+ env=env,
+ )
+ if parser.minidump_output:
+ self.info("Looking at the minidump files for debugging purposes...")
+ for item in parser.minidump_output:
+ self.run_command(["ls", "-l", item])
+
+ if self.return_code not in [0]:
+ # update the worst log level and tbpl status
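+            # exit code 1 maps to test failures (warning), 4 to an
+            # automatic retry; anything else is treated as a failure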
+ log_level = ERROR
+ tbpl_level = TBPL_FAILURE
+ if self.return_code == 1:
+ log_level = WARNING
+ tbpl_level = TBPL_WARNING
+ if self.return_code == 4:
+ log_level = WARNING
+ tbpl_level = TBPL_RETRY
+
+ parser.update_worst_log_and_tbpl_levels(log_level, tbpl_level)
+ elif "--no-upload-results" not in options:
+ if not self.gecko_profile:
+ self._validate_treeherder_data(parser)
+ if not self.run_local:
+ # copy results to upload dir so they are included as an artifact
+ dest = os.path.join(env["MOZ_UPLOAD_DIR"], "perfherder-data.json")
+ self._artifact_perf_data(parser, dest)
+
+ self.record_status(parser.worst_tbpl_status, level=parser.worst_log_level)
diff --git a/testing/mozharness/mozharness/mozilla/testing/testbase.py b/testing/mozharness/mozharness/mozilla/testing/testbase.py
new file mode 100755
index 0000000000..e8f37ceb8b
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/testbase.py
@@ -0,0 +1,767 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import copy
+import json
+import os
+import platform
+import ssl
+
+from six.moves import urllib
+from six.moves.urllib.parse import ParseResult, urlparse
+
+from mozharness.base.errors import BaseErrorList
+from mozharness.base.log import FATAL, WARNING
+from mozharness.base.python import (
+ ResourceMonitoringMixin,
+ VirtualenvMixin,
+ virtualenv_config_options,
+)
+from mozharness.lib.python.authentication import get_credentials
+from mozharness.mozilla.automation import TBPL_WARNING, AutomationMixin
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.try_tools import TryToolsMixin, try_config_options
+from mozharness.mozilla.testing.unittest import DesktopUnittestOutputParser
+from mozharness.mozilla.testing.verify_tools import (
+ VerifyToolsMixin,
+ verify_config_options,
+)
+from mozharness.mozilla.tooltool import TooltoolMixin
+
+INSTALLER_SUFFIXES = (
+ ".apk", # Android
+ ".tar.bz2",
+ ".tar.gz", # Linux
+ ".dmg", # Mac
+ ".installer-stub.exe",
+ ".installer.exe",
+ ".exe",
+ ".zip", # Windows
+)
+
+# https://searchfox.org/mozilla-central/source/testing/config/tooltool-manifests
+TOOLTOOL_PLATFORM_DIR = {
+ "linux": "linux32",
+ "linux64": "linux64",
+ "win32": "win32",
+ "win64": "win32",
+ "macosx": "macosx64",
+}
+
+
+testing_config_options = (
+ [
+ [
+ ["--installer-url"],
+ {
+ "action": "store",
+ "dest": "installer_url",
+ "default": None,
+ "help": "URL to the installer to install",
+ },
+ ],
+ [
+ ["--installer-path"],
+ {
+ "action": "store",
+ "dest": "installer_path",
+ "default": None,
+ "help": "Path to the installer to install. "
+ "This is set automatically if run with --download-and-extract.",
+ },
+ ],
+ [
+ ["--binary-path"],
+ {
+ "action": "store",
+ "dest": "binary_path",
+ "default": None,
+ "help": "Path to installed binary. This is set automatically if run with --install.", # NOQA: E501
+ },
+ ],
+ [
+ ["--exe-suffix"],
+ {
+ "action": "store",
+ "dest": "exe_suffix",
+ "default": None,
+ "help": "Executable suffix for binaries on this platform",
+ },
+ ],
+ [
+ ["--test-url"],
+ {
+ "action": "store",
+ "dest": "test_url",
+ "default": None,
+ "help": "URL to the zip file containing the actual tests",
+ },
+ ],
+ [
+ ["--test-packages-url"],
+ {
+ "action": "store",
+ "dest": "test_packages_url",
+ "default": None,
+ "help": "URL to a json file describing which tests archives to download",
+ },
+ ],
+ [
+ ["--jsshell-url"],
+ {
+ "action": "store",
+ "dest": "jsshell_url",
+ "default": None,
+ "help": "URL to the jsshell to install",
+ },
+ ],
+ [
+ ["--download-symbols"],
+ {
+ "action": "store",
+ "dest": "download_symbols",
+ "type": "choice",
+ "choices": ["ondemand", "true"],
+ "help": "Download and extract crash reporter symbols.",
+ },
+ ],
+ ]
+ + copy.deepcopy(virtualenv_config_options)
+ + copy.deepcopy(try_config_options)
+ + copy.deepcopy(verify_config_options)
+)
+
+
+# TestingMixin {{{1
+class TestingMixin(
+ VirtualenvMixin,
+ AutomationMixin,
+ ResourceMonitoringMixin,
+ TooltoolMixin,
+ TryToolsMixin,
+ VerifyToolsMixin,
+):
+ """
+ The steps to identify + download the proper bits for [browser] unit
+ tests and Talos.
+ """
+
+ installer_url = None
+ installer_path = None
+ binary_path = None
+ test_url = None
+ test_packages_url = None
+ symbols_url = None
+ symbols_path = None
+ jsshell_url = None
+ minidump_stackwalk_path = None
+ ssl_context = None
+
+ def query_build_dir_url(self, file_name):
+ """
+ Resolve a file name to a potential url in the build upload directory where
+ that file can be found.
+ """
+ if self.test_packages_url:
+ reference_url = self.test_packages_url
+ elif self.installer_url:
+ reference_url = self.installer_url
+ else:
+ self.fatal(
+ "Can't figure out build directory urls without an installer_url "
+ "or test_packages_url!"
+ )
+
+ reference_url = urllib.parse.unquote(reference_url)
+ parts = list(urlparse(reference_url))
+
+ last_slash = parts[2].rfind("/")
+ parts[2] = "/".join([parts[2][:last_slash], file_name])
+
+ url = ParseResult(*parts).geturl()
+
+ return url
+
+ def query_prefixed_build_dir_url(self, suffix):
+ """Resolve a file name prefixed with platform and build details to a potential url
+ in the build upload directory where that file can be found.
+ """
+ if self.test_packages_url:
+ reference_suffixes = [".test_packages.json"]
+ reference_url = self.test_packages_url
+ elif self.installer_url:
+ reference_suffixes = INSTALLER_SUFFIXES
+ reference_url = self.installer_url
+ else:
+ self.fatal(
+ "Can't figure out build directory urls without an installer_url "
+ "or test_packages_url!"
+ )
+
+ url = None
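+        # strip whichever known suffix the reference URL ends with and
+        # replace it with the requested one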
+ for reference_suffix in reference_suffixes:
+ if reference_url.endswith(reference_suffix):
+ url = reference_url[: -len(reference_suffix)] + suffix
+ break
+
+ return url
+
+ def query_symbols_url(self, raise_on_failure=False):
+ if self.symbols_url:
+ return self.symbols_url
+
+ elif self.installer_url:
+ symbols_url = self.query_prefixed_build_dir_url(
+ ".crashreporter-symbols.zip"
+ )
+
+            # Check if the URL exists. If not, leave symbols_url as None so
+            # mozcrash can auto-check for symbols
+ try:
+ if symbols_url:
+ self._urlopen(symbols_url, timeout=120)
+ self.symbols_url = symbols_url
+ except Exception as ex:
+ self.warning(
+ "Cannot open symbols url %s (installer url: %s): %s"
+ % (symbols_url, self.installer_url, ex)
+ )
+ if raise_on_failure:
+ raise
+
+ # If no symbols URL can be determined let minidump-stackwalk query the symbols.
+ # As of now this only works for Nightly and release builds.
+ if not self.symbols_url:
+ self.warning(
+ "No symbols_url found. Let minidump-stackwalk query for symbols."
+ )
+
+ return self.symbols_url
+
+ def _pre_config_lock(self, rw_config):
+ for i, (target_file, target_dict) in enumerate(
+ rw_config.all_cfg_files_and_dicts
+ ):
+ if "developer_config" in target_file:
+ self._developer_mode_changes(rw_config)
+
+ def _developer_mode_changes(self, rw_config):
+ """This function is called when you append the config called
+ developer_config.py. This allows you to run a job
+ outside of the Release Engineering infrastructure.
+
+ What this functions accomplishes is:
+ * --installer-url is set
+ * --test-url is set if needed
+ * every url is substituted by another external to the
+ Release Engineering network
+ """
+ c = self.config
+ orig_config = copy.deepcopy(c)
+ self.actions = tuple(rw_config.actions)
+
+ def _replace_url(url, changes):
+ for from_, to_ in changes:
+ if url.startswith(from_):
+ new_url = url.replace(from_, to_)
+ self.info("Replacing url %s -> %s" % (url, new_url))
+ return new_url
+ return url
+
+ if c.get("installer_url") is None:
+ self.exception("You must use --installer-url with developer_config.py")
+ if c.get("require_test_zip"):
+ if not c.get("test_url") and not c.get("test_packages_url"):
+ self.exception(
+ "You must use --test-url or --test-packages-url with "
+ "developer_config.py"
+ )
+
+ c["installer_url"] = _replace_url(c["installer_url"], c["replace_urls"])
+ if c.get("test_url"):
+ c["test_url"] = _replace_url(c["test_url"], c["replace_urls"])
+ if c.get("test_packages_url"):
+ c["test_packages_url"] = _replace_url(
+ c["test_packages_url"], c["replace_urls"]
+ )
+
+ for key, value in self.config.items():
+ if type(value) == str and value.startswith("http"):
+ self.config[key] = _replace_url(value, c["replace_urls"])
+
+ # Any changes to c means that we need credentials
+ if not c == orig_config:
+ get_credentials()
+
+ def _urlopen(self, url, **kwargs):
+ """
+        This function helps with downloading files while outside
+        of the releng network.
+ """
+ # Code based on http://code.activestate.com/recipes/305288-http-basic-authentication
+ def _urlopen_basic_auth(url, **kwargs):
+ self.info("We want to download this file %s" % url)
+ if not hasattr(self, "https_username"):
+ self.info(
+ "NOTICE: Files downloaded from outside of "
+ "Release Engineering network require LDAP "
+ "credentials."
+ )
+
+ self.https_username, self.https_password = get_credentials()
+ # This creates a password manager
+ passman = urllib.request.HTTPPasswordMgrWithDefaultRealm()
+ # Because we have put None at the start it will use this username/password
+ # combination from here on
+ passman.add_password(None, url, self.https_username, self.https_password)
+ authhandler = urllib.request.HTTPBasicAuthHandler(passman)
+
+ return urllib.request.build_opener(authhandler).open(url, **kwargs)
+
+ # If we have the developer_run flag enabled then we will switch
+ # URLs to the right place and enable http authentication
+ if "developer_config.py" in self.config["config_files"]:
+ return _urlopen_basic_auth(url, **kwargs)
+ else:
+ # windows certificates need to be refreshed (https://bugs.python.org/issue36011)
+ if self.platform_name() in ("win64",) and platform.architecture()[0] in (
+ "x64",
+ ):
+ if self.ssl_context is None:
+ self.ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS)
+ self.ssl_context.load_default_certs()
+ return urllib.request.urlopen(url, context=self.ssl_context, **kwargs)
+ else:
+ return urllib.request.urlopen(url, **kwargs)
+
+ def _query_binary_version(self, regex, cmd):
+ output = self.get_output_from_command(cmd, silent=False)
+ return regex.search(output).group(0)
+
+ def preflight_download_and_extract(self):
+ message = ""
+ if not self.installer_url:
+ message += """installer_url isn't set!
+
+You can set this by specifying --installer-url URL
+"""
+ if (
+ self.config.get("require_test_zip")
+ and not self.test_url
+ and not self.test_packages_url
+ ):
+ message += """test_url isn't set!
+
+You can set this by specifying --test-url URL
+"""
+ if message:
+ self.fatal(message + "Can't run download-and-extract... exiting")
+
+ def _read_packages_manifest(self):
+ dirs = self.query_abs_dirs()
+ source = self.download_file(
+ self.test_packages_url, parent_dir=dirs["abs_work_dir"], error_level=FATAL
+ )
+
+ with self.opened(os.path.realpath(source)) as (fh, err):
+ package_requirements = json.load(fh)
+ if not package_requirements or err:
+ self.fatal(
+ "There was an error reading test package requirements from %s "
+ "requirements: `%s` - error: `%s`"
+ % (source, package_requirements or "None", err or "No error")
+ )
+ return package_requirements
+
+ def _download_test_packages(self, suite_categories, extract_dirs):
+ # Some platforms define more suite categories/names than others.
+        # This is a difference in the convention of the configs more than
+        # in how these tests are run, so we pave over these differences here.
+ aliases = {
+ "mochitest-chrome": "mochitest",
+ "mochitest-media": "mochitest",
+ "mochitest-plain": "mochitest",
+ "mochitest-plain-gpu": "mochitest",
+ "mochitest-webgl1-core": "mochitest",
+ "mochitest-webgl1-ext": "mochitest",
+ "mochitest-webgl2-core": "mochitest",
+ "mochitest-webgl2-ext": "mochitest",
+ "mochitest-webgl2-deqp": "mochitest",
+ "mochitest-webgpu": "mochitest",
+ "geckoview": "mochitest",
+ "geckoview-junit": "mochitest",
+ "reftest-qr": "reftest",
+ "crashtest": "reftest",
+ "crashtest-qr": "reftest",
+ "reftest-debug": "reftest",
+ "crashtest-debug": "reftest",
+ }
+ suite_categories = [aliases.get(name, name) for name in suite_categories]
+
+ dirs = self.query_abs_dirs()
+ test_install_dir = dirs.get(
+ "abs_test_install_dir", os.path.join(dirs["abs_work_dir"], "tests")
+ )
+ self.mkdir_p(test_install_dir)
+ package_requirements = self._read_packages_manifest()
+ target_packages = []
+ c = self.config
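+        # If the config names specific suites for a category, pull in only
+        # the packages those suites require; otherwise fall back to the
+        # category itself (or the common zip).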
+ for category in suite_categories:
+ specified_suites = c.get("specified_{}_suites".format(category))
+ if specified_suites:
+ found = False
+ for specified_suite in specified_suites:
+ if specified_suite in package_requirements:
+ target_packages.extend(package_requirements[specified_suite])
+ found = True
+ if found:
+ continue
+
+ if category in package_requirements:
+ target_packages.extend(package_requirements[category])
+ else:
+                # If we don't have harness-specific requirements, assume the common zip
+ # has everything we need to run tests for this suite.
+ target_packages.extend(package_requirements["common"])
+
+ # eliminate duplicates -- no need to download anything twice
+ target_packages = list(set(target_packages))
+ self.info(
+ "Downloading packages: %s for test suite categories: %s"
+ % (target_packages, suite_categories)
+ )
+ for file_name in target_packages:
+ target_dir = test_install_dir
+ unpack_dirs = extract_dirs
+
+ if "common.tests" in file_name and isinstance(unpack_dirs, list):
+ # Ensure that the following files are always getting extracted
+ required_files = [
+ "mach",
+ "mozinfo.json",
+ ]
+ for req_file in required_files:
+ if req_file not in unpack_dirs:
+ self.info(
+ "Adding '{}' for extraction from common.tests archive".format(
+ req_file
+ )
+ )
+ unpack_dirs.append(req_file)
+
+ if "jsshell-" in file_name or file_name == "target.jsshell.zip":
+ self.info("Special-casing the jsshell zip file")
+ unpack_dirs = None
+ target_dir = dirs["abs_test_bin_dir"]
+
+ if "web-platform" in file_name:
+ self.info("Extracting everything from web-platform archive")
+ unpack_dirs = None
+
+ url = self.query_build_dir_url(file_name)
+ self.download_unpack(url, target_dir, extract_dirs=unpack_dirs)
+
+ def _download_test_zip(self, extract_dirs=None):
+ dirs = self.query_abs_dirs()
+ test_install_dir = dirs.get(
+ "abs_test_install_dir", os.path.join(dirs["abs_work_dir"], "tests")
+ )
+ self.download_unpack(self.test_url, test_install_dir, extract_dirs=extract_dirs)
+
+ def structured_output(self, suite_category):
+ """Defines whether structured logging is in use in this configuration. This
+ may need to be replaced with data from a different config at the resolution
+ of bug 1070041 and related bugs.
+ """
+ return (
+ "structured_suites" in self.config
+ and suite_category in self.config["structured_suites"]
+ )
+
+ def get_test_output_parser(
+ self,
+ suite_category,
+ strict=False,
+ fallback_parser_class=DesktopUnittestOutputParser,
+ **kwargs
+ ):
+ """Derive and return an appropriate output parser, either the structured
+ output parser or a fallback based on the type of logging in use as determined by
+ configuration.
+ """
+ if not self.structured_output(suite_category):
+ if fallback_parser_class is DesktopUnittestOutputParser:
+ return DesktopUnittestOutputParser(
+ suite_category=suite_category, **kwargs
+ )
+ return fallback_parser_class(**kwargs)
+ self.info("Structured output parser in use for %s." % suite_category)
+ return StructuredOutputParser(
+ suite_category=suite_category, strict=strict, **kwargs
+ )
+
+ def _download_installer(self):
+ file_name = None
+ if self.installer_path:
+ file_name = self.installer_path
+ dirs = self.query_abs_dirs()
+ source = self.download_file(
+ self.installer_url,
+ file_name=file_name,
+ parent_dir=dirs["abs_work_dir"],
+ error_level=FATAL,
+ )
+ self.installer_path = os.path.realpath(source)
+
+ def _download_and_extract_symbols(self):
+ dirs = self.query_abs_dirs()
+ if self.config.get("download_symbols") == "ondemand":
+ self.symbols_url = self.retry(
+ action=self.query_symbols_url,
+ kwargs={"raise_on_failure": True},
+ sleeptime=10,
+ failure_status=None,
+ )
+ self.symbols_path = self.symbols_url
+ return
+
+ else:
+            # In the 'ondemand' case we're OK to proceed without getting hold of
+            # the symbols right away; in all other cases we need to at least retry
+            # before giving up (e.g. debug tests need symbols)
+ self.symbols_url = self.retry(
+ action=self.query_symbols_url,
+ kwargs={"raise_on_failure": True},
+ sleeptime=20,
+ error_level=FATAL,
+ error_message="We can't proceed without downloading symbols.",
+ )
+ if not self.symbols_path:
+ self.symbols_path = os.path.join(dirs["abs_work_dir"], "symbols")
+
+ if self.symbols_url:
+ self.download_unpack(self.symbols_url, self.symbols_path)
+
+ def download_and_extract(self, extract_dirs=None, suite_categories=None):
+ """
+ download and extract test zip / download installer
+ """
+ # Swap plain http for https when we're downloading from ftp
+ # See bug 957502 and friends
+ from_ = "http://ftp.mozilla.org"
+ to_ = "https://ftp-ssl.mozilla.org"
+ for attr in "symbols_url", "installer_url", "test_packages_url", "test_url":
+ url = getattr(self, attr)
+ if url and url.startswith(from_):
+ new_url = url.replace(from_, to_)
+ self.info("Replacing url %s -> %s" % (url, new_url))
+ setattr(self, attr, new_url)
+
+ if "test_url" in self.config:
+            # A user has specified a test_url directly; any test_packages_url
+            # will be ignored.
+ if self.test_packages_url:
+ self.error(
+                    'Test data will be downloaded from "%s"; the specified test '
+                    'package data at "%s" will be ignored.'
+ % (self.config.get("test_url"), self.test_packages_url)
+ )
+
+ self._download_test_zip(extract_dirs)
+ else:
+ if not self.test_packages_url:
+ # The caller intends to download harness specific packages, but doesn't know
+ # where the packages manifest is located. This is the case when the
+ # test package manifest isn't set as a property, which is true
+ # for some self-serve jobs and platforms using parse_make_upload.
+ self.test_packages_url = self.query_prefixed_build_dir_url(
+ ".test_packages.json"
+ )
+
+ suite_categories = suite_categories or ["common"]
+ self._download_test_packages(suite_categories, extract_dirs)
+
+ self._download_installer()
+ if self.config.get("download_symbols"):
+ self._download_and_extract_symbols()
+
+ # create_virtualenv is in VirtualenvMixin.
+
+ def preflight_install(self):
+ if not self.installer_path:
+ if self.config.get("installer_path"):
+ self.installer_path = self.config["installer_path"]
+ else:
+ self.fatal(
+ """installer_path isn't set!
+
+You can set this by:
+
+1. specifying --installer-path PATH, or
+2. running the download-and-extract action
+"""
+ )
+ if not self.is_python_package_installed("mozInstall"):
+ self.fatal(
+ """Can't call install() without mozinstall!
+Did you run with --create-virtualenv? Is mozinstall in virtualenv_modules?"""
+ )
+
+ def install_app(self, app=None, target_dir=None, installer_path=None):
+ """Dependent on mozinstall"""
+ # install the application
+ cmd = [self.query_python_path("mozinstall")]
+ if app:
+ cmd.extend(["--app", app])
+ dirs = self.query_abs_dirs()
+ if not target_dir:
+ target_dir = dirs.get(
+ "abs_app_install_dir", os.path.join(dirs["abs_work_dir"], "application")
+ )
+ self.mkdir_p(target_dir)
+ if not installer_path:
+ installer_path = self.installer_path
+ cmd.extend([installer_path, "--destination", target_dir])
+ # TODO we'll need some error checking here
+ return self.get_output_from_command(
+ cmd, halt_on_failure=True, fatal_exit_code=3
+ )
+
+ def install(self):
+ self.binary_path = self.install_app(app=self.config.get("application"))
+ self.install_dir = os.path.dirname(self.binary_path)
+
+ def uninstall_app(self, install_dir=None):
+ """Dependent on mozinstall"""
+ # uninstall the application
+ cmd = self.query_exe(
+ "mozuninstall",
+ default=self.query_python_path("mozuninstall"),
+ return_type="list",
+ )
+ dirs = self.query_abs_dirs()
+ if not install_dir:
+ install_dir = dirs.get(
+ "abs_app_install_dir", os.path.join(dirs["abs_work_dir"], "application")
+ )
+ cmd.append(install_dir)
+ # TODO we'll need some error checking here
+ self.get_output_from_command(cmd, halt_on_failure=True, fatal_exit_code=3)
+
+ def uninstall(self):
+ self.uninstall_app()
+
+ def query_minidump_stackwalk(self, manifest=None):
+ if self.minidump_stackwalk_path:
+ return self.minidump_stackwalk_path
+
+ minidump_stackwalk_path = None
+
+ if "MOZ_FETCHES_DIR" in os.environ:
+ minidump_stackwalk_path = os.path.join(
+ os.environ["MOZ_FETCHES_DIR"],
+ "minidump-stackwalk",
+ "minidump-stackwalk",
+ )
+
+ if self.platform_name() in ("win32", "win64"):
+ minidump_stackwalk_path += ".exe"
+
+ if not minidump_stackwalk_path or not os.path.isfile(minidump_stackwalk_path):
+ self.error("minidump-stackwalk path was not fetched?")
+            # don't burn the job, but at least turn it orange so the problem is noticed
+ self.record_status(TBPL_WARNING, WARNING)
+ return None
+
+ self.minidump_stackwalk_path = minidump_stackwalk_path
+ return self.minidump_stackwalk_path
+
+ def query_options(self, *args, **kwargs):
+ if "str_format_values" in kwargs:
+ str_format_values = kwargs.pop("str_format_values")
+ else:
+ str_format_values = {}
+
+ arguments = []
+
+ for arg in args:
+ if arg is not None:
+ arguments.extend(argument % str_format_values for argument in arg)
+
+ return arguments
+
+ def query_tests_args(self, *args, **kwargs):
+ if "str_format_values" in kwargs:
+ str_format_values = kwargs.pop("str_format_values")
+ else:
+ str_format_values = {}
+
+ arguments = []
+
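+        # Only the last non-empty argument group is used; it is forwarded
+        # to the harness after a literal "--" separator.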
+ for arg in reversed(args):
+ if arg:
+ arguments.append("--")
+ arguments.extend(argument % str_format_values for argument in arg)
+ break
+
+ return arguments
+
+ def _run_cmd_checks(self, suites):
+ if not suites:
+ return
+ dirs = self.query_abs_dirs()
+ for suite in suites:
+ # XXX platform.architecture() may give incorrect values for some
+            # platforms like mac, as executable files may be universal
+ # files containing multiple architectures
+ # NOTE 'enabled' is only here while we have unconsolidated configs
+ if not suite["enabled"]:
+ continue
+ if suite.get("architectures"):
+ arch = platform.architecture()[0]
+ if arch not in suite["architectures"]:
+ continue
+ cmd = suite["cmd"]
+ name = suite["name"]
+ self.info(
+ "Running pre test command %(name)s with '%(cmd)s'"
+ % {"name": name, "cmd": " ".join(cmd)}
+ )
+ self.run_command(
+ cmd,
+ cwd=dirs["abs_work_dir"],
+ error_list=BaseErrorList,
+ halt_on_failure=suite["halt_on_failure"],
+ fatal_exit_code=suite.get("fatal_exit_code", 3),
+ )
+
+ def preflight_run_tests(self):
+ """preflight commands for all tests"""
+ c = self.config
+ if c.get("skip_preflight"):
+ self.info("skipping preflight")
+ return
+
+ if c.get("run_cmd_checks_enabled"):
+ self._run_cmd_checks(c.get("preflight_run_cmd_suites", []))
+ elif c.get("preflight_run_cmd_suites"):
+ self.warning(
+ "Proceeding without running prerun test commands."
+ " These are often OS specific and disabling them may"
+ " result in spurious test results!"
+ )
+
+ def postflight_run_tests(self):
+ """preflight commands for all tests"""
+ c = self.config
+ if c.get("run_cmd_checks_enabled"):
+ self._run_cmd_checks(c.get("postflight_run_cmd_suites", []))
+
+ def query_abs_dirs(self):
+ abs_dirs = super(TestingMixin, self).query_abs_dirs()
+ if "MOZ_FETCHES_DIR" in os.environ:
+ abs_dirs["abs_fetches_dir"] = os.environ["MOZ_FETCHES_DIR"]
+ return abs_dirs
diff --git a/testing/mozharness/mozharness/mozilla/testing/try_tools.py b/testing/mozharness/mozharness/mozilla/testing/try_tools.py
new file mode 100644
index 0000000000..ac92ef534c
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/try_tools.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import argparse
+import os
+import re
+from collections import defaultdict
+
+import six
+
+from mozharness.base.script import PostScriptAction
+from mozharness.base.transfer import TransferMixin
+
+try_config_options = [
+ [
+ ["--try-message"],
+ {
+ "action": "store",
+ "dest": "try_message",
+ "default": None,
+ "help": "try syntax string to select tests to run",
+ },
+ ],
+]
+
+test_flavors = {
+ "browser-chrome": {},
+ "browser-a11y": {},
+ "browser-media": {},
+ "chrome": {},
+ "devtools-chrome": {},
+ "mochitest": {},
+ "xpcshell": {},
+ "reftest": {"path": lambda x: os.path.join("tests", "reftest", "tests", x)},
+ "crashtest": {"path": lambda x: os.path.join("tests", "reftest", "tests", x)},
+ "remote": {"path": lambda x: os.path.join("remote", "test", "browser", x)},
+ "web-platform-tests": {
+ "path": lambda x: os.path.join("tests", x.split("testing" + os.path.sep)[1])
+ },
+ "web-platform-tests-reftests": {
+ "path": lambda x: os.path.join("tests", x.split("testing" + os.path.sep)[1])
+ },
+ "web-platform-tests-wdspec": {
+ "path": lambda x: os.path.join("tests", x.split("testing" + os.path.sep)[1])
+ },
+}
+
+
+class TryToolsMixin(TransferMixin):
+ """Utility functions for an interface between try syntax and out test harnesses.
+ Requires log and script mixins."""
+
+ harness_extra_args = None
+ try_test_paths = {}
+ known_try_arguments = {
+ "--tag": (
+ {
+ "action": "append",
+ "dest": "tags",
+ "default": None,
+ },
+ (
+ "browser-chrome",
+ "browser-a11y",
+ "browser-media",
+ "chrome",
+ "devtools-chrome",
+ "marionette",
+ "mochitest",
+ "web-plaftform-tests",
+ "xpcshell",
+ ),
+ ),
+ }
+
+ def _extract_try_message(self):
+ msg = None
+ if "try_message" in self.config and self.config["try_message"]:
+ msg = self.config["try_message"]
+ elif "TRY_COMMIT_MSG" in os.environ:
+ msg = os.environ["TRY_COMMIT_MSG"]
+
+ if not msg:
+ self.warning("Try message not found.")
+ return msg
+
+ def _extract_try_args(self, msg):
+ """Returns a list of args from a try message, for parsing"""
+ if not msg:
+ return None
+ all_try_args = None
+ for line in msg.splitlines():
+ if "try: " in line:
+ # Autoland adds quotes to try strings that will confuse our
+ # args later on.
+ if line.startswith('"') and line.endswith('"'):
+ line = line[1:-1]
+ # Allow spaces inside of [filter expressions]
+ try_message = line.strip().split("try: ", 1)
+ all_try_args = re.findall(r"(?:\[.*?\]|\S)+", try_message[1])
+ break
+ if not all_try_args:
+ self.warning("Try syntax not found in: %s." % msg)
+ return all_try_args
+
+ def try_message_has_flag(self, flag, message=None):
+ """
+ Returns True if --`flag` is present in message.
+ """
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--" + flag, action="store_true")
+ message = message or self._extract_try_message()
+ if not message:
+ return False
+        # fall back to an empty list so argparse doesn't read sys.argv
+        msg_list = self._extract_try_args(message) or []
+ args, _ = parser.parse_known_args(msg_list)
+ return getattr(args, flag, False)
+
+ def _is_try(self):
+ repo_path = None
+ get_branch = self.config.get("branch", repo_path)
+ if get_branch is not None:
+ on_try = "try" in get_branch or "Try" in get_branch
+ elif os.environ is not None:
+ on_try = "TRY_COMMIT_MSG" in os.environ
+ else:
+ on_try = False
+ return on_try
+
+ @PostScriptAction("download-and-extract")
+ def set_extra_try_arguments(self, action, success=None):
+ """Finds a commit message and parses it for extra arguments to pass to the test
+ harness command line and test paths used to filter manifests.
+
+ Extracting arguments from a commit message taken directly from the try_parser.
+ """
+ if not self._is_try():
+ return
+
+ msg = self._extract_try_message()
+ if not msg:
+ return
+
+ all_try_args = self._extract_try_args(msg)
+ if not all_try_args:
+ return
+
+ parser = argparse.ArgumentParser(
+ description=(
+ "Parse an additional subset of arguments passed to try syntax"
+ " and forward them to the underlying test harness command."
+ )
+ )
+
+ label_dict = {}
+
+ def label_from_val(val):
+ if val in label_dict:
+ return label_dict[val]
+ return "--%s" % val.replace("_", "-")
+
+ for label, (opts, _) in six.iteritems(self.known_try_arguments):
+ if "action" in opts and opts["action"] not in (
+ "append",
+ "store",
+ "store_true",
+ "store_false",
+ ):
+ self.fatal(
+ "Try syntax does not support passing custom or store_const "
+ "arguments to the harness process."
+ )
+ if "dest" in opts:
+ label_dict[opts["dest"]] = label
+
+ parser.add_argument(label, **opts)
+
+ parser.add_argument("--try-test-paths", nargs="*")
+ (args, _) = parser.parse_known_args(all_try_args)
+ self.try_test_paths = self._group_test_paths(args.try_test_paths)
+ del args.try_test_paths
+
+ out_args = defaultdict(list)
+ # This is a pretty hacky way to echo arguments down to the harness.
+ # Hopefully this can be improved once we have a configuration system
+ # in tree for harnesses that relies less on a command line.
+ for arg, value in six.iteritems(vars(args)):
+ if value:
+ label = label_from_val(arg)
+ _, flavors = self.known_try_arguments[label]
+
+ for f in flavors:
+ if isinstance(value, bool):
+ # A store_true or store_false argument.
+ out_args[f].append(label)
+ elif isinstance(value, list):
+ out_args[f].extend(["%s=%s" % (label, el) for el in value])
+ else:
+ out_args[f].append("%s=%s" % (label, value))
+
+ self.harness_extra_args = dict(out_args)
+
+ def _group_test_paths(self, args):
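+        # Each argument has the form "<suite>:<path>"; group paths by suite.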
+ rv = defaultdict(list)
+
+ if args is None:
+ return rv
+
+ for item in args:
+ suite, path = item.split(":", 1)
+ rv[suite].append(path)
+ return rv
+
+ def try_args(self, flavor):
+ """Get arguments, test_list derived from try syntax to apply to a command"""
+ args = []
+ if self.harness_extra_args:
+ args = self.harness_extra_args.get(flavor, [])[:]
+
+ if self.try_test_paths.get(flavor):
+ self.info(
+ "TinderboxPrint: Tests will be run from the following "
+ "files: %s." % ",".join(self.try_test_paths[flavor])
+ )
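+            # Explicit test paths override chunking; force a single chunk so
+            # every requested test runs.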
+ args.extend(["--this-chunk=1", "--total-chunks=1"])
+
+ path_func = test_flavors[flavor].get("path", lambda x: x)
+ tests = [
+ path_func(os.path.normpath(item))
+ for item in self.try_test_paths[flavor]
+ ]
+ else:
+ tests = []
+
+ if args or tests:
+ self.info(
+ "TinderboxPrint: The following arguments were forwarded from mozharness "
+ "to the test command:\nTinderboxPrint: \t%s -- %s"
+ % (" ".join(args), " ".join(tests))
+ )
+
+ return args, tests
diff --git a/testing/mozharness/mozharness/mozilla/testing/unittest.py b/testing/mozharness/mozharness/mozilla/testing/unittest.py
new file mode 100755
index 0000000000..be144bbe1f
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/unittest.py
@@ -0,0 +1,255 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import os
+import re
+
+from mozharness.base.log import CRITICAL, ERROR, INFO, WARNING, OutputParser
+from mozharness.mozilla.automation import (
+ TBPL_FAILURE,
+ TBPL_RETRY,
+ TBPL_SUCCESS,
+ TBPL_WARNING,
+ TBPL_WORST_LEVEL_TUPLE,
+)
+from mozharness.mozilla.testing.errors import TinderBoxPrintRe
+
+SUITE_CATEGORIES = ["mochitest", "reftest", "xpcshell"]
+
+
+def tbox_print_summary(
+ pass_count, fail_count, known_fail_count=None, crashed=False, leaked=False
+):
+ emphasize_fail_text = '<em class="testfail">%s</em>'
+
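+    # The returned summary has the form "<pass>/<fail>[/<known_fail>]", with
+    # failing counts wrapped in the markup above and crash/leak annotations
+    # appended when applicable.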
+ if (
+ pass_count < 0
+ or fail_count < 0
+ or (known_fail_count is not None and known_fail_count < 0)
+ ):
+ summary = emphasize_fail_text % "T-FAIL"
+ elif (
+ pass_count == 0
+ and fail_count == 0
+ and (known_fail_count == 0 or known_fail_count is None)
+ ):
+ summary = emphasize_fail_text % "T-FAIL"
+ else:
+ str_fail_count = str(fail_count)
+ if fail_count > 0:
+ str_fail_count = emphasize_fail_text % str_fail_count
+ summary = "%d/%s" % (pass_count, str_fail_count)
+ if known_fail_count is not None:
+ summary += "/%d" % known_fail_count
+ # Format the crash status.
+ if crashed:
+ summary += "&nbsp;%s" % emphasize_fail_text % "CRASH"
+ # Format the leak status.
+ if leaked is not False:
+ summary += "&nbsp;%s" % emphasize_fail_text % ((leaked and "LEAK") or "L-FAIL")
+ return summary
+
+
+class TestSummaryOutputParserHelper(OutputParser):
+ def __init__(self, regex=re.compile(r"(passed|failed|todo): (\d+)"), **kwargs):
+ self.regex = regex
+ self.failed = 0
+ self.passed = 0
+ self.todo = 0
+ self.last_line = None
+ self.tbpl_status = TBPL_SUCCESS
+ self.worst_log_level = INFO
+ super(TestSummaryOutputParserHelper, self).__init__(**kwargs)
+
+ def parse_single_line(self, line):
+ super(TestSummaryOutputParserHelper, self).parse_single_line(line)
+ self.last_line = line
+ m = self.regex.search(line)
+ if m:
+ try:
+ setattr(self, m.group(1), int(m.group(2)))
+ except ValueError:
+ # ignore bad values
+ pass
+
+ def evaluate_parser(self, return_code, success_codes=None, previous_summary=None):
+ # TestSummaryOutputParserHelper is for Marionette, which doesn't support test-verify
+ # When it does we can reset the internal state variables as needed
+ joined_summary = previous_summary
+
+ if return_code == 0 and self.passed > 0 and self.failed == 0:
+ self.tbpl_status = TBPL_SUCCESS
+ elif return_code == 10 and self.failed > 0:
+ self.tbpl_status = TBPL_WARNING
+ else:
+ self.tbpl_status = TBPL_FAILURE
+ self.worst_log_level = ERROR
+
+ return (self.tbpl_status, self.worst_log_level, joined_summary)
+
+ def print_summary(self, suite_name):
+ # generate the TinderboxPrint line for TBPL
+ emphasize_fail_text = '<em class="testfail">%s</em>'
+ failed = "0"
+ if self.passed == 0 and self.failed == 0:
+ self.tsummary = emphasize_fail_text % "T-FAIL"
+ else:
+ if self.failed > 0:
+ failed = emphasize_fail_text % str(self.failed)
+ self.tsummary = "%d/%s/%d" % (self.passed, failed, self.todo)
+
+ self.info("TinderboxPrint: %s<br/>%s\n" % (suite_name, self.tsummary))
+
+ def append_tinderboxprint_line(self, suite_name):
+ self.print_summary(suite_name)
+
+
+class DesktopUnittestOutputParser(OutputParser):
+ """
+ A class that extends OutputParser such that it can parse the number of
+ passed/failed/todo tests from the output.
+ """
+
+ def __init__(self, suite_category, **kwargs):
+        # worst_log_level is already defined by the parent OutputParser;
+        # it is redefined here to keep pylint happy
+ self.worst_log_level = INFO
+ super(DesktopUnittestOutputParser, self).__init__(**kwargs)
+ self.summary_suite_re = TinderBoxPrintRe.get("%s_summary" % suite_category, {})
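+        # When present, summary_suite_re is a dict carrying a compiled
+        # "regex" plus "pass_group"/"fail_group"/"known_fail_group" strings
+        # that parse_single_line matches against; it is empty when no summary
+        # regex is defined for this suite category.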
+ self.harness_error_re = TinderBoxPrintRe["harness_error"]["minimum_regex"]
+ self.full_harness_error_re = TinderBoxPrintRe["harness_error"]["full_regex"]
+ self.harness_retry_re = TinderBoxPrintRe["harness_error"]["retry_regex"]
+ self.fail_count = -1
+ self.pass_count = -1
+ # known_fail_count does not exist for some suites
+ self.known_fail_count = self.summary_suite_re.get("known_fail_group") and -1
+ self.crashed, self.leaked = False, False
+ self.tbpl_status = TBPL_SUCCESS
+
+ def parse_single_line(self, line):
+ if self.summary_suite_re:
+ summary_m = self.summary_suite_re["regex"].match(line) # pass/fail/todo
+ if summary_m:
+ message = " %s" % line
+ log_level = INFO
+                # remove all the None values in groups() so this will work
+                # with all suites, including mochitest browser-chrome
+ summary_match_list = [
+ group for group in summary_m.groups() if group is not None
+ ]
+ r = summary_match_list[0]
+ if self.summary_suite_re["pass_group"] in r:
+ if len(summary_match_list) > 1:
+ self.pass_count = int(summary_match_list[-1])
+ else:
+                        # This handles suites that either pass or report the
+                        # number of failures. We need to set both the pass
+                        # and fail counts in the pass case.
+ self.pass_count = 1
+ self.fail_count = 0
+ elif self.summary_suite_re["fail_group"] in r:
+ self.fail_count = int(summary_match_list[-1])
+ if self.fail_count > 0:
+ message += "\n One or more unittests failed."
+ log_level = WARNING
+                # If self.summary_suite_re['known_fail_group'] is None,
+                # then r should not match it, so this test is fine as is.
+ elif self.summary_suite_re["known_fail_group"] in r:
+ self.known_fail_count = int(summary_match_list[-1])
+ self.log(message, log_level)
+ return # skip harness check and base parse_single_line
+ harness_match = self.harness_error_re.search(line)
+ if harness_match:
+ self.warning(" %s" % line)
+ self.worst_log_level = self.worst_level(WARNING, self.worst_log_level)
+ self.tbpl_status = self.worst_level(
+ TBPL_WARNING, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+ full_harness_match = self.full_harness_error_re.search(line)
+ if full_harness_match:
+ r = full_harness_match.group(1)
+ if r == "application crashed":
+ self.crashed = True
+ elif r == "missing output line for total leaks!":
+ self.leaked = None
+ else:
+ self.leaked = True
+ return # skip base parse_single_line
+ if self.harness_retry_re.search(line):
+ self.critical(" %s" % line)
+ self.worst_log_level = self.worst_level(CRITICAL, self.worst_log_level)
+ self.tbpl_status = self.worst_level(
+ TBPL_RETRY, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+ return # skip base parse_single_line
+ super(DesktopUnittestOutputParser, self).parse_single_line(line)
+
+ def evaluate_parser(self, return_code, success_codes=None, previous_summary=None):
+ success_codes = success_codes or [0]
+
+ if self.num_errors: # mozharness ran into a script error
+ self.tbpl_status = self.worst_level(
+ TBPL_FAILURE, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+
+ """
+ We can run evaluate_parser multiple times, it will duplicate failures
+ and status which can mean that future tests will fail if a previous test fails.
+ When we have a previous summary, we want to do:
+ 1) reset state so we only evaluate the current results
+ """
+ joined_summary = {"pass_count": self.pass_count}
+ if previous_summary:
+ self.tbpl_status = TBPL_SUCCESS
+ self.worst_log_level = INFO
+ self.crashed = False
+ self.leaked = False
+
+        # This check lives outside parse_single_line because it covers not
+        # only fail_count being greater than 0 but also fail_count still
+        # being -1 (no fail summary line was found).
+ if self.fail_count != 0:
+ self.worst_log_level = self.worst_level(WARNING, self.worst_log_level)
+ self.tbpl_status = self.worst_level(
+ TBPL_WARNING, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+
+ # Account for the possibility that no test summary was output.
+ if (
+ self.pass_count <= 0
+ and self.fail_count <= 0
+ and (self.known_fail_count is None or self.known_fail_count <= 0)
+ and os.environ.get("TRY_SELECTOR") != "coverage"
+ ):
+ self.error("No tests run or test summary not found")
+ self.worst_log_level = self.worst_level(WARNING, self.worst_log_level)
+ self.tbpl_status = self.worst_level(
+ TBPL_WARNING, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+
+ if return_code not in success_codes:
+ self.tbpl_status = self.worst_level(
+ TBPL_FAILURE, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+
+        # we can trust parser.worst_log_level in either case
+ return (self.tbpl_status, self.worst_log_level, joined_summary)
+
+ def append_tinderboxprint_line(self, suite_name):
+        # We duplicate a condition (fail_count) from evaluate_parser and
+        # parse_single_line, but at little cost since we do not parse the
+        # log more than once. This method stays isolated because it only
+        # produces the tbpl highlighted summary and is not part of the
+        # result status.
+ summary = tbox_print_summary(
+ self.pass_count,
+ self.fail_count,
+ self.known_fail_count,
+ self.crashed,
+ self.leaked,
+ )
+ self.info("TinderboxPrint: %s<br/>%s\n" % (suite_name, summary))
diff --git a/testing/mozharness/mozharness/mozilla/testing/verify_tools.py b/testing/mozharness/mozharness/mozilla/testing/verify_tools.py
new file mode 100644
index 0000000000..3cf19351c5
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/verify_tools.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+from mozharness.base.script import PostScriptAction
+from mozharness.mozilla.testing.per_test_base import SingleTestMixin
+
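+# These options are meant to be spliced into a consuming script's own
+# config_options list so that --verify can be set from the command line.
+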
+verify_config_options = [
+ [
+ ["--verify"],
+ {
+ "action": "store_true",
+ "dest": "verify",
+ "default": False,
+ "help": "Run additional verification on modified tests.",
+ },
+ ],
+]
+
+
+class VerifyToolsMixin(SingleTestMixin):
+ """Utility functions for test verification."""
+
+ def __init__(self):
+ super(VerifyToolsMixin, self).__init__()
+
+ @property
+ def verify_enabled(self):
+ try:
+ return bool(self.config.get("verify"))
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ @PostScriptAction("download-and-extract")
+ def find_tests_for_verification(self, action, success=None):
+ """
+        For each file modified on this push, determine whether the modified
+        file is a test by searching the test manifests. Populate
+        self.verify_suites with test files, organized by suite.
+
+        This depends on the test manifests, so it can only run after the
+        test zips have been downloaded and extracted.
+ """
+
+ if not self.verify_enabled:
+ return
+
+ self.find_modified_tests()
+
+ @property
+ def verify_args(self):
+ if not self.verify_enabled:
+ return []
+
+ # Limit each test harness run to 15 minutes, to avoid task timeouts
+ # when executing long-running tests.
+ MAX_TIME_PER_TEST = 900
+
+ if self.config.get("per_test_category") == "web-platform":
+ args = ["--verify-log-full"]
+ else:
+ args = ["--verify-max-time=%d" % MAX_TIME_PER_TEST]
+
+ args.append("--verify")
+
+ return args
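+
+
+# Illustrative sketch (hypothetical call site; binary_path, suite, and cmd are
+# placeholders): a consuming test script extends its harness command line with
+# verify_args, e.g.
+#
+#   cmd = [self.binary_path, "-u", suite]
+#   cmd.extend(self.verify_args)
+#   # with --verify set and per_test_category != "web-platform", this appends
+#   # ["--verify-max-time=900", "--verify"]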