Diffstat
-rw-r--r--  testing/mozharness/scripts/android_emulator_pgo.py | 331
-rw-r--r--  testing/mozharness/scripts/android_emulator_unittest.py | 550
-rw-r--r--  testing/mozharness/scripts/android_hardware_unittest.py | 477
-rw-r--r--  testing/mozharness/scripts/android_wrench.py | 287
-rw-r--r--  testing/mozharness/scripts/awsy_script.py | 322
-rwxr-xr-x  testing/mozharness/scripts/configtest.py | 160
-rwxr-xr-x  testing/mozharness/scripts/desktop_l10n.py | 481
-rwxr-xr-x  testing/mozharness/scripts/desktop_partner_repacks.py | 213
-rwxr-xr-x  testing/mozharness/scripts/desktop_unittest.py | 1331
-rwxr-xr-x  testing/mozharness/scripts/does_it_crash.py | 146
-rw-r--r--  testing/mozharness/scripts/firefox_ui_tests.py | 299
-rwxr-xr-x  testing/mozharness/scripts/fx_desktop_build.py | 101
-rwxr-xr-x  testing/mozharness/scripts/l10n_bumper.py | 380
-rwxr-xr-x  testing/mozharness/scripts/marionette.py | 455
-rwxr-xr-x  testing/mozharness/scripts/multil10n.py | 21
-rwxr-xr-x  testing/mozharness/scripts/openh264_build.py | 472
-rw-r--r--  testing/mozharness/scripts/raptor_script.py | 20
-rw-r--r--  testing/mozharness/scripts/release/bouncer_check.py | 202
-rw-r--r--  testing/mozharness/scripts/release/generate-checksums.py | 263
-rw-r--r--  testing/mozharness/scripts/release/update-verify-config-creator.py | 642
-rw-r--r--  testing/mozharness/scripts/repackage.py | 175
-rwxr-xr-x  testing/mozharness/scripts/talos_script.py | 21
-rwxr-xr-x  testing/mozharness/scripts/telemetry/telemetry_client.py | 277
-rwxr-xr-x  testing/mozharness/scripts/web_platform_tests.py | 700
24 files changed, 8326 insertions, 0 deletions
diff --git a/testing/mozharness/scripts/android_emulator_pgo.py b/testing/mozharness/scripts/android_emulator_pgo.py
new file mode 100644
index 0000000000..4c8309b303
--- /dev/null
+++ b/testing/mozharness/scripts/android_emulator_pgo.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import copy
+import glob
+import json
+import os
+import posixpath
+import subprocess
+import sys
+import time
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.script import BaseScript, PreScriptAction
+from mozharness.mozilla.automation import EXIT_STATUS_DICT, TBPL_RETRY
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+
+PAGES = [
+ "js-input/webkit/PerformanceTests/Speedometer/index.html",
+ "blueprint/sample.html",
+ "blueprint/forms.html",
+ "blueprint/grid.html",
+ "blueprint/elements.html",
+ "js-input/3d-thingy.html",
+ "js-input/crypto-otp.html",
+ "js-input/sunspider/3d-cube.html",
+ "js-input/sunspider/3d-morph.html",
+ "js-input/sunspider/3d-raytrace.html",
+ "js-input/sunspider/access-binary-trees.html",
+ "js-input/sunspider/access-fannkuch.html",
+ "js-input/sunspider/access-nbody.html",
+ "js-input/sunspider/access-nsieve.html",
+ "js-input/sunspider/bitops-3bit-bits-in-byte.html",
+ "js-input/sunspider/bitops-bits-in-byte.html",
+ "js-input/sunspider/bitops-bitwise-and.html",
+ "js-input/sunspider/bitops-nsieve-bits.html",
+ "js-input/sunspider/controlflow-recursive.html",
+ "js-input/sunspider/crypto-aes.html",
+ "js-input/sunspider/crypto-md5.html",
+ "js-input/sunspider/crypto-sha1.html",
+ "js-input/sunspider/date-format-tofte.html",
+ "js-input/sunspider/date-format-xparb.html",
+ "js-input/sunspider/math-cordic.html",
+ "js-input/sunspider/math-partial-sums.html",
+ "js-input/sunspider/math-spectral-norm.html",
+ "js-input/sunspider/regexp-dna.html",
+ "js-input/sunspider/string-base64.html",
+ "js-input/sunspider/string-fasta.html",
+ "js-input/sunspider/string-tagcloud.html",
+ "js-input/sunspider/string-unpack-code.html",
+ "js-input/sunspider/string-validate-input.html",
+]
+
+
+class AndroidProfileRun(TestingMixin, BaseScript, MozbaseMixin, AndroidMixin):
+ """
+    Mozharness script to generate an Android PGO profile using the emulator.
+ """
+
+ config_options = copy.deepcopy(testing_config_options)
+
+ def __init__(self, require_config_file=False):
+ super(AndroidProfileRun, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "download",
+ "create-virtualenv",
+ "start-emulator",
+ "verify-device",
+ "install",
+ "run-tests",
+ ],
+ require_config_file=require_config_file,
+ config={
+ "virtualenv_modules": [],
+ "virtualenv_requirements": [],
+ "require_test_zip": True,
+ "mozbase_requirements": "mozbase_source_requirements.txt",
+ },
+ )
+
+        # these are necessary since self.config is read-only
+ c = self.config
+ self.installer_path = c.get("installer_path")
+ self.device_serial = "emulator-5554"
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(AndroidProfileRun, self).query_abs_dirs()
+ dirs = {}
+
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_src_dir"], "testing")
+ dirs["abs_xre_dir"] = os.path.join(abs_dirs["abs_work_dir"], "hostutils")
+ dirs["abs_blob_upload_dir"] = "/builds/worker/artifacts/blobber_upload_dir"
+ work_dir = os.environ.get("MOZ_FETCHES_DIR") or abs_dirs["abs_work_dir"]
+ dirs["abs_sdk_dir"] = os.path.join(work_dir, "android-sdk-linux")
+ dirs["abs_avds_dir"] = os.path.join(work_dir, "android-device")
+ dirs["abs_bundletool_path"] = os.path.join(work_dir, "bundletool.jar")
+
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ ##########################################
+ # Actions for AndroidProfileRun #
+ ##########################################
+
+ def preflight_install(self):
+ # in the base class, this checks for mozinstall, but we don't use it
+ pass
+
+ @PreScriptAction("create-virtualenv")
+ def pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+ self.register_virtualenv_module(
+ "marionette",
+ os.path.join(dirs["abs_test_install_dir"], "marionette", "client"),
+ )
+
+ def download(self):
+ """
+ Download host utilities
+ """
+ dirs = self.query_abs_dirs()
+ self.xre_path = self.download_hostutils(dirs["abs_xre_dir"])
+
+ def install(self):
+ """
+ Install APKs on the device.
+ """
+ assert (
+ self.installer_path is not None
+ ), "Either add installer_path to the config or use --installer-path."
+ self.install_android_app(self.installer_path)
+ self.info("Finished installing apps for %s" % self.device_serial)
+
+ def run_tests(self):
+ """
+ Generate the PGO profile data
+ """
+ from marionette_driver.marionette import Marionette
+ from mozdevice import ADBDeviceFactory, ADBTimeoutError
+ from mozhttpd import MozHttpd
+ from mozprofile import Preferences
+ from six import string_types
+
+ app = self.query_package_name()
+
+ IP = "10.0.2.2"
+ PORT = 8888
+
+ PATH_MAPPINGS = {
+ "/js-input/webkit/PerformanceTests": "third_party/webkit/PerformanceTests",
+ }
+
+ dirs = self.query_abs_dirs()
+ topsrcdir = dirs["abs_src_dir"]
+ adb = self.query_exe("adb")
+
+ path_mappings = {
+ k: os.path.join(topsrcdir, v) for k, v in PATH_MAPPINGS.items()
+ }
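+        # Serve the pgo pageset from the host machine; the emulator reaches
+        # the host's loopback interface via the special address 10.0.2.2.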
+ httpd = MozHttpd(
+ port=PORT,
+ docroot=os.path.join(topsrcdir, "build", "pgo"),
+ path_mappings=path_mappings,
+ )
+ httpd.start(block=False)
+
+ profile_data_dir = os.path.join(topsrcdir, "testing", "profiles")
+ with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh:
+ base_profiles = json.load(fh)["profileserver"]
+
+ prefpaths = [
+ os.path.join(profile_data_dir, profile, "user.js")
+ for profile in base_profiles
+ ]
+
+ prefs = {}
+ for path in prefpaths:
+ prefs.update(Preferences.read_prefs(path))
+
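+        # server_address is a (host, port) tuple; interpolate it into any
+        # string prefs that contain a {server} placeholder.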
+ interpolation = {"server": "%s:%d" % httpd.httpd.server_address, "OOP": "false"}
+ for k, v in prefs.items():
+ if isinstance(v, string_types):
+ v = v.format(**interpolation)
+ prefs[k] = Preferences.cast(v)
+
+ outputdir = self.config.get("output_directory", "/sdcard/pgo_profile")
+ jarlog = posixpath.join(outputdir, "en-US.log")
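+        # In LLVM_PROFILE_FILE patterns, %p expands to the process ID and %m
+        # to the instrumented binary's signature, so concurrent processes
+        # write distinct .profraw files.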
+ profdata = posixpath.join(outputdir, "default_%p_random_%m.profraw")
+
+ env = {}
+ env["XPCOM_DEBUG_BREAK"] = "warn"
+ env["MOZ_IN_AUTOMATION"] = "1"
+ env["MOZ_JAR_LOG_FILE"] = jarlog
+ env["LLVM_PROFILE_FILE"] = profdata
+
+ if self.query_minidump_stackwalk():
+ os.environ["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
+ os.environ["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ if not self.symbols_path:
+ self.symbols_path = os.environ.get("MOZ_FETCHES_DIR")
+
+ # Force test_root to be on the sdcard for android pgo
+ # builds which fail for Android 4.3 when profiles are located
+ # in /data/local/tmp/test_root with
+ # E AndroidRuntime: FATAL EXCEPTION: Gecko
+ # E AndroidRuntime: java.lang.IllegalArgumentException: \
+ # Profile directory must be writable if specified: /data/local/tmp/test_root/profile
+ # This occurs when .can-write-sentinel is written to
+ # the profile in
+ # mobile/android/geckoview/src/main/java/org/mozilla/gecko/GeckoProfile.java.
+ # This is not a problem on later versions of Android. This
+        # override of test_root should be removed when Android 4.3 is no
+ # longer supported.
+ sdcard_test_root = "/sdcard/test_root"
+ adbdevice = ADBDeviceFactory(
+ adb=adb, device="emulator-5554", test_root=sdcard_test_root
+ )
+ if adbdevice.test_root != sdcard_test_root:
+ # If the test_root was previously set and shared
+ # the initializer will not have updated the shared
+ # value. Force it to match the sdcard_test_root.
+ adbdevice.test_root = sdcard_test_root
+ adbdevice.mkdir(outputdir, parents=True)
+
+ try:
+ # Run Fennec a first time to initialize its profile
+ driver = Marionette(
+ app="fennec",
+ package_name=app,
+ adb_path=adb,
+ bin="geckoview-androidTest.apk",
+ prefs=prefs,
+ connect_to_running_emulator=True,
+ startup_timeout=1000,
+ env=env,
+ symbols_path=self.symbols_path,
+ )
+ driver.start_session()
+
+ # Now generate the profile and wait for it to complete
+ for page in PAGES:
+ driver.navigate("http://%s:%d/%s" % (IP, PORT, page))
+ timeout = 2
+ if "Speedometer/index.html" in page:
+                    # The Speedometer test actually runs many tests internally in
+                    # JavaScript, so it needs extra time to run through them. The
+                    # emulator doesn't get very far through the whole suite, but
+                    # this extra time at least lets some of them complete.
+ timeout = 360
+ time.sleep(timeout)
+
+ driver.set_context("chrome")
+ driver.execute_script(
+ """
+ let cancelQuit = Components.classes["@mozilla.org/supports-PRBool;1"]
+ .createInstance(Components.interfaces.nsISupportsPRBool);
+ Services.obs.notifyObservers(cancelQuit, "quit-application-requested", null);
+ return cancelQuit.data;
+ """
+ )
+ driver.execute_script(
+ """
+ Services.startup.quit(Ci.nsIAppStartup.eAttemptQuit)
+ """
+ )
+
+ # There is a delay between execute_script() returning and the profile data
+ # actually getting written out, so poll the device until we get a profile.
+ for i in range(50):
+ if not adbdevice.process_exist(app):
+ break
+ time.sleep(2)
+ else:
+ raise Exception("Android App (%s) never quit" % app)
+
+ # Pull all the profraw files and en-US.log
+ adbdevice.pull(outputdir, "/builds/worker/workspace/")
+ except ADBTimeoutError:
+ self.fatal(
+ "INFRA-ERROR: Failed with an ADBTimeoutError",
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+
+ profraw_files = glob.glob("/builds/worker/workspace/*.profraw")
+ if not profraw_files:
+ self.fatal("Could not find any profraw files in /builds/worker/workspace")
+ merge_cmd = [
+ os.path.join(os.environ["MOZ_FETCHES_DIR"], "clang/bin/llvm-profdata"),
+ "merge",
+ "-o",
+ "/builds/worker/workspace/merged.profdata",
+ ] + profraw_files
+ rc = subprocess.call(merge_cmd)
+ if rc != 0:
+ self.fatal(
+ "INFRA-ERROR: Failed to merge profile data. Corrupt profile?",
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+
+ # tarfile doesn't support xz in this version of Python
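+        # (tar's -a flag picks the compression, here xz, from the extension)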
+ tar_cmd = [
+ "tar",
+ "-acvf",
+ "/builds/worker/artifacts/profdata.tar.xz",
+ "-C",
+ "/builds/worker/workspace",
+ "merged.profdata",
+ "en-US.log",
+ ]
+ subprocess.check_call(tar_cmd)
+
+ httpd.stop()
+
+
+if __name__ == "__main__":
+ test = AndroidProfileRun()
+ test.run_and_exit()
diff --git a/testing/mozharness/scripts/android_emulator_unittest.py b/testing/mozharness/scripts/android_emulator_unittest.py
new file mode 100644
index 0000000000..47cf13dde3
--- /dev/null
+++ b/testing/mozharness/scripts/android_emulator_unittest.py
@@ -0,0 +1,550 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import copy
+import datetime
+import json
+import os
+import subprocess
+import sys
+
+# load modules from parent dir
+here = os.path.abspath(os.path.dirname(__file__))
+sys.path.insert(1, os.path.dirname(here))
+
+from mozharness.base.log import WARNING
+from mozharness.base.script import BaseScript, PreScriptAction
+from mozharness.mozilla.automation import TBPL_RETRY
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+
+SUITE_DEFAULT_E10S = ["geckoview-junit", "mochitest", "reftest"]
+SUITE_NO_E10S = ["cppunittest", "gtest", "jittest", "xpcshell"]
+SUITE_REPEATABLE = ["mochitest", "reftest", "xpcshell"]
+
+
+class AndroidEmulatorTest(
+ TestingMixin, BaseScript, MozbaseMixin, CodeCoverageMixin, AndroidMixin
+):
+ """
+ A mozharness script for Android functional tests (like mochitests and reftests)
+ run on an Android emulator. This script starts and manages an Android emulator
+ for the duration of the required tests. This is like desktop_unittest.py, but
+ for Android emulator test platforms.
+ """
+
+ config_options = (
+ [
+ [
+ ["--test-suite"],
+ {"action": "store", "dest": "test_suite", "default": None},
+ ],
+ [
+ ["--total-chunk"],
+ {
+ "action": "store",
+ "dest": "total_chunks",
+ "default": None,
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--this-chunk"],
+ {
+ "action": "store",
+ "dest": "this_chunk",
+ "default": None,
+ "help": "Number of this chunk",
+ },
+ ],
+ [
+ ["--enable-xorigin-tests"],
+ {
+ "action": "store_true",
+ "dest": "enable_xorigin_tests",
+ "default": False,
+ "help": "Run tests in a cross origin iframe.",
+ },
+ ],
+ [
+ ["--gpu-required"],
+ {
+ "action": "store_true",
+ "dest": "gpu_required",
+ "default": False,
+ "help": "Run additional verification on modified tests using gpu instances.",
+ },
+ ],
+ [
+ ["--log-raw-level"],
+ {
+ "action": "store",
+ "dest": "log_raw_level",
+ "default": "info",
+ "help": "Set log level (debug|info|warning|error|critical|fatal)",
+ },
+ ],
+ [
+ ["--log-tbpl-level"],
+ {
+ "action": "store",
+ "dest": "log_tbpl_level",
+ "default": "info",
+ "help": "Set log level (debug|info|warning|error|critical|fatal)",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "action": "store_false",
+ "dest": "e10s",
+ "default": True,
+ "help": "Run tests without multiple processes (e10s).",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_true",
+ "dest": "disable_fission",
+ "default": False,
+ "help": "Run without Fission enabled.",
+ },
+ ],
+ [
+ ["--web-content-isolation-strategy"],
+ {
+ "action": "store",
+ "type": "int",
+ "dest": "web_content_isolation_strategy",
+ "help": "Strategy used to determine whether or not a particular site should"
+ "load into a webIsolated content process, see "
+ "fission.webContentIsolationStrategy.",
+ },
+ ],
+ [
+ ["--repeat"],
+ {
+ "action": "store",
+ "type": "int",
+ "dest": "repeat",
+ "default": 0,
+ "help": "Repeat the tests the given number of times. Supported "
+ "by mochitest, reftest, crashtest, ignored otherwise.",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ ]
+ + copy.deepcopy(testing_config_options)
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
+ def __init__(self, require_config_file=False):
+ super(AndroidEmulatorTest, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "start-emulator",
+ "verify-device",
+ "install",
+ "run-tests",
+ ],
+ require_config_file=require_config_file,
+ config={
+ "virtualenv_modules": [],
+ "virtualenv_requirements": [],
+ "require_test_zip": True,
+ },
+ )
+
+        # these are necessary since self.config is read-only
+ c = self.config
+ self.installer_url = c.get("installer_url")
+ self.installer_path = c.get("installer_path")
+ self.test_url = c.get("test_url")
+ self.test_packages_url = c.get("test_packages_url")
+ self.test_manifest = c.get("test_manifest")
+ suite = c.get("test_suite")
+ self.test_suite = suite
+ self.this_chunk = c.get("this_chunk")
+ self.total_chunks = c.get("total_chunks")
+ self.xre_path = None
+ self.device_serial = "emulator-5554"
+ self.log_raw_level = c.get("log_raw_level")
+ self.log_tbpl_level = c.get("log_tbpl_level")
+ # AndroidMixin uses this when launching the emulator. We only want
+ # GLES3 if we're running WebRender (default)
+ self.use_gles3 = True
+ self.disable_e10s = c.get("disable_e10s")
+ self.disable_fission = c.get("disable_fission")
+ self.web_content_isolation_strategy = c.get("web_content_isolation_strategy")
+ self.extra_prefs = c.get("extra_prefs")
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(AndroidEmulatorTest, self).query_abs_dirs()
+ dirs = {}
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ dirs["abs_test_bin_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "tests", "bin"
+ )
+ dirs["abs_xre_dir"] = os.path.join(abs_dirs["abs_work_dir"], "hostutils")
+ dirs["abs_modules_dir"] = os.path.join(dirs["abs_test_install_dir"], "modules")
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ dirs["abs_mochitest_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "mochitest"
+ )
+ dirs["abs_reftest_dir"] = os.path.join(dirs["abs_test_install_dir"], "reftest")
+ dirs["abs_xpcshell_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "xpcshell"
+ )
+ work_dir = os.environ.get("MOZ_FETCHES_DIR") or abs_dirs["abs_work_dir"]
+ dirs["abs_sdk_dir"] = os.path.join(work_dir, "android-sdk-linux")
+ dirs["abs_avds_dir"] = os.path.join(work_dir, "android-device")
+ dirs["abs_bundletool_path"] = os.path.join(work_dir, "bundletool.jar")
+
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def _query_tests_dir(self, test_suite):
+ dirs = self.query_abs_dirs()
+ try:
+ test_dir = self.config["suite_definitions"][test_suite]["testsdir"]
+ except Exception:
+ test_dir = test_suite
+ return os.path.join(dirs["abs_test_install_dir"], test_dir)
+
+ def _get_mozharness_test_paths(self, suite):
+ test_paths = os.environ.get("MOZHARNESS_TEST_PATHS")
+ if not test_paths:
+ return
+
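+        # MOZHARNESS_TEST_PATHS is a JSON object mapping suite names to lists
+        # of requested test paths.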
+ return json.loads(test_paths).get(suite)
+
+ def _build_command(self):
+ c = self.config
+ dirs = self.query_abs_dirs()
+
+ if self.test_suite not in self.config["suite_definitions"]:
+ self.fatal("Key '%s' not defined in the config!" % self.test_suite)
+
+ cmd = [
+ self.query_python_path("python"),
+ "-u",
+ os.path.join(
+ self._query_tests_dir(self.test_suite),
+ self.config["suite_definitions"][self.test_suite]["run_filename"],
+ ),
+ ]
+
+ raw_log_file, error_summary_file = self.get_indexed_logs(
+ dirs["abs_blob_upload_dir"], self.test_suite
+ )
+
+ str_format_values = {
+ "device_serial": self.device_serial,
+ # IP address of the host as seen from the emulator
+ "remote_webserver": "10.0.2.2",
+ "xre_path": self.xre_path,
+ "utility_path": self.xre_path,
+ "http_port": "8854", # starting http port to use for the mochitest server
+ "ssl_port": "4454", # starting ssl port to use for the server
+ "certs_path": os.path.join(dirs["abs_work_dir"], "tests/certs"),
+ # TestingMixin._download_and_extract_symbols() will set
+ # self.symbols_path when downloading/extracting.
+ "symbols_path": self.symbols_path,
+ "modules_dir": dirs["abs_modules_dir"],
+ "installer_path": self.installer_path,
+ "raw_log_file": raw_log_file,
+ "log_tbpl_level": self.log_tbpl_level,
+ "log_raw_level": self.log_raw_level,
+ "error_summary_file": error_summary_file,
+ "xpcshell_extra": c.get("xpcshell_extra", ""),
+ "gtest_dir": os.path.join(dirs["abs_test_install_dir"], "gtest"),
+ }
+
+ user_paths = self._get_mozharness_test_paths(self.test_suite)
+
+ for option in self.config["suite_definitions"][self.test_suite]["options"]:
+ opt = option.split("=")[0]
+ # override configured chunk options with script args, if specified
+ if opt in ("--this-chunk", "--total-chunks"):
+ if (
+ user_paths
+ or getattr(self, opt.replace("-", "_").strip("_"), None) is not None
+ ):
+ continue
+
+ if "%(app)" in option:
+ # only query package name if requested
+ cmd.extend([option % {"app": self.query_package_name()}])
+ else:
+ option = option % str_format_values
+ if option:
+ cmd.extend([option])
+
+ if "mochitest" in self.test_suite:
+ category = "mochitest"
+ elif "reftest" in self.test_suite or "crashtest" in self.test_suite:
+ category = "reftest"
+ else:
+ category = self.test_suite
+ if c.get("repeat"):
+ if category in SUITE_REPEATABLE:
+ cmd.extend(["--repeat=%s" % c.get("repeat")])
+ else:
+ self.log("--repeat not supported in {}".format(category), level=WARNING)
+
+        # do not pass --disable-fission to suites that do not run with e10s
+        # (gtest, cppunittest)
+ if c["disable_fission"] and category not in ["gtest", "cppunittest"]:
+ cmd.append("--disable-fission")
+
+ if "web_content_isolation_strategy" in c:
+ cmd.append(
+ "--web-content-isolation-strategy=%s"
+ % c["web_content_isolation_strategy"]
+ )
+ cmd.extend(["--setpref={}".format(p) for p in self.extra_prefs])
+
+ if not (self.verify_enabled or self.per_test_coverage):
+ if user_paths:
+ cmd.extend(user_paths)
+            else:
+ if self.this_chunk is not None:
+ cmd.extend(["--this-chunk", self.this_chunk])
+ if self.total_chunks is not None:
+ cmd.extend(["--total-chunks", self.total_chunks])
+
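+        # Suites listed in SUITE_DEFAULT_E10S run with e10s unless it was
+        # explicitly disabled; the remaining suites default to single-process
+        # and need an explicit --e10s flag.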
+ if category not in SUITE_NO_E10S:
+ if category in SUITE_DEFAULT_E10S and not c["e10s"]:
+ cmd.append("--disable-e10s")
+ elif category not in SUITE_DEFAULT_E10S and c["e10s"]:
+ cmd.append("--e10s")
+
+ if c.get("enable_xorigin_tests"):
+ cmd.extend(["--enable-xorigin-tests"])
+
+ try_options, try_tests = self.try_args(self.test_suite)
+ cmd.extend(try_options)
+ if not self.verify_enabled and not self.per_test_coverage:
+ cmd.extend(
+ self.query_tests_args(
+ self.config["suite_definitions"][self.test_suite].get("tests"),
+ None,
+ try_tests,
+ )
+ )
+
+ if self.java_code_coverage_enabled:
+ cmd.extend(
+ [
+ "--enable-coverage",
+ "--coverage-output-dir",
+ self.java_coverage_output_dir,
+ ]
+ )
+
+ return cmd
+
+ def _query_suites(self):
+ if self.test_suite:
+ return [(self.test_suite, self.test_suite)]
+ # per-test mode: determine test suites to run
+
+ # For each test category, provide a list of supported sub-suites and a mapping
+ # between the per_test_base suite name and the android suite name.
+ all = [
+ (
+ "mochitest",
+ {
+ "mochitest-plain": "mochitest-plain",
+ "mochitest-media": "mochitest-media",
+ "mochitest-plain-gpu": "mochitest-plain-gpu",
+ },
+ ),
+ (
+ "reftest",
+ {
+ "reftest": "reftest",
+ "crashtest": "crashtest",
+ "jsreftest": "jsreftest",
+ },
+ ),
+ ("xpcshell", {"xpcshell": "xpcshell"}),
+ ]
+ suites = []
+ for (category, all_suites) in all:
+ cat_suites = self.query_per_test_category_suites(category, all_suites)
+ for k in cat_suites.keys():
+ suites.append((k, cat_suites[k]))
+ return suites
+
+ def _query_suite_categories(self):
+ if self.test_suite:
+ categories = [self.test_suite]
+ else:
+ # per-test mode
+ categories = ["mochitest", "reftest", "xpcshell"]
+ return categories
+
+ ##########################################
+ # Actions for AndroidEmulatorTest #
+ ##########################################
+
+ def preflight_install(self):
+ # in the base class, this checks for mozinstall, but we don't use it
+ pass
+
+ @PreScriptAction("create-virtualenv")
+ def pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+ requirements = None
+ suites = self._query_suites()
+ if ("mochitest-media", "mochitest-media") in suites:
+ # mochitest-media is the only thing that needs this
+ requirements = os.path.join(
+ dirs["abs_mochitest_dir"],
+ "websocketprocessbridge",
+ "websocketprocessbridge_requirements_3.txt",
+ )
+ if requirements:
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
+
+ def download_and_extract(self):
+ """
+ Download and extract product APK, tests.zip, and host utils.
+ """
+ super(AndroidEmulatorTest, self).download_and_extract(
+ suite_categories=self._query_suite_categories()
+ )
+ dirs = self.query_abs_dirs()
+ self.xre_path = self.download_hostutils(dirs["abs_xre_dir"])
+
+ def install(self):
+ """
+ Install APKs on the device.
+ """
+ install_needed = (not self.test_suite) or self.config["suite_definitions"][
+ self.test_suite
+ ].get("install")
+ if install_needed is False:
+ self.info("Skipping apk installation for %s" % self.test_suite)
+ return
+ assert (
+ self.installer_path is not None
+ ), "Either add installer_path to the config or use --installer-path."
+ self.install_android_app(self.installer_path)
+ self.info("Finished installing apps for %s" % self.device_serial)
+
+ def run_tests(self):
+ """
+ Run the tests
+ """
+ self.start_time = datetime.datetime.now()
+ max_per_test_time = datetime.timedelta(minutes=60)
+
+ per_test_args = []
+ suites = self._query_suites()
+ minidump = self.query_minidump_stackwalk()
+ for (per_test_suite, suite) in suites:
+ self.test_suite = suite
+
+ try:
+ cwd = self._query_tests_dir(self.test_suite)
+ except Exception:
+ self.fatal("Don't know how to run --test-suite '%s'!" % self.test_suite)
+
+ env = self.query_env()
+ if minidump:
+ env["MINIDUMP_STACKWALK"] = minidump
+ env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "full"
+ if self.config["nodejs_path"]:
+ env["MOZ_NODE_PATH"] = self.config["nodejs_path"]
+
+ summary = {}
+ for per_test_args in self.query_args(per_test_suite):
+ if (datetime.datetime.now() - self.start_time) > max_per_test_time:
+ # Running tests has run out of time. That is okay! Stop running
+ # them so that a task timeout is not triggered, and so that
+ # (partial) results are made available in a timely manner.
+ self.info(
+ "TinderboxPrint: Running tests took too long: "
+ "Not all tests were executed.<br/>"
+ )
+ # Signal per-test time exceeded, to break out of suites and
+ # suite categories loops also.
+ return
+
+ cmd = self._build_command()
+ final_cmd = copy.copy(cmd)
+ if len(per_test_args) > 0:
+ # in per-test mode, remove any chunk arguments from command
+                    # (filter via a list comprehension: calling remove() while
+                    # iterating the list can skip adjacent matching items)
+                    final_cmd = [
+                        arg
+                        for arg in final_cmd
+                        if "total-chunk" not in arg and "this-chunk" not in arg
+                    ]
+ final_cmd.extend(per_test_args)
+
+ self.info("Running the command %s" % subprocess.list2cmdline(final_cmd))
+ self.info("##### %s log begins" % self.test_suite)
+
+ suite_category = self.test_suite
+ parser = self.get_test_output_parser(
+ suite_category,
+ config=self.config,
+ log_obj=self.log_obj,
+ error_list=[],
+ )
+ self.run_command(final_cmd, cwd=cwd, env=env, output_parser=parser)
+ tbpl_status, log_level, summary = parser.evaluate_parser(
+ 0, previous_summary=summary
+ )
+ parser.append_tinderboxprint_line(self.test_suite)
+
+ self.info("##### %s log ends" % self.test_suite)
+
+ if len(per_test_args) > 0:
+ self.record_status(tbpl_status, level=log_level)
+ self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
+ if tbpl_status == TBPL_RETRY:
+ self.info("Per-test run abandoned due to RETRY status")
+ return
+ else:
+ self.record_status(tbpl_status, level=log_level)
+ # report as INFO instead of log_level to avoid extra Treeherder lines
+ self.info(
+ "The %s suite: %s ran with return status: %s"
+ % (suite_category, suite, tbpl_status),
+ )
+
+
+if __name__ == "__main__":
+ test = AndroidEmulatorTest()
+ test.run_and_exit()
diff --git a/testing/mozharness/scripts/android_hardware_unittest.py b/testing/mozharness/scripts/android_hardware_unittest.py
new file mode 100644
index 0000000000..065d74557b
--- /dev/null
+++ b/testing/mozharness/scripts/android_hardware_unittest.py
@@ -0,0 +1,477 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import copy
+import datetime
+import json
+import os
+import subprocess
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.log import WARNING
+from mozharness.base.script import BaseScript, PreScriptAction
+from mozharness.mozilla.automation import TBPL_RETRY
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.codecoverage import CodeCoverageMixin
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+
+SUITE_DEFAULT_E10S = ["geckoview-junit", "mochitest", "reftest"]
+SUITE_NO_E10S = ["cppunittest", "gtest", "jittest"]
+SUITE_REPEATABLE = ["mochitest", "reftest", "xpcshell"]
+
+
+class AndroidHardwareTest(
+ TestingMixin, BaseScript, MozbaseMixin, CodeCoverageMixin, AndroidMixin
+):
+ config_options = [
+ [["--test-suite"], {"action": "store", "dest": "test_suite", "default": None}],
+ [
+ ["--adb-path"],
+ {
+ "action": "store",
+ "dest": "adb_path",
+ "default": None,
+ "help": "Path to adb",
+ },
+ ],
+ [
+ ["--total-chunk"],
+ {
+ "action": "store",
+ "dest": "total_chunks",
+ "default": None,
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--this-chunk"],
+ {
+ "action": "store",
+ "dest": "this_chunk",
+ "default": None,
+ "help": "Number of this chunk",
+ },
+ ],
+ [
+ ["--log-raw-level"],
+ {
+ "action": "store",
+ "dest": "log_raw_level",
+ "default": "info",
+ "help": "Set log level (debug|info|warning|error|critical|fatal)",
+ },
+ ],
+ [
+ ["--log-tbpl-level"],
+ {
+ "action": "store",
+ "dest": "log_tbpl_level",
+ "default": "info",
+ "help": "Set log level (debug|info|warning|error|critical|fatal)",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "action": "store_false",
+ "dest": "e10s",
+ "default": True,
+ "help": "Run tests without multiple processes (e10s).",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_true",
+ "dest": "disable_fission",
+ "default": False,
+ "help": "Run with Fission disabled.",
+ },
+ ],
+ [
+ ["--repeat"],
+ {
+ "action": "store",
+ "type": "int",
+ "dest": "repeat",
+ "default": 0,
+ "help": "Repeat the tests the given number of times. Supported "
+ "by mochitest, reftest, crashtest, ignored otherwise.",
+ },
+ ],
+ [
+ [
+ "--setpref",
+ ],
+ {
+ "action": "append",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ [
+ ["--jittest-flags"],
+ {
+ "action": "store",
+ "dest": "jittest_flags",
+ "default": "debug",
+ "help": "Flags to run with jittest (all, debug, etc.).",
+ },
+ ],
+ ] + copy.deepcopy(testing_config_options)
+
+ def __init__(self, require_config_file=False):
+ super(AndroidHardwareTest, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "verify-device",
+ "install",
+ "run-tests",
+ ],
+ require_config_file=require_config_file,
+ config={
+ "virtualenv_modules": [],
+ "virtualenv_requirements": [],
+ "require_test_zip": True,
+ # IP address of the host as seen from the device.
+ "remote_webserver": os.environ["HOST_IP"],
+ },
+ )
+
+        # these are necessary since self.config is read-only
+ c = self.config
+ self.installer_url = c.get("installer_url")
+ self.installer_path = c.get("installer_path")
+ self.test_url = c.get("test_url")
+ self.test_packages_url = c.get("test_packages_url")
+ self.test_manifest = c.get("test_manifest")
+ suite = c.get("test_suite")
+ self.test_suite = suite
+ self.this_chunk = c.get("this_chunk")
+ self.total_chunks = c.get("total_chunks")
+ self.xre_path = None
+ self.log_raw_level = c.get("log_raw_level")
+ self.log_tbpl_level = c.get("log_tbpl_level")
+ self.disable_e10s = c.get("disable_e10s")
+ self.disable_fission = c.get("disable_fission")
+ self.extra_prefs = c.get("extra_prefs")
+ self.jittest_flags = c.get("jittest_flags")
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(AndroidHardwareTest, self).query_abs_dirs()
+ dirs = {}
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ dirs["abs_test_bin_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "tests", "bin"
+ )
+ dirs["abs_xre_dir"] = os.path.join(abs_dirs["abs_work_dir"], "hostutils")
+ dirs["abs_modules_dir"] = os.path.join(dirs["abs_test_install_dir"], "modules")
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ dirs["abs_mochitest_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "mochitest"
+ )
+ dirs["abs_reftest_dir"] = os.path.join(dirs["abs_test_install_dir"], "reftest")
+ dirs["abs_xpcshell_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "xpcshell"
+ )
+
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def _query_tests_dir(self):
+ dirs = self.query_abs_dirs()
+ try:
+ test_dir = self.config["suite_definitions"][self.test_suite]["testsdir"]
+ except Exception:
+ test_dir = self.test_suite
+ return os.path.join(dirs["abs_test_install_dir"], test_dir)
+
+ def _build_command(self):
+ c = self.config
+ dirs = self.query_abs_dirs()
+
+ if self.test_suite not in self.config["suite_definitions"]:
+ self.fatal("Key '%s' not defined in the config!" % self.test_suite)
+
+ cmd = [
+ self.query_python_path("python"),
+ "-u",
+ os.path.join(
+ self._query_tests_dir(),
+ self.config["suite_definitions"][self.test_suite]["run_filename"],
+ ),
+ ]
+
+ raw_log_file, error_summary_file = self.get_indexed_logs(
+ dirs["abs_blob_upload_dir"], self.test_suite
+ )
+
+ str_format_values = {
+ "device_serial": self.device_serial,
+ "remote_webserver": c["remote_webserver"],
+ "xre_path": self.xre_path,
+ "utility_path": self.xre_path,
+ "http_port": "8854", # starting http port to use for the mochitest server
+ "ssl_port": "4454", # starting ssl port to use for the server
+ "certs_path": os.path.join(dirs["abs_work_dir"], "tests/certs"),
+ # TestingMixin._download_and_extract_symbols() will set
+ # self.symbols_path when downloading/extracting.
+ "symbols_path": self.symbols_path,
+ "modules_dir": dirs["abs_modules_dir"],
+ "installer_path": self.installer_path,
+ "raw_log_file": raw_log_file,
+ "log_tbpl_level": self.log_tbpl_level,
+ "log_raw_level": self.log_raw_level,
+ "error_summary_file": error_summary_file,
+ "xpcshell_extra": c.get("xpcshell_extra", ""),
+ "jittest_flags": self.jittest_flags,
+ }
+
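+        # MOZHARNESS_TEST_PATHS holds a JSON object mapping suites to test
+        # paths; the '""' default parses to an empty string, which is falsy
+        # when the variable is unset.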
+ user_paths = json.loads(os.environ.get("MOZHARNESS_TEST_PATHS", '""'))
+
+ for option in self.config["suite_definitions"][self.test_suite]["options"]:
+ opt = option.split("=")[0]
+ # override configured chunk options with script args, if specified
+ if opt in ("--this-chunk", "--total-chunks"):
+ if (
+ user_paths
+ or getattr(self, opt.replace("-", "_").strip("_"), None) is not None
+ ):
+ continue
+
+ if "%(app)" in option:
+ # only query package name if requested
+ cmd.extend([option % {"app": self.query_package_name()}])
+ else:
+ option = option % str_format_values
+ if option:
+ cmd.extend([option])
+
+ if user_paths:
+ if self.test_suite in user_paths:
+ cmd.extend(user_paths[self.test_suite])
+ elif not self.verify_enabled:
+ if self.this_chunk is not None:
+ cmd.extend(["--this-chunk", self.this_chunk])
+ if self.total_chunks is not None:
+ cmd.extend(["--total-chunks", self.total_chunks])
+
+ if "mochitest" in self.test_suite:
+ category = "mochitest"
+ elif "reftest" in self.test_suite or "crashtest" in self.test_suite:
+ category = "reftest"
+ else:
+ category = self.test_suite
+ if c.get("repeat"):
+ if category in SUITE_REPEATABLE:
+ cmd.extend(["--repeat=%s" % c.get("repeat")])
+ else:
+ self.log("--repeat not supported in {}".format(category), level=WARNING)
+
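+        # Suites listed in SUITE_DEFAULT_E10S run with e10s unless it was
+        # explicitly disabled; the remaining suites need an explicit --e10s.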
+ if category not in SUITE_NO_E10S:
+ if category in SUITE_DEFAULT_E10S and not c["e10s"]:
+ cmd.append("--disable-e10s")
+ elif category not in SUITE_DEFAULT_E10S and c["e10s"]:
+ cmd.append("--e10s")
+
+ if self.disable_fission and category not in SUITE_NO_E10S:
+ cmd.append("--disable-fission")
+
+ cmd.extend(["--setpref={}".format(p) for p in self.extra_prefs])
+
+ try_options, try_tests = self.try_args(self.test_suite)
+ cmd.extend(try_options)
+ if not self.verify_enabled and not self.per_test_coverage:
+ cmd.extend(
+ self.query_tests_args(
+ self.config["suite_definitions"][self.test_suite].get("tests"),
+ None,
+ try_tests,
+ )
+ )
+
+ return cmd
+
+ def _query_suites(self):
+ if self.test_suite:
+ return [(self.test_suite, self.test_suite)]
+ # per-test mode: determine test suites to run
+ all = [
+ (
+ "mochitest",
+ {
+ "mochitest-plain": "mochitest-plain",
+ "mochitest-plain-gpu": "mochitest-plain-gpu",
+ },
+ ),
+ ("reftest", {"reftest": "reftest", "crashtest": "crashtest"}),
+ ("xpcshell", {"xpcshell": "xpcshell"}),
+ ]
+ suites = []
+ for (category, all_suites) in all:
+ cat_suites = self.query_per_test_category_suites(category, all_suites)
+ for k in cat_suites.keys():
+ suites.append((k, cat_suites[k]))
+ return suites
+
+ def _query_suite_categories(self):
+ if self.test_suite:
+ categories = [self.test_suite]
+ else:
+ # per-test mode
+ categories = ["mochitest", "reftest", "xpcshell"]
+ return categories
+
+ ##########################################
+ # Actions for AndroidHardwareTest #
+ ##########################################
+
+ def preflight_install(self):
+ # in the base class, this checks for mozinstall, but we don't use it
+ pass
+
+ @PreScriptAction("create-virtualenv")
+ def pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+ requirements = None
+ suites = self._query_suites()
+ if ("mochitest-media", "mochitest-media") in suites:
+ # mochitest-media is the only thing that needs this
+ requirements = os.path.join(
+ dirs["abs_mochitest_dir"],
+ "websocketprocessbridge",
+ "websocketprocessbridge_requirements_3.txt",
+ )
+ if requirements:
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
+
+ def download_and_extract(self):
+ """
+ Download and extract product APK, tests.zip, and host utils.
+ """
+ super(AndroidHardwareTest, self).download_and_extract(
+ suite_categories=self._query_suite_categories()
+ )
+ dirs = self.query_abs_dirs()
+ self.xre_path = self.download_hostutils(dirs["abs_xre_dir"])
+
+ def install(self):
+ """
+ Install APKs on the device.
+ """
+ install_needed = (not self.test_suite) or self.config["suite_definitions"][
+ self.test_suite
+ ].get("install")
+ if install_needed is False:
+ self.info("Skipping apk installation for %s" % self.test_suite)
+ return
+ assert (
+ self.installer_path is not None
+ ), "Either add installer_path to the config or use --installer-path."
+ self.uninstall_android_app()
+ self.install_android_app(self.installer_path)
+ self.info("Finished installing apps for %s" % self.device_name)
+
+ def run_tests(self):
+ """
+ Run the tests
+ """
+ self.start_time = datetime.datetime.now()
+ max_per_test_time = datetime.timedelta(minutes=60)
+
+ per_test_args = []
+ suites = self._query_suites()
+ minidump = self.query_minidump_stackwalk()
+ for (per_test_suite, suite) in suites:
+ self.test_suite = suite
+
+ try:
+ cwd = self._query_tests_dir()
+ except Exception:
+ self.fatal("Don't know how to run --test-suite '%s'!" % self.test_suite)
+ env = self.query_env()
+ if minidump:
+ env["MINIDUMP_STACKWALK"] = minidump
+ env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "full"
+
+ summary = None
+ for per_test_args in self.query_args(per_test_suite):
+ if (datetime.datetime.now() - self.start_time) > max_per_test_time:
+ # Running tests has run out of time. That is okay! Stop running
+ # them so that a task timeout is not triggered, and so that
+ # (partial) results are made available in a timely manner.
+ self.info(
+ "TinderboxPrint: Running tests took too long: "
+ "Not all tests were executed.<br/>"
+ )
+ # Signal per-test time exceeded, to break out of suites and
+ # suite categories loops also.
+ return
+
+ cmd = self._build_command()
+ final_cmd = copy.copy(cmd)
+ if len(per_test_args) > 0:
+ # in per-test mode, remove any chunk arguments from command
+                    # (filter via a list comprehension: calling remove() while
+                    # iterating the list can skip adjacent matching items)
+                    final_cmd = [
+                        arg
+                        for arg in final_cmd
+                        if "total-chunk" not in arg and "this-chunk" not in arg
+                    ]
+ final_cmd.extend(per_test_args)
+
+ self.info(
+ "Running on %s the command %s"
+ % (self.device_name, subprocess.list2cmdline(final_cmd))
+ )
+ self.info("##### %s log begins" % self.test_suite)
+
+ suite_category = self.test_suite
+ parser = self.get_test_output_parser(
+ suite_category,
+ config=self.config,
+ log_obj=self.log_obj,
+ error_list=[],
+ )
+ self.run_command(final_cmd, cwd=cwd, env=env, output_parser=parser)
+ tbpl_status, log_level, summary = parser.evaluate_parser(0, summary)
+ parser.append_tinderboxprint_line(self.test_suite)
+
+ self.info("##### %s log ends" % self.test_suite)
+
+ if len(per_test_args) > 0:
+ self.record_status(tbpl_status, level=log_level)
+ self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
+ if tbpl_status == TBPL_RETRY:
+ self.info("Per-test run abandoned due to RETRY status")
+ return
+ else:
+ self.record_status(tbpl_status, level=log_level)
+ # report as INFO instead of log_level to avoid extra Treeherder lines
+ self.info(
+ "The %s suite: %s ran with return status: %s"
+ % (suite_category, suite, tbpl_status),
+ )
+
+
+if __name__ == "__main__":
+ test = AndroidHardwareTest()
+ test.run_and_exit()
diff --git a/testing/mozharness/scripts/android_wrench.py b/testing/mozharness/scripts/android_wrench.py
new file mode 100644
index 0000000000..623371431c
--- /dev/null
+++ b/testing/mozharness/scripts/android_wrench.py
@@ -0,0 +1,287 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import datetime
+import enum
+import os
+import subprocess
+import sys
+import tempfile
+import time
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.script import BaseScript
+from mozharness.mozilla.automation import EXIT_STATUS_DICT, TBPL_FAILURE
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.testbase import TestingMixin
+
+
+class TestMode(enum.Enum):
+ OPTIMIZED_SHADER_COMPILATION = 0
+ UNOPTIMIZED_SHADER_COMPILATION = 1
+ SHADER_TEST = 2
+ REFTEST = 3
+
+
+class AndroidWrench(TestingMixin, BaseScript, MozbaseMixin, AndroidMixin):
+ def __init__(self, require_config_file=False):
+        # code in BaseScript.__init__ iterates all the properties to attach
+        # pre- and post-flight listeners, so we need _is_emulator to be
+        # defined before that happens. It doesn't need to be a real value yet.
+ self._is_emulator = None
+
+ # Directory for wrench input and output files. Note that we hard-code
+ # the path here, rather than using something like self.device.test_root,
+ # because it needs to be kept in sync with the path hard-coded inside
+ # the wrench source code.
+ self.wrench_dir = "/data/data/org.mozilla.wrench/files/wrench"
+
+ super(AndroidWrench, self).__init__()
+
+ # Override AndroidMixin's use_root to ensure we use run-as instead of
+ # root to push and pull files from the device, as the latter fails due
+ # to permission errors on recent Android versions.
+ self.use_root = False
+
+ if self.device_serial is None:
+ # Running on an emulator.
+ self._is_emulator = True
+ self.device_serial = "emulator-5554"
+ self.use_gles3 = True
+ else:
+ # Running on a device, ensure self.is_emulator returns False.
+ # The adb binary is preinstalled on the bitbar image and is
+ # already on the $PATH.
+ self._is_emulator = False
+ self._adb_path = "adb"
+ self._errored = False
+
+ @property
+ def is_emulator(self):
+ """Overrides the is_emulator property on AndroidMixin."""
+ if self._is_emulator is None:
+ self._is_emulator = self.device_serial is None
+ return self._is_emulator
+
+ def activate_virtualenv(self):
+ """Overrides the method on AndroidMixin to be a no-op, because the
+ setup for wrench doesn't require a special virtualenv."""
+ pass
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+
+ abs_dirs = {}
+
+ abs_dirs["abs_work_dir"] = os.path.expanduser("~/.wrench")
+ if os.environ.get("MOZ_AUTOMATION", "0") == "1":
+ # In automation use the standard work dir if there is one
+ parent_abs_dirs = super(AndroidWrench, self).query_abs_dirs()
+ if "abs_work_dir" in parent_abs_dirs:
+ abs_dirs["abs_work_dir"] = parent_abs_dirs["abs_work_dir"]
+
+ abs_dirs["abs_blob_upload_dir"] = os.path.join(abs_dirs["abs_work_dir"], "logs")
+ abs_dirs["abs_apk_path"] = os.environ.get(
+ "WRENCH_APK", "gfx/wr/target/debug/apk/wrench.apk"
+ )
+ abs_dirs["abs_reftests_path"] = os.environ.get(
+ "WRENCH_REFTESTS", "gfx/wr/wrench/reftests"
+ )
+ if os.environ.get("MOZ_AUTOMATION", "0") == "1":
+ fetches_dir = os.environ.get("MOZ_FETCHES_DIR")
+ work_dir = (
+ fetches_dir
+ if fetches_dir and self.is_emulator
+ else abs_dirs["abs_work_dir"]
+ )
+ abs_dirs["abs_sdk_dir"] = os.path.join(work_dir, "android-sdk-linux")
+ abs_dirs["abs_avds_dir"] = os.path.join(work_dir, "android-device")
+ abs_dirs["abs_bundletool_path"] = os.path.join(work_dir, "bundletool.jar")
+ else:
+ mozbuild_path = os.environ.get(
+ "MOZBUILD_STATE_PATH", os.path.expanduser("~/.mozbuild")
+ )
+ mozbuild_sdk = os.environ.get(
+ "ANDROID_SDK_HOME", os.path.join(mozbuild_path, "android-sdk-linux")
+ )
+ abs_dirs["abs_sdk_dir"] = mozbuild_sdk
+ avds_dir = os.environ.get(
+ "ANDROID_EMULATOR_HOME", os.path.join(mozbuild_path, "android-device")
+ )
+ abs_dirs["abs_avds_dir"] = avds_dir
+ abs_dirs["abs_bundletool_path"] = os.path.join(
+ mozbuild_path, "bundletool.jar"
+ )
+
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def logcat_start(self):
+ """Overrides logcat_start in android.py - ensures any pre-existing logcat
+ is cleared before starting to record the new logcat. This is helpful
+ when running multiple times in a local emulator."""
+ logcat_cmd = [self.adb_path, "-s", self.device_serial, "logcat", "-c"]
+ self.info(" ".join(logcat_cmd))
+ subprocess.check_call(logcat_cmd)
+ super(AndroidWrench, self).logcat_start()
+
+ def wait_until_process_done(self, process_name, timeout):
+ """Waits until the specified process has exited. Polls the process list
+ every 5 seconds until the process disappears.
+
+ :param process_name: string containing the package name of the
+ application.
+ :param timeout: integer specifying the maximum time in seconds
+ to wait for the application to finish.
+ :returns: boolean - True if the process exited within the indicated
+ timeout, False if the process had not exited by the timeout.
+ """
+ end_time = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
+ while self.device.process_exist(process_name, timeout=timeout):
+ if datetime.datetime.now() > end_time:
+ stop_cmd = [
+ self.adb_path,
+ "-s",
+ self.device_serial,
+ "shell",
+ "am",
+ "force-stop",
+ process_name,
+ ]
+ subprocess.check_call(stop_cmd)
+ return False
+ time.sleep(5)
+
+ return True
+
+ def setup_sdcard(self, test_mode):
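+        """Prepare the wrench input directory on the device: push the
+        reftests if needed and write an args file that wrench reads on
+        startup to select the test mode."""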
+ self.device.rm(self.wrench_dir, recursive=True, force=True)
+ self.device.mkdir(self.wrench_dir, parents=True)
+ if test_mode == TestMode.REFTEST:
+ self.device.push(
+ self.query_abs_dirs()["abs_reftests_path"],
+ self.wrench_dir + "/reftests",
+ )
+ args_file = os.path.join(self.query_abs_dirs()["abs_work_dir"], "wrench_args")
+ with open(args_file, "w") as argfile:
+ if self.is_emulator:
+ argfile.write("env: WRENCH_REFTEST_CONDITION_EMULATOR=1\n")
+ else:
+ argfile.write("env: WRENCH_REFTEST_CONDITION_DEVICE=1\n")
+ if test_mode == TestMode.OPTIMIZED_SHADER_COMPILATION:
+ argfile.write("--precache test_init")
+ elif test_mode == TestMode.UNOPTIMIZED_SHADER_COMPILATION:
+ argfile.write("--precache --use-unoptimized-shaders test_init")
+ elif test_mode == TestMode.SHADER_TEST:
+ argfile.write("--precache test_shaders")
+ elif test_mode == TestMode.REFTEST:
+ argfile.write("reftest")
+ self.device.push(args_file, self.wrench_dir + "/args")
+
+ def run_tests(self, timeout):
+ self.timed_screenshots(None)
+ self.device.launch_application(
+ app_name="org.mozilla.wrench",
+ activity_name="android.app.NativeActivity",
+ intent=None,
+ )
+ self.info("App launched")
+ done = self.wait_until_process_done("org.mozilla.wrench", timeout=timeout)
+ if not done:
+ self._errored = True
+ self.error("Wrench still running after timeout")
+
+ def scrape_log(self):
+ """Wrench dumps stdout to a file rather than logcat because logcat
+ truncates long lines, and the base64 reftest images therefore get
+ truncated. In the past we split long lines and stitched them together
+ again, but this was unreliable. This scrapes the output file and dumps
+ it into our main log.
+ """
+ logfile = tempfile.NamedTemporaryFile()
+ self.device.pull(self.wrench_dir + "/stdout", logfile.name)
+ with open(logfile.name, "r", encoding="utf-8") as f:
+ self.info("=== scraped log output ===")
+ for line in f:
+ if "UNEXPECTED-FAIL" in line or "panicked" in line:
+ self._errored = True
+ self.error(line)
+ else:
+ self.info(line)
+ self.info("=== end scraped log output ===")
+
+ def setup_emulator(self):
+ avds_dir = self.query_abs_dirs()["abs_avds_dir"]
+ if not os.path.exists(avds_dir):
+ self.error("Unable to find android AVDs at %s" % avds_dir)
+ return
+
+ sdk_path = self.query_abs_dirs()["abs_sdk_dir"]
+ if not os.path.exists(sdk_path):
+ self.error("Unable to find android SDK at %s" % sdk_path)
+ return
+ self.start_emulator()
+
+ def do_test(self):
+ if self.is_emulator:
+ self.setup_emulator()
+
+ self.verify_device()
+ self.info("Logging device properties...")
+ self.info(self.shell_output("getprop"))
+ self.info("Installing APK...")
+ self.install_android_app(self.query_abs_dirs()["abs_apk_path"], replace=True)
+
+ if not self._errored:
+ self.info("Setting up SD card...")
+ self.setup_sdcard(TestMode.OPTIMIZED_SHADER_COMPILATION)
+ self.info("Running optimized shader compilation tests...")
+ self.run_tests(60)
+ self.info("Tests done; parsing log...")
+ self.scrape_log()
+
+ if not self._errored:
+ self.info("Setting up SD card...")
+ self.setup_sdcard(TestMode.UNOPTIMIZED_SHADER_COMPILATION)
+ self.info("Running unoptimized shader compilation tests...")
+ self.run_tests(60)
+ self.info("Tests done; parsing log...")
+ self.scrape_log()
+
+ if not self._errored:
+ self.info("Setting up SD card...")
+ self.setup_sdcard(TestMode.SHADER_TEST)
+ self.info("Running shader tests...")
+ self.run_tests(60 * 5)
+ self.info("Tests done; parsing log...")
+ self.scrape_log()
+
+ if not self._errored:
+ self.info("Setting up SD card...")
+ self.setup_sdcard(TestMode.REFTEST)
+ self.info("Running reftests...")
+ self.run_tests(60 * 30)
+ self.info("Tests done; parsing log...")
+ self.scrape_log()
+
+ self.logcat_stop()
+ self.info("All done!")
+
+ def check_errors(self):
+ if self._errored:
+ self.info("Errors encountered, terminating with error code...")
+ exit(EXIT_STATUS_DICT[TBPL_FAILURE])
+
+
+if __name__ == "__main__":
+ test = AndroidWrench()
+ test.do_test()
+ test.check_errors()
diff --git a/testing/mozharness/scripts/awsy_script.py b/testing/mozharness/scripts/awsy_script.py
new file mode 100644
index 0000000000..9071dab75d
--- /dev/null
+++ b/testing/mozharness/scripts/awsy_script.py
@@ -0,0 +1,322 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""
+Run AWSY tests in a virtualenv.
+"""
+
+import copy
+import json
+import os
+import re
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+import mozharness
+import mozinfo
+from mozharness.base.log import ERROR, INFO
+from mozharness.base.script import PreScriptAction
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.tooltool import TooltoolMixin
+
+PY2 = sys.version_info.major == 2
+scripts_path = os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__)))
+external_tools_path = os.path.join(scripts_path, "external_tools")
+
+
+class AWSY(TestingMixin, MercurialScript, TooltoolMixin, CodeCoverageMixin):
+ config_options = (
+ [
+ [
+ ["--disable-e10s"],
+ {
+ "action": "store_false",
+ "dest": "e10s",
+ "default": True,
+ "help": "Run tests without multiple processes (e10s). (Desktop builds only)",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ [
+ ["--base"],
+ {
+ "action": "store_true",
+ "dest": "test_about_blank",
+ "default": False,
+ "help": "Runs the about:blank base case memory test.",
+ },
+ ],
+ [
+ ["--dmd"],
+ {
+ "action": "store_true",
+ "dest": "dmd",
+ "default": False,
+ "help": "Runs tests with DMD enabled.",
+ },
+ ],
+ [
+ ["--tp6"],
+ {
+ "action": "store_true",
+ "dest": "tp6",
+ "default": False,
+ "help": "Runs tests with the tp6 pageset.",
+ },
+ ],
+ ]
+ + testing_config_options
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
+ error_list = [
+ {"regex": re.compile(r"""(TEST-UNEXPECTED|PROCESS-CRASH)"""), "level": ERROR},
+ ]
+
+ def __init__(self, **kwargs):
+
+ kwargs.setdefault("config_options", self.config_options)
+ kwargs.setdefault(
+ "all_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault(
+ "default_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault("config", {})
+ super(AWSY, self).__init__(**kwargs)
+ self.installer_url = self.config.get("installer_url")
+ self.tests = None
+
+ self.testdir = self.query_abs_dirs()["abs_test_install_dir"]
+ self.awsy_path = os.path.join(self.testdir, "awsy")
+ self.awsy_libdir = os.path.join(self.awsy_path, "awsy")
+ self.webroot_dir = os.path.join(self.testdir, "html")
+ self.results_dir = os.path.join(self.testdir, "results")
+ self.binary_path = self.config.get("binary_path")
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(AWSY, self).query_abs_dirs()
+
+ dirs = {}
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ abs_dirs.update(dirs)
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def download_and_extract(self, extract_dirs=None, suite_categories=None):
+ ret = super(AWSY, self).download_and_extract(
+ suite_categories=["common", "awsy"]
+ )
+ return ret
+
+ @PreScriptAction("create-virtualenv")
+ def _pre_create_virtualenv(self, action):
+ requirements_files = [
+ os.path.join(self.testdir, "config", "marionette_requirements.txt")
+ ]
+
+ for requirements_file in requirements_files:
+ self.register_virtualenv_module(
+ requirements=[requirements_file], two_pass=True
+ )
+
+ self.register_virtualenv_module("awsy", self.awsy_path)
+
+ def populate_webroot(self):
+ """Populate the production test machines' webroots"""
+ self.info("Downloading pageset with tooltool...")
+ manifest_file = os.path.join(self.awsy_path, "tp5n-pageset.manifest")
+ page_load_test_dir = os.path.join(self.webroot_dir, "page_load_test")
+ if not os.path.isdir(page_load_test_dir):
+ self.mkdir_p(page_load_test_dir)
+ self.tooltool_fetch(
+ manifest_file,
+ output_dir=page_load_test_dir,
+ cache=self.config.get("tooltool_cache"),
+ )
+ archive = os.path.join(page_load_test_dir, "tp5n.zip")
+ unzip = self.query_exe("unzip")
+ unzip_cmd = [unzip, "-q", "-o", archive, "-d", page_load_test_dir]
+ self.run_command(unzip_cmd, halt_on_failure=False)
+ self.run_command("ls %s" % page_load_test_dir)
+
+ def run_tests(self, args=None, **kw):
+ """
+ AWSY test should be implemented here
+ """
+ dirs = self.abs_dirs
+ env = {}
+ error_summary_file = os.path.join(
+ dirs["abs_blob_upload_dir"], "marionette_errorsummary.log"
+ )
+
+ runtime_testvars = {
+ "webRootDir": self.webroot_dir,
+ "resultsDir": self.results_dir,
+ "bin": self.binary_path,
+ }
+
+ # Check if this is a DMD build and if so enable it.
+ dmd_enabled = False
+ dmd_py_lib_dir = os.path.dirname(self.binary_path)
+ if mozinfo.os == "mac":
+            # On Mac the binary is in MacOS and dmd.py is in Resources, e.g.:
+ # Name.app/Contents/MacOS/libdmd.dylib
+ # Name.app/Contents/Resources/dmd.py
+ dmd_py_lib_dir = os.path.join(dmd_py_lib_dir, "../Resources/")
+
+ dmd_path = os.path.join(dmd_py_lib_dir, "dmd.py")
+ if self.config["dmd"] and os.path.isfile(dmd_path):
+ dmd_enabled = True
+ runtime_testvars["dmd"] = True
+
+ # Allow the child process to import dmd.py
+ python_path = os.environ.get("PYTHONPATH")
+
+ if python_path:
+ os.environ["PYTHONPATH"] = "%s%s%s" % (
+ python_path,
+ os.pathsep,
+ dmd_py_lib_dir,
+ )
+ else:
+ os.environ["PYTHONPATH"] = dmd_py_lib_dir
+
+ env["DMD"] = "--mode=dark-matter --stacks=full"
+
+ runtime_testvars["tp6"] = self.config["tp6"]
+ if self.config["tp6"]:
+            # mitmproxy needs the paths to mozharness and tooltool when installing the cert
+ env["SCRIPTSPATH"] = scripts_path
+ env["EXTERNALTOOLSPATH"] = external_tools_path
+
+ runtime_testvars_path = os.path.join(self.awsy_path, "runtime-testvars.json")
+        mode = "wb" if PY2 else "w"
+        with open(runtime_testvars_path, mode) as runtime_testvars_file:
+            runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2))
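+        # Illustrative runtime-testvars.json contents (paths are hypothetical):
+        #   {
+        #     "webRootDir": "<testdir>/html",
+        #     "resultsDir": "<testdir>/results",
+        #     "bin": "<appdir>/firefox",
+        #     "tp6": false
+        #   }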
+
+ cmd = ["marionette"]
+
+        if self.config["test_about_blank"]:
+            test_vars_file = "base-testvars.json"
+        elif self.config["tp6"]:
+            test_vars_file = "tp6-testvars.json"
+        else:
+            test_vars_file = "testvars.json"
+
+ cmd.append(
+ "--testvars=%s" % os.path.join(self.awsy_path, "conf", test_vars_file)
+ )
+ cmd.append("--testvars=%s" % runtime_testvars_path)
+ cmd.append("--log-raw=-")
+ cmd.append("--log-errorsummary=%s" % error_summary_file)
+ cmd.append("--binary=%s" % self.binary_path)
+ cmd.append("--profile=%s" % (os.path.join(dirs["abs_work_dir"], "profile")))
+ if not self.config["e10s"]:
+ cmd.append("--disable-e10s")
+ cmd.extend(["--setpref={}".format(p) for p in self.config["extra_prefs"]])
+ cmd.append(
+ "--gecko-log=%s" % os.path.join(dirs["abs_blob_upload_dir"], "gecko.log")
+ )
+ # TestingMixin._download_and_extract_symbols() should set
+ # self.symbols_path
+ cmd.append("--symbols-path=%s" % self.symbols_path)
+
+ if self.config["test_about_blank"]:
+ test_file = os.path.join(self.awsy_libdir, "test_base_memory_usage.py")
+ prefs_file = "base-prefs.json"
+ else:
+ test_file = os.path.join(self.awsy_libdir, "test_memory_usage.py")
+ if self.config["tp6"]:
+ prefs_file = "tp6-prefs.json"
+ else:
+ prefs_file = "prefs.json"
+
+ cmd.append(
+ "--preferences=%s" % os.path.join(self.awsy_path, "conf", prefs_file)
+ )
+ if dmd_enabled:
+ cmd.append("--setpref=security.sandbox.content.level=0")
+ cmd.append("--setpref=layout.css.stylo-threads=4")
+
+ cmd.append(test_file)
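+        # A hypothetical final command (abridged; values are placeholders):
+        #   marionette --testvars=<awsy>/conf/testvars.json \
+        #     --testvars=<awsy>/runtime-testvars.json --log-raw=- \
+        #     --binary=<firefox> --symbols-path=<symbols> \
+        #     <awsy>/awsy/test_memory_usage.py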
+
+ env["MOZ_UPLOAD_DIR"] = dirs["abs_blob_upload_dir"]
+ if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
+ self.mkdir_p(env["MOZ_UPLOAD_DIR"])
+ if self.query_minidump_stackwalk():
+ env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
+ env["MINIDUMP_SAVE_PATH"] = dirs["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "1"
+ env = self.query_env(partial_env=env)
+ parser = StructuredOutputParser(
+ config=self.config,
+ log_obj=self.log_obj,
+ error_list=self.error_list,
+ strict=False,
+ )
+ return_code = self.run_command(
+ command=cmd,
+ cwd=self.awsy_path,
+ output_timeout=self.config.get("cmd_timeout"),
+ env=env,
+ output_parser=parser,
+ )
+
+ level = INFO
+ tbpl_status, log_level, summary = parser.evaluate_parser(
+ return_code=return_code
+ )
+
+ self.log(
+ "AWSY exited with return code %s: %s" % (return_code, tbpl_status),
+ level=level,
+ )
+ self.record_status(tbpl_status)
+
+
+if __name__ == "__main__":
+ awsy_test = AWSY()
+ awsy_test.run_and_exit()
diff --git a/testing/mozharness/scripts/configtest.py b/testing/mozharness/scripts/configtest.py
new file mode 100755
index 0000000000..f846cba0d4
--- /dev/null
+++ b/testing/mozharness/scripts/configtest.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""configtest.py
+
+Verify the .json and .py files in the configs/ directory are well-formed.
+Further tests to verify validity would be desirable.
+
+This is also a good example script to look at to understand mozharness.
+"""
+
+import os
+import pprint
+import sys
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.script import BaseScript
+
+
+# ConfigTest {{{1
+class ConfigTest(BaseScript):
+ config_options = [
+ [
+ [
+ "--test-file",
+ ],
+ {
+ "action": "extend",
+ "dest": "test_files",
+ "help": "Specify which config files to test",
+ },
+ ]
+ ]
+
+ def __init__(self, require_config_file=False):
+ self.config_files = []
+ BaseScript.__init__(
+ self,
+ config_options=self.config_options,
+ all_actions=[
+ "list-config-files",
+ "test-json-configs",
+ "test-python-configs",
+ "summary",
+ ],
+ default_actions=[
+ "test-json-configs",
+ "test-python-configs",
+ "summary",
+ ],
+ require_config_file=require_config_file,
+ )
+
+ def query_config_files(self):
+ """This query method, much like others, caches its runtime
+ settings in self.VAR so we don't have to figure out config_files
+ multiple times.
+ """
+ if self.config_files:
+ return self.config_files
+ c = self.config
+ if "test_files" in c:
+ self.config_files = c["test_files"]
+ return self.config_files
+ self.debug(
+ "No --test-file(s) specified; defaulting to crawling the configs/ directory."
+ )
+ config_files = []
+ for root, dirs, files in os.walk(os.path.join(sys.path[0], "..", "configs")):
+ for name in files:
+ # Hardcode =P
+ if name.endswith(".json") or name.endswith(".py"):
+ if not name.startswith("test_malformed"):
+ config_files.append(os.path.join(root, name))
+ self.config_files = config_files
+ return self.config_files
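+    # A hypothetical invocation: `python configtest.py --test-file
+    # configs/foo.json` checks a single file, while running with no
+    # --test-file crawls every .json/.py file under configs/.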
+
+ def list_config_files(self):
+ """Non-default action that is mainly here to demonstrate how
+ non-default actions work in a mozharness script.
+ """
+ config_files = self.query_config_files()
+ for config_file in config_files:
+ self.info(config_file)
+
+ def test_json_configs(self):
+ """Currently only "is this well-formed json?" """
+ config_files = self.query_config_files()
+ filecount = [0, 0]
+ for config_file in config_files:
+ if config_file.endswith(".json"):
+ filecount[0] += 1
+ self.info("Testing %s." % config_file)
+ contents = self.read_from_file(config_file, verbose=False)
+ try:
+ json.loads(contents)
+ except ValueError:
+ self.add_summary("%s is invalid json." % config_file, level="error")
+ self.error(pprint.pformat(sys.exc_info()[1]))
+ else:
+ self.info("Good.")
+ filecount[1] += 1
+ if filecount[0]:
+ self.add_summary(
+ "%d of %d json config files were good." % (filecount[1], filecount[0])
+ )
+ else:
+ self.add_summary("No json config files to test.")
+
+ def test_python_configs(self):
+ """Currently only "will this give me a config dictionary?" """
+ config_files = self.query_config_files()
+ filecount = [0, 0]
+ for config_file in config_files:
+ if config_file.endswith(".py"):
+ filecount[0] += 1
+ self.info("Testing %s." % config_file)
+ global_dict = {}
+ local_dict = {}
+ try:
+ with open(config_file, "r") as f:
+ exec(f.read(), global_dict, local_dict)
+ except Exception:
+ self.add_summary(
+ "%s is invalid python." % config_file, level="error"
+ )
+ self.error(pprint.pformat(sys.exc_info()[1]))
+ else:
+ if "config" in local_dict and isinstance(
+ local_dict["config"], dict
+ ):
+ self.info("Good.")
+ filecount[1] += 1
+ else:
+ self.add_summary(
+ "%s is valid python, "
+ "but doesn't create a config dictionary." % config_file,
+ level="error",
+ )
+ if filecount[0]:
+ self.add_summary(
+ "%d of %d python config files were good." % (filecount[1], filecount[0])
+ )
+ else:
+ self.add_summary("No python config files to test.")
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ config_test = ConfigTest()
+ config_test.run_and_exit()
diff --git a/testing/mozharness/scripts/desktop_l10n.py b/testing/mozharness/scripts/desktop_l10n.py
new file mode 100755
index 0000000000..6e401caa8b
--- /dev/null
+++ b/testing/mozharness/scripts/desktop_l10n.py
@@ -0,0 +1,481 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""desktop_l10n.py
+
+This script manages Desktop repacks for nightly builds.
+"""
+import glob
+import os
+import shlex
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0])) # noqa
+
+from mozharness.base.errors import MakefileErrorList
+from mozharness.base.script import BaseScript
+from mozharness.base.vcs.vcsbase import VCSMixin
+from mozharness.mozilla.automation import AutomationMixin
+from mozharness.mozilla.building.buildbase import (
+ MakeUploadOutputParser,
+ get_mozconfig_path,
+)
+from mozharness.mozilla.l10n.locales import LocalesMixin
+
+try:
+ import simplejson as json
+
+ assert json
+except ImportError:
+ import json
+
+
+# needed by _map
+SUCCESS = 0
+FAILURE = 1
+
+SUCCESS_STR = "Success"
+FAILURE_STR = "Failed"
+
+
+# DesktopSingleLocale {{{1
+class DesktopSingleLocale(LocalesMixin, AutomationMixin, VCSMixin, BaseScript):
+ """Manages desktop repacks"""
+
+ config_options = [
+ [
+ [
+ "--locale",
+ ],
+ {
+ "action": "extend",
+ "dest": "locales",
+ "type": "string",
+ "help": "Specify the locale(s) to sign and update. Optionally pass"
+ " revision separated by colon, en-GB:default.",
+ },
+ ],
+ [
+ [
+ "--tag-override",
+ ],
+ {
+ "action": "store",
+ "dest": "tag_override",
+ "type": "string",
+ "help": "Override the tags set for all repos",
+ },
+ ],
+ [
+ [
+ "--en-us-installer-url",
+ ],
+ {
+ "action": "store",
+ "dest": "en_us_installer_url",
+ "type": "string",
+ "help": "Specify the url of the en-us binary",
+ },
+ ],
+ ]
+
+ def __init__(self, require_config_file=True):
+ # fxbuild style:
+ buildscript_kwargs = {
+ "all_actions": [
+ "clone-locales",
+ "list-locales",
+ "setup",
+ "repack",
+ "summary",
+ ],
+ "config": {
+ "ignore_locales": ["en-US"],
+ "locales_dir": "browser/locales",
+ "log_name": "single_locale",
+ "hg_l10n_base": "https://hg.mozilla.org/l10n-central",
+ },
+ }
+
+ LocalesMixin.__init__(self)
+ BaseScript.__init__(
+ self,
+ config_options=self.config_options,
+ require_config_file=require_config_file,
+ **buildscript_kwargs
+ )
+
+ self.bootstrap_env = None
+ self.upload_env = None
+ self.upload_urls = {}
+ self.pushdate = None
+ # upload_files is a dictionary of files to upload, keyed by locale.
+ self.upload_files = {}
+
+ # Helper methods {{{2
+ def query_bootstrap_env(self):
+ """returns the env for repacks"""
+ if self.bootstrap_env:
+ return self.bootstrap_env
+ config = self.config
+ abs_dirs = self.query_abs_dirs()
+
+ bootstrap_env = self.query_env(
+ partial_env=config.get("bootstrap_env"), replace_dict=abs_dirs
+ )
+
+ bootstrap_env["L10NBASEDIR"] = abs_dirs["abs_l10n_dir"]
+ if self.query_is_nightly():
+ # we might set update_channel explicitly
+ if config.get("update_channel"):
+ update_channel = config["update_channel"]
+ else: # Let's just give the generic channel based on branch.
+ update_channel = "nightly-%s" % (config["branch"],)
+ if not isinstance(update_channel, bytes):
+ update_channel = update_channel.encode("utf-8")
+ bootstrap_env["MOZ_UPDATE_CHANNEL"] = update_channel
+ self.info(
+ "Update channel set to: {}".format(bootstrap_env["MOZ_UPDATE_CHANNEL"])
+ )
+ self.bootstrap_env = bootstrap_env
+ return self.bootstrap_env
+
+ def _query_upload_env(self):
+ """returns the environment used for the upload step"""
+ if self.upload_env:
+ return self.upload_env
+ config = self.config
+
+ upload_env = self.query_env(partial_env=config.get("upload_env"))
+        # check if there are any extra options from the platform configuration
+ # and append them to the env
+
+ if "upload_env_extra" in config:
+ for extra in config["upload_env_extra"]:
+ upload_env[extra] = config["upload_env_extra"][extra]
+
+ self.upload_env = upload_env
+ return self.upload_env
+
+ def query_l10n_env(self):
+ l10n_env = self._query_upload_env().copy()
+ l10n_env.update(self.query_bootstrap_env())
+ return l10n_env
+
+ def _query_make_variable(self, variable, make_args=None):
+ """returns the value of make echo-variable-<variable>
+        it accepts extra make arguments (make_args)
+ """
+ dirs = self.query_abs_dirs()
+ make_args = make_args or []
+ target = ["echo-variable-%s" % variable] + make_args
+ cwd = dirs["abs_locales_dir"]
+ raw_output = self._get_output_from_make(
+ target, cwd=cwd, env=self.query_bootstrap_env()
+ )
+ # we want to log all the messages from make
+ output = []
+ for line in raw_output.split("\n"):
+ output.append(line.strip())
+ output = " ".join(output).strip()
+ self.info("echo-variable-%s: %s" % (variable, output))
+ return output
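+    # Usage sketch (the variable and make args are illustrative):
+    #   self._query_make_variable("UPLOAD_FILES", make_args=["AB_CD=de"])
+    # runs `make echo-variable-UPLOAD_FILES AB_CD=de` in abs_locales_dir.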
+
+ def _map(self, func, items):
+ """runs func for any item in items, calls the add_failure() for each
+ error. It assumes that function returns 0 when successful.
+ returns a two element tuple with (success_count, total_count)"""
+ success_count = 0
+ total_count = len(items)
+ name = func.__name__
+ for item in items:
+ result = func(item)
+ if result == SUCCESS:
+ # success!
+ success_count += 1
+ else:
+ # func failed...
+ message = "failure: %s(%s)" % (name, item)
+ self.add_failure(item, message)
+ return (success_count, total_count)
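+    # Typical usage, as in repack() below:
+    #   success_count, total_count = self._map(self.repack_locale, self.query_locales())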
+
+ # Actions {{{2
+ def clone_locales(self):
+ self.pull_locale_source()
+
+ def setup(self):
+ """setup step"""
+ self._run_tooltool()
+ self._copy_mozconfig()
+ self._mach_configure()
+ self._run_make_in_config_dir()
+ self.make_wget_en_US()
+ self.make_unpack_en_US()
+
+ def _run_make_in_config_dir(self):
+ """this step creates nsinstall, needed my make_wget_en_US()"""
+ dirs = self.query_abs_dirs()
+ config_dir = os.path.join(dirs["abs_obj_dir"], "config")
+ env = self.query_bootstrap_env()
+ return self._make(target=["export"], cwd=config_dir, env=env)
+
+ def _copy_mozconfig(self):
+ """copies the mozconfig file into abs_src_dir/.mozconfig
+ and logs the content
+ """
+ config = self.config
+ dirs = self.query_abs_dirs()
+ src = get_mozconfig_path(self, config, dirs)
+ dst = os.path.join(dirs["abs_src_dir"], ".mozconfig")
+ self.copyfile(src, dst)
+ self.read_from_file(dst, verbose=True)
+
+ def _mach(self, target, env, halt_on_failure=True, output_parser=None):
+ dirs = self.query_abs_dirs()
+ mach = self._get_mach_executable()
+        return self.run_command(
+            mach + target,
+            halt_on_failure=halt_on_failure,
+            env=env,
+            cwd=dirs["abs_src_dir"],
+            output_parser=output_parser,
+        )
+
+ def _mach_configure(self):
+ """calls mach configure"""
+ env = self.query_bootstrap_env()
+ target = ["configure"]
+ return self._mach(target=target, env=env)
+
+ def _get_mach_executable(self):
+ return [sys.executable, "mach"]
+
+ def _make(
+ self,
+ target,
+ cwd,
+ env,
+ error_list=MakefileErrorList,
+ halt_on_failure=True,
+ output_parser=None,
+ ):
+ """Runs make. Returns the exit code"""
+ make = ["make"]
+ if target:
+ make = make + target
+ return self.run_command(
+ make,
+ cwd=cwd,
+ env=env,
+ error_list=error_list,
+ halt_on_failure=halt_on_failure,
+ output_parser=output_parser,
+ )
+
+ def _get_output_from_make(
+ self, target, cwd, env, halt_on_failure=True, ignore_errors=False
+ ):
+ """runs make and returns the output of the command"""
+ return self.get_output_from_command(
+ ["make"] + target,
+ cwd=cwd,
+ env=env,
+ silent=True,
+ halt_on_failure=halt_on_failure,
+ ignore_errors=ignore_errors,
+ )
+
+ def make_unpack_en_US(self):
+ """wrapper for make unpack"""
+ config = self.config
+ dirs = self.query_abs_dirs()
+ env = self.query_bootstrap_env()
+ cwd = os.path.join(dirs["abs_obj_dir"], config["locales_dir"])
+ return self._make(target=["unpack"], cwd=cwd, env=env)
+
+ def make_wget_en_US(self):
+ """wrapper for make wget-en-US"""
+ env = self.query_bootstrap_env()
+ dirs = self.query_abs_dirs()
+ cwd = dirs["abs_locales_dir"]
+ return self._make(target=["wget-en-US"], cwd=cwd, env=env)
+
+ def make_upload(self, locale):
+ """wrapper for make upload command"""
+ env = self.query_l10n_env()
+ dirs = self.query_abs_dirs()
+ target = ["upload", "AB_CD=%s" % (locale)]
+ cwd = dirs["abs_locales_dir"]
+ parser = MakeUploadOutputParser(config=self.config, log_obj=self.log_obj)
+ retval = self._make(
+ target=target, cwd=cwd, env=env, halt_on_failure=False, output_parser=parser
+ )
+ if retval == SUCCESS:
+ self.info("Upload successful (%s)" % locale)
+ ret = SUCCESS
+ else:
+ self.error("failed to upload %s" % locale)
+ ret = FAILURE
+
+ if ret == FAILURE:
+ # If we failed above, we shouldn't even attempt a SIMPLE_NAME move
+ # even if we are configured to do so
+ return ret
+
+ # XXX Move the files to a SIMPLE_NAME format until we can enable
+ # Simple names in the build system
+ if self.config.get("simple_name_move"):
+ # Assume an UPLOAD PATH
+ upload_target = self.config["upload_env"]["UPLOAD_PATH"]
+ target_path = os.path.join(upload_target, locale)
+ self.mkdir_p(target_path)
+ glob_name = "*.%s.*" % locale
+ matches = (
+ glob.glob(os.path.join(upload_target, glob_name))
+ + glob.glob(os.path.join(upload_target, "update", glob_name))
+ + glob.glob(os.path.join(upload_target, "*", "xpi", glob_name))
+ + glob.glob(os.path.join(upload_target, "install", "sea", glob_name))
+ + glob.glob(os.path.join(upload_target, "setup.exe"))
+ + glob.glob(os.path.join(upload_target, "setup-stub.exe"))
+ )
+ targets_exts = [
+ "tar.bz2",
+ "dmg",
+ "langpack.xpi",
+ "checksums",
+ "zip",
+ "installer.exe",
+ "installer-stub.exe",
+ ]
+ targets = [(".%s" % (ext,), "target.%s" % (ext,)) for ext in targets_exts]
+ targets.extend([(f, f) for f in ("setup.exe", "setup-stub.exe")])
+ for f in matches:
+ possible_targets = [
+ (tail, target_file)
+ for (tail, target_file) in targets
+ if f.endswith(tail)
+ ]
+ if len(possible_targets) == 1:
+ _, target_file = possible_targets[0]
+ # Remove from list of available options for this locale
+ targets.remove(possible_targets[0])
+ else:
+ # wasn't valid (or already matched)
+ raise RuntimeError(
+ "Unexpected matching file name encountered: %s" % f
+ )
+ self.move(os.path.join(f), os.path.join(target_path, target_file))
+ self.log("Converted uploads for %s to simple names" % locale)
+ return ret
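+    # An illustrative SIMPLE_NAME move (file names are hypothetical):
+    #   <UPLOAD_PATH>/firefox-99.0.de.tar.bz2 -> <UPLOAD_PATH>/de/target.tar.bz2
+    #   <UPLOAD_PATH>/setup.exe               -> <UPLOAD_PATH>/de/setup.exe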
+
+ def set_upload_files(self, locale):
+ # The tree doesn't have a good way of exporting the list of files
+ # created during locale generation, but we can grab them by echoing the
+ # UPLOAD_FILES variable for each locale.
+ env = self.query_l10n_env()
+ target = [
+ "echo-variable-UPLOAD_FILES",
+ "echo-variable-CHECKSUM_FILES",
+ "AB_CD=%s" % locale,
+ ]
+ dirs = self.query_abs_dirs()
+ cwd = dirs["abs_locales_dir"]
+ # Bug 1242771 - echo-variable-UPLOAD_FILES via mozharness fails when stderr is found
+ # we should ignore stderr as unfortunately it's expected when parsing for values
+ output = self._get_output_from_make(
+ target=target, cwd=cwd, env=env, ignore_errors=True
+ )
+ self.info('UPLOAD_FILES is "%s"' % output)
+ files = shlex.split(output)
+ if not files:
+ self.error("failed to get upload file list for locale %s" % locale)
+ return FAILURE
+
+ self.upload_files[locale] = [
+ os.path.abspath(os.path.join(cwd, f)) for f in files
+ ]
+ return SUCCESS
+
+ def make_installers(self, locale):
+ """wrapper for make installers-(locale)"""
+ env = self.query_l10n_env()
+ env["PYTHONIOENCODING"] = "utf-8"
+ self._copy_mozconfig()
+ dirs = self.query_abs_dirs()
+ cwd = os.path.join(dirs["abs_locales_dir"])
+ target = [
+ "installers-%s" % locale,
+ ]
+ return self._make(target=target, cwd=cwd, env=env, halt_on_failure=False)
+
+ def repack_locale(self, locale):
+ """wraps the logic for make installers and generating
+ complete updates."""
+
+ # run make installers
+ if self.make_installers(locale) != SUCCESS:
+ self.error("make installers-%s failed" % (locale))
+ return FAILURE
+
+ # now try to upload the artifacts
+ if self.make_upload(locale):
+ self.error("make upload for locale %s failed!" % (locale))
+ return FAILURE
+
+ # set_upload_files() should be called after make upload, to make sure
+ # we have all files in place (checksums, etc)
+ if self.set_upload_files(locale):
+ self.error("failed to get list of files to upload for locale %s" % locale)
+ return FAILURE
+
+ return SUCCESS
+
+ def repack(self):
+ """creates the repacks and udpates"""
+ self._map(self.repack_locale, self.query_locales())
+
+ def _run_tooltool(self):
+ env = self.query_bootstrap_env()
+ config = self.config
+ dirs = self.query_abs_dirs()
+ manifest_src = os.environ.get("TOOLTOOL_MANIFEST")
+ if not manifest_src:
+ manifest_src = config.get("tooltool_manifest_src")
+ if not manifest_src:
+ return
+ python = sys.executable
+
+ cmd = [
+ python,
+ "-u",
+ os.path.join(dirs["abs_src_dir"], "mach"),
+ "artifact",
+ "toolchain",
+ "-v",
+ "--retry",
+ "4",
+ "--artifact-manifest",
+ os.path.join(dirs["abs_src_dir"], "toolchains.json"),
+ ]
+ if manifest_src:
+ cmd.extend(
+ [
+ "--tooltool-manifest",
+ os.path.join(dirs["abs_src_dir"], manifest_src),
+ ]
+ )
+ cache = config["bootstrap_env"].get("TOOLTOOL_CACHE")
+ if cache:
+ cmd.extend(["--cache-dir", cache])
+ self.info(str(cmd))
+ self.run_command(cmd, cwd=dirs["abs_src_dir"], halt_on_failure=True, env=env)
+
+
+# main {{{
+if __name__ == "__main__":
+ single_locale = DesktopSingleLocale()
+ single_locale.run_and_exit()
diff --git a/testing/mozharness/scripts/desktop_partner_repacks.py b/testing/mozharness/scripts/desktop_partner_repacks.py
new file mode 100755
index 0000000000..4f20663c73
--- /dev/null
+++ b/testing/mozharness/scripts/desktop_partner_repacks.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""desktop_partner_repacks.py
+
+This script manages Desktop partner repacks for beta/release builds.
+"""
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.log import FATAL
+from mozharness.base.python import VirtualenvMixin
+from mozharness.base.script import BaseScript
+from mozharness.mozilla.automation import AutomationMixin
+from mozharness.mozilla.secrets import SecretsMixin
+
+
+# DesktopPartnerRepacks {{{1
+class DesktopPartnerRepacks(AutomationMixin, BaseScript, VirtualenvMixin, SecretsMixin):
+ """Manages desktop partner repacks"""
+
+ actions = [
+ "get-secrets",
+ "setup",
+ "repack",
+ "summary",
+ ]
+ config_options = [
+ [
+ ["--version", "-v"],
+ {
+ "dest": "version",
+ "help": "Version of Firefox to repack",
+ },
+ ],
+ [
+ ["--build-number", "-n"],
+ {
+ "dest": "build_number",
+ "help": "Build number of Firefox to repack",
+ },
+ ],
+ [
+ ["--platform"],
+ {
+ "dest": "platform",
+ "help": "Platform to repack (e.g. linux64, macosx64, ...)",
+ },
+ ],
+ [
+ ["--partner", "-p"],
+ {
+ "dest": "partner",
+ "help": "Limit repackaging to partners matching this string",
+ },
+ ],
+ [
+ ["--taskid", "-t"],
+ {
+ "dest": "taskIds",
+ "action": "extend",
+ "help": "taskId(s) of upstream tasks for vanilla Firefox artifacts",
+ },
+ ],
+ [
+ ["--limit-locale", "-l"],
+ {
+ "dest": "limitLocales",
+ "action": "append",
+ },
+ ],
+ ]
+
+ def __init__(self):
+ # fxbuild style:
+ buildscript_kwargs = {
+ "all_actions": DesktopPartnerRepacks.actions,
+ "default_actions": DesktopPartnerRepacks.actions,
+ "config": {
+ "log_name": "partner-repacks",
+ "hashType": "sha512",
+ "workdir": "partner-repacks",
+ },
+ }
+
+ BaseScript.__init__(
+ self, config_options=self.config_options, **buildscript_kwargs
+ )
+
+ def _pre_config_lock(self, rw_config):
+ if os.getenv("REPACK_MANIFESTS_URL"):
+ self.info(
+ "Overriding repack_manifests_url to %s"
+ % os.getenv("REPACK_MANIFESTS_URL")
+ )
+ self.config["repack_manifests_url"] = os.getenv("REPACK_MANIFESTS_URL")
+ if os.getenv("UPSTREAM_TASKIDS"):
+ self.info("Overriding taskIds with %s" % os.getenv("UPSTREAM_TASKIDS"))
+ self.config["taskIds"] = os.getenv("UPSTREAM_TASKIDS").split()
+
+ if "version" not in self.config:
+ self.fatal("Version (-v) not supplied.")
+ if "build_number" not in self.config:
+ self.fatal("Build number (-n) not supplied.")
+ if "repo_file" not in self.config:
+ self.fatal("repo_file not supplied.")
+ if "repack_manifests_url" not in self.config:
+ self.fatal(
+ "repack_manifests_url not supplied in config or via REPACK_MANIFESTS_URL"
+ )
+ if "taskIds" not in self.config:
+ self.fatal("Need upstream taskIds from command line or in UPSTREAM_TASKIDS")
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(DesktopPartnerRepacks, self).query_abs_dirs()
+ dirs = {}
+ dirs["abs_repo_dir"] = os.path.join(abs_dirs["abs_work_dir"], ".repo")
+ dirs["abs_partners_dir"] = os.path.join(abs_dirs["abs_work_dir"], "partners")
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ # Actions {{{
+ def _repo_cleanup(self):
+ self.rmtree(self.query_abs_dirs()["abs_repo_dir"])
+ self.rmtree(self.query_abs_dirs()["abs_partners_dir"])
+
+ def _repo_init(self, repo):
+ partial_env = {
+ "GIT_SSH_COMMAND": "ssh -oIdentityFile={}".format(self.config["ssh_key"])
+ }
+ status = self.run_command(
+ [
+ repo,
+ "init",
+ "--no-repo-verify",
+ "-u",
+ self.config["repack_manifests_url"],
+ ],
+ cwd=self.query_abs_dirs()["abs_work_dir"],
+ partial_env=partial_env,
+ )
+ if status:
+ return status
+ return self.run_command(
+ [repo, "sync", "--current-branch", "--no-tags"],
+ cwd=self.query_abs_dirs()["abs_work_dir"],
+ partial_env=partial_env,
+ )
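+    # _repo_init effectively runs, in abs_work_dir:
+    #   repo init --no-repo-verify -u <repack_manifests_url>
+    #   repo sync --current-branch --no-tags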
+
+ def setup(self):
+ """setup step"""
+ repo = self.download_file(
+ self.config["repo_file"],
+ file_name="repo",
+ parent_dir=self.query_abs_dirs()["abs_work_dir"],
+ error_level=FATAL,
+ )
+ if not os.path.exists(repo):
+ self.fatal("Unable to download repo tool.")
+ self.chmod(repo, 0o755)
+ self.retry(
+ self._repo_init,
+ args=(repo,),
+ error_level=FATAL,
+            cleanup=self._repo_cleanup,
+ good_statuses=[0],
+ sleeptime=5,
+ )
+
+ def repack(self):
+ """creates the repacks"""
+ repack_cmd = [
+ "./mach",
+ "python",
+ "python/mozrelease/mozrelease/partner_repack.py",
+ "-v",
+ self.config["version"],
+ "-n",
+ str(self.config["build_number"]),
+ ]
+ if self.config.get("platform"):
+ repack_cmd.extend(["--platform", self.config["platform"]])
+ if self.config.get("partner"):
+ repack_cmd.extend(["--partner", self.config["partner"]])
+ if self.config.get("taskIds"):
+ for taskId in self.config["taskIds"]:
+ repack_cmd.extend(["--taskid", taskId])
+ if self.config.get("limitLocales"):
+ for locale in self.config["limitLocales"]:
+ repack_cmd.extend(["--limit-locale", locale])
+
+ self.run_command(repack_cmd, cwd=os.environ["GECKO_PATH"], halt_on_failure=True)
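+        # The command above expands to something like (values hypothetical):
+        #   ./mach python python/mozrelease/mozrelease/partner_repack.py \
+        #     -v 99.0 -n 1 --platform linux64 --partner acme --taskid AbC123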
+
+
+# main {{{
+if __name__ == "__main__":
+ partner_repacks = DesktopPartnerRepacks()
+ partner_repacks.run_and_exit()
diff --git a/testing/mozharness/scripts/desktop_unittest.py b/testing/mozharness/scripts/desktop_unittest.py
new file mode 100755
index 0000000000..336f2c752b
--- /dev/null
+++ b/testing/mozharness/scripts/desktop_unittest.py
@@ -0,0 +1,1331 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""desktop_unittest.py
+
+author: Jordan Lund
+"""
+
+import copy
+import glob
+import imp
+import json
+import multiprocessing
+import os
+import re
+import shutil
+import sys
+from datetime import datetime, timedelta
+
+# load modules from parent dir
+here = os.path.abspath(os.path.dirname(__file__))
+sys.path.insert(1, os.path.dirname(here))
+
+from mozharness.base.errors import BaseErrorList
+from mozharness.base.log import INFO, WARNING
+from mozharness.base.script import PreScriptAction
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.automation import TBPL_EXCEPTION, TBPL_RETRY
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.errors import HarnessErrorList
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.testing.unittest import DesktopUnittestOutputParser
+
+SUITE_CATEGORIES = [
+ "gtest",
+ "cppunittest",
+ "jittest",
+ "mochitest",
+ "reftest",
+ "xpcshell",
+]
+SUITE_DEFAULT_E10S = ["mochitest", "reftest"]
+SUITE_NO_E10S = ["xpcshell"]
+SUITE_REPEATABLE = ["mochitest", "reftest", "xpcshell"]
+
+
+# DesktopUnittest {{{1
+class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, CodeCoverageMixin):
+ config_options = (
+ [
+ [
+ [
+ "--mochitest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_mochitest_suites",
+ "type": "string",
+ "help": "Specify which mochi suite to run. "
+ "Suites are defined in the config file.\n"
+ "Examples: 'all', 'plain1', 'plain5', 'chrome', or 'a11y'",
+ },
+ ],
+ [
+ [
+ "--reftest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_reftest_suites",
+ "type": "string",
+ "help": "Specify which reftest suite to run. "
+ "Suites are defined in the config file.\n"
+ "Examples: 'all', 'crashplan', or 'jsreftest'",
+ },
+ ],
+ [
+ [
+ "--xpcshell-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_xpcshell_suites",
+ "type": "string",
+ "help": "Specify which xpcshell suite to run. "
+ "Suites are defined in the config file\n."
+ "Examples: 'xpcshell'",
+ },
+ ],
+ [
+ [
+ "--cppunittest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_cppunittest_suites",
+ "type": "string",
+ "help": "Specify which cpp unittest suite to run. "
+ "Suites are defined in the config file\n."
+ "Examples: 'cppunittest'",
+ },
+ ],
+ [
+ [
+ "--gtest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_gtest_suites",
+ "type": "string",
+ "help": "Specify which gtest suite to run. "
+ "Suites are defined in the config file\n."
+ "Examples: 'gtest'",
+ },
+ ],
+ [
+ [
+ "--jittest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_jittest_suites",
+ "type": "string",
+ "help": "Specify which jit-test suite to run. "
+ "Suites are defined in the config file\n."
+ "Examples: 'jittest'",
+ },
+ ],
+ [
+ [
+ "--run-all-suites",
+ ],
+ {
+ "action": "store_true",
+ "dest": "run_all_suites",
+ "default": False,
+ "help": "This will run all suites that are specified "
+ "in the config file. You do not need to specify "
+ "any other suites.\nBeware, this may take a while ;)",
+ },
+ ],
+ [
+ [
+ "--disable-e10s",
+ ],
+ {
+ "action": "store_false",
+ "dest": "e10s",
+ "default": True,
+ "help": "Run tests without multiple processes (e10s).",
+ },
+ ],
+ [
+ [
+ "--headless",
+ ],
+ {
+ "action": "store_true",
+ "dest": "headless",
+ "default": False,
+ "help": "Run tests in headless mode.",
+ },
+ ],
+ [
+ [
+ "--no-random",
+ ],
+ {
+ "action": "store_true",
+ "dest": "no_random",
+ "default": False,
+ "help": "Run tests with no random intermittents and bisect in case of real failure.", # NOQA: E501
+ },
+ ],
+ [
+ ["--total-chunks"],
+ {
+ "action": "store",
+ "dest": "total_chunks",
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--this-chunk"],
+ {
+ "action": "store",
+ "dest": "this_chunk",
+ "help": "Number of this chunk",
+ },
+ ],
+ [
+ ["--allow-software-gl-layers"],
+ {
+ "action": "store_true",
+ "dest": "allow_software_gl_layers",
+ "default": False,
+ "help": "Permits a software GL implementation (such as LLVMPipe) to use "
+ "the GL compositor.",
+ },
+ ],
+ [
+ ["--threads"],
+ {
+ "action": "store",
+ "dest": "threads",
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--gpu-required"],
+ {
+ "action": "store_true",
+ "dest": "gpu_required",
+ "default": False,
+ "help": "Run additional verification on modified tests using gpu instances.",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Defines an extra user preference.",
+ },
+ ],
+ [
+ [
+ "--repeat",
+ ],
+ {
+ "action": "store",
+ "type": "int",
+ "dest": "repeat",
+ "default": 0,
+ "help": "Repeat the tests the given number of times. Supported "
+ "by mochitest, reftest, crashtest, ignored otherwise.",
+ },
+ ],
+ [
+ ["--enable-xorigin-tests"],
+ {
+ "action": "store_true",
+ "dest": "enable_xorigin_tests",
+ "default": False,
+ "help": "Run tests in a cross origin iframe.",
+ },
+ ],
+ [
+ ["--enable-a11y-checks"],
+ {
+ "action": "store_true",
+ "default": False,
+ "dest": "a11y_checks",
+ "help": "Run tests with accessibility checks disabled.",
+ },
+ ],
+ [
+ ["--run-failures"],
+ {
+ "action": "store",
+ "default": "",
+ "type": "string",
+ "dest": "run_failures",
+ "help": "Run only failures matching keyword. "
+ "Examples: 'apple_silicon'",
+ },
+ ],
+ [
+ ["--crash-as-pass"],
+ {
+ "action": "store_true",
+ "default": False,
+ "dest": "crash_as_pass",
+ "help": "treat harness level crash as a pass",
+ },
+ ],
+ [
+ ["--timeout-as-pass"],
+ {
+ "action": "store_true",
+ "default": False,
+ "dest": "timeout_as_pass",
+ "help": "treat harness level timeout as a pass",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_true",
+ "default": False,
+ "dest": "disable_fission",
+ "help": "do not run tests with fission enabled.",
+ },
+ ],
+ [
+ ["--conditioned-profile"],
+ {
+ "action": "store_true",
+ "default": False,
+ "dest": "conditioned_profile",
+ "help": "run tests with a conditioned profile",
+ },
+ ],
+ [
+ ["--tag"],
+ {
+ "action": "append",
+ "default": [],
+ "dest": "test_tags",
+ "help": "Filter out tests that don't have the given tag. Can be used multiple "
+ "times in which case the test must contain at least one of the given tags.",
+ },
+ ],
+ [
+ ["--use-http3-server"],
+ {
+ "action": "store_true",
+ "default": False,
+ "dest": "useHttp3Server",
+ "help": "Whether to use the Http3 server",
+ },
+ ],
+ ]
+ + copy.deepcopy(testing_config_options)
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
+ def __init__(self, require_config_file=True):
+ # abs_dirs defined already in BaseScript but is here to make pylint happy
+ self.abs_dirs = None
+ super(DesktopUnittest, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "start-pulseaudio",
+ "install",
+ "stage-files",
+ "run-tests",
+ "uninstall",
+ ],
+ require_config_file=require_config_file,
+ config={"require_test_zip": True},
+ )
+
+ c = self.config
+ self.global_test_options = []
+ self.installer_url = c.get("installer_url")
+ self.test_url = c.get("test_url")
+ self.test_packages_url = c.get("test_packages_url")
+ self.symbols_url = c.get("symbols_url")
+ # this is so mozinstall in install() doesn't bug out if we don't run
+ # the download_and_extract action
+ self.installer_path = c.get("installer_path")
+ self.binary_path = c.get("binary_path")
+ self.abs_app_dir = None
+ self.abs_res_dir = None
+
+ # Construct an identifier to be used to identify Perfherder data
+ # for resource monitoring recording. This attempts to uniquely
+ # identify this test invocation configuration.
+ perfherder_parts = []
+ perfherder_options = []
+ suites = (
+ ("specified_mochitest_suites", "mochitest"),
+ ("specified_reftest_suites", "reftest"),
+ ("specified_xpcshell_suites", "xpcshell"),
+ ("specified_cppunittest_suites", "cppunit"),
+ ("specified_gtest_suites", "gtest"),
+ ("specified_jittest_suites", "jittest"),
+ )
+ for s, prefix in suites:
+ if s in c:
+ perfherder_parts.append(prefix)
+ perfherder_parts.extend(c[s])
+
+ if "this_chunk" in c:
+ perfherder_parts.append(c["this_chunk"])
+
+ if c["e10s"]:
+ perfherder_options.append("e10s")
+
+ self.resource_monitor_perfherder_id = (
+ ".".join(perfherder_parts),
+ perfherder_options,
+ )
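+        # For example, running with "--mochitest-suite plain --this-chunk 2"
+        # and e10s enabled yields roughly ("mochitest.plain.2", ["e10s"]).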
+
+ # helper methods {{{2
+ def _pre_config_lock(self, rw_config):
+ super(DesktopUnittest, self)._pre_config_lock(rw_config)
+ c = self.config
+ if not c.get("run_all_suites"):
+ return # configs are valid
+ for category in SUITE_CATEGORIES:
+ specific_suites = c.get("specified_%s_suites" % (category))
+ if specific_suites:
+ if specific_suites != "all":
+ self.fatal(
+ "Config options are not valid. Please ensure"
+ " that if the '--run-all-suites' flag was enabled,"
+ " then do not specify to run only specific suites "
+ "like:\n '--mochitest-suite browser-chrome'"
+ )
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(DesktopUnittest, self).query_abs_dirs()
+
+ c = self.config
+ dirs = {}
+ dirs["abs_work_dir"] = abs_dirs["abs_work_dir"]
+ dirs["abs_app_install_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "application"
+ )
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ dirs["abs_test_extensions_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "extensions"
+ )
+ dirs["abs_test_bin_dir"] = os.path.join(dirs["abs_test_install_dir"], "bin")
+ dirs["abs_test_bin_plugins_dir"] = os.path.join(
+ dirs["abs_test_bin_dir"], "plugins"
+ )
+ dirs["abs_test_bin_components_dir"] = os.path.join(
+ dirs["abs_test_bin_dir"], "components"
+ )
+ dirs["abs_mochitest_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "mochitest"
+ )
+ dirs["abs_reftest_dir"] = os.path.join(dirs["abs_test_install_dir"], "reftest")
+ dirs["abs_xpcshell_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "xpcshell"
+ )
+ dirs["abs_cppunittest_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "cppunittest"
+ )
+ dirs["abs_gtest_dir"] = os.path.join(dirs["abs_test_install_dir"], "gtest")
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ dirs["abs_jittest_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "jit-test", "jit-test"
+ )
+
+ if os.path.isabs(c["virtualenv_path"]):
+ dirs["abs_virtualenv_dir"] = c["virtualenv_path"]
+ else:
+ dirs["abs_virtualenv_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], c["virtualenv_path"]
+ )
+ abs_dirs.update(dirs)
+ self.abs_dirs = abs_dirs
+
+ return self.abs_dirs
+
+ def query_abs_app_dir(self):
+ """We can't set this in advance, because OSX install directories
+ change depending on branding and opt/debug.
+ """
+ if self.abs_app_dir:
+ return self.abs_app_dir
+ if not self.binary_path:
+ self.fatal("Can't determine abs_app_dir (binary_path not set!)")
+ self.abs_app_dir = os.path.dirname(self.binary_path)
+ return self.abs_app_dir
+
+ def query_abs_res_dir(self):
+ """The directory containing resources like plugins and extensions. On
+        OSX this is Contents/Resources; on all other platforms it's the same as
+ the app dir.
+
+ As with the app dir, we can't set this in advance, because OSX install
+ directories change depending on branding and opt/debug.
+ """
+ if self.abs_res_dir:
+ return self.abs_res_dir
+
+ abs_app_dir = self.query_abs_app_dir()
+ if self._is_darwin():
+ res_subdir = self.config.get("mac_res_subdir", "Resources")
+ self.abs_res_dir = os.path.join(os.path.dirname(abs_app_dir), res_subdir)
+ else:
+ self.abs_res_dir = abs_app_dir
+ return self.abs_res_dir
+
+ @PreScriptAction("create-virtualenv")
+ def _pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+
+ self.register_virtualenv_module(name="mock")
+ self.register_virtualenv_module(name="simplejson")
+
+ requirements_files = [
+ os.path.join(
+ dirs["abs_test_install_dir"], "config", "marionette_requirements.txt"
+ )
+ ]
+
+ if self._query_specified_suites("mochitest", "mochitest-media") is not None:
+ # mochitest-media is the only thing that needs this
+ requirements_files.append(
+ os.path.join(
+ dirs["abs_mochitest_dir"],
+ "websocketprocessbridge",
+ "websocketprocessbridge_requirements_3.txt",
+ )
+ )
+
+ for requirements_file in requirements_files:
+ self.register_virtualenv_module(
+ requirements=[requirements_file], two_pass=True
+ )
+
+ _python_interp = self.query_exe("python")
+ if "win" in self.platform_name() and os.path.exists(_python_interp):
+ multiprocessing.set_executable(_python_interp)
+
+ def _query_symbols_url(self):
+ """query the full symbols URL based upon binary URL"""
+ # may break with name convention changes but is one less 'input' for script
+ if self.symbols_url:
+ return self.symbols_url
+
+ # Use simple text substitution to determine the symbols_url from the
+ # installer_url. This will not always work: For signed builds, the
+ # installer_url is likely an artifact in a signing task, which may not
+ # have a symbols artifact. It might be better to use the test target
+ # preferentially, like query_prefixed_build_dir_url() does (for future
+ # consideration, if this code proves troublesome).
+ symbols_url = None
+ self.info("finding symbols_url based upon self.installer_url")
+ if self.installer_url:
+ for ext in [".zip", ".dmg", ".tar.bz2"]:
+ if ext in self.installer_url:
+ symbols_url = self.installer_url.replace(
+ ext, ".crashreporter-symbols.zip"
+ )
+ if not symbols_url:
+ self.fatal(
+ "self.installer_url was found but symbols_url could \
+ not be determined"
+ )
+ else:
+ self.fatal("self.installer_url was not found in self.config")
+ self.info("setting symbols_url as %s" % (symbols_url))
+ self.symbols_url = symbols_url
+ return self.symbols_url
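+    # An illustrative substitution (file names are hypothetical):
+    #   .../firefox-99.0.en-US.linux-x86_64.tar.bz2
+    #     -> .../firefox-99.0.en-US.linux-x86_64.crashreporter-symbols.zip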
+
+ def _get_mozharness_test_paths(self, suite_category, suite):
+ test_paths = json.loads(os.environ.get("MOZHARNESS_TEST_PATHS", '""'))
+
+ if "-coverage" in suite:
+ suite = suite[: suite.index("-coverage")]
+
+ if not test_paths or suite not in test_paths:
+ return None
+
+ suite_test_paths = test_paths[suite]
+
+ if suite_category == "reftest":
+ dirs = self.query_abs_dirs()
+ suite_test_paths = [
+ os.path.join(dirs["abs_reftest_dir"], "tests", p)
+ for p in suite_test_paths
+ ]
+
+ return suite_test_paths
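+    # MOZHARNESS_TEST_PATHS is JSON keyed by suite, e.g. (hypothetical paths):
+    #   {"xpcshell": ["netwerk/test/unit"], "reftest": ["layout/reftests/text"]}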
+
+ def _query_abs_base_cmd(self, suite_category, suite):
+ if self.binary_path:
+ c = self.config
+ dirs = self.query_abs_dirs()
+ run_file = c["run_file_names"][suite_category]
+ base_cmd = [self.query_python_path("python"), "-u"]
+ base_cmd.append(os.path.join(dirs["abs_%s_dir" % suite_category], run_file))
+ abs_app_dir = self.query_abs_app_dir()
+ abs_res_dir = self.query_abs_res_dir()
+
+ raw_log_file, error_summary_file = self.get_indexed_logs(
+ dirs["abs_blob_upload_dir"], suite
+ )
+
+ str_format_values = {
+ "binary_path": self.binary_path,
+ "symbols_path": self._query_symbols_url(),
+ "abs_work_dir": dirs["abs_work_dir"],
+ "abs_app_dir": abs_app_dir,
+ "abs_res_dir": abs_res_dir,
+ "raw_log_file": raw_log_file,
+ "error_summary_file": error_summary_file,
+ "gtest_dir": os.path.join(dirs["abs_test_install_dir"], "gtest"),
+ }
+
+ # TestingMixin._download_and_extract_symbols() will set
+ # self.symbols_path when downloading/extracting.
+ if self.symbols_path:
+ str_format_values["symbols_path"] = self.symbols_path
+
+ if suite_category not in SUITE_NO_E10S:
+ if suite_category in SUITE_DEFAULT_E10S and not c["e10s"]:
+ base_cmd.append("--disable-e10s")
+ elif suite_category not in SUITE_DEFAULT_E10S and c["e10s"]:
+ base_cmd.append("--e10s")
+ if c.get("repeat"):
+ if suite_category in SUITE_REPEATABLE:
+ base_cmd.extend(["--repeat=%s" % c.get("repeat")])
+ else:
+ self.log(
+ "--repeat not supported in {}".format(suite_category),
+ level=WARNING,
+ )
+
+            # do not add --disable-fission if we don't have --disable-e10s
+ if c["disable_fission"] and suite_category not in [
+ "gtest",
+ "cppunittest",
+ "jittest",
+ ]:
+ base_cmd.append("--disable-fission")
+
+ if c["useHttp3Server"]:
+ base_cmd.append("--use-http3-server")
+
+ # Ignore chunking if we have user specified test paths
+ if not (self.verify_enabled or self.per_test_coverage):
+ test_paths = self._get_mozharness_test_paths(suite_category, suite)
+ if test_paths:
+ base_cmd.extend(test_paths)
+ elif c.get("total_chunks") and c.get("this_chunk"):
+ base_cmd.extend(
+ [
+ "--total-chunks",
+ c["total_chunks"],
+ "--this-chunk",
+ c["this_chunk"],
+ ]
+ )
+
+ if c["no_random"]:
+ if suite_category == "mochitest":
+ base_cmd.append("--bisect-chunk=default")
+ else:
+ self.warning(
+ "--no-random does not currently work with suites other than "
+ "mochitest."
+ )
+
+ if c["headless"]:
+ base_cmd.append("--headless")
+
+ if c.get("threads"):
+ base_cmd.extend(["--threads", c["threads"]])
+
+ if c["enable_xorigin_tests"]:
+ base_cmd.append("--enable-xorigin-tests")
+
+ if suite_category not in ["cppunittest", "gtest", "jittest"]:
+ # Enable stylo threads everywhere we can. Some tests don't
+ # support --setpref, so ignore those.
+ base_cmd.append("--setpref=layout.css.stylo-threads=4")
+
+ if c["extra_prefs"]:
+ base_cmd.extend(["--setpref={}".format(p) for p in c["extra_prefs"]])
+
+ if c["a11y_checks"]:
+ base_cmd.append("--enable-a11y-checks")
+
+ if c["run_failures"]:
+ base_cmd.extend(["--run-failures={}".format(c["run_failures"])])
+
+ if c["timeout_as_pass"]:
+ base_cmd.append("--timeout-as-pass")
+
+ if c["crash_as_pass"]:
+ base_cmd.append("--crash-as-pass")
+
+ if c["conditioned_profile"]:
+ base_cmd.append("--conditioned-profile")
+
+ # Ensure the --tag flag and its params get passed along
+ if c["test_tags"]:
+ base_cmd.extend(["--tag={}".format(t) for t in c["test_tags"]])
+
+ if suite_category not in c["suite_definitions"]:
+ self.fatal("'%s' not defined in the config!")
+
+ if suite in (
+ "browser-chrome-coverage",
+ "xpcshell-coverage",
+ "mochitest-devtools-chrome-coverage",
+ "plain-coverage",
+ ):
+ base_cmd.append("--jscov-dir-prefix=%s" % dirs["abs_blob_upload_dir"])
+
+ options = c["suite_definitions"][suite_category]["options"]
+ if options:
+ for option in options:
+ option = option % str_format_values
+ if not option.endswith("None"):
+ base_cmd.append(option)
+ if self.structured_output(
+ suite_category, self._query_try_flavor(suite_category, suite)
+ ):
+ base_cmd.append("--log-raw=-")
+ return base_cmd
+ else:
+ self.warning(
+ "Suite options for %s could not be determined."
+ "\nIf you meant to have options for this suite, "
+ "please make sure they are specified in your "
+ "config under %s_options" % (suite_category, suite_category)
+ )
+
+ return base_cmd
+ else:
+ self.fatal(
+ "'binary_path' could not be determined.\n This should "
+ "be like '/path/build/application/firefox/firefox'"
+ "\nIf you are running this script without the 'install' "
+ "action (where binary_path is set), please ensure you are"
+ " either:\n(1) specifying it in the config file under "
+ "binary_path\n(2) specifying it on command line with the"
+ " '--binary-path' flag"
+ )
+
+ def _query_specified_suites(self, category, sub_category=None):
+ """Checks if the provided suite does indeed exist.
+
+ If at least one suite was given and if it does exist, return the suite
+ as legitimate and line it up for execution.
+
+ Otherwise, do not run any suites and return a fatal error.
+ """
+ c = self.config
+ all_suites = c.get("all_{}_suites".format(category), None)
+ specified_suites = c.get("specified_{}_suites".format(category), None)
+
+        # Bug 1603842 - disallow selection of more than 1 suite at a time
+ if specified_suites is None:
+ # Path taken by test-verify
+ return self.query_per_test_category_suites(category, all_suites)
+        if specified_suites and len(specified_suites) > 1:
+            self.fatal(
+                "Selection of multiple suites is not permitted. "
+                "Please select at most 1 test suite."
+            )
+            return
+
+ # Normal path taken by most test suites as only one suite is specified
+ suite = specified_suites[0]
+ if suite not in all_suites:
+ self.fatal("""Selected suite does not exist!""")
+
+        # allow for fine-grained suite selection
+ ret_val = all_suites[suite]
+ if sub_category in all_suites:
+ if all_suites[sub_category] != ret_val:
+ return None
+
+ return {suite: ret_val}
+
+ def _query_try_flavor(self, category, suite):
+ flavors = {
+ "mochitest": [
+ ("plain.*", "mochitest"),
+ ("browser-chrome.*", "browser-chrome"),
+ ("mochitest-browser-a11y.*", "browser-a11y"),
+ ("mochitest-browser-media.*", "browser-media"),
+ ("mochitest-devtools-chrome.*", "devtools-chrome"),
+ ("chrome", "chrome"),
+ ],
+ "xpcshell": [("xpcshell", "xpcshell")],
+ "reftest": [("reftest", "reftest"), ("crashtest", "crashtest")],
+ }
+ for suite_pattern, flavor in flavors.get(category, []):
+ if re.compile(suite_pattern).match(suite):
+ return flavor
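+    # e.g. category "mochitest" with suite "browser-chrome-1" maps to
+    # "browser-chrome"; None is returned when no pattern matches.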
+
+ def structured_output(self, suite_category, flavor=None):
+ unstructured_flavors = self.config.get("unstructured_flavors")
+ if not unstructured_flavors:
+ return True
+ if suite_category not in unstructured_flavors:
+ return True
+ if not unstructured_flavors.get(
+ suite_category
+ ) or flavor in unstructured_flavors.get(suite_category):
+ return False
+ return True
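+    # A hypothetical config: {"unstructured_flavors": {"xpcshell": []}} treats
+    # every xpcshell flavor as unstructured, while {"xpcshell": ["xpcshell"]}
+    # only matches that single flavor.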
+
+ def get_test_output_parser(
+ self, suite_category, flavor=None, strict=False, **kwargs
+ ):
+ if not self.structured_output(suite_category, flavor):
+ return DesktopUnittestOutputParser(suite_category=suite_category, **kwargs)
+ self.info("Structured output parser in use for %s." % suite_category)
+ return StructuredOutputParser(
+ suite_category=suite_category, strict=strict, **kwargs
+ )
+
+ # Actions {{{2
+
+ # clobber defined in BaseScript, deletes mozharness/build if exists
+ # preflight_download_and_extract is in TestingMixin.
+ # create_virtualenv is in VirtualenvMixin.
+ # preflight_install is in TestingMixin.
+ # install is in TestingMixin.
+
+ @PreScriptAction("download-and-extract")
+ def _pre_download_and_extract(self, action):
+ """Abort if --artifact try syntax is used with compiled-code tests"""
+ dir = self.query_abs_dirs()["abs_blob_upload_dir"]
+ self.mkdir_p(dir)
+
+ if not self.try_message_has_flag("artifact"):
+ return
+ self.info("Artifact build requested in try syntax.")
+ rejected = []
+ compiled_code_suites = [
+ "cppunit",
+ "gtest",
+ "jittest",
+ ]
+ for category in SUITE_CATEGORIES:
+ suites = self._query_specified_suites(category) or []
+ for suite in suites:
+ if any([suite.startswith(c) for c in compiled_code_suites]):
+ rejected.append(suite)
+ break
+ if rejected:
+ self.record_status(TBPL_EXCEPTION)
+ self.fatal(
+ "There are specified suites that are incompatible with "
+ "--artifact try syntax flag: {}".format(", ".join(rejected)),
+ exit_code=self.return_code,
+ )
+
+ def download_and_extract(self):
+ """
+ download and extract test zip / download installer
+ optimizes which subfolders to extract from tests archive
+ """
+ c = self.config
+
+ extract_dirs = None
+
+ if c.get("run_all_suites"):
+ target_categories = SUITE_CATEGORIES
+ else:
+ target_categories = [
+ cat
+ for cat in SUITE_CATEGORIES
+ if self._query_specified_suites(cat) is not None
+ ]
+ super(DesktopUnittest, self).download_and_extract(
+ extract_dirs=extract_dirs, suite_categories=target_categories
+ )
+
+ def start_pulseaudio(self):
+ command = []
+ # Implies that underlying system is Linux.
+ if os.environ.get("NEED_PULSEAUDIO") == "true":
+ command.extend(
+ [
+ "pulseaudio",
+ "--daemonize",
+ "--log-level=4",
+ "--log-time=1",
+ "-vvvvv",
+ "--exit-idle-time=-1",
+ ]
+ )
+
+ # Only run the initialization for Debian.
+ # Ubuntu appears to have an alternate method of starting pulseaudio.
+ if self._is_debian():
+ self._kill_named_proc("pulseaudio")
+ self.run_command(command)
+
+ # All Linux systems need module-null-sink to be loaded, otherwise
+ # media tests fail.
+ self.run_command("pactl load-module module-null-sink")
+ self.run_command("pactl list modules short")
+
+ def stage_files(self):
+ for category in SUITE_CATEGORIES:
+ suites = self._query_specified_suites(category)
+ stage = getattr(self, "_stage_{}".format(category), None)
+ if suites and stage:
+ stage(suites)
+
+ def _stage_files(self, bin_name=None, fail_if_not_exists=True):
+ dirs = self.query_abs_dirs()
+ abs_app_dir = self.query_abs_app_dir()
+
+ # For mac these directories are in Contents/Resources, on other
+ # platforms abs_res_dir will point to abs_app_dir.
+ abs_res_dir = self.query_abs_res_dir()
+ abs_res_components_dir = os.path.join(abs_res_dir, "components")
+ abs_res_plugins_dir = os.path.join(abs_res_dir, "plugins")
+ abs_res_extensions_dir = os.path.join(abs_res_dir, "extensions")
+
+ if bin_name:
+ src = os.path.join(dirs["abs_test_bin_dir"], bin_name)
+ if os.path.exists(src):
+ self.info(
+ "copying %s to %s" % (src, os.path.join(abs_app_dir, bin_name))
+ )
+ shutil.copy2(src, os.path.join(abs_app_dir, bin_name))
+ elif fail_if_not_exists:
+ raise OSError("File %s not found" % src)
+ self.copytree(
+ dirs["abs_test_bin_components_dir"],
+ abs_res_components_dir,
+ overwrite="overwrite_if_exists",
+ )
+ self.mkdir_p(abs_res_plugins_dir)
+ self.copytree(
+ dirs["abs_test_bin_plugins_dir"],
+ abs_res_plugins_dir,
+ overwrite="overwrite_if_exists",
+ )
+ if os.path.isdir(dirs["abs_test_extensions_dir"]):
+ self.mkdir_p(abs_res_extensions_dir)
+ self.copytree(
+ dirs["abs_test_extensions_dir"],
+ abs_res_extensions_dir,
+ overwrite="overwrite_if_exists",
+ )
+
+ def _stage_xpcshell(self, suites):
+ if "WindowsApps" in self.binary_path:
+ self.log(
+ "Skipping stage xpcshell for MSIX tests because we cannot copy files into the installation directory."
+ )
+ return
+
+ self._stage_files(self.config["xpcshell_name"])
+ # http3server isn't built for Windows tests or Linux asan/tsan
+ # builds. Only stage if the `http3server_name` config is set and if
+ # the file actually exists.
+ if self.config.get("http3server_name"):
+ self._stage_files(self.config["http3server_name"], fail_if_not_exists=False)
+
+ def _stage_cppunittest(self, suites):
+ abs_res_dir = self.query_abs_res_dir()
+ dirs = self.query_abs_dirs()
+ abs_cppunittest_dir = dirs["abs_cppunittest_dir"]
+
+        # move manifest and js files to the resources dir, where tests expect them
+ files = glob.glob(os.path.join(abs_cppunittest_dir, "*.js"))
+ files.extend(glob.glob(os.path.join(abs_cppunittest_dir, "*.manifest")))
+ for f in files:
+ self.move(f, abs_res_dir)
+
+ def _stage_gtest(self, suites):
+ abs_res_dir = self.query_abs_res_dir()
+ abs_app_dir = self.query_abs_app_dir()
+ dirs = self.query_abs_dirs()
+ abs_gtest_dir = dirs["abs_gtest_dir"]
+ dirs["abs_test_bin_dir"] = os.path.join(dirs["abs_test_install_dir"], "bin")
+
+ files = glob.glob(os.path.join(dirs["abs_test_bin_plugins_dir"], "gmp-*"))
+ files.append(os.path.join(abs_gtest_dir, "dependentlibs.list.gtest"))
+ for f in files:
+ self.move(f, abs_res_dir)
+
+ self.copytree(
+ os.path.join(abs_gtest_dir, "gtest_bin"), os.path.join(abs_app_dir)
+ )
+
+ def _kill_proc_tree(self, pid):
+ # Kill a process tree (including grandchildren) with signal.SIGTERM
+ try:
+ import signal
+
+ import psutil
+
+ if pid == os.getpid():
+ return (None, None)
+
+ parent = psutil.Process(pid)
+ children = parent.children(recursive=True)
+ children.append(parent)
+
+ for p in children:
+ p.send_signal(signal.SIGTERM)
+
+ # allow for 60 seconds to kill procs
+ timeout = 60
+ gone, alive = psutil.wait_procs(children, timeout=timeout)
+ for p in gone:
+ self.info("psutil found pid %s dead" % p.pid)
+ for p in alive:
+ self.error("failed to kill pid %d after %d" % (p.pid, timeout))
+
+ return (gone, alive)
+ except Exception as e:
+ self.error("Exception while trying to kill process tree: %s" % str(e))
+
+ def _kill_named_proc(self, pname):
+ try:
+ import psutil
+ except Exception as e:
+            self.info(
+                "Error importing psutil, not killing process %s: %s"
+                % (pname, str(e))
+            )
+ return
+
+ for proc in psutil.process_iter():
+ try:
+ if proc.name() == pname:
+ procd = proc.as_dict(attrs=["pid", "ppid", "name", "username"])
+ self.info("in _kill_named_proc, killing %s" % procd)
+ self._kill_proc_tree(proc.pid)
+ except Exception as e:
+ self.info("Warning: Unable to kill process %s: %s" % (pname, str(e)))
+ # may not be able to access process info for all processes
+ continue
+
+ def _remove_xen_clipboard(self):
+ """
+        When running on a Windows 7 VM, XenDPriv.exe interferes with the
+        clipboard; terminate the process and remove the binary so that it
+        doesn't restart.
+ """
+ if not self._is_windows():
+ return
+
+ self._kill_named_proc("XenDPriv.exe")
+ xenpath = os.path.join(
+ os.environ["ProgramFiles"], "Citrix", "XenTools", "XenDPriv.exe"
+ )
+ try:
+ if os.path.isfile(xenpath):
+ os.remove(xenpath)
+ except Exception as e:
+ self.error("Error: Failure to remove file %s: %s" % (xenpath, str(e)))
+
+ def _report_system_info(self):
+ """
+ Create the system-info.log artifact file, containing a variety of
+ system information that might be useful in diagnosing test failures.
+ """
+ try:
+ import psutil
+
+ path = os.path.join(
+ self.query_abs_dirs()["abs_blob_upload_dir"], "system-info.log"
+ )
+ with open(path, "w") as f:
+ f.write("System info collected at %s\n\n" % datetime.now())
+ f.write("\nBoot time %s\n" % datetime.fromtimestamp(psutil.boot_time()))
+ f.write("\nVirtual memory: %s\n" % str(psutil.virtual_memory()))
+ f.write("\nDisk partitions: %s\n" % str(psutil.disk_partitions()))
+ f.write("\nDisk usage (/): %s\n" % str(psutil.disk_usage(os.path.sep)))
+ if not self._is_windows():
+ # bug 1417189: frequent errors querying users on Windows
+ f.write("\nUsers: %s\n" % str(psutil.users()))
+ f.write("\nNetwork connections:\n")
+ try:
+ for nc in psutil.net_connections():
+ f.write(" %s\n" % str(nc))
+ except Exception:
+ f.write("Exception getting network info: %s\n" % sys.exc_info()[0])
+ f.write("\nProcesses:\n")
+ try:
+ for p in psutil.process_iter():
+ ctime = str(datetime.fromtimestamp(p.create_time()))
+ f.write(
+ " PID %d %s %s created at %s\n"
+ % (p.pid, p.name(), str(p.cmdline()), ctime)
+ )
+ except Exception:
+ f.write("Exception getting process info: %s\n" % sys.exc_info()[0])
+ except Exception:
+ # psutil throws a variety of intermittent exceptions
+ self.info("Unable to complete system-info.log: %s" % sys.exc_info()[0])
+
+ # pull defined in VCSScript.
+ # preflight_run_tests defined in TestingMixin.
+
+ def run_tests(self):
+ self._remove_xen_clipboard()
+ self._report_system_info()
+ self.start_time = datetime.now()
+ for category in SUITE_CATEGORIES:
+ if not self._run_category_suites(category):
+ break
+
+ def get_timeout_for_category(self, suite_category):
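+        # Output timeout, in seconds, passed to run_command for this category.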
+ if suite_category == "cppunittest":
+ return 2500
+ return self.config["suite_definitions"][suite_category].get("run_timeout", 1000)
+
+ def _run_category_suites(self, suite_category):
+ """run suite(s) to a specific category"""
+ dirs = self.query_abs_dirs()
+ suites = self._query_specified_suites(suite_category)
+ abs_app_dir = self.query_abs_app_dir()
+ abs_res_dir = self.query_abs_res_dir()
+
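+        # Per-test budget: cap the whole run at 60 minutes and at 10 tests
+        # (30 when collecting per-test coverage).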
+ max_per_test_time = timedelta(minutes=60)
+ max_per_test_tests = 10
+ if self.per_test_coverage:
+ max_per_test_tests = 30
+ executed_tests = 0
+ executed_too_many_tests = False
+ xpcshell_selftests = 0
+
+ if suites:
+ self.info("#### Running %s suites" % suite_category)
+ for suite in suites:
+ if executed_too_many_tests and not self.per_test_coverage:
+ return False
+
+ replace_dict = {
+ "abs_app_dir": abs_app_dir,
+ # Mac specific, but points to abs_app_dir on other
+ # platforms.
+ "abs_res_dir": abs_res_dir,
+ "binary_path": self.binary_path,
+ "install_dir": self.install_dir,
+ }
+ options_list = []
+ env = {"TEST_SUITE": suite}
+ if isinstance(suites[suite], dict):
+ options_list = suites[suite].get("options", [])
+ if (
+ self.verify_enabled
+ or self.per_test_coverage
+ or self._get_mozharness_test_paths(suite_category, suite)
+ ):
+ # Ignore tests list in modes where we are running specific tests.
+ tests_list = []
+ else:
+ tests_list = suites[suite].get("tests", [])
+ env = copy.deepcopy(suites[suite].get("env", {}))
+ else:
+ options_list = suites[suite]
+ tests_list = []
+
+ flavor = self._query_try_flavor(suite_category, suite)
+ try_options, try_tests = self.try_args(flavor)
+
+ suite_name = suite_category + "-" + suite
+ tbpl_status, log_level = None, None
+ error_list = BaseErrorList + HarnessErrorList
+ parser = self.get_test_output_parser(
+ suite_category,
+ flavor=flavor,
+ config=self.config,
+ error_list=error_list,
+ log_obj=self.log_obj,
+ )
+
+ if suite_category == "reftest":
+ ref_formatter = imp.load_source(
+ "ReftestFormatter",
+ os.path.abspath(
+ os.path.join(dirs["abs_reftest_dir"], "output.py")
+ ),
+ )
+ parser.formatter = ref_formatter.ReftestFormatter()
+
+ if self.query_minidump_stackwalk():
+ env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
+ if self.config["nodejs_path"]:
+ env["MOZ_NODE_PATH"] = self.config["nodejs_path"]
+ env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "full"
+ if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
+ self.mkdir_p(env["MOZ_UPLOAD_DIR"])
+
+ if self.config["allow_software_gl_layers"]:
+ env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"
+
+ env = self.query_env(partial_env=env, log_level=INFO)
+ cmd_timeout = self.get_timeout_for_category(suite_category)
+
+ summary = {}
+ for per_test_args in self.query_args(suite):
+ # Make sure baseline code coverage tests are never
+ # skipped and that having them run has no influence
+ # on the max number of actual tests that are to be run.
+ is_baseline_test = (
+ "baselinecoverage" in per_test_args[-1]
+ if self.per_test_coverage
+ else False
+ )
+ if executed_too_many_tests and not is_baseline_test:
+ continue
+
+ if not is_baseline_test:
+ if (datetime.now() - self.start_time) > max_per_test_time:
+                        # The test run has exceeded its time budget. That is okay! Stop running
+ # them so that a task timeout is not triggered, and so that
+ # (partial) results are made available in a timely manner.
+ self.info(
+ "TinderboxPrint: Running tests took too long: Not all tests "
+ "were executed.<br/>"
+ )
+                            # Signal that the per-test time was exceeded, so that we also
+                            # break out of the suites and suite-categories loops.
+ return False
+ if executed_tests >= max_per_test_tests:
+ # When changesets are merged between trees or many tests are
+ # otherwise updated at once, there probably is not enough time
+ # to run all tests, and attempting to do so may cause other
+ # problems, such as generating too much log output.
+ self.info(
+ "TinderboxPrint: Too many modified tests: Not all tests "
+ "were executed.<br/>"
+ )
+ executed_too_many_tests = True
+
+ executed_tests = executed_tests + 1
+
+ abs_base_cmd = self._query_abs_base_cmd(suite_category, suite)
+ cmd = abs_base_cmd[:]
+ cmd.extend(
+ self.query_options(
+ options_list, try_options, str_format_values=replace_dict
+ )
+ )
+ cmd.extend(
+ self.query_tests_args(
+ tests_list, try_tests, str_format_values=replace_dict
+ )
+ )
+
+ final_cmd = copy.copy(cmd)
+ final_cmd.extend(per_test_args)
+
+ # Bug 1714406: In test-verify of xpcshell tests on Windows, repeated
+ # self-tests can trigger https://bugs.python.org/issue37380,
+ # for python < 3.7; avoid by running xpcshell self-tests only once
+ # per test-verify run.
+ if (
+ (self.verify_enabled or self.per_test_coverage)
+ and sys.platform.startswith("win")
+ and sys.version_info < (3, 7)
+ and "--self-test" in final_cmd
+ ):
+ xpcshell_selftests += 1
+ if xpcshell_selftests > 1:
+ final_cmd.remove("--self-test")
+
+ final_env = copy.copy(env)
+
+ if self.per_test_coverage:
+ self.set_coverage_env(final_env)
+
+ return_code = self.run_command(
+ final_cmd,
+ cwd=dirs["abs_work_dir"],
+ output_timeout=cmd_timeout,
+ output_parser=parser,
+ env=final_env,
+ )
+
+ if self.per_test_coverage:
+ self.add_per_test_coverage_report(
+ final_env, suite, per_test_args[-1]
+ )
+
+ # mochitest, reftest, and xpcshell suites do not return
+ # appropriate return codes. Therefore, we must parse the output
+ # to determine what the tbpl_status and worst_log_level must
+ # be. We do this by:
+ # 1) checking to see if our mozharness script ran into any
+ # errors itself with 'num_errors' <- OutputParser
+ # 2) if num_errors is 0 then we look in the subclassed 'parser'
+ # findings for harness/suite errors <- DesktopUnittestOutputParser
+ # 3) checking to see if the return code is in success_codes
+
+ success_codes = None
+ tbpl_status, log_level, summary = parser.evaluate_parser(
+ return_code, success_codes, summary
+ )
+ parser.append_tinderboxprint_line(suite_name)
+
+ self.record_status(tbpl_status, level=log_level)
+ if len(per_test_args) > 0:
+ self.log_per_test_status(
+ per_test_args[-1], tbpl_status, log_level
+ )
+ if tbpl_status == TBPL_RETRY:
+ self.info("Per-test run abandoned due to RETRY status")
+ return False
+ else:
+ # report as INFO instead of log_level to avoid extra Treeherder lines
+ self.info(
+ "The %s suite: %s ran with return status: %s"
+ % (suite_category, suite, tbpl_status),
+ )
+
+ if executed_too_many_tests:
+ return False
+ else:
+ self.debug("There were no suites to run for %s" % suite_category)
+ return True
+
+ def uninstall(self):
+ # Technically, we might miss this step if earlier steps fail badly.
+ # If that becomes a big issue we should consider moving this to
+ # something that is more likely to execute, such as
+ # postflight_run_cmd_suites
+ if "WindowsApps" in self.binary_path:
+ self.uninstall_app(self.binary_path)
+ else:
+ self.log("Skipping uninstall for non-MSIX test")
+
+
+# main {{{1
+if __name__ == "__main__":
+ desktop_unittest = DesktopUnittest()
+ desktop_unittest.run_and_exit()
diff --git a/testing/mozharness/scripts/does_it_crash.py b/testing/mozharness/scripts/does_it_crash.py
new file mode 100755
index 0000000000..0c54b63131
--- /dev/null
+++ b/testing/mozharness/scripts/does_it_crash.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+""" does_it_crash.py
+
+ Runs a thing to see if it crashes within a set period.
+"""
+import os
+import sys
+
+import requests
+
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+import mozinstall
+from mozharness.base.script import BaseScript
+from mozprocess import ProcessHandler
+
+
+class DoesItCrash(BaseScript):
+ config_options = [
+ [
+ [
+ "--thing-url",
+ ],
+ {
+ "action": "store",
+ "dest": "thing_url",
+ "type": str,
+ "help": "An URL that points to a package containing the thing to run",
+ },
+ ],
+ [
+ [
+ "--thing-to-run",
+ ],
+ {
+ "action": "store",
+ "dest": "thing_to_run",
+ "type": str,
+ "help": "The thing to run. If --thing-url is a package, this should be "
+ "its location relative to the root of the package.",
+ },
+ ],
+ [
+ [
+ "--thing-arg",
+ ],
+ {
+ "action": "append",
+ "dest": "thing_args",
+ "type": str,
+ "default": [],
+ "help": "Args for the thing. May be passed multiple times",
+ },
+ ],
+ [
+ [
+ "--run-for",
+ ],
+ {
+ "action": "store",
+ "dest": "run_for",
+ "default": 30,
+ "type": int,
+ "help": "How long to run the thing for, in seconds",
+ },
+ ],
+ ]
+
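+    # A hypothetical invocation (URL and paths are illustrative only):
+    #   python does_it_crash.py --thing-url https://example.com/thing.tar.gz \
+    #     --thing-to-run thing/mybin --run-for 30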
+ def __init__(self):
+ super(DoesItCrash, self).__init__(
+ all_actions=[
+ "download",
+ "run-thing",
+ ],
+ default_actions=[
+ "download",
+ "run-thing",
+ ],
+ config_options=self.config_options,
+ )
+
+ def downloadFile(self, url, file_name):
+ req = requests.get(url, stream=True, timeout=30)
+ file_path = os.path.join(os.getcwd(), file_name)
+
+ with open(file_path, "wb") as f:
+ for chunk in req.iter_content(chunk_size=1024):
+ if not chunk:
+ continue
+ f.write(chunk)
+ f.flush()
+ return file_path
+
+ def download(self):
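+        # Name the download after the URL's extension; if it is an installer
+        # package, extract it into ./thing.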
+ url = self.config["thing_url"]
+ fn = "thing." + url.split(".")[-1]
+ self.downloadFile(url=url, file_name=fn)
+ if mozinstall.is_installer(fn):
+ self.install_dir = mozinstall.install(fn, "thing")
+ else:
+ self.install_dir = ""
+
+ def run_thing(self):
+ thing = os.path.abspath(
+ os.path.join(self.install_dir, self.config["thing_to_run"])
+ )
+ # thing_args is a LockedTuple, which mozprocess doesn't like
+ args = list(self.config["thing_args"])
+ timeout = self.config["run_for"]
+
+ self.log(f"Running {thing} with args {args}")
+ p = ProcessHandler(
+ thing,
+ args=args,
+ shell=False,
+ storeOutput=True,
+ kill_on_timeout=True,
+ stream=False,
+ )
+ p.run(timeout)
+ # Wait for the timeout + a grace period (to make sure we don't interrupt
+ # process tear down).
+        # Without this, the script could hang.
+ p.wait(timeout + 10)
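+        # Timing out is the success case here: it means the thing survived the
+        # whole window. Exiting early is treated as a crash.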
+ if not p.timedOut:
+ # It crashed, oh no!
+ self.critical(
+ f"TEST-UNEXPECTED-FAIL: {thing} did not run for {timeout} seconds"
+ )
+ self.critical("Output was:")
+            for line in p.output:
+                self.critical(line)
+ self.fatal("fail")
+ else:
+ self.info(f"PASS: {thing} ran successfully for {timeout} seconds")
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ crashit = DoesItCrash()
+ crashit.run_and_exit()
diff --git a/testing/mozharness/scripts/firefox_ui_tests.py b/testing/mozharness/scripts/firefox_ui_tests.py
new file mode 100644
index 0000000000..7b05d0ca5c
--- /dev/null
+++ b/testing/mozharness/scripts/firefox_ui_tests.py
@@ -0,0 +1,299 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+
+import copy
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.python import PreScriptAction
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.vcstools import VCSToolsScript
+
+# General command line arguments for Firefox ui tests
+firefox_ui_tests_config_options = (
+ [
+ [
+ ["--allow-software-gl-layers"],
+ {
+ "action": "store_true",
+ "dest": "allow_software_gl_layers",
+ "default": False,
+ "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL "
+ "compositor.",
+ },
+ ],
+ [
+ ["--dry-run"],
+ {
+ "dest": "dry_run",
+ "default": False,
+ "help": "Only show what was going to be tested.",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "dest": "e10s",
+ "action": "store_false",
+ "default": True,
+ "help": "Disable multi-process (e10s) mode when running tests.",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "dest": "disable_fission",
+ "action": "store_true",
+ "default": False,
+ "help": "Disable fission mode when running tests.",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "dest": "extra_prefs",
+ "action": "append",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ [
+ ["--symbols-path=SYMBOLS_PATH"],
+ {
+ "dest": "symbols_path",
+ "help": "absolute path to directory containing breakpad "
+ "symbols, or the url of a zip file containing symbols.",
+ },
+ ],
+ ]
+ + copy.deepcopy(testing_config_options)
+ + copy.deepcopy(code_coverage_config_options)
+)
+
+
+class FirefoxUIFunctionalTests(TestingMixin, VCSToolsScript, CodeCoverageMixin):
+ def __init__(
+ self,
+ config_options=None,
+ all_actions=None,
+ default_actions=None,
+ *args,
+ **kwargs
+ ):
+ config_options = config_options or firefox_ui_tests_config_options
+ actions = [
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ "uninstall",
+ ]
+
+ super(FirefoxUIFunctionalTests, self).__init__(
+ config_options=config_options,
+ all_actions=all_actions or actions,
+ default_actions=default_actions or actions,
+ *args,
+ **kwargs
+ )
+
+ # Code which runs in automation has to include the following properties
+ self.binary_path = self.config.get("binary_path")
+ self.installer_path = self.config.get("installer_path")
+ self.installer_url = self.config.get("installer_url")
+ self.test_packages_url = self.config.get("test_packages_url")
+ self.test_url = self.config.get("test_url")
+
+ if not self.test_url and not self.test_packages_url:
+ self.fatal("You must use --test-url, or --test-packages-url")
+
+ @PreScriptAction("create-virtualenv")
+ def _pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+
+ requirements = os.path.join(
+ dirs["abs_test_install_dir"], "config", "firefox_ui_requirements.txt"
+ )
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
+
+ def download_and_extract(self):
+ """Override method from TestingMixin for more specific behavior."""
+ extract_dirs = [
+ "config/*",
+ "firefox-ui/*",
+ "marionette/*",
+ "mozbase/*",
+ "tools/mozterm/*",
+ "tools/wptserve/*",
+ "tools/wpt_third_party/*",
+ "mozpack/*",
+ "mozbuild/*",
+ ]
+ super(FirefoxUIFunctionalTests, self).download_and_extract(
+ extract_dirs=extract_dirs
+ )
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+
+ abs_dirs = super(FirefoxUIFunctionalTests, self).query_abs_dirs()
+ abs_tests_install_dir = os.path.join(abs_dirs["abs_work_dir"], "tests")
+
+ dirs = {
+ "abs_blob_upload_dir": os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ ),
+ "abs_fxui_dir": os.path.join(abs_tests_install_dir, "firefox-ui"),
+ "abs_fxui_manifest_dir": os.path.join(
+ abs_tests_install_dir,
+ "firefox-ui",
+ "tests",
+ "testing",
+ "firefox-ui",
+ "tests",
+ ),
+ "abs_test_install_dir": abs_tests_install_dir,
+ }
+
+ for key in dirs:
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+
+ return self.abs_dirs
+
+ def query_harness_args(self, extra_harness_config_options=None):
+ """Collects specific test related command line arguments.
+
+        Subclasses should override this method to add their own specific arguments.
+ """
+ config_options = extra_harness_config_options or []
+
+ args = []
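+        # Boolean configs become bare flags; everything else is emitted as an
+        # "--option value" pair.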
+ for option in config_options:
+ dest = option[1]["dest"]
+ name = self.config.get(dest)
+
+ if name:
+                if isinstance(name, bool):
+ args.append(option[0][0])
+ else:
+ args.extend([option[0][0], self.config[dest]])
+
+ return args
+
+ def run_test(self, binary_path, env=None, marionette_port=2828):
+ """All required steps for running the tests against an installer."""
+ dirs = self.query_abs_dirs()
+
+ # Import the harness to retrieve the location of the cli scripts
+ import firefox_ui_harness
+
+ cmd = [
+ self.query_python_path(),
+ os.path.join(
+ os.path.dirname(firefox_ui_harness.__file__), "cli_functional.py"
+ ),
+ "--binary",
+ binary_path,
+ "--address",
+ "localhost:{}".format(marionette_port),
+ # Resource files to serve via local webserver
+ "--server-root",
+ os.path.join(dirs["abs_fxui_dir"], "resources"),
+ # Use the work dir to get temporary data stored
+ "--workspace",
+ dirs["abs_work_dir"],
+ # logging options
+ "--gecko-log=-", # output from the gecko process redirected to stdout
+ "--log-raw=-", # structured log for output parser redirected to stdout
+ # Enable tracing output to log transmission protocol
+ "-vv",
+ ]
+
+ # Collect all pass-through harness options to the script
+ cmd.extend(self.query_harness_args())
+
+ if not self.config.get("e10s"):
+ cmd.append("--disable-e10s")
+
+ if self.config.get("disable_fission"):
+ cmd.append("--disable-fission")
+
+ cmd.extend(["--setpref={}".format(p) for p in self.config.get("extra_prefs")])
+
+ if self.symbols_url:
+ cmd.extend(["--symbols-path", self.symbols_url])
+
+ parser = StructuredOutputParser(
+ config=self.config, log_obj=self.log_obj, strict=False
+ )
+
+ # Add the tests to run
+ cmd.append(
+ os.path.join(dirs["abs_fxui_manifest_dir"], "functional", "manifest.ini")
+ )
+
+ # Set further environment settings
+ env = env or self.query_env()
+ env.update({"MINIDUMP_SAVE_PATH": dirs["abs_blob_upload_dir"]})
+ if self.query_minidump_stackwalk():
+ env.update({"MINIDUMP_STACKWALK": self.minidump_stackwalk_path})
+ env["RUST_BACKTRACE"] = "full"
+
+ # If code coverage is enabled, set GCOV_PREFIX and JS_CODE_COVERAGE_OUTPUT_DIR
+ # env variables
+ if self.config.get("code_coverage"):
+ env["GCOV_PREFIX"] = self.gcov_dir
+ env["JS_CODE_COVERAGE_OUTPUT_DIR"] = self.jsvm_dir
+
+ if self.config["allow_software_gl_layers"]:
+ env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"
+
+ return_code = self.run_command(
+ cmd,
+ cwd=dirs["abs_fxui_dir"],
+ output_timeout=1000,
+ output_parser=parser,
+ env=env,
+ )
+
+ tbpl_status, log_level, summary = parser.evaluate_parser(return_code)
+ self.record_status(tbpl_status, level=log_level)
+
+ return return_code
+
+ @PreScriptAction("run-tests")
+ def _pre_run_tests(self, action):
+ if not self.installer_path and not self.installer_url:
+ self.critical(
+ "Please specify an installer via --installer-path or --installer-url."
+ )
+ sys.exit(1)
+
+ def run_tests(self):
+ """Run all the tests"""
+ return self.run_test(
+ binary_path=self.binary_path,
+ env=self.query_env(),
+ )
+
+
+if __name__ == "__main__":
+ myScript = FirefoxUIFunctionalTests()
+ myScript.run_and_exit()
diff --git a/testing/mozharness/scripts/fx_desktop_build.py b/testing/mozharness/scripts/fx_desktop_build.py
new file mode 100755
index 0000000000..93f46b34ca
--- /dev/null
+++ b/testing/mozharness/scripts/fx_desktop_build.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""fx_desktop_build.py.
+
+Script harness to build nightly Firefox within Mozilla's build environment
+and on developer machines alike.
+
+author: Jordan Lund
+
+"""
+
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+import mozharness.base.script as script
+from mozharness.mozilla.building.buildbase import (
+ BUILD_BASE_CONFIG_OPTIONS,
+ BuildingConfig,
+ BuildScript,
+)
+
+
+class FxDesktopBuild(BuildScript, object):
+ def __init__(self):
+ buildscript_kwargs = {
+ "config_options": BUILD_BASE_CONFIG_OPTIONS,
+ "all_actions": [
+ "get-secrets",
+ "clobber",
+ "build",
+ "static-analysis-autotest",
+ "valgrind-test",
+ "multi-l10n",
+ "package-source",
+ ],
+ "require_config_file": True,
+ # Default configuration
+ "config": {
+ "is_automation": True,
+ "debug_build": False,
+ # nightly stuff
+ "nightly_build": False,
+ # Seed all clones with mozilla-unified. This ensures subsequent
+ # jobs have a minimal `hg pull`.
+ "clone_upstream_url": "https://hg.mozilla.org/mozilla-unified",
+ "repo_base": "https://hg.mozilla.org",
+ "build_resources_path": "%(upload_path)s/build_resources.json",
+ "nightly_promotion_branches": ["mozilla-central", "mozilla-aurora"],
+ # try will overwrite these
+ "clone_with_purge": False,
+ "clone_by_revision": False,
+ "virtualenv_modules": [
+ "requests==2.8.1",
+ ],
+ "virtualenv_path": "venv",
+ },
+ "ConfigClass": BuildingConfig,
+ }
+ super(FxDesktopBuild, self).__init__(**buildscript_kwargs)
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(FxDesktopBuild, self).query_abs_dirs()
+
+ dirs = {
+            # BuildFactories in factory.py refer to a 'build' dir on the slave,
+            # which contains all the source code and the objdir to compile.
+            # However, mozharness already creates a build dir for every run, so
+            # the 'build' dir that factory refers to is named 'src' here to keep
+            # the two separate: rather than '{mozharness_repo}/build/build/',
+            # we have '{mozharness_repo}/build/src/'.
+ "abs_obj_dir": os.path.join(abs_dirs["abs_work_dir"], self._query_objdir()),
+ "upload_path": self.config["upload_env"]["UPLOAD_PATH"],
+ }
+ abs_dirs.update(dirs)
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ # Actions {{{2
+
+ @script.PreScriptRun
+ def suppress_windows_modal_dialogs(self, *args, **kwargs):
+ if self._is_windows():
+ # Suppress Windows modal dialogs to avoid hangs
+ import ctypes
+
+ ctypes.windll.kernel32.SetErrorMode(0x8001)
+
+
+if __name__ == "__main__":
+ fx_desktop_build = FxDesktopBuild()
+ fx_desktop_build.run_and_exit()
diff --git a/testing/mozharness/scripts/l10n_bumper.py b/testing/mozharness/scripts/l10n_bumper.py
new file mode 100755
index 0000000000..e597d5386d
--- /dev/null
+++ b/testing/mozharness/scripts/l10n_bumper.py
@@ -0,0 +1,380 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+""" l10n_bumper.py
+
+    Updates a gecko repo with up-to-date changesets from l10n.mozilla.org.
+
+ Specifically, it updates l10n-changesets.json which is used by mobile releases.
+
+    This is to allow `mach taskgraph` to reference specific l10n revisions
+    without having to resort to task.extra or command-line base64 JSON hacks.
+"""
+import codecs
+import os
+import pprint
+import sys
+import time
+
+try:
+ import simplejson as json
+
+ assert json
+except ImportError:
+ import json
+
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.errors import HgErrorList
+from mozharness.base.log import FATAL
+from mozharness.base.vcs.vcsbase import VCSScript
+
+
+class L10nBumper(VCSScript):
+ config_options = [
+ [
+ [
+ "--ignore-closed-tree",
+ ],
+ {
+ "action": "store_true",
+ "dest": "ignore_closed_tree",
+ "default": False,
+ "help": "Bump l10n changesets on a closed tree.",
+ },
+ ],
+ [
+ [
+ "--build",
+ ],
+ {
+ "action": "store_false",
+ "dest": "dontbuild",
+ "default": True,
+ "help": "Trigger new builds on push.",
+ },
+ ],
+ ]
+
+ def __init__(self, require_config_file=True):
+ super(L10nBumper, self).__init__(
+ all_actions=[
+ "clobber",
+ "check-treestatus",
+ "checkout-gecko",
+ "bump-changesets",
+ "push",
+ "push-loop",
+ ],
+ default_actions=[
+ "push-loop",
+ ],
+ require_config_file=require_config_file,
+ config_options=self.config_options,
+ # Default config options
+ config={
+ "treestatus_base_url": "https://treestatus.mozilla-releng.net",
+ "log_max_rotate": 99,
+ },
+ )
+
+ # Helper methods {{{1
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+
+ abs_dirs = super(L10nBumper, self).query_abs_dirs()
+
+ abs_dirs.update(
+ {
+ "gecko_local_dir": os.path.join(
+ abs_dirs["abs_work_dir"],
+ self.config.get(
+ "gecko_local_dir",
+ os.path.basename(self.config["gecko_pull_url"]),
+ ),
+ ),
+ }
+ )
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def hg_commit(self, path, repo_path, message):
+ """
+ Commits changes in repo_path, with specified user and commit message
+ """
+ user = self.config["hg_user"]
+ hg = self.query_exe("hg", return_type="list")
+ env = self.query_env(partial_env={"LANG": "en_US.UTF-8"})
+ cmd = hg + ["add", path]
+ self.run_command(cmd, cwd=repo_path, env=env)
+ cmd = hg + ["commit", "-u", user, "-m", message]
+ self.run_command(cmd, cwd=repo_path, env=env)
+
+ def hg_push(self, repo_path):
+ hg = self.query_exe("hg", return_type="list")
+ command = hg + [
+ "push",
+ "-e",
+ "ssh -oIdentityFile=%s -l %s"
+ % (
+ self.config["ssh_key"],
+ self.config["ssh_user"],
+ ),
+ "-r",
+ ".",
+ self.config["gecko_push_url"],
+ ]
+ status = self.run_command(command, cwd=repo_path, error_list=HgErrorList)
+ if status != 0:
+ # We failed; get back to a known state so we can either retry
+ # or fail out and continue later.
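+            # Strip the outgoing commit(s), revert local modifications, and
+            # purge untracked files so the next attempt starts clean.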
+ self.run_command(
+ hg
+ + ["--config", "extensions.mq=", "strip", "--no-backup", "outgoing()"],
+ cwd=repo_path,
+ )
+ self.run_command(hg + ["up", "-C"], cwd=repo_path)
+ self.run_command(
+ hg + ["--config", "extensions.purge=", "purge", "--all"], cwd=repo_path
+ )
+ return False
+ return True
+
+ def _read_json(self, path):
+ contents = self.read_from_file(path)
+ try:
+ json_contents = json.loads(contents)
+ return json_contents
+ except ValueError:
+ self.error("%s is invalid json!" % path)
+
+ def _read_version(self, path):
+ contents = self.read_from_file(path).split("\n")[0]
+ return contents.split(".")
+
+ def _build_locale_map(self, old_contents, new_contents):
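+        # Map each locale to what changed: "removed", the new revision, or the
+        # new platforms list. Used only for the commit message.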
+ locale_map = {}
+ for key in old_contents:
+ if key not in new_contents:
+ locale_map[key] = "removed"
+ for k, v in new_contents.items():
+ if old_contents.get(k, {}).get("revision") != v["revision"]:
+ locale_map[k] = v["revision"]
+ elif old_contents.get(k, {}).get("platforms") != v["platforms"]:
+ locale_map[k] = v["platforms"]
+ return locale_map
+
+ def _build_platform_dict(self, bump_config):
+ dirs = self.query_abs_dirs()
+ repo_path = dirs["gecko_local_dir"]
+ platform_dict = {}
+ ignore_config = bump_config.get("ignore_config", {})
+ for platform_config in bump_config["platform_configs"]:
+ path = os.path.join(repo_path, platform_config["path"])
+ self.info(
+ "Reading %s for %s locales..." % (path, platform_config["platforms"])
+ )
+ contents = self.read_from_file(path)
+ for locale in contents.splitlines():
+ # locale is 1st word in line in shipped-locales
+ if platform_config.get("format") == "shipped-locales":
+ locale = locale.split(" ")[0]
+ existing_platforms = set(
+ platform_dict.get(locale, {}).get("platforms", [])
+ )
+ platforms = set(platform_config["platforms"])
+ ignore_platforms = set(ignore_config.get(locale, []))
+ platforms = (platforms | existing_platforms) - ignore_platforms
+ platform_dict[locale] = {"platforms": sorted(list(platforms))}
+ self.info("Built platform_dict:\n%s" % pprint.pformat(platform_dict))
+ return platform_dict
+
+ def _build_revision_dict(self, bump_config, version_list):
+ self.info("Building revision dict...")
+ platform_dict = self._build_platform_dict(bump_config)
+ revision_dict = {}
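+        # If a revision_url is configured, fetch the locale -> revision mapping
+        # from it; otherwise pin every locale to the "default" revision.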
+ if bump_config.get("revision_url"):
+ repl_dict = {
+ "MAJOR_VERSION": version_list[0],
+ "COMBINED_MAJOR_VERSION": str(
+ int(version_list[0]) + int(version_list[1])
+ ),
+ }
+
+ url = bump_config["revision_url"] % repl_dict
+ path = self.download_file(url, error_level=FATAL)
+ revision_info = self.read_from_file(path)
+ self.info("Got %s" % revision_info)
+ for line in revision_info.splitlines():
+ locale, revision = line.split(" ")
+ if locale in platform_dict:
+ revision_dict[locale] = platform_dict[locale]
+ revision_dict[locale]["revision"] = revision
+ else:
+ for k, v in platform_dict.items():
+ v["revision"] = "default"
+ revision_dict[k] = v
+ self.info("revision_dict:\n%s" % pprint.pformat(revision_dict))
+ return revision_dict
+
+ def build_commit_message(self, name, locale_map):
+ comments = ""
+ approval_str = "r=release a=l10n-bump"
+ for locale, revision in sorted(locale_map.items()):
+ comments += "%s -> %s\n" % (locale, revision)
+ if self.config["dontbuild"]:
+ approval_str += " DONTBUILD"
+ if self.config["ignore_closed_tree"]:
+ approval_str += " CLOSED TREE"
+ message = "no bug - Bumping %s %s\n\n" % (name, approval_str)
+ message += comments
+ message = message.encode("utf-8")
+ return message
+
+ def query_treestatus(self):
+ "Return True if we can land based on treestatus"
+ c = self.config
+ dirs = self.query_abs_dirs()
+ tree = c.get(
+ "treestatus_tree", os.path.basename(c["gecko_pull_url"].rstrip("/"))
+ )
+ treestatus_url = "%s/trees/%s" % (c["treestatus_base_url"], tree)
+ treestatus_json = os.path.join(dirs["abs_work_dir"], "treestatus.json")
+ if not os.path.exists(dirs["abs_work_dir"]):
+ self.mkdir_p(dirs["abs_work_dir"])
+ self.rmtree(treestatus_json)
+
+ self.run_command(
+ ["curl", "--retry", "4", "-o", treestatus_json, treestatus_url],
+ throw_exception=True,
+ )
+
+ treestatus = self._read_json(treestatus_json)
+ if treestatus["result"]["status"] != "closed":
+ self.info(
+ "treestatus is %s - assuming we can land"
+ % repr(treestatus["result"]["status"])
+ )
+ return True
+
+ return False
+
+ # Actions {{{1
+ def check_treestatus(self):
+ if not self.config["ignore_closed_tree"] and not self.query_treestatus():
+ self.info("breaking early since treestatus is closed")
+ sys.exit(0)
+
+ def checkout_gecko(self):
+ c = self.config
+ dirs = self.query_abs_dirs()
+ dest = dirs["gecko_local_dir"]
+ repos = [
+ {
+ "repo": c["gecko_pull_url"],
+ "tag": c.get("gecko_tag", "default"),
+ "dest": dest,
+ "vcs": "hg",
+ }
+ ]
+ self.vcs_checkout_repos(repos)
+
+ def bump_changesets(self):
+ dirs = self.query_abs_dirs()
+ repo_path = dirs["gecko_local_dir"]
+ version_path = os.path.join(repo_path, self.config["version_path"])
+ changes = False
+ version_list = self._read_version(version_path)
+ for bump_config in self.config["bump_configs"]:
+ path = os.path.join(repo_path, bump_config["path"])
+ # For now, assume format == 'json'. When we add desktop support,
+ # we may need to add flatfile support
+ if os.path.exists(path):
+ old_contents = self._read_json(path)
+ else:
+ old_contents = {}
+
+ new_contents = self._build_revision_dict(bump_config, version_list)
+
+ if new_contents == old_contents:
+ continue
+ # super basic sanity check
+ if not isinstance(new_contents, dict) or len(new_contents) < 5:
+ self.error(
+ "Cowardly refusing to land a broken-seeming changesets file!"
+ )
+ continue
+
+ # Write to disk
+ content_string = json.dumps(
+ new_contents,
+ sort_keys=True,
+ indent=4,
+ separators=(",", ": "),
+ )
+ fh = codecs.open(path, encoding="utf-8", mode="w+")
+ fh.write(content_string + "\n")
+ fh.close()
+
+ locale_map = self._build_locale_map(old_contents, new_contents)
+
+ # Commit
+ message = self.build_commit_message(bump_config["name"], locale_map)
+ self.hg_commit(path, repo_path, message)
+ changes = True
+ return changes
+
+ def push(self):
+ dirs = self.query_abs_dirs()
+ repo_path = dirs["gecko_local_dir"]
+ return self.hg_push(repo_path)
+
+ def push_loop(self):
+ max_retries = 5
+ for _ in range(max_retries):
+ changed = False
+ if not self.config["ignore_closed_tree"] and not self.query_treestatus():
+ # Tree is closed; exit early to avoid a bunch of wasted time
+ self.info("breaking early since treestatus is closed")
+ break
+
+ self.checkout_gecko()
+ if self.bump_changesets():
+ changed = True
+
+ if not changed:
+ # Nothing changed, we're all done
+ self.info("No changes - all done")
+ break
+
+ if self.push():
+ # We did it! Hurray!
+ self.info("Great success!")
+ break
+ # If we're here, then the push failed. It also stripped any
+ # outgoing commits, so we should be in a pristine state again
+ # Empty our local cache of manifests so they get loaded again next
+ # time through this loop. This makes sure we get fresh upstream
+ # manifests, and avoids problems like bug 979080
+ self.device_manifests = {}
+
+ # Sleep before trying again
+ self.info("Sleeping 60 before trying again")
+ time.sleep(60)
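+        # The for/else "else" clause runs only if the loop above was never
+        # broken out of, i.e. every retry was exhausted.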
+ else:
+ self.fatal("Didn't complete successfully (hit max_retries)")
+
+ # touch status file for nagios
+ dirs = self.query_abs_dirs()
+ status_path = os.path.join(dirs["base_work_dir"], self.config["status_path"])
+ self._touch_file(status_path)
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ bumper = L10nBumper()
+ bumper.run_and_exit()
diff --git a/testing/mozharness/scripts/marionette.py b/testing/mozharness/scripts/marionette.py
new file mode 100755
index 0000000000..8052927d2a
--- /dev/null
+++ b/testing/mozharness/scripts/marionette.py
@@ -0,0 +1,455 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import copy
+import json
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.errors import BaseErrorList, TarErrorList
+from mozharness.base.log import INFO
+from mozharness.base.script import PreScriptAction
+from mozharness.base.transfer import TransferMixin
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.errors import HarnessErrorList, LogcatErrorList
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.testing.unittest import TestSummaryOutputParserHelper
+
+
+class MarionetteTest(TestingMixin, MercurialScript, TransferMixin, CodeCoverageMixin):
+ config_options = (
+ [
+ [
+ ["--application"],
+ {
+ "action": "store",
+ "dest": "application",
+ "default": None,
+ "help": "application name of binary",
+ },
+ ],
+ [
+ ["--app-arg"],
+ {
+ "action": "store",
+ "dest": "app_arg",
+ "default": None,
+ "help": "Optional command-line argument to pass to the browser",
+ },
+ ],
+ [
+ ["--marionette-address"],
+ {
+ "action": "store",
+ "dest": "marionette_address",
+ "default": None,
+ "help": "The host:port of the Marionette server running inside Gecko. "
+ "Unused for emulator testing",
+ },
+ ],
+ [
+ ["--emulator"],
+ {
+ "action": "store",
+ "type": "choice",
+ "choices": ["arm", "x86"],
+ "dest": "emulator",
+ "default": None,
+ "help": "Use an emulator for testing",
+ },
+ ],
+ [
+ ["--test-manifest"],
+ {
+ "action": "store",
+ "dest": "test_manifest",
+ "default": "unit-tests.ini",
+ "help": "Path to test manifest to run relative to the Marionette "
+ "tests directory",
+ },
+ ],
+ [
+ ["--total-chunks"],
+ {
+ "action": "store",
+ "dest": "total_chunks",
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--this-chunk"],
+ {
+ "action": "store",
+ "dest": "this_chunk",
+ "help": "Number of this chunk",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ [
+ ["--headless"],
+ {
+ "action": "store_true",
+ "dest": "headless",
+ "default": False,
+ "help": "Run tests in headless mode.",
+ },
+ ],
+ [
+ ["--headless-width"],
+ {
+ "action": "store",
+ "dest": "headless_width",
+ "default": "1600",
+ "help": "Specify headless virtual screen width (default: 1600).",
+ },
+ ],
+ [
+ ["--headless-height"],
+ {
+ "action": "store",
+ "dest": "headless_height",
+ "default": "1200",
+ "help": "Specify headless virtual screen height (default: 1200).",
+ },
+ ],
+ [
+ ["--allow-software-gl-layers"],
+ {
+ "action": "store_true",
+ "dest": "allow_software_gl_layers",
+ "default": False,
+ "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor.", # NOQA: E501
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_true",
+ "dest": "disable_fission",
+ "default": False,
+ "help": "Run the browser without fission enabled",
+ },
+ ],
+ ]
+ + copy.deepcopy(testing_config_options)
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
+ repos = []
+
+ def __init__(self, require_config_file=False):
+ super(MarionetteTest, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "clobber",
+ "pull",
+ "download-and-extract",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ default_actions=[
+ "clobber",
+ "pull",
+ "download-and-extract",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ require_config_file=require_config_file,
+ config={"require_test_zip": True},
+ )
+
+ # these are necessary since self.config is read only
+ c = self.config
+ self.installer_url = c.get("installer_url")
+ self.installer_path = c.get("installer_path")
+ self.binary_path = c.get("binary_path")
+ self.test_url = c.get("test_url")
+ self.test_packages_url = c.get("test_packages_url")
+
+ self.test_suite = self._get_test_suite(c.get("emulator"))
+ if self.test_suite not in self.config["suite_definitions"]:
+ self.fatal("{} is not defined in the config!".format(self.test_suite))
+
+ if c.get("structured_output"):
+ self.parser_class = StructuredOutputParser
+ else:
+ self.parser_class = TestSummaryOutputParserHelper
+
+ def _pre_config_lock(self, rw_config):
+ super(MarionetteTest, self)._pre_config_lock(rw_config)
+ if not self.config.get("emulator") and not self.config.get(
+ "marionette_address"
+ ):
+ self.fatal(
+ "You need to specify a --marionette-address for non-emulator tests! "
+ "(Try --marionette-address localhost:2828 )"
+ )
+
+ def _query_tests_dir(self):
+ dirs = self.query_abs_dirs()
+ test_dir = self.config["suite_definitions"][self.test_suite]["testsdir"]
+
+ return os.path.join(dirs["abs_test_install_dir"], test_dir)
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(MarionetteTest, self).query_abs_dirs()
+ dirs = {}
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ dirs["abs_marionette_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "marionette", "harness", "marionette_harness"
+ )
+ dirs["abs_marionette_tests_dir"] = os.path.join(
+ dirs["abs_test_install_dir"],
+ "marionette",
+ "tests",
+ "testing",
+ "marionette",
+ "harness",
+ "marionette_harness",
+ "tests",
+ )
+ dirs["abs_gecko_dir"] = os.path.join(abs_dirs["abs_work_dir"], "gecko")
+ dirs["abs_emulator_dir"] = os.path.join(abs_dirs["abs_work_dir"], "emulator")
+
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ @PreScriptAction("create-virtualenv")
+ def _configure_marionette_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+ requirements = os.path.join(
+ dirs["abs_test_install_dir"], "config", "marionette_requirements.txt"
+ )
+ if not os.path.isfile(requirements):
+ self.fatal(
+ "Could not find marionette requirements file: {}".format(requirements)
+ )
+
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
+
+ def _get_test_suite(self, is_emulator):
+ """
+ Determine which in tree options group to use and return the
+ appropriate key.
+ """
+ platform = "emulator" if is_emulator else "desktop"
+        # Currently, running marionette on an emulator means webapi tests.
+        # This method will need to change if that changes.
+ testsuite = "webapi" if is_emulator else "marionette"
+ return "{}_{}".format(testsuite, platform)
+
+ def download_and_extract(self):
+ super(MarionetteTest, self).download_and_extract()
+
+ if self.config.get("emulator"):
+ dirs = self.query_abs_dirs()
+
+ self.mkdir_p(dirs["abs_emulator_dir"])
+ tar = self.query_exe("tar", return_type="list")
+ self.run_command(
+ tar + ["zxf", self.installer_path],
+ cwd=dirs["abs_emulator_dir"],
+ error_list=TarErrorList,
+ halt_on_failure=True,
+ fatal_exit_code=3,
+ )
+
+ def install(self):
+ if self.config.get("emulator"):
+ self.info("Emulator tests; skipping.")
+ else:
+ super(MarionetteTest, self).install()
+
+ def run_tests(self):
+ """
+ Run the Marionette tests
+ """
+ dirs = self.query_abs_dirs()
+
+ raw_log_file = os.path.join(dirs["abs_blob_upload_dir"], "marionette_raw.log")
+ error_summary_file = os.path.join(
+ dirs["abs_blob_upload_dir"], "marionette_errorsummary.log"
+ )
+ html_report_file = os.path.join(dirs["abs_blob_upload_dir"], "report.html")
+
+ config_fmt_args = {
+ # emulator builds require a longer timeout
+ "timeout": 60000 if self.config.get("emulator") else 10000,
+ "profile": os.path.join(dirs["abs_work_dir"], "profile"),
+ "xml_output": os.path.join(dirs["abs_work_dir"], "output.xml"),
+ "html_output": os.path.join(dirs["abs_blob_upload_dir"], "output.html"),
+ "logcat_dir": dirs["abs_work_dir"],
+ "emulator": "arm",
+ "symbols_path": self.symbols_path,
+ "binary": self.binary_path,
+ "address": self.config.get("marionette_address"),
+ "raw_log_file": raw_log_file,
+ "error_summary_file": error_summary_file,
+ "html_report_file": html_report_file,
+ "gecko_log": dirs["abs_blob_upload_dir"],
+ "this_chunk": self.config.get("this_chunk", 1),
+ "total_chunks": self.config.get("total_chunks", 1),
+ }
+
+ self.info("The emulator type: %s" % config_fmt_args["emulator"])
+ # build the marionette command arguments
+ python = self.query_python_path("python")
+
+ cmd = [python, "-u", os.path.join(dirs["abs_marionette_dir"], "runtests.py")]
+
+ manifest = os.path.join(
+ dirs["abs_marionette_tests_dir"], self.config["test_manifest"]
+ )
+
+ if self.config.get("app_arg"):
+ config_fmt_args["app_arg"] = self.config["app_arg"]
+
+ cmd.extend(["--setpref={}".format(p) for p in self.config["extra_prefs"]])
+
+ cmd.append("--gecko-log=-")
+
+ if self.config.get("structured_output"):
+ cmd.append("--log-raw=-")
+
+ if self.config["disable_fission"]:
+ cmd.append("--disable-fission")
+ cmd.extend(["--setpref=fission.autostart=false"])
+
+ for arg in self.config["suite_definitions"][self.test_suite]["options"]:
+ cmd.append(arg % config_fmt_args)
+
+ if self.mkdir_p(dirs["abs_blob_upload_dir"]) == -1:
+ # Make sure that the logging directory exists
+ self.fatal("Could not create blobber upload directory")
+
+ test_paths = json.loads(os.environ.get("MOZHARNESS_TEST_PATHS", '""'))
+
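+        # When MOZHARNESS_TEST_PATHS is set, run only those test paths instead
+        # of the configured manifest.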
+ if test_paths and "marionette" in test_paths:
+ paths = [
+ os.path.join(dirs["abs_test_install_dir"], "marionette", "tests", p)
+ for p in test_paths["marionette"]
+ ]
+ cmd.extend(paths)
+ else:
+ cmd.append(manifest)
+
+ try_options, try_tests = self.try_args("marionette")
+ cmd.extend(self.query_tests_args(try_tests, str_format_values=config_fmt_args))
+
+ env = {}
+ if self.query_minidump_stackwalk():
+ env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
+ env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "full"
+
+ if self.config["allow_software_gl_layers"]:
+ env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"
+
+ if self.config["headless"]:
+ env["MOZ_HEADLESS"] = "1"
+ env["MOZ_HEADLESS_WIDTH"] = self.config["headless_width"]
+ env["MOZ_HEADLESS_HEIGHT"] = self.config["headless_height"]
+
+ if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
+ self.mkdir_p(env["MOZ_UPLOAD_DIR"])
+
+ # Causes Firefox to crash when using non-local connections.
+ env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
+
+ env = self.query_env(partial_env=env)
+
+ try:
+ cwd = self._query_tests_dir()
+ except Exception as e:
+ self.fatal(
+ "Don't know how to run --test-suite '{0}': {1}!".format(
+ self.test_suite, e
+ )
+ )
+
+ marionette_parser = self.parser_class(
+ config=self.config,
+ log_obj=self.log_obj,
+ error_list=BaseErrorList + HarnessErrorList,
+ strict=False,
+ )
+ return_code = self.run_command(
+ cmd, cwd=cwd, output_timeout=1000, output_parser=marionette_parser, env=env
+ )
+ level = INFO
+ tbpl_status, log_level, summary = marionette_parser.evaluate_parser(
+ return_code=return_code
+ )
+ marionette_parser.append_tinderboxprint_line("marionette")
+
+ qemu = os.path.join(dirs["abs_work_dir"], "qemu.log")
+ if os.path.isfile(qemu):
+ self.copyfile(qemu, os.path.join(dirs["abs_blob_upload_dir"], "qemu.log"))
+
+ # dump logcat output if there were failures
+ if self.config.get("emulator"):
+ if (
+ marionette_parser.failed != "0"
+ or "T-FAIL" in marionette_parser.tsummary
+ ):
+ logcat = os.path.join(dirs["abs_work_dir"], "emulator-5554.log")
+ if os.access(logcat, os.F_OK):
+ self.info("dumping logcat")
+ self.run_command(["cat", logcat], error_list=LogcatErrorList)
+ else:
+ self.info("no logcat file found")
+ else:
+ # .. or gecko.log if it exists
+ gecko_log = os.path.join(self.config["base_work_dir"], "gecko.log")
+ if os.access(gecko_log, os.F_OK):
+ self.info("dumping gecko.log")
+ self.run_command(["cat", gecko_log])
+ self.rmtree(gecko_log)
+ else:
+ self.info("gecko.log not found")
+
+ marionette_parser.print_summary("marionette")
+
+ self.log(
+ "Marionette exited with return code %s: %s" % (return_code, tbpl_status),
+ level=level,
+ )
+ self.record_status(tbpl_status)
+
+
+if __name__ == "__main__":
+ marionetteTest = MarionetteTest()
+ marionetteTest.run_and_exit()
diff --git a/testing/mozharness/scripts/multil10n.py b/testing/mozharness/scripts/multil10n.py
new file mode 100755
index 0000000000..ae5c013fc7
--- /dev/null
+++ b/testing/mozharness/scripts/multil10n.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""multil10n.py
+
+"""
+
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.mozilla.l10n.multi_locale_build import MultiLocaleBuild
+
+if __name__ == "__main__":
+ multi_locale_build = MultiLocaleBuild()
+ multi_locale_build.run_and_exit()
diff --git a/testing/mozharness/scripts/openh264_build.py b/testing/mozharness/scripts/openh264_build.py
new file mode 100755
index 0000000000..1c6089e3f1
--- /dev/null
+++ b/testing/mozharness/scripts/openh264_build.py
@@ -0,0 +1,472 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+import glob
+import os
+import re
+import subprocess
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+# import the guts
+import mozharness
+from mozharness.base.log import DEBUG, ERROR, FATAL
+from mozharness.base.transfer import TransferMixin
+from mozharness.base.vcs.vcsbase import VCSScript
+from mozharness.mozilla.tooltool import TooltoolMixin
+
+external_tools_path = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
+ "external_tools",
+)
+
+
+class OpenH264Build(TransferMixin, VCSScript, TooltoolMixin):
+ all_actions = [
+ "clobber",
+ "get-tooltool",
+ "checkout-sources",
+ "build",
+ "test",
+ "package",
+ "dump-symbols",
+ ]
+
+ default_actions = [
+ "get-tooltool",
+ "checkout-sources",
+ "build",
+ "package",
+ "dump-symbols",
+ ]
+
+ config_options = [
+ [
+ ["--repo"],
+ {
+ "dest": "repo",
+ "help": "OpenH264 repository to use",
+ "default": "https://github.com/dminor/openh264.git",
+ },
+ ],
+ [
+ ["--rev"],
+ {"dest": "revision", "help": "revision to checkout", "default": "master"},
+ ],
+ [
+ ["--debug"],
+ {
+ "dest": "debug_build",
+ "action": "store_true",
+ "help": "Do a debug build",
+ },
+ ],
+ [
+ ["--arch"],
+ {
+ "dest": "arch",
+ "help": "Arch type to use (x64, x86, arm, or aarch64)",
+ },
+ ],
+ [
+ ["--os"],
+ {
+ "dest": "operating_system",
+ "help": "Specify the operating system to build for",
+ },
+ ],
+ [
+ ["--branch"],
+ {
+ "dest": "branch",
+ "help": "dummy option",
+ },
+ ],
+ [
+ ["--build-pool"],
+ {
+ "dest": "build_pool",
+ "help": "dummy option",
+ },
+ ],
+ ]
+
+ def __init__(
+ self,
+ require_config_file=False,
+ config={},
+ all_actions=all_actions,
+ default_actions=default_actions,
+ ):
+
+ # Default configuration
+ default_config = {
+ "debug_build": False,
+ "upload_ssh_key": "~/.ssh/ffxbld_rsa",
+ "upload_ssh_user": "ffxbld",
+ "upload_ssh_host": "upload.ffxbld.productdelivery.prod.mozaws.net",
+ "upload_path_base": "/tmp/openh264",
+ }
+ default_config.update(config)
+
+ VCSScript.__init__(
+ self,
+ config_options=self.config_options,
+ require_config_file=require_config_file,
+ config=default_config,
+ all_actions=all_actions,
+ default_actions=default_actions,
+ )
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ dirs = super(OpenH264Build, self).query_abs_dirs()
+ dirs["abs_upload_dir"] = os.path.join(dirs["abs_work_dir"], "upload")
+ self.abs_dirs = dirs
+ return self.abs_dirs
+
+ def get_tooltool(self):
+ c = self.config
+ if not c.get("tooltool_manifest_file"):
+ self.info("Skipping tooltool fetching since no tooltool manifest")
+ return
+ dirs = self.query_abs_dirs()
+ self.mkdir_p(dirs["abs_work_dir"])
+ manifest = os.path.join(
+ dirs["abs_src_dir"],
+ "testing",
+ "mozharness",
+ "configs",
+ "openh264",
+ "tooltool-manifests",
+ c["tooltool_manifest_file"],
+ )
+ self.info("Getting tooltool files from manifest (%s)" % manifest)
+ try:
+ self.tooltool_fetch(
+ manifest=manifest,
+                output_dir=dirs["abs_work_dir"],
+ cache=c.get("tooltool_cache"),
+ )
+ except KeyError:
+ self.error("missing a required key.")
+
+ def query_package_name(self):
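+        # The package name encodes OS, bitness/arch, and revision, e.g.
+        # openh264-linux64-<revision>.zip.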
+ if self.config["arch"] in ("x64", "aarch64"):
+ bits = "64"
+ else:
+ bits = "32"
+ version = self.config["revision"]
+
+ if sys.platform in ("linux2", "linux"):
+ if self.config.get("operating_system") == "android":
+ return "openh264-android-{arch}-{version}.zip".format(
+ version=version, arch=self.config["arch"]
+ )
+ elif self.config.get("operating_system") == "darwin":
+ suffix = ""
+ if self.config["arch"] != "x64":
+ suffix = "-" + self.config["arch"]
+ return "openh264-macosx{bits}{suffix}-{version}.zip".format(
+ version=version, bits=bits, suffix=suffix
+ )
+ elif self.config["arch"] == "aarch64":
+ return "openh264-linux64-aarch64-{version}.zip".format(version=version)
+ else:
+ return "openh264-linux{bits}-{version}.zip".format(
+ version=version, bits=bits
+ )
+ elif sys.platform == "win32":
+ if self.config["arch"] == "aarch64":
+ return "openh264-win64-aarch64-{version}.zip".format(version=version)
+ else:
+ return "openh264-win{bits}-{version}.zip".format(
+ version=version, bits=bits
+ )
+ self.fatal("can't determine platform")
+
+ def query_make_params(self):
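+        # Translate our config (arch, OS, debug) into the make variables the
+        # openh264 build system expects.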
+ retval = []
+ if self.config["debug_build"]:
+ retval.append("BUILDTYPE=Debug")
+
+ if self.config["arch"] in ("x64", "aarch64"):
+ retval.append("ENABLE64BIT=Yes")
+ else:
+ retval.append("ENABLE64BIT=No")
+
+ if self.config["arch"] == "x86":
+ retval.append("ARCH=x86")
+ elif self.config["arch"] == "x64":
+ retval.append("ARCH=x86_64")
+ elif self.config["arch"] == "aarch64":
+ retval.append("ARCH=arm64")
+ else:
+ self.fatal("Unknown arch: {}".format(self.config["arch"]))
+
+ if "operating_system" in self.config:
+ retval.append("OS=%s" % self.config["operating_system"])
+ if self.config["operating_system"] == "android":
+ retval.append("TARGET=invalid")
+ retval.append("NDKLEVEL=%s" % self.config["min_sdk"])
+ retval.append("NDKROOT=%s/android-ndk" % os.environ["MOZ_FETCHES_DIR"])
+ retval.append("NDK_TOOLCHAIN_VERSION=clang")
+ if self.config["operating_system"] == "darwin":
+ retval.append("OS=darwin")
+
+ if self._is_windows():
+ retval.append("OS=msvc")
+ retval.append("CC=clang-cl")
+ retval.append("CXX=clang-cl")
+ if self.config["arch"] == "aarch64":
+ retval.append("CXX_LINK_O=-nologo --target=aarch64-windows-msvc -Fe$@")
+ else:
+ retval.append("CC=clang")
+ retval.append("CXX=clang++")
+
+ return retval
+
+ def query_upload_ssh_key(self):
+ return self.config["upload_ssh_key"]
+
+ def query_upload_ssh_host(self):
+ return self.config["upload_ssh_host"]
+
+ def query_upload_ssh_user(self):
+ return self.config["upload_ssh_user"]
+
+ def query_upload_ssh_path(self):
+ return "%s/%s" % (self.config["upload_path_base"], self.config["revision"])
+
+ def run_make(self, target, capture_output=False):
+ make = (
+ f"{os.environ['MOZ_FETCHES_DIR']}/mozmake/mozmake"
+ if sys.platform == "win32"
+ else "make"
+ )
+ cmd = [make, target] + self.query_make_params()
+ dirs = self.query_abs_dirs()
+ repo_dir = os.path.join(dirs["abs_work_dir"], "openh264")
+ env = None
+ if self.config.get("partial_env"):
+ env = self.query_env(self.config["partial_env"])
+ kwargs = dict(cwd=repo_dir, env=env)
+ if capture_output:
+ return self.get_output_from_command(cmd, **kwargs)
+ else:
+ return self.run_command(cmd, **kwargs)
+
+ def _git_checkout(self, repo, repo_dir, rev):
+ try:
+ # check=True makes a failed git command raise, so the cleanup
+ # below and the retry wrapper in checkout_sources() can trigger.
+ subprocess.run(["git", "clone", "-q", "--no-checkout", repo, repo_dir], check=True)
+ subprocess.run(["git", "checkout", "-q", "-f", f"{rev}^0"], cwd=repo_dir, check=True)
+ except Exception:
+ self.rmtree(repo_dir)
+ raise
+ return True
+
+ def checkout_sources(self):
+ repo = self.config["repo"]
+ rev = self.config["revision"]
+
+ dirs = self.query_abs_dirs()
+ repo_dir = os.path.join(dirs["abs_work_dir"], "openh264")
+
+ if self._is_windows():
+ # We don't have git on our windows builders, so download a zip
+ # package instead.
+ path = repo.replace(".git", "/archive/") + rev + ".zip"
+ self.download_file(path)
+ self.unzip(rev + ".zip", dirs["abs_work_dir"])
+ self.move(
+ os.path.join(dirs["abs_work_dir"], "openh264-" + rev),
+ os.path.join(dirs["abs_work_dir"], "openh264"),
+ )
+
+ # Retrieve in-tree version of gmp-api
+ self.copytree(
+ os.path.join(dirs["abs_src_dir"], "dom", "media", "gmp", "gmp-api"),
+ os.path.join(repo_dir, "gmp-api"),
+ )
+
+ # We need gas-preprocessor.pl for arm64 builds
+ if self.config["arch"] == "aarch64":
+ openh264_dir = os.path.join(dirs["abs_work_dir"], "openh264")
+ self.download_file(
+ (
+ "https://raw.githubusercontent.com/libav/"
+ "gas-preprocessor/c2bc63c96678d9739509e58"
+ "7aa30c94bdc0e636d/gas-preprocessor.pl"
+ ),
+ parent_dir=openh264_dir,
+ )
+ self.chmod(os.path.join(openh264_dir, "gas-preprocessor.pl"), 744)
+
+ # gas-preprocessor.pl expects a `cpp` executable to exist; shell out
+ # to `ln` because os.symlink needs elevated privileges on Windows.
+ os.system(
+ "ln -s %s %s"
+ % (
+ os.path.join(
+ os.environ["MOZ_FETCHES_DIR"], "clang", "bin", "clang.exe"
+ ),
+ os.path.join(openh264_dir, "cpp"),
+ )
+ )
+ return 0
+
+ self.retry(
+ self._git_checkout,
+ error_level=FATAL,
+ error_message="Automation Error: couldn't clone repo",
+ args=(repo, repo_dir, rev),
+ )
+
+ # Checkout gmp-api
+ # TODO: Nothing here updates it yet, or enforces versions!
+ retval = 0  # remains 0 when both gmp-api and gtest are already present
+ if not os.path.exists(os.path.join(repo_dir, "gmp-api")):
+ retval = self.run_make("gmp-bootstrap")
+ if retval != 0:
+ self.fatal("couldn't bootstrap gmp")
+ else:
+ self.info("skipping gmp bootstrap - we have it locally")
+
+ # Checkout gtest
+ # TODO: Requires svn!
+ if not os.path.exists(os.path.join(repo_dir, "gtest")):
+ retval = self.run_make("gtest-bootstrap")
+ if retval != 0:
+ self.fatal("couldn't bootstrap gtest")
+ else:
+ self.info("skipping gtest bootstrap - we have it locally")
+
+ return retval
+
+ def build(self):
+ retval = self.run_make("plugin")
+ if retval != 0:
+ self.fatal("couldn't build plugin")
+
+ def package(self):
+ dirs = self.query_abs_dirs()
+ srcdir = os.path.join(dirs["abs_work_dir"], "openh264")
+ package_name = self.query_package_name()
+ package_file = os.path.join(dirs["abs_work_dir"], package_name)
+ if os.path.exists(package_file):
+ os.unlink(package_file)
+ to_package = []
+ for f in glob.glob(os.path.join(srcdir, "*gmpopenh264*")):
+ if not re.search(
+ r"(?:lib)?gmpopenh264(?!\.\d)\.(?:dylib|so|dll|info)(?!\.\d)", f
+ ):
+ # Don't package unnecessary zip bloat
+ # Blocks things like libgmpopenh264.2.dylib and libgmpopenh264.so.1
+ self.log("Skipping packaging of {package}".format(package=f))
+ continue
+ to_package.append(os.path.basename(f))
+ self.log("Packaging files %s" % to_package)
+ cmd = ["zip", package_file] + to_package
+ retval = self.run_command(cmd, cwd=srcdir)
+ if retval != 0:
+ self.fatal("couldn't make package")
+ self.copy_to_upload_dir(
+ package_file, dest=os.path.join(srcdir, "artifacts", package_name)
+ )
+
+ # Taskcluster expects this path to exist, but we don't use it
+ # because our builds are private.
+ path = os.path.join(
+ self.query_abs_dirs()["abs_work_dir"], "..", "public", "build"
+ )
+ self.mkdir_p(path)
+
+ def dump_symbols(self):
+ dirs = self.query_abs_dirs()
+ c = self.config
+ srcdir = os.path.join(dirs["abs_work_dir"], "openh264")
+ package_name = self.run_make("echo-plugin-name", capture_output=True)
+ if not package_name:
+ self.fatal("failure running make")
+ zip_package_name = self.query_package_name()
+ if not zip_package_name.endswith(".zip"):
+ self.fatal("Unexpected zip_package_name: %s" % zip_package_name)
+ symbol_package_name = "{base}.symbols.zip".format(base=zip_package_name[:-4])
+ symbol_zip_path = os.path.join(srcdir, "artifacts", symbol_package_name)
+ repo_dir = os.path.join(dirs["abs_work_dir"], "openh264")
+ env = None
+ if self.config.get("partial_env"):
+ env = self.query_env(self.config["partial_env"])
+ kwargs = dict(cwd=repo_dir, env=env)
+ dump_syms = os.path.join(dirs["abs_work_dir"], c["dump_syms_binary"])
+ self.chmod(dump_syms, 0o755)
+ python = self.query_exe("python3")
+ cmd = [
+ python,
+ os.path.join(external_tools_path, "packagesymbols.py"),
+ "--symbol-zip",
+ symbol_zip_path,
+ dump_syms,
+ os.path.join(srcdir, package_name),
+ ]
+ self.run_command(cmd, **kwargs)
+
+ def test(self):
+ retval = self.run_make("test")
+ if retval != 0:
+ self.fatal("test failures")
+
+ def copy_to_upload_dir(
+ self,
+ target,
+ dest=None,
+ log_level=DEBUG,
+ error_level=ERROR,
+ compress=False,
+ upload_dir=None,
+ ):
+ """Copy target file to upload_dir/dest.
+
+ Potentially update a manifest in the future if we go that route.
+
+ Currently only copies a single file; it would be nice to allow for
+ recursive copying, which would probably be done by creating a helper
+ _copy_file_to_upload_dir().
+ """
+ dest_filename_given = dest is not None
+ if upload_dir is None:
+ upload_dir = self.query_abs_dirs()["abs_upload_dir"]
+ if dest is None:
+ dest = os.path.basename(target)
+ if dest.endswith("/"):
+ dest_file = os.path.basename(target)
+ dest_dir = os.path.join(upload_dir, dest)
+ dest_filename_given = False
+ else:
+ dest_file = os.path.basename(dest)
+ dest_dir = os.path.join(upload_dir, os.path.dirname(dest))
+ if compress and not dest_filename_given:
+ dest_file += ".gz"
+ dest = os.path.join(dest_dir, dest_file)
+ if not os.path.exists(target):
+ self.log("%s doesn't exist!" % target, level=error_level)
+ return None
+ self.mkdir_p(dest_dir)
+ self.copyfile(target, dest, log_level=log_level, compress=compress)
+ if os.path.exists(dest):
+ return dest
+ else:
+ self.log("%s doesn't exist after copy!" % dest, level=error_level)
+ return None
+
+
+# main {{{1
+if __name__ == "__main__":
+ myScript = OpenH264Build()
+ myScript.run_and_exit()
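The packaging filter used in OpenH264Build.package() above is easy to sanity-check in isolation. A minimal standalone sketch (the file names are invented for illustration):

    import re

    # Same pattern as in package(): keep only the unversioned plugin
    # library and its .info file, skipping versioned zip bloat.
    pattern = r"(?:lib)?gmpopenh264(?!\.\d)\.(?:dylib|so|dll|info)(?!\.\d)"

    for name, expected in [
        ("libgmpopenh264.dylib", True),
        ("gmpopenh264.dll", True),
        ("gmpopenh264.info", True),
        ("libgmpopenh264.2.dylib", False),  # versioned copy: skipped
        ("libgmpopenh264.so.1", False),     # versioned copy: skipped
    ]:
        assert bool(re.search(pattern, name)) == expected, name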
diff --git a/testing/mozharness/scripts/raptor_script.py b/testing/mozharness/scripts/raptor_script.py
new file mode 100644
index 0000000000..be2ed181e8
--- /dev/null
+++ b/testing/mozharness/scripts/raptor_script.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""raptor
+
+"""
+
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.mozilla.testing.raptor import Raptor
+
+if __name__ == "__main__":
+ raptor = Raptor()
+ raptor.run_and_exit()
diff --git a/testing/mozharness/scripts/release/bouncer_check.py b/testing/mozharness/scripts/release/bouncer_check.py
new file mode 100644
index 0000000000..7a7e39b274
--- /dev/null
+++ b/testing/mozharness/scripts/release/bouncer_check.py
@@ -0,0 +1,202 @@
+#!/usr/bin/env python
+# lint_ignore=E501
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+""" bouncer_check.py
+
+A script to check HTTP statuses of Bouncer products to be shipped.
+"""
+
+import os
+import sys
+
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.script import BaseScript
+from mozharness.mozilla.automation import EXIT_STATUS_DICT, TBPL_FAILURE
+
+BOUNCER_URL_PATTERN = "{bouncer_prefix}?product={product}&os={os}&lang={lang}"
+
+
+class BouncerCheck(BaseScript):
+ config_options = [
+ [
+ ["--version"],
+ {
+ "dest": "version",
+ "help": "Version of release, eg: 39.0b5",
+ },
+ ],
+ [
+ ["--product-field"],
+ {
+ "dest": "product_field",
+ "help": "Version field of release from product details, eg: LATEST_FIREFOX_VERSION", # NOQA: E501
+ },
+ ],
+ [
+ ["--products-url"],
+ {
+ "dest": "products_url",
+ "help": "The URL of the current Firefox product versions",
+ "type": str,
+ "default": "https://product-details.mozilla.org/1.0/firefox_versions.json",
+ },
+ ],
+ [
+ ["--previous-version"],
+ {
+ "dest": "prev_versions",
+ "action": "extend",
+ "help": "Previous version(s)",
+ },
+ ],
+ [
+ ["--locale"],
+ {
+ "dest": "locales",
+ # Intentionally limited for several reasons:
+ # 1) faster to check
+ # 2) no need to deal with the situation where a new locale is
+ # introduced and we do not have partials for it yet
+ # 3) it mimics the old Sentry behaviour that worked for ages
+ # 4) no need to handle ja-JP-mac
+ "default": ["en-US", "de", "it", "zh-TW"],
+ "action": "append",
+ "help": "List of locales to check.",
+ },
+ ],
+ [
+ ["-j", "--parallelization"],
+ {
+ "dest": "parallelization",
+ "default": 20,
+ "type": int,
+ "help": "Number of HTTP sessions running in parallel",
+ },
+ ],
+ ]
+
+ def __init__(self, require_config_file=True):
+ super(BouncerCheck, self).__init__(
+ config_options=self.config_options,
+ require_config_file=require_config_file,
+ config={
+ "cdn_urls": [
+ "download-installer.cdn.mozilla.net",
+ "download.cdn.mozilla.net",
+ "download.mozilla.org",
+ "archive.mozilla.org",
+ ],
+ },
+ all_actions=[
+ "check-bouncer",
+ ],
+ default_actions=[
+ "check-bouncer",
+ ],
+ )
+
+ def _pre_config_lock(self, rw_config):
+ super(BouncerCheck, self)._pre_config_lock(rw_config)
+
+ if "product_field" not in self.config:
+ return
+
+ firefox_versions = self.load_json_url(self.config["products_url"])
+
+ if self.config["product_field"] not in firefox_versions:
+ self.fatal("Unknown Firefox label: {}".format(self.config["product_field"]))
+ self.config["version"] = firefox_versions[self.config["product_field"]]
+ self.log("Set Firefox version {}".format(self.config["version"]))
+
+ def check_url(self, session, url):
+ from redo import retry
+ from requests.exceptions import HTTPError
+
+ try:
+ from urllib.parse import urlparse
+ except ImportError:
+ # Python 2
+ from urlparse import urlparse
+
+ def do_check_url():
+ self.log("Checking {}".format(url))
+ r = session.head(url, verify=True, timeout=10, allow_redirects=True)
+ try:
+ r.raise_for_status()
+ except HTTPError:
+ self.error("FAIL: {}, status: {}".format(url, r.status_code))
+ raise
+
+ final_url = urlparse(r.url)
+ if final_url.scheme != "https":
+ self.error("FAIL: URL scheme is not https: {}".format(r.url))
+ self.return_code = EXIT_STATUS_DICT[TBPL_FAILURE]
+
+ if final_url.netloc not in self.config["cdn_urls"]:
+ self.error("FAIL: host not in allowed locations: {}".format(r.url))
+ self.return_code = EXIT_STATUS_DICT[TBPL_FAILURE]
+
+ try:
+ retry(do_check_url, sleeptime=3, max_sleeptime=10, attempts=3)
+ except HTTPError:
+ # The error was already logged above.
+ self.return_code = EXIT_STATUS_DICT[TBPL_FAILURE]
+ return
+
+ def get_urls(self):
+ for product in self.config["products"].values():
+ product_name = product["product-name"] % {"version": self.config["version"]}
+ for bouncer_platform in product["platforms"]:
+ for locale in self.config["locales"]:
+ url = BOUNCER_URL_PATTERN.format(
+ bouncer_prefix=self.config["bouncer_prefix"],
+ product=product_name,
+ os=bouncer_platform,
+ lang=locale,
+ )
+ yield url
+
+ for product in self.config.get("partials", {}).values():
+ for prev_version in self.config.get("prev_versions", []):
+ product_name = product["product-name"] % {
+ "version": self.config["version"],
+ "prev_version": prev_version,
+ }
+ for bouncer_platform in product["platforms"]:
+ for locale in self.config["locales"]:
+ url = BOUNCER_URL_PATTERN.format(
+ bouncer_prefix=self.config["bouncer_prefix"],
+ product=product_name,
+ os=bouncer_platform,
+ lang=locale,
+ )
+ yield url
+
+ def check_bouncer(self):
+ import concurrent.futures as futures
+
+ import requests
+
+ session = requests.Session()
+ http_adapter = requests.adapters.HTTPAdapter(
+ pool_connections=self.config["parallelization"],
+ pool_maxsize=self.config["parallelization"],
+ )
+ session.mount("https://", http_adapter)
+ session.mount("http://", http_adapter)
+
+ with futures.ThreadPoolExecutor(self.config["parallelization"]) as e:
+ fs = []
+ for url in self.get_urls():
+ fs.append(e.submit(self.check_url, session, url))
+ for f in futures.as_completed(fs):
+ f.result()
+
+
+if __name__ == "__main__":
+ BouncerCheck().run_and_exit()
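For a sense of what check_bouncer() actually probes, BOUNCER_URL_PATTERN expands as below; the prefix, product name, and locale are invented here, since the real values come from the config file:

    BOUNCER_URL_PATTERN = "{bouncer_prefix}?product={product}&os={os}&lang={lang}"

    url = BOUNCER_URL_PATTERN.format(
        bouncer_prefix="https://download.mozilla.org/",
        product="firefox-99.0b5",
        os="win64",
        lang="de",
    )
    # -> https://download.mozilla.org/?product=firefox-99.0b5&os=win64&lang=de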
diff --git a/testing/mozharness/scripts/release/generate-checksums.py b/testing/mozharness/scripts/release/generate-checksums.py
new file mode 100644
index 0000000000..ae092ae4de
--- /dev/null
+++ b/testing/mozharness/scripts/release/generate-checksums.py
@@ -0,0 +1,263 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import binascii
+import hashlib
+import os
+import re
+import sys
+from multiprocessing.pool import ThreadPool
+
+import six
+
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.python import VirtualenvMixin, virtualenv_config_options
+from mozharness.base.script import BaseScript
+from mozharness.mozilla.checksums import parse_checksums_file
+from mozharness.mozilla.merkle import MerkleTree
+
+
+class ChecksumsGenerator(BaseScript, VirtualenvMixin):
+ config_options = [
+ [
+ ["--stage-product"],
+ {
+ "dest": "stage_product",
+ "help": "Name of product used in file server's directory structure, "
+ "e.g.: firefox, mobile",
+ },
+ ],
+ [
+ ["--version"],
+ {
+ "dest": "version",
+ "help": "Version of release, e.g.: 59.0b5",
+ },
+ ],
+ [
+ ["--build-number"],
+ {
+ "dest": "build_number",
+ "help": "Build number of release, e.g.: 2",
+ },
+ ],
+ [
+ ["--bucket-name"],
+ {
+ "dest": "bucket_name",
+ "help": "Full bucket name e.g.: moz-fx-productdelivery-pr-38b5-productdelivery.",
+ },
+ ],
+ [
+ ["-j", "--parallelization"],
+ {
+ "dest": "parallelization",
+ "default": 20,
+ "type": int,
+ "help": "Number of checksums file to download concurrently",
+ },
+ ],
+ [
+ ["--branch"],
+ {
+ "dest": "branch",
+ "help": "dummy option",
+ },
+ ],
+ [
+ ["--build-pool"],
+ {
+ "dest": "build_pool",
+ "help": "dummy option",
+ },
+ ],
+ ] + virtualenv_config_options
+
+ def __init__(self):
+ BaseScript.__init__(
+ self,
+ config_options=self.config_options,
+ require_config_file=False,
+ config={
+ "virtualenv_modules": [
+ "boto",
+ ],
+ "virtualenv_path": "venv",
+ },
+ all_actions=[
+ "create-virtualenv",
+ "collect-individual-checksums",
+ "create-big-checksums",
+ "create-summary",
+ ],
+ default_actions=[
+ "create-virtualenv",
+ "collect-individual-checksums",
+ "create-big-checksums",
+ "create-summary",
+ ],
+ )
+
+ self.checksums = {}
+ self.file_prefix = self._get_file_prefix()
+
+ def _pre_config_lock(self, rw_config):
+ super(ChecksumsGenerator, self)._pre_config_lock(rw_config)
+
+ # These defaults are set here rather than in the config because default
+ # lists cannot be completely overridden, only appended to.
+ if not self.config.get("formats"):
+ self.config["formats"] = ["sha512", "sha256"]
+
+ if not self.config.get("includes"):
+ self.config["includes"] = [
+ r"^.*\.tar\.bz2$",
+ r"^.*\.tar\.xz$",
+ r"^.*\.snap$",
+ r"^.*\.dmg$",
+ r"^.*\.pkg$",
+ r"^.*\.bundle$",
+ r"^.*\.mar$",
+ r"^.*Setup.*\.exe$",
+ r"^.*Installer\.exe$",
+ r"^.*\.msi$",
+ r"^.*\.xpi$",
+ r"^.*fennec.*\.apk$",
+ r"^.*/jsshell.*$",
+ ]
+
+ def _get_file_prefix(self):
+ return "pub/{}/candidates/{}-candidates/build{}/".format(
+ self.config["stage_product"],
+ self.config["version"],
+ self.config["build_number"],
+ )
+
+ def _get_sums_filename(self, format_):
+ return "{}SUMS".format(format_.upper())
+
+ def _get_summary_filename(self, format_):
+ return "{}SUMMARY".format(format_.upper())
+
+ def _get_hash_function(self, format_):
+ if format_ in ("sha256", "sha384", "sha512"):
+ return getattr(hashlib, format_)
+ else:
+ self.fatal("Unsupported format {}".format(format_))
+
+ def _get_bucket(self):
+ self.activate_virtualenv()
+ from boto import connect_s3
+
+ self.info("Connecting to S3")
+ conn = connect_s3(anon=True, host="storage.googleapis.com")
+ self.info("Connecting to bucket {}".format(self.config["bucket_name"]))
+ self.bucket = conn.get_bucket(self.config["bucket_name"])
+ return self.bucket
+
+ def collect_individual_checksums(self):
+ """This step grabs all of the small checksums files for the release,
+ filters out any unwanted files from within them, and adds the remainder
+ to self.checksums for subsequent steps to use."""
+ bucket = self._get_bucket()
+ self.info("File prefix is: {}".format(self.file_prefix))
+
+ # temporary holding place for checksums
+ raw_checksums = []
+
+ def worker(item):
+ self.debug("Downloading {}".format(item))
+ sums = bucket.get_key(item).get_contents_as_string()
+ raw_checksums.append(sums)
+
+ def find_checksums_files():
+ self.info("Getting key names from bucket")
+ checksum_files = {"beets": [], "checksums": []}
+ for key in bucket.list(prefix=self.file_prefix):
+ if key.key.endswith(".checksums"):
+ self.debug("Found checksums file: {}".format(key.key))
+ checksum_files["checksums"].append(key.key)
+ elif key.key.endswith(".beet"):
+ self.debug("Found beet file: {}".format(key.key))
+ checksum_files["beets"].append(key.key)
+ else:
+ self.debug("Ignoring non-checksums file: {}".format(key.key))
+ if checksum_files["beets"]:
+ self.log("Using beet format")
+ return checksum_files["beets"]
+ else:
+ self.log("Using checksums format")
+ return checksum_files["checksums"]
+
+ pool = ThreadPool(self.config["parallelization"])
+ pool.map(worker, find_checksums_files())
+
+ for c in raw_checksums:
+ for f, info in six.iteritems(parse_checksums_file(c)):
+ for pattern in self.config["includes"]:
+ if re.search(pattern, f):
+ if f in self.checksums:
+ if info == self.checksums[f]:
+ self.debug(
+ "Duplicate checksum for file {}"
+ " but the data matches;"
+ " continuing...".format(f)
+ )
+ continue
+ self.fatal(
+ "Found duplicate checksum entry for {}, "
+ "don't know which one to pick.".format(f)
+ )
+ if not set(self.config["formats"]) <= set(info["hashes"]):
+ self.fatal("Missing necessary format for file {}".format(f))
+ self.debug("Adding checksums for file: {}".format(f))
+ self.checksums[f] = info
+ break
+ else:
+ self.debug("Ignoring checksums for file: {}".format(f))
+
+ def create_summary(self):
+ """
+ This step computes a Merkle tree over the checksums for each format
+ and writes a file containing the head of the tree and inclusion proofs
+ for each file.
+ """
+ for fmt in self.config["formats"]:
+ hash_fn = self._get_hash_function(fmt)
+ files = sorted(self.checksums)
+ data = [self.checksums[fn]["hashes"][fmt] for fn in files]
+
+ tree = MerkleTree(hash_fn, data)
+ head = binascii.hexlify(tree.head())
+ proofs = [
+ binascii.hexlify(tree.inclusion_proof(i).to_rfc6962_bis())
+ for i in range(len(files))
+ ]
+
+ summary = self._get_summary_filename(fmt)
+ self.info("Creating summary file: {}".format(summary))
+
+ content = "{} TREE_HEAD\n".format(head.decode("ascii"))
+ for i in range(len(files)):
+ content += "{} {}\n".format(proofs[i].decode("ascii"), files[i])
+
+ self.write_to_file(summary, content)
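+ # For illustration (hex values elided, file names invented), the
+ # SHA256SUMMARY written here has one TREE_HEAD line followed by one
+ # inclusion-proof line per file:
+ #
+ #   <tree-head-hex> TREE_HEAD
+ #   <proof-hex> firefox-99.0.tar.bz2
+ #   <proof-hex> firefox-99.0.win64.zip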
+
+ def create_big_checksums(self):
+ for fmt in self.config["formats"]:
+ sums = self._get_sums_filename(fmt)
+ self.info("Creating big checksums file: {}".format(sums))
+ with open(sums, "w+") as output_file:
+ for fn in sorted(self.checksums):
+ output_file.write(
+ "{} {}\n".format(
+ self.checksums[fn]["hashes"][fmt].decode("ascii"), fn
+ )
+ )
+
+
+if __name__ == "__main__":
+ myScript = ChecksumsGenerator()
+ myScript.run_and_exit()
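collect_individual_checksums() keeps only paths matching one of the include patterns; the filtering reduces to this standalone sketch (pattern list trimmed, file names invented):

    import re

    includes = [r"^.*\.tar\.bz2$", r"^.*\.dmg$", r"^.*Setup.*\.exe$"]

    def wanted(path):
        # Same shape as the inner pattern loop above.
        return any(re.search(p, path) for p in includes)

    assert wanted("firefox-99.0.tar.bz2")
    assert wanted("Firefox Setup 99.0.exe")
    assert not wanted("firefox-99.0.checksums.asc")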
diff --git a/testing/mozharness/scripts/release/update-verify-config-creator.py b/testing/mozharness/scripts/release/update-verify-config-creator.py
new file mode 100644
index 0000000000..9de0175577
--- /dev/null
+++ b/testing/mozharness/scripts/release/update-verify-config-creator.py
@@ -0,0 +1,642 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import math
+import os
+import pprint
+import re
+import sys
+
+from looseversion import LooseVersion
+from mozilla_version.gecko import GeckoVersion
+from mozilla_version.version import VersionType
+from six.moves.urllib.parse import urljoin
+
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.log import DEBUG, FATAL, INFO, WARNING
+from mozharness.base.script import BaseScript
+
+
+# ensure all versions are 3 part (i.e. 99.1.0)
+# ensure all text (i.e. 'esr') is in the last part
+class CompareVersion(LooseVersion):
+ def __init__(self, versionMap):
+ parts = versionMap.split(".")
+ # assume version is 99.9.0, look for 99.0
+ if len(parts) == 2:
+ intre = re.compile("([0-9.]+)(.*)")
+ match = intre.match(parts[-1])
+ if match:
+ parts[-1] = match.group(1)
+ parts.append("0%s" % match.group(2))
+ else:
+ parts.append("0")
+ # Delegate to LooseVersion so the normalized string is actually
+ # parsed; constructing a throwaway LooseVersion and leaving
+ # self.version as a plain string would make comparisons lexical.
+ LooseVersion.__init__(self, ".".join(parts))
+
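+# For illustration (version strings invented; assumes the constructor
+# delegates to LooseVersion.__init__ as above), the normalization behaves as:
+#
+# >>> CompareVersion("59.0b5").vstring   # text suffix moves to third part
+# '59.0.0b5'
+# >>> CompareVersion("60.0").vstring
+# '60.0.0'
+# >>> CompareVersion("60.2.1").vstring   # already three parts: unchanged
+# '60.2.1'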
+
+def is_triangular(x):
+ """Check if a number is triangular (0, 1, 3, 6, 10, 15, ...)
+ see: https://en.wikipedia.org/wiki/Triangular_number#Triangular_roots_and_tests_for_triangular_numbers # noqa
+
+ >>> is_triangular(0)
+ True
+ >>> is_triangular(1)
+ True
+ >>> is_triangular(2)
+ False
+ >>> is_triangular(3)
+ True
+ >>> is_triangular(4)
+ False
+ >>> all(is_triangular(x) for x in [0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105])
+ True
+ >>> all(not is_triangular(x) for x in [4, 5, 8, 9, 11, 17, 25, 29, 39, 44, 59, 61, 72, 98, 112])
+ True
+ """
+ n = (math.sqrt(8 * x + 1) - 1) / 2
+ return n == int(n)
+
+
+class UpdateVerifyConfigCreator(BaseScript):
+ config_options = [
+ [
+ ["--product"],
+ {
+ "dest": "product",
+ "help": "Product being tested, as used in the update URL and filenames. Eg: firefox", # NOQA: E501
+ },
+ ],
+ [
+ ["--stage-product"],
+ {
+ "dest": "stage_product",
+ "help": "Product being tested, as used in stage directories and ship it"
+ "If not passed this is assumed to be the same as product.",
+ },
+ ],
+ [
+ ["--app-name"],
+ {
+ "dest": "app_name",
+ "help": "App name being tested. Eg: browser",
+ },
+ ],
+ [
+ ["--branch-prefix"],
+ {
+ "dest": "branch_prefix",
+ "help": "Prefix of release branch names. Eg: mozilla, comm",
+ },
+ ],
+ [
+ ["--channel"],
+ {
+ "dest": "channel",
+ "help": "Channel to run update verify against",
+ },
+ ],
+ [
+ ["--aus-server"],
+ {
+ "dest": "aus_server",
+ "default": "https://aus5.mozilla.org",
+ "help": "AUS server to run update verify against",
+ },
+ ],
+ [
+ ["--to-version"],
+ {
+ "dest": "to_version",
+ "help": "The version of the release being updated to. Eg: 59.0b5",
+ },
+ ],
+ [
+ ["--to-app-version"],
+ {
+ "dest": "to_app_version",
+ "help": "The in-app version of the release being updated to. Eg: 59.0",
+ },
+ ],
+ [
+ ["--to-display-version"],
+ {
+ "dest": "to_display_version",
+ "help": "The human-readable version of the release being updated to. Eg: 59.0 Beta 9", # NOQA: E501
+ },
+ ],
+ [
+ ["--to-build-number"],
+ {
+ "dest": "to_build_number",
+ "help": "The build number of the release being updated to",
+ },
+ ],
+ [
+ ["--to-buildid"],
+ {
+ "dest": "to_buildid",
+ "help": "The buildid of the release being updated to",
+ },
+ ],
+ [
+ ["--to-revision"],
+ {
+ "dest": "to_revision",
+ "help": "The revision that the release being updated to was built against",
+ },
+ ],
+ [
+ ["--partial-version"],
+ {
+ "dest": "partial_versions",
+ "default": [],
+ "action": "append",
+ "help": "A previous release version that is expected to receive a partial update. "
+ "Eg: 59.0b4. May be specified multiple times.",
+ },
+ ],
+ [
+ ["--last-watershed"],
+ {
+ "dest": "last_watershed",
+ "help": "The earliest version to include in the update verify config. Eg: 57.0b10",
+ },
+ ],
+ [
+ ["--include-version"],
+ {
+ "dest": "include_versions",
+ "default": [],
+ "action": "append",
+ "help": "Only include versions that match one of these regexes. "
+ "May be passed multiple times",
+ },
+ ],
+ [
+ ["--mar-channel-id-override"],
+ {
+ "dest": "mar_channel_id_options",
+ "default": [],
+ "action": "append",
+ "help": "A version regex and channel id string to override those versions with."
+ "Eg: ^\\d+\\.\\d+(\\.\\d+)?$,firefox-mozilla-beta,firefox-mozilla-release "
+ "will set accepted mar channel ids to 'firefox-mozilla-beta' and "
+ "'firefox-mozilla-release for x.y and x.y.z versions. "
+ "May be passed multiple times",
+ },
+ ],
+ [
+ ["--override-certs"],
+ {
+ "dest": "override_certs",
+ "default": None,
+ "help": "Certs to override the updater with prior to running update verify."
+ "If passed, should be one of: dep, nightly, release"
+ "If not passed, no certificate overriding will be configured",
+ },
+ ],
+ [
+ ["--platform"],
+ {
+ "dest": "platform",
+ "help": "The platform to generate the update verify config for, in FTP-style",
+ },
+ ],
+ [
+ ["--updater-platform"],
+ {
+ "dest": "updater_platform",
+ "help": "The platform to run the updater on, in FTP-style."
+ "If not specified, this is assumed to be the same as platform",
+ },
+ ],
+ [
+ ["--archive-prefix"],
+ {
+ "dest": "archive_prefix",
+ "help": "The server/path to pull the current release from. "
+ "Eg: https://archive.mozilla.org/pub",
+ },
+ ],
+ [
+ ["--previous-archive-prefix"],
+ {
+ "dest": "previous_archive_prefix",
+ "help": "The server/path to pull the previous releases from"
+ "If not specified, this is assumed to be the same as --archive-prefix",
+ },
+ ],
+ [
+ ["--repo-path"],
+ {
+ "dest": "repo_path",
+ "help": (
+ "The repository (relative to the hg server root) that the current "
+ "release was built from Eg: releases/mozilla-beta"
+ ),
+ },
+ ],
+ [
+ ["--output-file"],
+ {
+ "dest": "output_file",
+ "help": "Where to write the update verify config to",
+ },
+ ],
+ [
+ ["--product-details-server"],
+ {
+ "dest": "product_details_server",
+ "default": "https://product-details.mozilla.org",
+ "help": "Product Details server to pull previous release info from. "
+ "Using anything other than the production server is likely to "
+ "cause issues with update verify.",
+ },
+ ],
+ [
+ ["--hg-server"],
+ {
+ "dest": "hg_server",
+ "default": "https://hg.mozilla.org",
+ "help": "Mercurial server to pull various previous and current version info from",
+ },
+ ],
+ [
+ ["--full-check-locale"],
+ {
+ "dest": "full_check_locales",
+ "default": ["de", "en-US", "ru"],
+ "action": "append",
+ "help": "A list of locales to generate full update verify checks for",
+ },
+ ],
+ ]
+
+ def __init__(self):
+ BaseScript.__init__(
+ self,
+ config_options=self.config_options,
+ config={},
+ all_actions=[
+ "gather-info",
+ "create-config",
+ "write-config",
+ ],
+ default_actions=[
+ "gather-info",
+ "create-config",
+ "write-config",
+ ],
+ )
+
+ def _pre_config_lock(self, rw_config):
+ super(UpdateVerifyConfigCreator, self)._pre_config_lock(rw_config)
+
+ if "updater_platform" not in self.config:
+ self.config["updater_platform"] = self.config["platform"]
+ if "stage_product" not in self.config:
+ self.config["stage_product"] = self.config["product"]
+ if "previous_archive_prefix" not in self.config:
+ self.config["previous_archive_prefix"] = self.config["archive_prefix"]
+ self.config["archive_prefix"].rstrip("/")
+ self.config["previous_archive_prefix"].rstrip("/")
+ self.config["mar_channel_id_overrides"] = {}
+ for override in self.config["mar_channel_id_options"]:
+ pattern, override_str = override.split(",", 1)
+ self.config["mar_channel_id_overrides"][pattern] = override_str
+
+ def _get_branch_url(self, branch_prefix, version):
+ version = GeckoVersion.parse(version)
+ branch = None
+ if version.version_type == VersionType.BETA:
+ branch = "releases/{}-beta".format(branch_prefix)
+ elif version.version_type == VersionType.ESR:
+ branch = "releases/{}-esr{}".format(branch_prefix, version.major_number)
+ elif version.version_type == VersionType.RELEASE:
+ if branch_prefix == "comm":
+ # Thunderbird does not have ESR releases, regular releases
+ # go in an ESR branch
+ branch = "releases/{}-esr{}".format(branch_prefix, version.major_number)
+ else:
+ branch = "releases/{}-release".format(branch_prefix)
+ if not branch:
+ raise Exception("Cannot determine branch, cannot continue!")
+
+ return branch
+
+ def _get_update_paths(self):
+ from mozrelease.l10n import getPlatformLocales
+ from mozrelease.paths import getCandidatesDir
+ from mozrelease.platforms import ftp2infoFile
+ from mozrelease.versions import MozillaVersion
+
+ self.update_paths = {}
+
+ ret = self._retry_download(
+ "{}/1.0/{}.json".format(
+ self.config["product_details_server"],
+ self.config["stage_product"],
+ ),
+ "WARNING",
+ )
+ releases = json.load(ret)["releases"]
+ for release_name, release_info in reversed(
+ sorted(releases.items(), key=lambda x: MozillaVersion(x[1]["version"]))
+ ):
+ # We need to use release_name instead of release_info since the esr
+ # string is included in the name; we rely on this later.
+ product, version = release_name.split("-", 1)
+ tag = "{}_{}_RELEASE".format(product.upper(), version.replace(".", "_"))
+
+ # Exclude any releases that don't match one of our include version
+ # regexes. This is generally to avoid including versions from other
+ # channels. Eg: including betas when testing releases
+ for v in self.config["include_versions"]:
+ if re.match(v, version):
+ break
+ else:
+ self.log(
+ "Skipping release whose version doesn't match any "
+ "include_version pattern: %s" % release_name,
+ level=INFO,
+ )
+ continue
+
+ # We also have to trim out previous releases that aren't in the same
+ # product line, too old, etc.
+ if self.config["stage_product"] != product:
+ self.log(
+ "Skipping release that doesn't match product name: %s"
+ % release_name,
+ level=INFO,
+ )
+ continue
+ if MozillaVersion(version) < MozillaVersion(self.config["last_watershed"]):
+ self.log(
+ "Skipping release that's behind the last watershed: %s"
+ % release_name,
+ level=INFO,
+ )
+ continue
+ if version == self.config["to_version"]:
+ self.log(
+ "Skipping release that is the same as to version: %s"
+ % release_name,
+ level=INFO,
+ )
+ continue
+ if MozillaVersion(version) > MozillaVersion(self.config["to_version"]):
+ self.log(
+ "Skipping release that's newer than to version: %s" % release_name,
+ level=INFO,
+ )
+ continue
+
+ if version in self.update_paths:
+ raise Exception("Found duplicate release for version: %s", version)
+
+ # This is a crappy place to get buildids from, but we don't have a better one.
+ # This will start to fail if old info files are deleted.
+ info_file_url = "{}{}/{}_info.txt".format(
+ self.config["previous_archive_prefix"],
+ getCandidatesDir(
+ self.config["stage_product"],
+ version,
+ release_info["build_number"],
+ ),
+ ftp2infoFile(self.config["platform"]),
+ )
+ self.log(
+ "Retrieving buildid from info file: %s" % info_file_url, level=DEBUG
+ )
+ ret = self._retry_download(info_file_url, "WARNING")
+ buildID = ret.read().split(b"=")[1].strip().decode("utf-8")
+
+ branch = self._get_branch_url(self.config["branch_prefix"], version)
+
+ shipped_locales_url = urljoin(
+ self.config["hg_server"],
+ "{}/raw-file/{}/{}/locales/shipped-locales".format(
+ branch,
+ tag,
+ self.config["app_name"],
+ ),
+ )
+ ret = self._retry_download(shipped_locales_url, "WARNING")
+ shipped_locales = ret.read().strip().decode("utf-8")
+
+ app_version_url = urljoin(
+ self.config["hg_server"],
+ "{}/raw-file/{}/{}/config/version.txt".format(
+ branch,
+ tag,
+ self.config["app_name"],
+ ),
+ )
+ app_version = (
+ self._retry_download(app_version_url, "WARNING")
+ .read()
+ .strip()
+ .decode("utf-8")
+ )
+
+ self.log("Adding {} to update paths".format(version), level=INFO)
+ self.update_paths[version] = {
+ "appVersion": app_version,
+ "locales": getPlatformLocales(shipped_locales, self.config["platform"]),
+ "buildID": buildID,
+ }
+ for pattern, mar_channel_ids in self.config[
+ "mar_channel_id_overrides"
+ ].items():
+ if re.match(pattern, version):
+ self.update_paths[version]["marChannelIds"] = mar_channel_ids
+
+ def gather_info(self):
+ self._get_update_paths()
+ if self.update_paths:
+ self.log("Found update paths:", level=DEBUG)
+ self.log(pprint.pformat(self.update_paths), level=DEBUG)
+ elif GeckoVersion.parse(self.config["to_version"]) <= GeckoVersion.parse(
+ self.config["last_watershed"]
+ ):
+ self.log(
+ "Didn't find any update paths, but to_version {} is before the last_"
+ "watershed {}, generating empty config".format(
+ self.config["to_version"],
+ self.config["last_watershed"],
+ ),
+ level=WARNING,
+ )
+ else:
+ self.log("Didn't find any update paths, cannot continue", level=FATAL)
+
+ def create_config(self):
+ from mozrelease.l10n import getPlatformLocales
+ from mozrelease.paths import (
+ getCandidatesDir,
+ getReleaseInstallerPath,
+ getReleasesDir,
+ )
+ from mozrelease.platforms import ftp2updatePlatforms
+ from mozrelease.update_verify import UpdateVerifyConfig
+ from mozrelease.versions import getPrettyVersion
+
+ candidates_dir = getCandidatesDir(
+ self.config["stage_product"],
+ self.config["to_version"],
+ self.config["to_build_number"],
+ )
+ to_ = getReleaseInstallerPath(
+ self.config["product"],
+ self.config["product"].title(),
+ self.config["to_version"],
+ self.config["platform"],
+ locale="%locale%",
+ )
+ to_path = "{}/{}".format(candidates_dir, to_)
+
+ to_display_version = self.config.get("to_display_version")
+ if not to_display_version:
+ to_display_version = getPrettyVersion(self.config["to_version"])
+
+ self.update_verify_config = UpdateVerifyConfig(
+ product=self.config["product"].title(),
+ channel=self.config["channel"],
+ aus_server=self.config["aus_server"],
+ to=to_path,
+ to_build_id=self.config["to_buildid"],
+ to_app_version=self.config["to_app_version"],
+ to_display_version=to_display_version,
+ override_certs=self.config.get("override_certs"),
+ )
+
+ to_shipped_locales_url = urljoin(
+ self.config["hg_server"],
+ "{}/raw-file/{}/{}/locales/shipped-locales".format(
+ self.config["repo_path"],
+ self.config["to_revision"],
+ self.config["app_name"],
+ ),
+ )
+ to_shipped_locales = (
+ self._retry_download(to_shipped_locales_url, "WARNING")
+ .read()
+ .strip()
+ .decode("utf-8")
+ )
+ to_locales = set(
+ getPlatformLocales(to_shipped_locales, self.config["platform"])
+ )
+
+ completes_only_index = 0
+ for fromVersion in reversed(sorted(self.update_paths, key=CompareVersion)):
+ from_ = self.update_paths[fromVersion]
+ locales = sorted(list(set(from_["locales"]).intersection(to_locales)))
+ appVersion = from_["appVersion"]
+ build_id = from_["buildID"]
+ mar_channel_IDs = from_.get("marChannelIds")
+
+ # Use new build targets for Windows, but only on compatible
+ # versions (42+). See bug 1185456 for additional context.
+ if self.config["platform"] not in ("win32", "win64") or LooseVersion(
+ fromVersion
+ ) < LooseVersion("42.0"):
+ update_platform = ftp2updatePlatforms(self.config["platform"])[0]
+ else:
+ update_platform = ftp2updatePlatforms(self.config["platform"])[1]
+
+ release_dir = getReleasesDir(self.config["stage_product"], fromVersion)
+ path_ = getReleaseInstallerPath(
+ self.config["product"],
+ self.config["product"].title(),
+ fromVersion,
+ self.config["platform"],
+ locale="%locale%",
+ )
+ from_path = "{}/{}".format(release_dir, path_)
+
+ updater_package = "{}/{}".format(
+ release_dir,
+ getReleaseInstallerPath(
+ self.config["product"],
+ self.config["product"].title(),
+ fromVersion,
+ self.config["updater_platform"],
+ locale="%locale%",
+ ),
+ )
+
+ # Exclude locales being full checked
+ quick_check_locales = [
+ l for l in locales if l not in self.config["full_check_locales"]
+ ]
+ # Get the intersection of from and to full_check_locales
+ this_full_check_locales = [
+ l for l in self.config["full_check_locales"] if l in locales
+ ]
+
+ if fromVersion in self.config["partial_versions"]:
+ self.info(
+ "Generating configs for partial update checks for %s" % fromVersion
+ )
+ self.update_verify_config.addRelease(
+ release=appVersion,
+ build_id=build_id,
+ locales=locales,
+ patch_types=["complete", "partial"],
+ from_path=from_path,
+ ftp_server_from=self.config["previous_archive_prefix"],
+ ftp_server_to=self.config["archive_prefix"],
+ mar_channel_IDs=mar_channel_IDs,
+ platform=update_platform,
+ updater_package=updater_package,
+ )
+ else:
+ if this_full_check_locales and is_triangular(completes_only_index):
+ self.info("Generating full check configs for %s" % fromVersion)
+ self.update_verify_config.addRelease(
+ release=appVersion,
+ build_id=build_id,
+ locales=this_full_check_locales,
+ from_path=from_path,
+ ftp_server_from=self.config["previous_archive_prefix"],
+ ftp_server_to=self.config["archive_prefix"],
+ mar_channel_IDs=mar_channel_IDs,
+ platform=update_platform,
+ updater_package=updater_package,
+ )
+ # Quick test for other locales, no download
+ if len(quick_check_locales) > 0:
+ self.info("Generating quick check configs for %s" % fromVersion)
+ if not is_triangular(completes_only_index):
+ # Assuming we skipped full check locales, using all locales
+ _locales = locales
+ else:
+ # Excluding full check locales from the quick check
+ _locales = quick_check_locales
+ self.update_verify_config.addRelease(
+ release=appVersion,
+ build_id=build_id,
+ locales=_locales,
+ platform=update_platform,
+ )
+ completes_only_index += 1
+
+ def write_config(self):
+ # Needs to be opened in "bytes" mode because we perform relative seeks on it
+ with open(self.config["output_file"], "wb+") as fh:
+ self.update_verify_config.write(fh)
+
+
+if __name__ == "__main__":
+ UpdateVerifyConfigCreator().run_and_exit()
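The completes_only_index bookkeeping in create_config() thins out full-coverage checks for progressively older releases. A standalone sketch of which indexes get full checks, using the same triangular test as the script:

    import math

    def is_triangular(x):
        n = (math.sqrt(8 * x + 1) - 1) / 2
        return n == int(n)

    # Releases are walked newest-first; indexes 0, 1, 3, 6, 10, 15, ...
    # get full update checks, everything else only quick checks.
    print([i for i in range(20) if is_triangular(i)])  # [0, 1, 3, 6, 10, 15]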
diff --git a/testing/mozharness/scripts/repackage.py b/testing/mozharness/scripts/repackage.py
new file mode 100644
index 0000000000..e26a32c1db
--- /dev/null
+++ b/testing/mozharness/scripts/repackage.py
@@ -0,0 +1,175 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import sys
+
+sys.path.insert(1, os.path.dirname(sys.path[0])) # noqa - don't warn about imports
+
+from mozharness.base.log import FATAL
+from mozharness.base.script import BaseScript
+
+
+class Repackage(BaseScript):
+ def __init__(self, require_config_file=False):
+ script_kwargs = {
+ "all_actions": [
+ "setup",
+ "repackage",
+ ],
+ }
+ BaseScript.__init__(
+ self, require_config_file=require_config_file, **script_kwargs
+ )
+
+ def setup(self):
+ dirs = self.query_abs_dirs()
+
+ self._run_tooltool()
+
+ mar_path = os.path.join(dirs["abs_input_dir"], "mar")
+ if self._is_windows():
+ mar_path += ".exe"
+ if os.path.exists(mar_path):
+ self.chmod(mar_path, 0o755)
+ if self.config.get("run_configure", True):
+ self._get_mozconfig()
+ self._run_configure()
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(Repackage, self).query_abs_dirs()
+ config = self.config
+
+ dirs = {}
+ dirs["abs_input_dir"] = os.path.join(abs_dirs["base_work_dir"], "fetches")
+ output_dir_suffix = []
+ if config.get("locale"):
+ output_dir_suffix.append(config["locale"])
+ if config.get("repack_id"):
+ output_dir_suffix.append(config["repack_id"])
+ dirs["abs_output_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "outputs", *output_dir_suffix
+ )
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def repackage(self):
+ config = self.config
+ dirs = self.query_abs_dirs()
+
+ subst = {
+ "package-name": config["package-name"],
+ # sfx-stub is only defined for Windows targets
+ "sfx-stub": config.get("sfx-stub"),
+ "installer-tag": config["installer-tag"],
+ "stub-installer-tag": config["stub-installer-tag"],
+ "wsx-stub": config["wsx-stub"],
+ }
+ subst.update(dirs)
+ if config.get("fetch-dir"):
+ subst.update({"fetch-dir": os.path.abspath(config["fetch-dir"])})
+
+ # Make sure the upload dir is around.
+ self.mkdir_p(dirs["abs_output_dir"])
+
+ for repack_config in config["repackage_config"]:
+ command = [sys.executable, "mach", "--log-no-times", "repackage"]
+ command.extend([arg.format(**subst) for arg in repack_config["args"]])
+ for arg, filename in repack_config["inputs"].items():
+ command.extend(
+ [
+ "--{}".format(arg),
+ os.path.join(dirs["abs_input_dir"], filename),
+ ]
+ )
+ command.extend(
+ [
+ "--output",
+ os.path.join(dirs["abs_output_dir"], repack_config["output"]),
+ ]
+ )
+ self.run_command(
+ command=command,
+ cwd=dirs["abs_src_dir"],
+ halt_on_failure=True,
+ env=self.query_env(),
+ )
+
+ def _run_tooltool(self):
+ config = self.config
+ dirs = self.query_abs_dirs()
+ manifest_src = os.environ.get("TOOLTOOL_MANIFEST")
+ if not manifest_src:
+ manifest_src = config.get("tooltool_manifest_src")
+ if not manifest_src:
+ return
+
+ cmd = [
+ sys.executable,
+ "-u",
+ os.path.join(dirs["abs_src_dir"], "mach"),
+ "artifact",
+ "toolchain",
+ "-v",
+ "--retry",
+ "4",
+ "--artifact-manifest",
+ os.path.join(dirs["abs_src_dir"], "toolchains.json"),
+ ]
+ cmd.extend(
+ [
+ "--tooltool-manifest",
+ os.path.join(dirs["abs_src_dir"], manifest_src),
+ ]
+ )
+ cache = config.get("tooltool_cache")
+ if cache:
+ cmd.extend(["--cache-dir", cache])
+ self.info(str(cmd))
+ self.run_command(cmd, cwd=dirs["abs_src_dir"], halt_on_failure=True)
+
+ def _get_mozconfig(self):
+ """assign mozconfig."""
+ c = self.config
+ dirs = self.query_abs_dirs()
+ abs_mozconfig_path = ""
+
+ # first determine the mozconfig path
+ if c.get("src_mozconfig"):
+ self.info("Using in-tree mozconfig")
+ abs_mozconfig_path = os.path.join(dirs["abs_src_dir"], c["src_mozconfig"])
+ else:
+ self.fatal(
+ "'src_mozconfig' must be in the config "
+ "in order to determine the mozconfig."
+ )
+
+ # print its contents
+ self.read_from_file(abs_mozconfig_path, error_level=FATAL)
+
+ # finally, copy the mozconfig to a path that 'mach build' expects it to be
+ self.copyfile(
+ abs_mozconfig_path, os.path.join(dirs["abs_src_dir"], ".mozconfig")
+ )
+
+ def _run_configure(self):
+ dirs = self.query_abs_dirs()
+ command = [sys.executable, "mach", "--log-no-times", "configure"]
+ return self.run_command(
+ command=command,
+ cwd=dirs["abs_src_dir"],
+ output_timeout=60 * 3,
+ halt_on_failure=True,
+ )
+
+
+if __name__ == "__main__":
+ repack = Repackage()
+ repack.run_and_exit()
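The repackage loop above builds one mach invocation per config entry. With a hypothetical entry (the key names follow the loop in Repackage.repackage(); the values and paths are invented), the assembled command looks like this:

    import sys

    repack_config = {
        "args": ["dmg"],
        "inputs": {"input": "target.tar.gz"},
        "output": "target.dmg",
    }
    dirs = {"abs_input_dir": "/builds/fetches", "abs_output_dir": "/builds/outputs"}

    command = [sys.executable, "mach", "--log-no-times", "repackage"]
    command.extend(repack_config["args"])
    for arg, filename in repack_config["inputs"].items():
        command.extend(["--%s" % arg, "%s/%s" % (dirs["abs_input_dir"], filename)])
    command.extend(
        ["--output", "%s/%s" % (dirs["abs_output_dir"], repack_config["output"])]
    )
    print(command)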
diff --git a/testing/mozharness/scripts/talos_script.py b/testing/mozharness/scripts/talos_script.py
new file mode 100755
index 0000000000..10e441070c
--- /dev/null
+++ b/testing/mozharness/scripts/talos_script.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""talos
+
+"""
+
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.mozilla.testing.talos import Talos
+
+if __name__ == "__main__":
+ talos = Talos()
+ talos.run_and_exit()
diff --git a/testing/mozharness/scripts/telemetry/telemetry_client.py b/testing/mozharness/scripts/telemetry/telemetry_client.py
new file mode 100755
index 0000000000..a0c91ad1a1
--- /dev/null
+++ b/testing/mozharness/scripts/telemetry/telemetry_client.py
@@ -0,0 +1,277 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+
+import copy
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.python import PreScriptAction
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.vcstools import VCSToolsScript
+
+# General command line arguments for telemetry tests
+telemetry_tests_config_options = (
+ [
+ [
+ ["--allow-software-gl-layers"],
+ {
+ "action": "store_true",
+ "dest": "allow_software_gl_layers",
+ "default": False,
+ "help": "Permits a software GL implementation (such as LLVMPipe) "
+ "to use the GL compositor.",
+ },
+ ],
+ [
+ ["--dry-run"],
+ {
+ "dest": "dry_run",
+ "default": False,
+ "help": "Only show what was going to be tested.",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "dest": "e10s",
+ "action": "store_false",
+ "default": True,
+ "help": "Disable multi-process (e10s) mode when running tests.",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "dest": "disable_fission",
+ "action": "store_true",
+ "default": False,
+ "help": "Disable fission mode when running tests.",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "dest": "extra_prefs",
+ "action": "append",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ [
+ ["--symbols-path=SYMBOLS_PATH"],
+ {
+ "dest": "symbols_path",
+ "help": "absolute path to directory containing breakpad "
+ "symbols, or the url of a zip file containing symbols.",
+ },
+ ],
+ [
+ ["--tag=TAG"],
+ {
+ "dest": "tag",
+ "help": "Subset of tests to run (local, remote).",
+ },
+ ],
+ ]
+ + copy.deepcopy(testing_config_options)
+ + copy.deepcopy(code_coverage_config_options)
+)
+
+
+class TelemetryTests(TestingMixin, VCSToolsScript, CodeCoverageMixin):
+ def __init__(
+ self,
+ config_options=None,
+ all_actions=None,
+ default_actions=None,
+ *args,
+ **kwargs
+ ):
+ config_options = config_options or telemetry_tests_config_options
+ actions = [
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ "uninstall",
+ ]
+
+ super(TelemetryTests, self).__init__(
+ config_options=config_options,
+ all_actions=all_actions or actions,
+ default_actions=default_actions or actions,
+ *args,
+ **kwargs
+ )
+
+ # Code which runs in automation has to include the following properties
+ self.binary_path = self.config.get("binary_path")
+ self.installer_path = self.config.get("installer_path")
+ self.installer_url = self.config.get("installer_url")
+ self.test_packages_url = self.config.get("test_packages_url")
+ self.test_url = self.config.get("test_url")
+ self.disable_fission = self.config.get("disable_fission")
+
+ if not self.test_url and not self.test_packages_url:
+ self.fatal("You must use --test-url, or --test-packages-url")
+
+ @PreScriptAction("create-virtualenv")
+ def _pre_create_virtualenv(self, action):
+ abs_dirs = self.query_abs_dirs()
+
+ requirements = os.path.join(
+ abs_dirs["abs_test_install_dir"],
+ "config",
+ "telemetry_tests_requirements.txt",
+ )
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+
+ abs_dirs = super(TelemetryTests, self).query_abs_dirs()
+
+ abs_test_install_dir = os.path.join(abs_dirs["abs_work_dir"], "tests")
+
+ dirs = {
+ "abs_test_install_dir": abs_test_install_dir,
+ "abs_telemetry_dir": os.path.join(
+ abs_test_install_dir, "telemetry", "marionette"
+ ),
+ "abs_blob_upload_dir": os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ ),
+ }
+
+ for key in dirs:
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+
+ self.abs_dirs = abs_dirs
+
+ return self.abs_dirs
+
+ def run_test(self, binary_path, env=None, marionette_port=2828):
+ """All required steps for running the tests against an installer."""
+ dirs = self.query_abs_dirs()
+
+ # Import the harness to retrieve the location of the cli scripts
+ import telemetry_harness
+
+ cmd = [
+ self.query_python_path(),
+ os.path.join(os.path.dirname(telemetry_harness.__file__), self.cli_script),
+ "--binary",
+ binary_path,
+ "--address",
+ "localhost:{}".format(marionette_port),
+ # Resource files to serve via local webserver
+ "--server-root",
+ os.path.join(dirs["abs_telemetry_dir"], "harness", "www"),
+ # Use the work dir to get temporary data stored
+ "--workspace",
+ dirs["abs_work_dir"],
+ # logging options
+ "--gecko-log=-", # output from the gecko process redirected to stdout
+ "--log-raw=-", # structured log for output parser redirected to stdout
+ # additional reports helpful for Jenkins and inspection via Treeherder
+ "--log-html",
+ os.path.join(dirs["abs_blob_upload_dir"], "report.html"),
+ "--log-xunit",
+ os.path.join(dirs["abs_blob_upload_dir"], "report.xml"),
+ # Enable tracing output to log transmission protocol
+ "-vv",
+ ]
+
+ # Symbols for crash reports
+ if self.symbols_path:
+ cmd.extend(["--symbols-path", self.symbols_path])
+
+ if self.disable_fission:
+ cmd.append("--disable-fission")
+ cmd.extend(["--setpref={}".format(p) for p in self.config["extra_prefs"]])
+
+ if not self.config["e10s"]:
+ cmd.append("--disable-e10s")
+
+ parser = StructuredOutputParser(
+ config=self.config, log_obj=self.log_obj, strict=False
+ )
+
+ # Add the default tests to run
+ tests = [
+ os.path.join(dirs["abs_telemetry_dir"], "tests", test)
+ for test in self.default_tests
+ ]
+ cmd.extend(tests)
+
+ # Set further environment settings
+ env = env or self.query_env()
+ env.update({"MINIDUMP_SAVE_PATH": dirs["abs_blob_upload_dir"]})
+ if self.query_minidump_stackwalk():
+ env.update({"MINIDUMP_STACKWALK": self.minidump_stackwalk_path})
+ env["RUST_BACKTRACE"] = "1"
+ env["MOZ_IGNORE_NSS_SHUTDOWN_LEAKS"] = "1"
+
+ # Make Firefox crash on any attempted non-local connection so that
+ # accidental external network use shows up in test runs.
+ env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
+
+ # If code coverage is enabled, set GCOV_PREFIX env variable
+ if self.config.get("code_coverage"):
+ env["GCOV_PREFIX"] = self.gcov_dir
+
+ return_code = self.run_command(
+ cmd,
+ cwd=dirs["abs_work_dir"],
+ output_timeout=1000,
+ output_parser=parser,
+ env=env,
+ )
+
+ tbpl_status, log_level, _ = parser.evaluate_parser(return_code)
+ self.record_status(tbpl_status, level=log_level)
+
+ return return_code
+
+ @PreScriptAction("run-tests")
+ def _pre_run_tests(self, action):
+ if not self.installer_path and not self.installer_url:
+ self.critical(
+ "Please specify an installer via --installer-path or --installer-url."
+ )
+ sys.exit(1)
+
+ def run_tests(self):
+ """Run all the tests"""
+ return self.run_test(
+ binary_path=self.binary_path,
+ env=self.query_env(),
+ )
+
+
+class TelemetryClientTests(TelemetryTests):
+ cli_script = "runtests.py"
+ default_tests = [
+ os.path.join("client", "manifest.ini"),
+ os.path.join("unit", "manifest.ini"),
+ ]
+
+
+if __name__ == "__main__":
+ myScript = TelemetryClientTests()
+ myScript.run_and_exit()
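run_test() above boils down to invoking the telemetry harness CLI against an installed binary. Roughly, the assembled command has this shape (every angle-bracketed path below is a placeholder, not a real location):

    cmd = [
        "python", "<site-packages>/telemetry_harness/runtests.py",
        "--binary", "<abs path to firefox>",
        "--address", "localhost:2828",
        "--server-root", "<tests>/telemetry/marionette/harness/www",
        "--workspace", "<work dir>",
        "--gecko-log=-",
        "--log-raw=-",
        "<tests>/telemetry/marionette/tests/client/manifest.ini",
    ]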
diff --git a/testing/mozharness/scripts/web_platform_tests.py b/testing/mozharness/scripts/web_platform_tests.py
new file mode 100755
index 0000000000..662f1a7540
--- /dev/null
+++ b/testing/mozharness/scripts/web_platform_tests.py
@@ -0,0 +1,700 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+import copy
+import gzip
+import json
+import os
+import sys
+from datetime import datetime, timedelta
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+import mozinfo
+from mozharness.base.errors import BaseErrorList
+from mozharness.base.log import INFO
+from mozharness.base.script import PreScriptAction
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.automation import TBPL_RETRY
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.errors import WptHarnessErrorList
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+
+
+class WebPlatformTest(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin):
+ config_options = (
+ [
+ [
+ ["--test-type"],
+ {
+ "action": "extend",
+ "dest": "test_type",
+ "help": "Specify the test types to run.",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "action": "store_false",
+ "dest": "e10s",
+ "default": True,
+ "help": "Run without e10s enabled",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_true",
+ "dest": "disable_fission",
+ "default": False,
+ "help": "Run without fission enabled",
+ },
+ ],
+ [
+ ["--total-chunks"],
+ {
+ "action": "store",
+ "dest": "total_chunks",
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--this-chunk"],
+ {
+ "action": "store",
+ "dest": "this_chunk",
+ "help": "Number of this chunk",
+ },
+ ],
+ [
+ ["--allow-software-gl-layers"],
+ {
+ "action": "store_true",
+ "dest": "allow_software_gl_layers",
+ "default": False,
+ "help": "Permits a software GL implementation (such as LLVMPipe) "
+ "to use the GL compositor.",
+ },
+ ],
+ [
+ ["--headless"],
+ {
+ "action": "store_true",
+ "dest": "headless",
+ "default": False,
+ "help": "Run tests in headless mode.",
+ },
+ ],
+ [
+ ["--headless-width"],
+ {
+ "action": "store",
+ "dest": "headless_width",
+ "default": "1600",
+ "help": "Specify headless virtual screen width (default: 1600).",
+ },
+ ],
+ [
+ ["--headless-height"],
+ {
+ "action": "store",
+ "dest": "headless_height",
+ "default": "1200",
+ "help": "Specify headless virtual screen height (default: 1200).",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Defines an extra user preference.",
+ },
+ ],
+ [
+ ["--skip-implementation-status"],
+ {
+ "action": "extend",
+ "dest": "skip_implementation_status",
+ "default": [],
+ "help": "Defines a way to not run a specific implementation status "
+ " (i.e. not implemented).",
+ },
+ ],
+ [
+ ["--backlog"],
+ {
+ "action": "store_true",
+ "dest": "backlog",
+ "default": False,
+ "help": "Defines if test category is backlog.",
+ },
+ ],
+ [
+ ["--skip-timeout"],
+ {
+ "action": "store_true",
+ "dest": "skip_timeout",
+ "default": False,
+ "help": "Ignore tests that are expected status of TIMEOUT",
+ },
+ ],
+ [
+ ["--default-exclude"],
+ {
+ "action": "store_true",
+ "dest": "default_exclude",
+ "default": False,
+ "help": "Only run the tests explicitly given in arguments",
+ },
+ ],
+ [
+ ["--include"],
+ {
+ "action": "append",
+ "dest": "include",
+ "default": [],
+ "help": "Add URL prefix to include.",
+ },
+ ],
+ [
+ ["--exclude"],
+ {
+ "action": "append",
+ "dest": "exclude",
+ "default": [],
+ "help": "Add URL prefix to exclude.",
+ },
+ ],
+ [
+ ["--tag"],
+ {
+ "action": "append",
+ "dest": "tag",
+ "default": [],
+ "help": "Add test tag (which includes URL prefix) to include.",
+ },
+ ],
+ ]
+ + copy.deepcopy(testing_config_options)
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
+ def __init__(self, require_config_file=True):
+ super(WebPlatformTest, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "clobber",
+ "download-and-extract",
+ "download-and-process-manifest",
+ "create-virtualenv",
+ "pull",
+ "start-emulator",
+ "verify-device",
+ "install",
+ "run-tests",
+ ],
+ require_config_file=require_config_file,
+ config={"require_test_zip": True},
+ )
+
+ # Surely this should be in the superclass
+ c = self.config
+ self.installer_url = c.get("installer_url")
+ self.test_url = c.get("test_url")
+ self.test_packages_url = c.get("test_packages_url")
+ self.installer_path = c.get("installer_path")
+ self.binary_path = c.get("binary_path")
+ self.abs_app_dir = None
+ self.xre_path = None
+ if self.is_emulator:
+ self.device_serial = "emulator-5554"
+
+ def query_abs_app_dir(self):
+ """We can't set this in advance, because OSX install directories
+ change depending on branding and opt/debug.
+ """
+ if self.abs_app_dir:
+ return self.abs_app_dir
+ if not self.binary_path:
+ self.fatal("Can't determine abs_app_dir (binary_path not set!)")
+ self.abs_app_dir = os.path.dirname(self.binary_path)
+ return self.abs_app_dir
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(WebPlatformTest, self).query_abs_dirs()
+
+ dirs = {}
+ dirs["abs_app_install_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "application"
+ )
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ dirs["abs_test_bin_dir"] = os.path.join(dirs["abs_test_install_dir"], "bin")
+ dirs["abs_wpttest_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "web-platform"
+ )
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ dirs["abs_test_extensions_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "extensions"
+ )
+ if self.is_android:
+ dirs["abs_xre_dir"] = os.path.join(abs_dirs["abs_work_dir"], "hostutils")
+ if self.is_emulator:
+ work_dir = os.environ.get("MOZ_FETCHES_DIR") or abs_dirs["abs_work_dir"]
+ dirs["abs_sdk_dir"] = os.path.join(work_dir, "android-sdk-linux")
+ dirs["abs_avds_dir"] = os.path.join(work_dir, "android-device")
+ dirs["abs_bundletool_path"] = os.path.join(work_dir, "bundletool.jar")
+ # AndroidMixin uses this when launching the emulator. GLES3 is
+ # required for WebRender, which is enabled by default.
+ self.use_gles3 = True
+
+ abs_dirs.update(dirs)
+ self.abs_dirs = abs_dirs
+
+ return self.abs_dirs
+
+ @PreScriptAction("create-virtualenv")
+ def _pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+
+ requirements = os.path.join(
+ dirs["abs_test_install_dir"], "config", "marionette_requirements.txt"
+ )
+
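+ # two_pass installs each requirements file twice (the first pass with
+ # --no-deps), per mozharness.base.python.VirtualenvMixin.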
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
+
+ webtransport_requirements = os.path.join(
+ dirs["abs_test_install_dir"],
+ "web-platform",
+ "tests",
+ "tools",
+ "webtransport",
+ "requirements.txt",
+ )
+
+ self.register_virtualenv_module(
+ requirements=[webtransport_requirements], two_pass=True
+ )
+
+ def _query_geckodriver(self):
+ c = self.config
+ dirs = self.query_abs_dirs()
+ repl_dict = {}
+ repl_dict.update(dirs)
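+ # The "geckodriver" config value may contain %-style placeholders that
+ # are expanded against the absolute directories, e.g. (illustrative):
+ # "%(abs_test_bin_dir)s/geckodriver"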
+ path = c.get("geckodriver", "geckodriver")
+ if path:
+ path = path % repl_dict
+ return path
+
+ def _query_cmd(self, test_types):
+ if not self.binary_path:
+ # fatal() logs the error and exits the script.
+ self.fatal("Binary path could not be determined")
+
+ c = self.config
+ run_file_name = "runtests.py"
+
+ dirs = self.query_abs_dirs()
+ abs_app_dir = self.query_abs_app_dir()
+ str_format_values = {
+ "binary_path": self.binary_path,
+ "test_path": dirs["abs_wpttest_dir"],
+ "test_install_path": dirs["abs_test_install_dir"],
+ "abs_app_dir": abs_app_dir,
+ "abs_work_dir": dirs["abs_work_dir"],
+ "xre_path": self.xre_path,
+ }
+
+ cmd = [self.query_python_path("python"), "-u"]
+ cmd.append(os.path.join(dirs["abs_wpttest_dir"], run_file_name))
+
+ mozinfo.find_and_update_from_json(dirs["abs_test_install_dir"])
+
+ raw_log_file, error_summary_file = self.get_indexed_logs(
+ dirs["abs_blob_upload_dir"], "wpt"
+ )
+
+ cmd += [
+ # Stream the raw log to stdout and also save a copy for upload.
+ "--log-raw=-",
+ "--log-raw=%s" % raw_log_file,
+ "--log-wptreport=%s"
+ % os.path.join(dirs["abs_blob_upload_dir"], "wptreport.json"),
+ "--log-errorsummary=%s" % error_summary_file,
+ "--symbols-path=%s" % self.symbols_path,
+ "--stackwalk-binary=%s" % self.query_minidump_stackwalk(),
+ "--stackfix-dir=%s" % os.path.join(dirs["abs_test_install_dir"], "bin"),
+ "--no-pause-after-test",
+ "--instrument-to-file=%s"
+ % os.path.join(dirs["abs_blob_upload_dir"], "wpt_instruments.txt"),
+ "--specialpowers-path=%s"
+ % os.path.join(
+ dirs["abs_test_extensions_dir"], "specialpowers@mozilla.org.xpi"
+ ),
+ ]
+
+ is_windows_7 = (
+ mozinfo.info["os"] == "win" and mozinfo.info["os_version"] == "6.1"
+ )
+
+ if (
+ self.is_android
+ or mozinfo.info["tsan"]
+ or "wdspec" in test_types
+ or not c["disable_fission"]
+ # Bug 1392106 - skia error 0x80070005: Access is denied.
+ or (is_windows_7 and mozinfo.info["debug"])
+ ):
+ processes = 1
+ else:
+ processes = 2
+ cmd.append("--processes=%s" % processes)
+
+ if self.is_android:
+ cmd += [
+ "--device-serial=%s" % self.device_serial,
+ "--package-name=%s" % self.query_package_name(),
+ ]
+ else:
+ cmd.append("--binary=%s" % self.binary_path)
+
+ if is_windows_7:
+ # On Windows 7 --install-fonts fails, so fall back to a Firefox-specific codepath
+ self._install_fonts()
+ else:
+ cmd += ["--install-fonts"]
+
+ for test_type in test_types:
+ cmd.append("--test-type=%s" % test_type)
+
+ if c["extra_prefs"]:
+ cmd.extend(["--setpref={}".format(p) for p in c["extra_prefs"]])
+
+ if c["disable_fission"]:
+ cmd.append("--disable-fission")
+
+ if not c["e10s"]:
+ cmd.append("--disable-e10s")
+
+ if c["skip_timeout"]:
+ cmd.append("--skip-timeout")
+ if c["default_exclude"]:
+ cmd.append("--default-exclude")
+
+ for implementation_status in c["skip_implementation_status"]:
+ cmd.append("--skip-implementation-status=%s" % implementation_status)
+
+ # Bug 1643177 - reduce timeout multiplier for web-platform-tests backlog
+ if c["backlog"]:
+ cmd.append("--timeout-multiplier=0.25")
+
+ test_paths = set()
+ if not (self.verify_enabled or self.per_test_coverage):
+ mozharness_test_paths = json.loads(
+ os.environ.get("MOZHARNESS_TEST_PATHS", '""')
+ )
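+ # MOZHARNESS_TEST_PATHS is assumed to be a JSON object mapping suite
+ # names to lists of test paths, e.g. (illustrative):
+ # {"web-platform-tests": ["testing/web-platform/tests/dom"]}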
+ if mozharness_test_paths:
+ path = os.path.join(dirs["abs_fetches_dir"], "wpt_tests_by_group.json")
+
+ if not os.path.exists(path):
+ self.critical("Unable to locate web-platform-test groups file.")
+
+ cmd.append("--test-groups={}".format(path))
+
+ for paths in mozharness_test_paths.values():
+ for path in paths:
+ if not path.startswith("/"):
+ # Assume this is a filesystem path rather than a test id
+ path = os.path.relpath(path, "testing/web-platform")
+ if ".." in path:
+ self.fatal("Invalid WPT path: {}".format(path))
+ path = os.path.join(dirs["abs_wpttest_dir"], path)
+ test_paths.add(path)
+ else:
+ # As per WPT harness, the --run-by-dir flag is incompatible with
+ # the --test-groups flag.
+ cmd.append("--run-by-dir=%i" % (3 if not mozinfo.info["asan"] else 0))
+ for opt in ["total_chunks", "this_chunk"]:
+ val = c.get(opt)
+ if val:
+ cmd.append("--%s=%s" % (opt.replace("_", "-"), val))
+
+ options = list(c.get("options", []))
+
+ if "wdspec" in test_types:
+ geckodriver_path = self._query_geckodriver()
+ if not geckodriver_path or not os.path.isfile(geckodriver_path):
+ self.fatal(
+ "Unable to find geckodriver binary "
+ "in common test package: %s" % str(geckodriver_path)
+ )
+ cmd.append("--webdriver-binary=%s" % geckodriver_path)
+ cmd.append("--webdriver-arg=-vv") # enable trace logs
+
+ test_type_suite = {
+ "testharness": "web-platform-tests",
+ "crashtest": "web-platform-tests-crashtest",
+ "print-reftest": "web-platform-tests-print-reftest",
+ "reftest": "web-platform-tests-reftest",
+ "wdspec": "web-platform-tests-wdspec",
+ }
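+ # Merge in any try-syntax options and test arguments for each
+ # requested suite.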
+ for test_type in test_types:
+ try_options, try_tests = self.try_args(test_type_suite[test_type])
+
+ cmd.extend(
+ self.query_options(
+ options, try_options, str_format_values=str_format_values
+ )
+ )
+ cmd.extend(
+ self.query_tests_args(try_tests, str_format_values=str_format_values)
+ )
+
+ for url_prefix in c["include"]:
+ cmd.append(f"--include={url_prefix}")
+ for url_prefix in c["exclude"]:
+ cmd.append(f"--exclude={url_prefix}")
+ for tag in c["tag"]:
+ cmd.append(f"--tag={tag}")
+
+ cmd.extend(test_paths)
+
+ return cmd
+
+ def download_and_extract(self):
+ super(WebPlatformTest, self).download_and_extract(
+ extract_dirs=[
+ "mach",
+ "bin/*",
+ "config/*",
+ "extensions/*",
+ "mozbase/*",
+ "marionette/*",
+ "tools/*",
+ "web-platform/*",
+ "mozpack/*",
+ "mozbuild/*",
+ ],
+ suite_categories=["web-platform"],
+ )
+ dirs = self.query_abs_dirs()
+ if self.is_android:
+ self.xre_path = self.download_hostutils(dirs["abs_xre_dir"])
+ # Make sure that the logging directory exists
+ if self.mkdir_p(dirs["abs_blob_upload_dir"]) == -1:
+ self.fatal("Could not create blobber upload directory")
+ # Exit
+
+ def download_and_process_manifest(self):
+ """Downloads the tests-by-manifest JSON mapping generated by the decision task.
+
+ As of Bug 1608837, web-platform-tests are chunked in the decision task,
+ which means tests are resolved by the TestResolver as part of this process.
+
+ The manifest file contains tests keyed by the groups generated in
+ TestResolver.get_wpt_group().
+
+ Upon successful completion, a JSON file containing only the
+ web-platform test groups is saved in the fetch directory.
+
+ Bug:
+ 1634554
+ """
+ dirs = self.query_abs_dirs()
+ url = os.environ.get("TESTS_BY_MANIFEST_URL", "")
+ if not url:
+ self.fatal("TESTS_BY_MANIFEST_URL not defined.")
+
+ artifact_name = url.split("/")[-1]
+
+ # Save file to the MOZ_FETCHES dir.
+ self.download_file(
+ url, file_name=artifact_name, parent_dir=dirs["abs_fetches_dir"]
+ )
+
+ with gzip.open(os.path.join(dirs["abs_fetches_dir"], artifact_name), "r") as f:
+ tests_by_manifest = json.loads(f.read())
+
+ # We need to filter out non-web-platform-tests without knowing what the
+ # groups are. Fortunately, all web-platform test 'manifests' begin with a
+ # forward slash.
+ test_groups = {
+ key: value
+ for key, value in tests_by_manifest.items()
+ if key.startswith("/")
+ }
+
+ outfile = os.path.join(dirs["abs_fetches_dir"], "wpt_tests_by_group.json")
+ with open(outfile, "w+") as f:
+ json.dump(test_groups, f, indent=2, sort_keys=True)
+
+ def install(self):
+ if self.is_android:
+ self.install_android_app(self.installer_path)
+ else:
+ super(WebPlatformTest, self).install()
+
+ def _install_fonts(self):
+ if self.is_android:
+ return
+ # Ensure the Ahem font is available
+ dirs = self.query_abs_dirs()
+
+ if not sys.platform.startswith("darwin"):
+ font_path = os.path.join(os.path.dirname(self.binary_path), "fonts")
+ else:
+ font_path = os.path.join(
+ os.path.dirname(self.binary_path),
+ os.pardir,
+ "Resources",
+ "res",
+ "fonts",
+ )
+ if not os.path.exists(font_path):
+ os.makedirs(font_path)
+ ahem_src = os.path.join(dirs["abs_wpttest_dir"], "tests", "fonts", "Ahem.ttf")
+ ahem_dest = os.path.join(font_path, "Ahem.ttf")
+ with open(ahem_src, "rb") as src, open(ahem_dest, "wb") as dest:
+ dest.write(src.read())
+
+ def run_tests(self):
+ dirs = self.query_abs_dirs()
+
+ parser = StructuredOutputParser(
+ config=self.config,
+ log_obj=self.log_obj,
+ log_compact=True,
+ error_list=BaseErrorList + WptHarnessErrorList,
+ allow_crashes=True,
+ )
+
+ env = {"MINIDUMP_SAVE_PATH": dirs["abs_blob_upload_dir"]}
+ env["RUST_BACKTRACE"] = "full"
+
+ if self.config["allow_software_gl_layers"]:
+ env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"
+ if self.config["headless"]:
+ env["MOZ_HEADLESS"] = "1"
+ env["MOZ_HEADLESS_WIDTH"] = self.config["headless_width"]
+ env["MOZ_HEADLESS_HEIGHT"] = self.config["headless_height"]
+
+ if self.is_android:
+ env["ADB_PATH"] = self.adb_path
+
+ env = self.query_env(partial_env=env, log_level=INFO)
+
+ start_time = datetime.now()
+ max_per_test_time = timedelta(minutes=60)
+ max_per_test_tests = 10
+ if self.per_test_coverage:
+ max_per_test_tests = 30
+ executed_tests = 0
+ executed_too_many_tests = False
+
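+ # In verify/per-test-coverage mode, each modified test is run in its
+ # own wptrunner invocation, one suite at a time; otherwise a single
+ # invocation (suite None) covers all configured test types.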
+ if self.per_test_coverage or self.verify_enabled:
+ suites = self.query_per_test_category_suites(None, None)
+ if "wdspec" in suites:
+ # geckodriver is required for wdspec, but not always available
+ geckodriver_path = self._query_geckodriver()
+ if not geckodriver_path or not os.path.isfile(geckodriver_path):
+ suites.remove("wdspec")
+ self.info("Skipping 'wdspec' tests - no geckodriver")
+ else:
+ test_types = self.config.get("test_type", [])
+ suites = [None]
+ for suite in suites:
+ if executed_too_many_tests and not self.per_test_coverage:
+ continue
+
+ if suite:
+ test_types = [suite]
+
+ summary = {}
+ for per_test_args in self.query_args(suite):
+ # Baseline code coverage tests are never skipped, and running
+ # them does not count toward the maximum number of tests to run.
+ is_baseline_test = (
+ "baselinecoverage" in per_test_args[-1]
+ if self.per_test_coverage
+ else False
+ )
+ if executed_too_many_tests and not is_baseline_test:
+ continue
+
+ if not is_baseline_test:
+ if (datetime.now() - start_time) > max_per_test_time:
+ # The run has exceeded its time budget. That is okay! Stop
+ # here so that a task timeout is not triggered, and so that
+ # (partial) results are made available in a timely manner.
+ self.info(
+ "TinderboxPrint: Running tests took too long: Not all tests "
+ "were executed.<br/>"
+ )
+ return
+ if executed_tests >= max_per_test_tests:
+ # When changesets are merged between trees or many tests are
+ # otherwise updated at once, there probably is not enough time
+ # to run all tests, and attempting to do so may cause other
+ # problems, such as generating too much log output.
+ self.info(
+ "TinderboxPrint: Too many modified tests: Not all tests "
+ "were executed.<br/>"
+ )
+ executed_too_many_tests = True
+
+ executed_tests += 1
+
+ cmd = self._query_cmd(test_types)
+ cmd.extend(per_test_args)
+
+ final_env = copy.copy(env)
+
+ if self.per_test_coverage:
+ self.set_coverage_env(final_env, is_baseline_test)
+
+ return_code = self.run_command(
+ cmd,
+ cwd=dirs["abs_work_dir"],
+ output_timeout=1000,
+ output_parser=parser,
+ env=final_env,
+ )
+
+ if self.per_test_coverage:
+ self.add_per_test_coverage_report(
+ final_env, suite, per_test_args[-1]
+ )
+
+ tbpl_status, log_level, summary = parser.evaluate_parser(
+ return_code, previous_summary=summary
+ )
+ self.record_status(tbpl_status, level=log_level)
+
+ if len(per_test_args) > 0:
+ self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
+ if tbpl_status == TBPL_RETRY:
+ self.info("Per-test run abandoned due to RETRY status")
+ return
+
+
+# main {{{1
+if __name__ == "__main__":
+ web_platform_tests = WebPlatformTest()
+ web_platform_tests.run_and_exit()