summaryrefslogtreecommitdiffstats
path: root/testing/mozharness/scripts
diff options
context:
space:
mode:
Diffstat (limited to 'testing/mozharness/scripts')
-rw-r--r--testing/mozharness/scripts/android_emulator_pgo.py332
-rw-r--r--testing/mozharness/scripts/android_emulator_unittest.py533
-rw-r--r--testing/mozharness/scripts/android_hardware_unittest.py474
-rw-r--r--testing/mozharness/scripts/android_wrench.py256
-rw-r--r--testing/mozharness/scripts/awsy_script.py337
-rwxr-xr-xtesting/mozharness/scripts/configtest.py161
-rwxr-xr-xtesting/mozharness/scripts/desktop_l10n.py495
-rwxr-xr-xtesting/mozharness/scripts/desktop_partner_repacks.py214
-rwxr-xr-xtesting/mozharness/scripts/desktop_unittest.py1220
-rwxr-xr-xtesting/mozharness/scripts/firefox_ui_tests/functional.py21
-rwxr-xr-xtesting/mozharness/scripts/firefox_ui_tests/update.py21
-rwxr-xr-xtesting/mozharness/scripts/firefox_ui_tests/update_release.py371
-rwxr-xr-xtesting/mozharness/scripts/fx_desktop_build.py102
-rwxr-xr-xtesting/mozharness/scripts/l10n_bumper.py381
-rwxr-xr-xtesting/mozharness/scripts/marionette.py468
-rwxr-xr-xtesting/mozharness/scripts/merge_day/gecko_migration.py566
-rwxr-xr-xtesting/mozharness/scripts/multil10n.py22
-rwxr-xr-xtesting/mozharness/scripts/openh264_build.py490
-rw-r--r--testing/mozharness/scripts/raptor_script.py21
-rw-r--r--testing/mozharness/scripts/release/bouncer_check.py206
-rw-r--r--testing/mozharness/scripts/release/generate-checksums.py264
-rw-r--r--testing/mozharness/scripts/release/update-verify-config-creator.py623
-rw-r--r--testing/mozharness/scripts/repackage.py176
-rwxr-xr-xtesting/mozharness/scripts/talos_script.py22
-rwxr-xr-xtesting/mozharness/scripts/telemetry/telemetry_client.py274
-rwxr-xr-xtesting/mozharness/scripts/web_platform_tests.py668
26 files changed, 8718 insertions, 0 deletions
diff --git a/testing/mozharness/scripts/android_emulator_pgo.py b/testing/mozharness/scripts/android_emulator_pgo.py
new file mode 100644
index 0000000000..3aae63c161
--- /dev/null
+++ b/testing/mozharness/scripts/android_emulator_pgo.py
@@ -0,0 +1,332 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+from __future__ import absolute_import
+import copy
+import json
+import time
+import glob
+import os
+import sys
+import posixpath
+import subprocess
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.script import BaseScript, PreScriptAction
+from mozharness.mozilla.automation import EXIT_STATUS_DICT, TBPL_RETRY
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+
# Pages loaded (in order) during the profiling run to generate PGO training
# data. Speedometer exercises the JS engine and DOM heavily; the blueprint
# pages exercise layout/CSS; the js-input/sunspider pages cover a broad mix
# of JS workloads (math, crypto, strings, regexp, control flow).
# All paths are relative to the build/pgo docroot served by MozHttpd below.
PAGES = [
    "js-input/webkit/PerformanceTests/Speedometer/index.html",
    "blueprint/sample.html",
    "blueprint/forms.html",
    "blueprint/grid.html",
    "blueprint/elements.html",
    "js-input/3d-thingy.html",
    "js-input/crypto-otp.html",
    "js-input/sunspider/3d-cube.html",
    "js-input/sunspider/3d-morph.html",
    "js-input/sunspider/3d-raytrace.html",
    "js-input/sunspider/access-binary-trees.html",
    "js-input/sunspider/access-fannkuch.html",
    "js-input/sunspider/access-nbody.html",
    "js-input/sunspider/access-nsieve.html",
    "js-input/sunspider/bitops-3bit-bits-in-byte.html",
    "js-input/sunspider/bitops-bits-in-byte.html",
    "js-input/sunspider/bitops-bitwise-and.html",
    "js-input/sunspider/bitops-nsieve-bits.html",
    "js-input/sunspider/controlflow-recursive.html",
    "js-input/sunspider/crypto-aes.html",
    "js-input/sunspider/crypto-md5.html",
    "js-input/sunspider/crypto-sha1.html",
    "js-input/sunspider/date-format-tofte.html",
    "js-input/sunspider/date-format-xparb.html",
    "js-input/sunspider/math-cordic.html",
    "js-input/sunspider/math-partial-sums.html",
    "js-input/sunspider/math-spectral-norm.html",
    "js-input/sunspider/regexp-dna.html",
    "js-input/sunspider/string-base64.html",
    "js-input/sunspider/string-fasta.html",
    "js-input/sunspider/string-tagcloud.html",
    "js-input/sunspider/string-unpack-code.html",
    "js-input/sunspider/string-validate-input.html",
]
+
+
class AndroidProfileRun(TestingMixin, BaseScript, MozbaseMixin, AndroidMixin):
    """
    Mozharness script to generate an android PGO profile using the emulator.

    The run-tests action starts a local HTTP server, drives the instrumented
    GeckoView app through a fixed page set via Marionette, pulls the raw
    profile data off the device, merges it with llvm-profdata, and archives
    the result to /builds/worker/artifacts for the PGO build to consume.
    """

    # Standard testing options (installer path/url, symbols, etc.).
    config_options = copy.deepcopy(testing_config_options)

    def __init__(self, require_config_file=False):
        # Declare the action pipeline; the emulator/AVD actions are
        # implemented by AndroidMixin.
        super(AndroidProfileRun, self).__init__(
            config_options=self.config_options,
            all_actions=[
                "setup-avds",
                "download",
                "create-virtualenv",
                "start-emulator",
                "verify-device",
                "install",
                "run-tests",
            ],
            require_config_file=require_config_file,
            config={
                "virtualenv_modules": [],
                "virtualenv_requirements": [],
                "require_test_zip": True,
                "mozbase_requirements": "mozbase_source_requirements.txt",
            },
        )

        # these are necessary since self.config is read only
        c = self.config
        self.installer_path = c.get("installer_path")
        # Fixed serial of the single emulator this script launches; also
        # hard-coded in run_tests() when constructing the ADB device.
        self.device_serial = "emulator-5554"

    def query_abs_dirs(self):
        """Return the cached dict of absolute directories, extending the
        base-class dirs with test/xre/avd/blob-upload locations."""
        if self.abs_dirs:
            return self.abs_dirs
        abs_dirs = super(AndroidProfileRun, self).query_abs_dirs()
        dirs = {}

        dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_src_dir"], "testing")
        dirs["abs_xre_dir"] = os.path.join(abs_dirs["abs_work_dir"], "hostutils")
        # Hard-coded worker path; artifacts placed here are uploaded by CI.
        dirs["abs_blob_upload_dir"] = "/builds/worker/artifacts/blobber_upload_dir"
        dirs["abs_avds_dir"] = os.path.join(abs_dirs["abs_work_dir"], ".android")

        # Only add keys the base class has not already defined.
        for key in dirs.keys():
            if key not in abs_dirs:
                abs_dirs[key] = dirs[key]
        self.abs_dirs = abs_dirs
        return self.abs_dirs

    ##########################################
    # Actions for AndroidProfileRun          #
    ##########################################

    def preflight_install(self):
        # in the base class, this checks for mozinstall, but we don't use it
        pass

    @PreScriptAction("create-virtualenv")
    def pre_create_virtualenv(self, action):
        """Register the in-tree marionette client so run_tests can import
        marionette_driver from the virtualenv."""
        dirs = self.query_abs_dirs()
        self.register_virtualenv_module(
            "marionette",
            os.path.join(dirs["abs_test_install_dir"], "marionette", "client"),
        )

    def download(self):
        """
        Download host utilities
        """
        dirs = self.query_abs_dirs()
        self.xre_path = self.download_hostutils(dirs["abs_xre_dir"])

    def install(self):
        """
        Install APKs on the device.
        """
        assert (
            self.installer_path is not None
        ), "Either add installer_path to the config or use --installer-path."
        self.install_apk(self.installer_path)
        self.info("Finished installing apps for %s" % self.device_serial)

    def run_tests(self):
        """
        Generate the PGO profile data
        """
        # Imported here rather than at module level: these come from the
        # virtualenv created by the create-virtualenv action.
        from mozhttpd import MozHttpd
        from mozprofile import Preferences
        from mozdevice import ADBDeviceFactory, ADBTimeoutError
        from six import string_types
        from marionette_driver.marionette import Marionette

        app = self.query_package_name()

        # 10.0.2.2 is the emulator's alias for the host loopback interface.
        IP = "10.0.2.2"
        PORT = 8888

        PATH_MAPPINGS = {
            "/js-input/webkit/PerformanceTests": "third_party/webkit/PerformanceTests",
        }

        dirs = self.query_abs_dirs()
        topsrcdir = dirs["abs_src_dir"]
        adb = self.query_exe("adb")

        # Serve build/pgo as the docroot, with Speedometer mapped in from
        # third_party (see PATH_MAPPINGS above).
        path_mappings = {
            k: os.path.join(topsrcdir, v) for k, v in PATH_MAPPINGS.items()
        }
        httpd = MozHttpd(
            port=PORT,
            docroot=os.path.join(topsrcdir, "build", "pgo"),
            path_mappings=path_mappings,
        )
        httpd.start(block=False)

        # Build the pref set from the in-tree "profileserver" base profiles.
        profile_data_dir = os.path.join(topsrcdir, "testing", "profiles")
        with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh:
            base_profiles = json.load(fh)["profileserver"]

        prefpaths = [
            os.path.join(profile_data_dir, profile, "user.js")
            for profile in base_profiles
        ]

        prefs = {}
        for path in prefpaths:
            prefs.update(Preferences.read_prefs(path))

        # Substitute the live server address into any string prefs that use
        # {server} / {OOP} placeholders.
        interpolation = {"server": "%s:%d" % httpd.httpd.server_address, "OOP": "false"}
        for k, v in prefs.items():
            if isinstance(v, string_types):
                v = v.format(**interpolation)
            prefs[k] = Preferences.cast(v)

        outputdir = self.config.get("output_directory", "/sdcard/pgo_profile")
        jarlog = posixpath.join(outputdir, "en-US.log")
        # %p/%m are expanded by the LLVM runtime (pid / module signature).
        profdata = posixpath.join(outputdir, "default_%p_random_%m.profraw")

        env = {}
        env["XPCOM_DEBUG_BREAK"] = "warn"
        env["MOZ_IN_AUTOMATION"] = "1"
        env["MOZ_JAR_LOG_FILE"] = jarlog
        env["LLVM_PROFILE_FILE"] = profdata

        if self.query_minidump_stackwalk():
            os.environ["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
        os.environ["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
        if not self.symbols_path:
            self.symbols_path = os.environ.get("MOZ_FETCHES_DIR")

        # Force test_root to be on the sdcard for android pgo
        # builds which fail for Android 4.3 when profiles are located
        # in /data/local/tmp/test_root with
        # E AndroidRuntime: FATAL EXCEPTION: Gecko
        # E AndroidRuntime: java.lang.IllegalArgumentException: \
        #    Profile directory must be writable if specified: /data/local/tmp/test_root/profile
        # This occurs when .can-write-sentinel is written to
        # the profile in
        # mobile/android/geckoview/src/main/java/org/mozilla/gecko/GeckoProfile.java.
        # This is not a problem on later versions of Android. This
        # over-ride of test_root should be removed when Android 4.3 is no
        # longer supported.
        sdcard_test_root = "/sdcard/test_root"
        adbdevice = ADBDeviceFactory(
            adb=adb, device="emulator-5554", test_root=sdcard_test_root
        )
        if adbdevice.test_root != sdcard_test_root:
            # If the test_root was previously set and shared
            # the initializer will not have updated the shared
            # value. Force it to match the sdcard_test_root.
            adbdevice.test_root = sdcard_test_root
        adbdevice.mkdir(outputdir, parents=True)

        try:
            # Run Fennec a first time to initialize its profile
            driver = Marionette(
                app="fennec",
                package_name=app,
                adb_path=adb,
                bin="geckoview-androidTest.apk",
                prefs=prefs,
                connect_to_running_emulator=True,
                startup_timeout=1000,
                env=env,
                symbols_path=self.symbols_path,
            )
            driver.start_session()

            # Now generate the profile and wait for it to complete
            for page in PAGES:
                driver.navigate("http://%s:%d/%s" % (IP, PORT, page))
                timeout = 2
                if "Speedometer/index.html" in page:
                    # The Speedometer test actually runs many tests internally in
                    # javascript, so it needs extra time to run through them. The
                    # emulator doesn't get very far through the whole suite, but
                    # this extra time at least lets some of them process.
                    timeout = 360
                time.sleep(timeout)

            driver.set_context("chrome")
            # Ask observers for permission to quit (fires
            # quit-application-requested), then force an eAttemptQuit so the
            # LLVM runtime flushes profile data on shutdown.
            driver.execute_script(
                """
                Components.utils.import("resource://gre/modules/Services.jsm");
                let cancelQuit = Components.classes["@mozilla.org/supports-PRBool;1"]
                    .createInstance(Components.interfaces.nsISupportsPRBool);
                Services.obs.notifyObservers(cancelQuit, "quit-application-requested", null);
                return cancelQuit.data;
            """
            )
            driver.execute_script(
                """
                Components.utils.import("resource://gre/modules/Services.jsm");
                Services.startup.quit(Ci.nsIAppStartup.eAttemptQuit)
            """
            )

            # There is a delay between execute_script() returning and the profile data
            # actually getting written out, so poll the device until we get a profile.
            for i in range(50):
                if not adbdevice.process_exist(app):
                    break
                time.sleep(2)
            else:
                raise Exception("Android App (%s) never quit" % app)

            # Pull all the profraw files and en-US.log
            adbdevice.pull(outputdir, "/builds/worker/workspace/")
        except ADBTimeoutError:
            # ADB timeouts are infrastructure problems: request a task retry.
            self.fatal(
                "INFRA-ERROR: Failed with an ADBTimeoutError",
                EXIT_STATUS_DICT[TBPL_RETRY],
            )

        profraw_files = glob.glob("/builds/worker/workspace/*.profraw")
        if not profraw_files:
            self.fatal("Could not find any profraw files in /builds/worker/workspace")
        # Merge the per-process .profraw files into a single .profdata using
        # the clang toolchain fetched into MOZ_FETCHES_DIR.
        merge_cmd = [
            os.path.join(os.environ["MOZ_FETCHES_DIR"], "clang/bin/llvm-profdata"),
            "merge",
            "-o",
            "/builds/worker/workspace/merged.profdata",
        ] + profraw_files
        rc = subprocess.call(merge_cmd)
        if rc != 0:
            self.fatal(
                "INFRA-ERROR: Failed to merge profile data. Corrupt profile?",
                EXIT_STATUS_DICT[TBPL_RETRY],
            )

        # tarfile doesn't support xz in this version of Python
        tar_cmd = [
            "tar",
            "-acvf",
            "/builds/worker/artifacts/profdata.tar.xz",
            "-C",
            "/builds/worker/workspace",
            "merged.profdata",
            "en-US.log",
        ]
        subprocess.check_call(tar_cmd)

        httpd.stop()
+
+
if __name__ == "__main__":
    # Script entry point: construct the harness and run the requested
    # actions, exiting with the appropriate automation status code.
    AndroidProfileRun().run_and_exit()
diff --git a/testing/mozharness/scripts/android_emulator_unittest.py b/testing/mozharness/scripts/android_emulator_unittest.py
new file mode 100644
index 0000000000..310887b860
--- /dev/null
+++ b/testing/mozharness/scripts/android_emulator_unittest.py
@@ -0,0 +1,533 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+from __future__ import absolute_import
+import copy
+import datetime
+import json
+import os
+import sys
+import subprocess
+
+# load modules from parent dir
+here = os.path.abspath(os.path.dirname(__file__))
+sys.path.insert(1, os.path.dirname(here))
+
+from mozharness.base.log import WARNING
+from mozharness.base.script import BaseScript, PreScriptAction
+from mozharness.mozilla.automation import TBPL_RETRY
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+
# True when running under Python 2 (mozharness still straddles py2/py3);
# used to pick the websocketprocessbridge requirements file.
PY2 = sys.version_info.major == 2
# Suite categories grouped by e10s behavior. NOTE(review): neither E10S list
# is referenced in this portion of the file — presumably consumed by shared
# harness code or further down; confirm before removing.
SUITE_DEFAULT_E10S = ["geckoview-junit", "mochitest", "reftest"]
SUITE_NO_E10S = ["cppunittest", "geckoview-junit", "xpcshell"]
# Suite categories that honor the --repeat option (see _build_command).
SUITE_REPEATABLE = ["mochitest", "reftest"]
+
+
class AndroidEmulatorTest(
    TestingMixin, BaseScript, MozbaseMixin, CodeCoverageMixin, AndroidMixin
):
    """
    A mozharness script for Android functional tests (like mochitests and reftests)
    run on an Android emulator. This script starts and manages an Android emulator
    for the duration of the required tests. This is like desktop_unittest.py, but
    for Android emulator test platforms.
    """

    # Script-specific CLI options, followed by the shared testing and
    # code-coverage option sets.
    config_options = (
        [
            [
                ["--test-suite"],
                {"action": "store", "dest": "test_suite", "default": None},
            ],
            [
                ["--total-chunk"],
                {
                    "action": "store",
                    "dest": "total_chunks",
                    "default": None,
                    "help": "Number of total chunks",
                },
            ],
            [
                ["--this-chunk"],
                {
                    "action": "store",
                    "dest": "this_chunk",
                    "default": None,
                    "help": "Number of this chunk",
                },
            ],
            [
                ["--gpu-required"],
                {
                    "action": "store_true",
                    "dest": "gpu_required",
                    "default": False,
                    "help": "Run additional verification on modified tests using gpu instances.",
                },
            ],
            [
                ["--log-raw-level"],
                {
                    "action": "store",
                    "dest": "log_raw_level",
                    "default": "info",
                    "help": "Set log level (debug|info|warning|error|critical|fatal)",
                },
            ],
            [
                ["--log-tbpl-level"],
                {
                    "action": "store",
                    "dest": "log_tbpl_level",
                    "default": "info",
                    "help": "Set log level (debug|info|warning|error|critical|fatal)",
                },
            ],
            [
                ["--enable-webrender"],
                {
                    "action": "store_true",
                    "dest": "enable_webrender",
                    "default": False,
                    "help": "Run with WebRender enabled.",
                },
            ],
            [
                ["--enable-fission"],
                {
                    "action": "store_true",
                    "dest": "enable_fission",
                    "default": False,
                    "help": "Run with Fission enabled.",
                },
            ],
            [
                ["--repeat"],
                {
                    "action": "store",
                    "type": "int",
                    "dest": "repeat",
                    "default": 0,
                    "help": "Repeat the tests the given number of times. Supported "
                    "by mochitest, reftest, crashtest, ignored otherwise.",
                },
            ],
            [
                ["--setpref"],
                {
                    "action": "append",
                    "metavar": "PREF=VALUE",
                    "dest": "extra_prefs",
                    "default": [],
                    "help": "Extra user prefs.",
                },
            ],
        ]
        + copy.deepcopy(testing_config_options)
        + copy.deepcopy(code_coverage_config_options)
    )

    def __init__(self, require_config_file=False):
        super(AndroidEmulatorTest, self).__init__(
            config_options=self.config_options,
            all_actions=[
                "clobber",
                "setup-avds",
                "download-and-extract",
                "create-virtualenv",
                "start-emulator",
                "verify-device",
                "install",
                "run-tests",
            ],
            require_config_file=require_config_file,
            config={
                "virtualenv_modules": [],
                "virtualenv_requirements": [],
                "require_test_zip": True,
            },
        )

        # these are necessary since self.config is read only
        c = self.config
        self.installer_url = c.get("installer_url")
        self.installer_path = c.get("installer_path")
        self.test_url = c.get("test_url")
        self.test_packages_url = c.get("test_packages_url")
        self.test_manifest = c.get("test_manifest")
        # None in per-test mode; then _query_suites() derives the suites.
        suite = c.get("test_suite")
        self.test_suite = suite
        self.this_chunk = c.get("this_chunk")
        self.total_chunks = c.get("total_chunks")
        self.xre_path = None
        # Fixed serial of the emulator launched by AndroidMixin.
        self.device_serial = "emulator-5554"
        self.log_raw_level = c.get("log_raw_level")
        self.log_tbpl_level = c.get("log_tbpl_level")
        self.enable_webrender = c.get("enable_webrender")
        if self.enable_webrender:
            # AndroidMixin uses this when launching the emulator. We only want
            # GLES3 if we're running WebRender
            self.use_gles3 = True
        self.enable_fission = c.get("enable_fission")
        self.extra_prefs = c.get("extra_prefs")

    def query_abs_dirs(self):
        """Return the cached dict of absolute directories, extending the
        base-class dirs with test/xre/sdk/blob-upload locations."""
        if self.abs_dirs:
            return self.abs_dirs
        abs_dirs = super(AndroidEmulatorTest, self).query_abs_dirs()
        dirs = {}
        dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
        dirs["abs_test_bin_dir"] = os.path.join(
            abs_dirs["abs_work_dir"], "tests", "bin"
        )
        dirs["abs_xre_dir"] = os.path.join(abs_dirs["abs_work_dir"], "hostutils")
        dirs["abs_modules_dir"] = os.path.join(dirs["abs_test_install_dir"], "modules")
        dirs["abs_blob_upload_dir"] = os.path.join(
            abs_dirs["abs_work_dir"], "blobber_upload_dir"
        )
        dirs["abs_mochitest_dir"] = os.path.join(
            dirs["abs_test_install_dir"], "mochitest"
        )
        dirs["abs_reftest_dir"] = os.path.join(dirs["abs_test_install_dir"], "reftest")
        dirs["abs_xpcshell_dir"] = os.path.join(
            dirs["abs_test_install_dir"], "xpcshell"
        )
        dirs["abs_avds_dir"] = os.path.join(abs_dirs["abs_work_dir"], ".android")
        # Prefer the SDK fetched by the taskcluster fetches mechanism when
        # available; fall back to one under the work dir.
        fetches_dir = os.environ.get("MOZ_FETCHES_DIR")
        if fetches_dir:
            dirs["abs_sdk_dir"] = os.path.join(fetches_dir, "android-sdk-linux")
        else:
            dirs["abs_sdk_dir"] = os.path.join(
                abs_dirs["abs_work_dir"], "android-sdk-linux"
            )

        # Only add keys the base class has not already defined.
        for key in dirs.keys():
            if key not in abs_dirs:
                abs_dirs[key] = dirs[key]
        self.abs_dirs = abs_dirs
        return self.abs_dirs

    def _query_tests_dir(self, test_suite):
        """Return the absolute directory containing the given suite's
        harness, from suite_definitions when configured, else the suite
        name itself."""
        dirs = self.query_abs_dirs()
        try:
            test_dir = self.config["suite_definitions"][test_suite]["testsdir"]
        except Exception:
            test_dir = test_suite
        return os.path.join(dirs["abs_test_install_dir"], test_dir)

    def _get_mozharness_test_paths(self, suite):
        """Return the user-requested test paths for `suite` from the
        MOZHARNESS_TEST_PATHS environment variable (JSON), or None."""
        test_paths = os.environ.get("MOZHARNESS_TEST_PATHS")
        if not test_paths:
            return

        return json.loads(test_paths).get(suite)

    def _build_command(self):
        """Assemble the harness command line for self.test_suite from
        suite_definitions, script options, and computed paths."""
        c = self.config
        dirs = self.query_abs_dirs()

        if self.test_suite not in self.config["suite_definitions"]:
            self.fatal("Key '%s' not defined in the config!" % self.test_suite)

        cmd = [
            self.query_python_path("python"),
            "-u",
            os.path.join(
                self._query_tests_dir(self.test_suite),
                self.config["suite_definitions"][self.test_suite]["run_filename"],
            ),
        ]

        raw_log_file, error_summary_file = self.get_indexed_logs(
            dirs["abs_blob_upload_dir"], self.test_suite
        )

        # Values substituted into the %(...)s placeholders of the
        # suite_definitions option templates.
        str_format_values = {
            "device_serial": self.device_serial,
            # IP address of the host as seen from the emulator
            "remote_webserver": "10.0.2.2",
            "xre_path": self.xre_path,
            "utility_path": self.xre_path,
            "http_port": "8854",  # starting http port to use for the mochitest server
            "ssl_port": "4454",  # starting ssl port to use for the server
            "certs_path": os.path.join(dirs["abs_work_dir"], "tests/certs"),
            # TestingMixin._download_and_extract_symbols() will set
            # self.symbols_path when downloading/extracting.
            "symbols_path": self.symbols_path,
            "modules_dir": dirs["abs_modules_dir"],
            "installer_path": self.installer_path,
            "raw_log_file": raw_log_file,
            "log_tbpl_level": self.log_tbpl_level,
            "log_raw_level": self.log_raw_level,
            "error_summary_file": error_summary_file,
            "xpcshell_extra": c.get("xpcshell_extra", ""),
            "gtest_dir": os.path.join(dirs["abs_test_install_dir"], "gtest"),
        }

        user_paths = self._get_mozharness_test_paths(self.test_suite)

        for option in self.config["suite_definitions"][self.test_suite]["options"]:
            opt = option.split("=")[0]
            # override configured chunk options with script args, if specified
            if opt in ("--this-chunk", "--total-chunks"):
                if (
                    user_paths
                    or getattr(self, opt.replace("-", "_").strip("_"), None) is not None
                ):
                    continue

            if "%(app)" in option:
                # only query package name if requested
                cmd.extend([option % {"app": self.query_package_name()}])
            else:
                option = option % str_format_values
                if option:
                    cmd.extend([option])

        # Map the concrete suite name back to its category for
        # repeat-support checks.
        if "mochitest" in self.test_suite:
            category = "mochitest"
        elif "reftest" in self.test_suite or "crashtest" in self.test_suite:
            category = "reftest"
        else:
            category = self.test_suite
        if c.get("repeat"):
            if category in SUITE_REPEATABLE:
                cmd.extend(["--repeat=%s" % c.get("repeat")])
            else:
                self.log("--repeat not supported in {}".format(category), level=WARNING)

        cmd.extend(["--setpref={}".format(p) for p in self.extra_prefs])

        if not (self.verify_enabled or self.per_test_coverage):
            if user_paths:
                cmd.extend(user_paths)
            # NOTE(review): this elif condition is redundant — it repeats the
            # enclosing `if` and is therefore always true here; it could be a
            # plain `else`. Flagging rather than changing behavior.
            elif not (self.verify_enabled or self.per_test_coverage):
                if self.this_chunk is not None:
                    cmd.extend(["--this-chunk", self.this_chunk])
                if self.total_chunks is not None:
                    cmd.extend(["--total-chunks", self.total_chunks])

        # Only enable WebRender if the flag is enabled. All downstream harnesses
        # are expected to force-disable WebRender if not explicitly enabled,
        # so that we don't have it accidentally getting enabled because the
        # underlying hardware running the test becomes part of the WR-qualified
        # set.
        if self.enable_webrender:
            cmd.extend(["--enable-webrender"])
        if self.enable_fission:
            cmd.extend(["--enable-fission"])

        try_options, try_tests = self.try_args(self.test_suite)
        cmd.extend(try_options)
        if not self.verify_enabled and not self.per_test_coverage:
            cmd.extend(
                self.query_tests_args(
                    self.config["suite_definitions"][self.test_suite].get("tests"),
                    None,
                    try_tests,
                )
            )

        if self.java_code_coverage_enabled:
            cmd.extend(
                [
                    "--enable-coverage",
                    "--coverage-output-dir",
                    self.java_coverage_output_dir,
                ]
            )

        return cmd

    def _query_suites(self):
        """Return a list of (per_test_suite, android_suite) pairs to run.

        With --test-suite, that single suite is returned; otherwise
        (per-test mode) the supported suites are derived per category.
        """
        if self.test_suite:
            return [(self.test_suite, self.test_suite)]
        # per-test mode: determine test suites to run

        # For each test category, provide a list of supported sub-suites and a mapping
        # between the per_test_base suite name and the android suite name.
        # NOTE(review): the local name "all" shadows the builtin all() within
        # this method.
        all = [
            (
                "mochitest",
                {
                    "mochitest-plain": "mochitest-plain",
                    "mochitest-media": "mochitest-media",
                    "mochitest-plain-gpu": "mochitest-plain-gpu",
                },
            ),
            (
                "reftest",
                {
                    "reftest": "reftest",
                    "crashtest": "crashtest",
                    "jsreftest": "jsreftest",
                },
            ),
            ("xpcshell", {"xpcshell": "xpcshell"}),
        ]
        suites = []
        for (category, all_suites) in all:
            cat_suites = self.query_per_test_category_suites(category, all_suites)
            for k in cat_suites.keys():
                suites.append((k, cat_suites[k]))
        return suites

    def _query_suite_categories(self):
        """Return the suite categories to download test packages for."""
        if self.test_suite:
            categories = [self.test_suite]
        else:
            # per-test mode
            categories = ["mochitest", "reftest", "xpcshell"]
        return categories

    ##########################################
    # Actions for AndroidEmulatorTest        #
    ##########################################

    def preflight_install(self):
        # in the base class, this checks for mozinstall, but we don't use it
        pass

    @PreScriptAction("create-virtualenv")
    def pre_create_virtualenv(self, action):
        """Register extra virtualenv requirements needed by specific suites
        (currently only mochitest-media's websocketprocessbridge)."""
        dirs = self.query_abs_dirs()
        requirements = None
        suites = self._query_suites()
        if PY2:
            wspb_requirements = "websocketprocessbridge_requirements.txt"
        else:
            wspb_requirements = "websocketprocessbridge_requirements_3.txt"
        if ("mochitest-media", "mochitest-media") in suites:
            # mochitest-media is the only thing that needs this
            requirements = os.path.join(
                dirs["abs_mochitest_dir"],
                "websocketprocessbridge",
                wspb_requirements,
            )
        if requirements:
            self.register_virtualenv_module(requirements=[requirements], two_pass=True)

    def download_and_extract(self):
        """
        Download and extract product APK, tests.zip, and host utils.
        """
        super(AndroidEmulatorTest, self).download_and_extract(
            suite_categories=self._query_suite_categories()
        )
        dirs = self.query_abs_dirs()
        self.xre_path = self.download_hostutils(dirs["abs_xre_dir"])

    def install(self):
        """
        Install APKs on the device.
        """
        # Suites may opt out of installation via "install": False in their
        # suite definition.
        install_needed = (not self.test_suite) or self.config["suite_definitions"][
            self.test_suite
        ].get("install")
        if install_needed is False:
            self.info("Skipping apk installation for %s" % self.test_suite)
            return
        assert (
            self.installer_path is not None
        ), "Either add installer_path to the config or use --installer-path."
        self.install_apk(self.installer_path)
        self.info("Finished installing apps for %s" % self.device_serial)

    def run_tests(self):
        """
        Run the tests
        """
        self.start_time = datetime.datetime.now()
        # Per-test mode stops launching new tests once this budget is spent,
        # so the task produces partial results instead of timing out.
        max_per_test_time = datetime.timedelta(minutes=60)

        per_test_args = []
        suites = self._query_suites()
        minidump = self.query_minidump_stackwalk()
        for (per_test_suite, suite) in suites:
            self.test_suite = suite

            try:
                cwd = self._query_tests_dir(self.test_suite)
            except Exception:
                self.fatal("Don't know how to run --test-suite '%s'!" % self.test_suite)

            env = self.query_env()
            if minidump:
                env["MINIDUMP_STACKWALK"] = minidump
            env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
            env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
            env["RUST_BACKTRACE"] = "full"
            if self.config["nodejs_path"]:
                env["MOZ_NODE_PATH"] = self.config["nodejs_path"]

            summary = {}
            for per_test_args in self.query_args(per_test_suite):
                if (datetime.datetime.now() - self.start_time) > max_per_test_time:
                    # Running tests has run out of time. That is okay! Stop running
                    # them so that a task timeout is not triggered, and so that
                    # (partial) results are made available in a timely manner.
                    self.info(
                        "TinderboxPrint: Running tests took too long: "
                        "Not all tests were executed.<br/>"
                    )
                    # Signal per-test time exceeded, to break out of suites and
                    # suite categories loops also.
                    return

                cmd = self._build_command()
                final_cmd = copy.copy(cmd)
                if len(per_test_args) > 0:
                    # in per-test mode, remove any chunk arguments from command
                    # NOTE(review): removing from final_cmd while iterating it
                    # skips the element after each removal; iterating a copy
                    # (final_cmd[:]) would be safer. Flagging, not changing.
                    for arg in final_cmd:
                        if "total-chunk" in arg or "this-chunk" in arg:
                            final_cmd.remove(arg)
                    final_cmd.extend(per_test_args)

                self.info("Running the command %s" % subprocess.list2cmdline(final_cmd))
                self.info("##### %s log begins" % self.test_suite)

                suite_category = self.test_suite
                parser = self.get_test_output_parser(
                    suite_category,
                    config=self.config,
                    log_obj=self.log_obj,
                    error_list=[],
                )
                self.run_command(final_cmd, cwd=cwd, env=env, output_parser=parser)
                # Return code is passed as 0: status is derived entirely from
                # the parsed harness output, not the process exit code.
                tbpl_status, log_level, summary = parser.evaluate_parser(
                    0, previous_summary=summary
                )
                parser.append_tinderboxprint_line(self.test_suite)

                self.info("##### %s log ends" % self.test_suite)

                if len(per_test_args) > 0:
                    self.record_status(tbpl_status, level=log_level)
                    self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
                    if tbpl_status == TBPL_RETRY:
                        self.info("Per-test run abandoned due to RETRY status")
                        return
                else:
                    self.record_status(tbpl_status, level=log_level)
                    self.log(
                        "The %s suite: %s ran with return status: %s"
                        % (suite_category, suite, tbpl_status),
                        level=log_level,
                    )
+
+
if __name__ == "__main__":
    # Script entry point: construct the harness and run the requested
    # actions, exiting with the appropriate automation status code.
    AndroidEmulatorTest().run_and_exit()
diff --git a/testing/mozharness/scripts/android_hardware_unittest.py b/testing/mozharness/scripts/android_hardware_unittest.py
new file mode 100644
index 0000000000..f31350e374
--- /dev/null
+++ b/testing/mozharness/scripts/android_hardware_unittest.py
@@ -0,0 +1,474 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+from __future__ import absolute_import
+import copy
+import datetime
+import json
+import os
+import sys
+import subprocess
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.log import WARNING
+from mozharness.base.script import BaseScript, PreScriptAction
+from mozharness.mozilla.automation import TBPL_RETRY
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.testing.codecoverage import CodeCoverageMixin
+
+# True when running under Python 2; used to select version-specific
+# pip requirements in pre_create_virtualenv.
+PY2 = sys.version_info.major == 2
+# Suite categories that default to e10s / that never run with e10s.
+# NOTE(review): only SUITE_REPEATABLE is referenced in this script; the two
+# e10s lists are presumably kept for parity with the emulator harness or
+# consumed by related tooling — confirm before removing.
+SUITE_DEFAULT_E10S = ["geckoview-junit", "mochitest", "reftest"]
+SUITE_NO_E10S = ["cppunittest", "xpcshell"]
+# Suite categories that honor the --repeat option (checked in _build_command).
+SUITE_REPEATABLE = ["mochitest", "reftest"]
+
+
+class AndroidHardwareTest(
+ TestingMixin, BaseScript, MozbaseMixin, CodeCoverageMixin, AndroidMixin
+):
+ config_options = [
+ [["--test-suite"], {"action": "store", "dest": "test_suite", "default": None}],
+ [
+ ["--adb-path"],
+ {
+ "action": "store",
+ "dest": "adb_path",
+ "default": None,
+ "help": "Path to adb",
+ },
+ ],
+ [
+ ["--total-chunk"],
+ {
+ "action": "store",
+ "dest": "total_chunks",
+ "default": None,
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--this-chunk"],
+ {
+ "action": "store",
+ "dest": "this_chunk",
+ "default": None,
+ "help": "Number of this chunk",
+ },
+ ],
+ [
+ ["--log-raw-level"],
+ {
+ "action": "store",
+ "dest": "log_raw_level",
+ "default": "info",
+ "help": "Set log level (debug|info|warning|error|critical|fatal)",
+ },
+ ],
+ [
+ ["--log-tbpl-level"],
+ {
+ "action": "store",
+ "dest": "log_tbpl_level",
+ "default": "info",
+ "help": "Set log level (debug|info|warning|error|critical|fatal)",
+ },
+ ],
+ [
+ ["--enable-webrender"],
+ {
+ "action": "store_true",
+ "dest": "enable_webrender",
+ "default": False,
+ "help": "Run with WebRender enabled.",
+ },
+ ],
+ [
+ ["--enable-fission"],
+ {
+ "action": "store_true",
+ "dest": "enable_fission",
+ "default": False,
+ "help": "Run with Fission enabled.",
+ },
+ ],
+ [
+ ["--repeat"],
+ {
+ "action": "store",
+ "type": "int",
+ "dest": "repeat",
+ "default": 0,
+ "help": "Repeat the tests the given number of times. Supported "
+ "by mochitest, reftest, crashtest, ignored otherwise.",
+ },
+ ],
+ [
+ [
+ "--setpref",
+ ],
+ {
+ "action": "append",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ ] + copy.deepcopy(testing_config_options)
+
+ def __init__(self, require_config_file=False):
+ super(AndroidHardwareTest, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "verify-device",
+ "install",
+ "run-tests",
+ ],
+ require_config_file=require_config_file,
+ config={
+ "virtualenv_modules": [],
+ "virtualenv_requirements": [],
+ "require_test_zip": True,
+ # IP address of the host as seen from the device.
+ "remote_webserver": os.environ["HOST_IP"],
+ },
+ )
+
+ # these are necessary since self.config is read only
+ c = self.config
+ self.installer_url = c.get("installer_url")
+ self.installer_path = c.get("installer_path")
+ self.test_url = c.get("test_url")
+ self.test_packages_url = c.get("test_packages_url")
+ self.test_manifest = c.get("test_manifest")
+ suite = c.get("test_suite")
+ self.test_suite = suite
+ self.this_chunk = c.get("this_chunk")
+ self.total_chunks = c.get("total_chunks")
+ self.xre_path = None
+ self.log_raw_level = c.get("log_raw_level")
+ self.log_tbpl_level = c.get("log_tbpl_level")
+ self.enable_webrender = c.get("enable_webrender")
+ self.enable_fission = c.get("enable_fission")
+ self.extra_prefs = c.get("extra_prefs")
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(AndroidHardwareTest, self).query_abs_dirs()
+ dirs = {}
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ dirs["abs_test_bin_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "tests", "bin"
+ )
+ dirs["abs_xre_dir"] = os.path.join(abs_dirs["abs_work_dir"], "hostutils")
+ dirs["abs_modules_dir"] = os.path.join(dirs["abs_test_install_dir"], "modules")
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ dirs["abs_mochitest_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "mochitest"
+ )
+ dirs["abs_reftest_dir"] = os.path.join(dirs["abs_test_install_dir"], "reftest")
+ dirs["abs_xpcshell_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "xpcshell"
+ )
+
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def _query_tests_dir(self):
+ dirs = self.query_abs_dirs()
+ try:
+ test_dir = self.config["suite_definitions"][self.test_suite]["testsdir"]
+ except Exception:
+ test_dir = self.test_suite
+ return os.path.join(dirs["abs_test_install_dir"], test_dir)
+
+ def _build_command(self):
+ c = self.config
+ dirs = self.query_abs_dirs()
+
+ if self.test_suite not in self.config["suite_definitions"]:
+ self.fatal("Key '%s' not defined in the config!" % self.test_suite)
+
+ cmd = [
+ self.query_python_path("python"),
+ "-u",
+ os.path.join(
+ self._query_tests_dir(),
+ self.config["suite_definitions"][self.test_suite]["run_filename"],
+ ),
+ ]
+
+ raw_log_file, error_summary_file = self.get_indexed_logs(
+ dirs["abs_blob_upload_dir"], self.test_suite
+ )
+
+ str_format_values = {
+ "device_serial": self.device_serial,
+ "remote_webserver": c["remote_webserver"],
+ "xre_path": self.xre_path,
+ "utility_path": self.xre_path,
+ "http_port": "8854", # starting http port to use for the mochitest server
+ "ssl_port": "4454", # starting ssl port to use for the server
+ "certs_path": os.path.join(dirs["abs_work_dir"], "tests/certs"),
+ # TestingMixin._download_and_extract_symbols() will set
+ # self.symbols_path when downloading/extracting.
+ "symbols_path": self.symbols_path,
+ "modules_dir": dirs["abs_modules_dir"],
+ "installer_path": self.installer_path,
+ "raw_log_file": raw_log_file,
+ "log_tbpl_level": self.log_tbpl_level,
+ "log_raw_level": self.log_raw_level,
+ "error_summary_file": error_summary_file,
+ "xpcshell_extra": c.get("xpcshell_extra", ""),
+ }
+
+ user_paths = json.loads(os.environ.get("MOZHARNESS_TEST_PATHS", '""'))
+
+ for option in self.config["suite_definitions"][self.test_suite]["options"]:
+ opt = option.split("=")[0]
+ # override configured chunk options with script args, if specified
+ if opt in ("--this-chunk", "--total-chunks"):
+ if (
+ user_paths
+ or getattr(self, opt.replace("-", "_").strip("_"), None) is not None
+ ):
+ continue
+
+ if "%(app)" in option:
+ # only query package name if requested
+ cmd.extend([option % {"app": self.query_package_name()}])
+ else:
+ option = option % str_format_values
+ if option:
+ cmd.extend([option])
+
+ if user_paths:
+ if self.test_suite in user_paths:
+ cmd.extend(user_paths[self.test_suite])
+ elif not self.verify_enabled:
+ if self.this_chunk is not None:
+ cmd.extend(["--this-chunk", self.this_chunk])
+ if self.total_chunks is not None:
+ cmd.extend(["--total-chunks", self.total_chunks])
+
+ if "mochitest" in self.test_suite:
+ category = "mochitest"
+ elif "reftest" in self.test_suite or "crashtest" in self.test_suite:
+ category = "reftest"
+ else:
+ category = self.test_suite
+ if c.get("repeat"):
+ if category in SUITE_REPEATABLE:
+ cmd.extend(["--repeat=%s" % c.get("repeat")])
+ else:
+ self.log("--repeat not supported in {}".format(category), level=WARNING)
+
+ # Only enable WebRender if the flag is enabled. All downstream harnesses
+ # are expected to force-disable WebRender if not explicitly enabled,
+ # so that we don't have it accidentally getting enabled because the
+ # underlying hardware running the test becomes part of the WR-qualified
+ # set.
+ if self.enable_webrender:
+ cmd.extend(["--enable-webrender"])
+ if self.enable_fission:
+ cmd.extend(["--enable-fission"])
+
+ cmd.extend(["--setpref={}".format(p) for p in self.extra_prefs])
+
+ try_options, try_tests = self.try_args(self.test_suite)
+ cmd.extend(try_options)
+ if not self.verify_enabled and not self.per_test_coverage:
+ cmd.extend(
+ self.query_tests_args(
+ self.config["suite_definitions"][self.test_suite].get("tests"),
+ None,
+ try_tests,
+ )
+ )
+
+ return cmd
+
+ def _query_suites(self):
+ if self.test_suite:
+ return [(self.test_suite, self.test_suite)]
+ # per-test mode: determine test suites to run
+ all = [
+ (
+ "mochitest",
+ {
+ "mochitest-plain": "mochitest-plain",
+ "mochitest-plain-gpu": "mochitest-plain-gpu",
+ },
+ ),
+ ("reftest", {"reftest": "reftest", "crashtest": "crashtest"}),
+ ("xpcshell", {"xpcshell": "xpcshell"}),
+ ]
+ suites = []
+ for (category, all_suites) in all:
+ cat_suites = self.query_per_test_category_suites(category, all_suites)
+ for k in cat_suites.keys():
+ suites.append((k, cat_suites[k]))
+ return suites
+
+ def _query_suite_categories(self):
+ if self.test_suite:
+ categories = [self.test_suite]
+ else:
+ # per-test mode
+ categories = ["mochitest", "reftest", "xpcshell"]
+ return categories
+
+ ##########################################
+ # Actions for AndroidHardwareTest #
+ ##########################################
+
+ def preflight_install(self):
+ # in the base class, this checks for mozinstall, but we don't use it
+ pass
+
+ @PreScriptAction("create-virtualenv")
+ def pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+ requirements = None
+ suites = self._query_suites()
+ # mochitest is the only thing that needs this
+ if PY2:
+ wspb_requirements = "websocketprocessbridge_requirements.txt"
+ else:
+ wspb_requirements = "websocketprocessbridge_requirements_3.txt"
+ if ("mochitest-media", "mochitest-media") in suites:
+ # mochitest-media is the only thing that needs this
+ requirements = os.path.join(
+ dirs["abs_mochitest_dir"],
+ "websocketprocessbridge",
+ wspb_requirements,
+ )
+ if requirements:
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
+
+ def download_and_extract(self):
+ """
+ Download and extract product APK, tests.zip, and host utils.
+ """
+ super(AndroidHardwareTest, self).download_and_extract(
+ suite_categories=self._query_suite_categories()
+ )
+ dirs = self.query_abs_dirs()
+ self.xre_path = self.download_hostutils(dirs["abs_xre_dir"])
+
+ def install(self):
+ """
+ Install APKs on the device.
+ """
+ install_needed = (not self.test_suite) or self.config["suite_definitions"][
+ self.test_suite
+ ].get("install")
+ if install_needed is False:
+ self.info("Skipping apk installation for %s" % self.test_suite)
+ return
+ assert (
+ self.installer_path is not None
+ ), "Either add installer_path to the config or use --installer-path."
+ self.uninstall_apk()
+ self.install_apk(self.installer_path)
+ self.info("Finished installing apps for %s" % self.device_name)
+
+ def run_tests(self):
+ """
+ Run the tests
+ """
+ self.start_time = datetime.datetime.now()
+ max_per_test_time = datetime.timedelta(minutes=60)
+
+ per_test_args = []
+ suites = self._query_suites()
+ minidump = self.query_minidump_stackwalk()
+ for (per_test_suite, suite) in suites:
+ self.test_suite = suite
+
+ try:
+ cwd = self._query_tests_dir()
+ except Exception:
+ self.fatal("Don't know how to run --test-suite '%s'!" % self.test_suite)
+ env = self.query_env()
+ if minidump:
+ env["MINIDUMP_STACKWALK"] = minidump
+ env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "full"
+
+ summary = None
+ for per_test_args in self.query_args(per_test_suite):
+ if (datetime.datetime.now() - self.start_time) > max_per_test_time:
+ # Running tests has run out of time. That is okay! Stop running
+ # them so that a task timeout is not triggered, and so that
+ # (partial) results are made available in a timely manner.
+ self.info(
+ "TinderboxPrint: Running tests took too long: "
+ "Not all tests were executed.<br/>"
+ )
+ # Signal per-test time exceeded, to break out of suites and
+ # suite categories loops also.
+ return
+
+ cmd = self._build_command()
+ final_cmd = copy.copy(cmd)
+ if len(per_test_args) > 0:
+ # in per-test mode, remove any chunk arguments from command
+ for arg in final_cmd:
+ if "total-chunk" in arg or "this-chunk" in arg:
+ final_cmd.remove(arg)
+ final_cmd.extend(per_test_args)
+
+ self.info(
+ "Running on %s the command %s"
+ % (self.device_name, subprocess.list2cmdline(final_cmd))
+ )
+ self.info("##### %s log begins" % self.test_suite)
+
+ suite_category = self.test_suite
+ parser = self.get_test_output_parser(
+ suite_category,
+ config=self.config,
+ log_obj=self.log_obj,
+ error_list=[],
+ )
+ self.run_command(final_cmd, cwd=cwd, env=env, output_parser=parser)
+ tbpl_status, log_level, summary = parser.evaluate_parser(0, summary)
+ parser.append_tinderboxprint_line(self.test_suite)
+
+ self.info("##### %s log ends" % self.test_suite)
+
+ if len(per_test_args) > 0:
+ self.record_status(tbpl_status, level=log_level)
+ self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
+ if tbpl_status == TBPL_RETRY:
+ self.info("Per-test run abandoned due to RETRY status")
+ return
+ else:
+ self.record_status(tbpl_status, level=log_level)
+ self.log(
+ "The %s suite: %s ran with return status: %s"
+ % (suite_category, suite, tbpl_status),
+ level=log_level,
+ )
+
+
+if __name__ == "__main__":
+ test = AndroidHardwareTest()
+ test.run_and_exit()
diff --git a/testing/mozharness/scripts/android_wrench.py b/testing/mozharness/scripts/android_wrench.py
new file mode 100644
index 0000000000..be22d403f1
--- /dev/null
+++ b/testing/mozharness/scripts/android_wrench.py
@@ -0,0 +1,256 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+from __future__ import absolute_import
+import datetime
+import os
+import subprocess
+import sys
+import time
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.script import BaseScript
+from mozharness.mozilla.automation import (
+ EXIT_STATUS_DICT,
+ TBPL_FAILURE,
+)
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.testbase import TestingMixin
+
+
+class AndroidWrench(TestingMixin, BaseScript, MozbaseMixin, AndroidMixin):
+ def __init__(self, require_config_file=False):
+ # code in BaseScript.__init__ iterates all the properties to attach
+ # pre- and post-flight listeners, so we need _is_emulator be defined
+ # before that happens. Doesn't need to be a real value though.
+ self._is_emulator = None
+
+ super(AndroidWrench, self).__init__()
+ if self.device_serial is None:
+ # Running on an emulator.
+ self._is_emulator = True
+ self.device_serial = "emulator-5554"
+ self.use_gles3 = True
+ else:
+ # Running on a device, ensure self.is_emulator returns False.
+ # The adb binary is preinstalled on the bitbar image and is
+ # already on the $PATH.
+ self._is_emulator = False
+ self._adb_path = "adb"
+ self._errored = False
+
+ @property
+ def is_emulator(self):
+ """Overrides the is_emulator property on AndroidMixin."""
+ if self._is_emulator is None:
+ self._is_emulator = self.device_serial is None
+ return self._is_emulator
+
+ def activate_virtualenv(self):
+ """Overrides the method on AndroidMixin to be a no-op, because the
+ setup for wrench doesn't require a special virtualenv."""
+ pass
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+
+ abs_dirs = {}
+
+ abs_dirs["abs_work_dir"] = os.path.expanduser("~/.wrench")
+ if os.environ.get("MOZ_AUTOMATION", "0") == "1":
+ # In automation use the standard work dir if there is one
+ parent_abs_dirs = super(AndroidWrench, self).query_abs_dirs()
+ if "abs_work_dir" in parent_abs_dirs:
+ abs_dirs["abs_work_dir"] = parent_abs_dirs["abs_work_dir"]
+
+ abs_dirs["abs_avds_dir"] = os.path.join(abs_dirs["abs_work_dir"], "avds")
+ abs_dirs["abs_blob_upload_dir"] = os.path.join(abs_dirs["abs_work_dir"], "logs")
+ abs_dirs["abs_apk_path"] = os.environ.get(
+ "WRENCH_APK", "gfx/wr/target/android-artifacts/debug/apk/wrench.apk"
+ )
+ abs_dirs["abs_reftests_path"] = os.environ.get(
+ "WRENCH_REFTESTS", "gfx/wr/wrench/reftests"
+ )
+ if os.environ.get("MOZ_AUTOMATION", "0") == "1":
+ fetches_dir = os.environ.get("MOZ_FETCHES_DIR")
+ if self.is_emulator and fetches_dir:
+ abs_dirs["abs_sdk_dir"] = os.path.join(fetches_dir, "android-sdk-linux")
+ else:
+ abs_dirs["abs_sdk_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "android-sdk-linux"
+ )
+ else:
+ mozbuild_path = os.environ.get(
+ "MOZBUILD_STATE_PATH", os.path.expanduser("~/.mozbuild")
+ )
+ mozbuild_sdk = os.environ.get(
+ "ANDROID_SDK_HOME", os.path.join(mozbuild_path, "android-sdk-linux")
+ )
+ abs_dirs["abs_sdk_dir"] = mozbuild_sdk
+
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def logcat_start(self):
+ """Overrides logcat_start in android.py - ensures any pre-existing logcat
+ is cleared before starting to record the new logcat. This is helpful
+ when running multiple times in a local emulator."""
+ logcat_cmd = [self.adb_path, "-s", self.device_serial, "logcat", "-c"]
+ self.info(" ".join(logcat_cmd))
+ subprocess.check_call(logcat_cmd)
+ super(AndroidWrench, self).logcat_start()
+
+ def wait_until_process_done(self, process_name, timeout):
+ """Waits until the specified process has exited. Polls the process list
+ every 5 seconds until the process disappears.
+
+ :param process_name: string containing the package name of the
+ application.
+ :param timeout: integer specifying the maximum time in seconds
+ to wait for the application to finish.
+ :returns: boolean - True if the process exited within the indicated
+ timeout, False if the process had not exited by the timeout.
+ """
+ end_time = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
+ while self.device.process_exist(process_name, timeout=timeout):
+ if datetime.datetime.now() > end_time:
+ return False
+ time.sleep(5)
+
+ return True
+
+ def setup_sdcard(self):
+ # Note that we hard-code /sdcard/wrench as the path here, rather than
+ # using something like self.device.test_root, because it needs to be
+ # kept in sync with the path hard-coded inside the wrench source code.
+ self.device.rm("/sdcard/wrench", recursive=True, force=True)
+ self.device.mkdir("/sdcard/wrench", parents=True)
+ self.device.push(
+ self.query_abs_dirs()["abs_reftests_path"], "/sdcard/wrench/reftests"
+ )
+ args_file = os.path.join(self.query_abs_dirs()["abs_work_dir"], "wrench_args")
+ with open(args_file, "w") as argfile:
+ if self.is_emulator:
+ argfile.write("env: WRENCH_REFTEST_CONDITION_EMULATOR=1\n")
+ else:
+ argfile.write("env: WRENCH_REFTEST_CONDITION_DEVICE=1\n")
+ argfile.write("reftest")
+ self.device.push(args_file, "/sdcard/wrench/args")
+
+ def run_tests(self):
+ self.timed_screenshots(None)
+ self.device.launch_application(
+ app_name="org.mozilla.wrench",
+ activity_name="android.app.NativeActivity",
+ intent=None,
+ )
+ self.info("App launched")
+ done = self.wait_until_process_done("org.mozilla.wrench", timeout=60 * 30)
+ if not done:
+ self._errored = True
+ self.error("Wrench still running after timeout")
+
+ def scrape_logcat(self):
+ """Wrench will dump the test output to logcat, but for convenience we
+ want it to show up in the main log. So we scrape it out of the logcat
+ and dump it to our own log. Note that all output from wrench goes
+ through the cargo-apk glue stuff, which uses the RustAndroidGlueStdouterr
+ tag on the output. Also it limits the line length to 512 bytes
+ (including the null terminator). For reftest unexpected-fail output
+ this means that the base64 image dump gets wrapped over multiple
+ lines, so part of what this function does is unwrap that so that the
+ resulting log is readable by the reftest analyzer."""
+
+ with open(self.logcat_path(), "r") as f:
+ self.info("=== scraped logcat output ===")
+ tag = "RustAndroidGlueStdouterr: "
+ long_line = None
+ for line in f:
+ tag_index = line.find(tag)
+ if tag_index == -1:
+ # not a line we care about
+ continue
+ line = line[tag_index + len(tag) :].rstrip()
+ if (
+ long_line is None
+ and "REFTEST " not in line
+ and "panicked" not in line
+ ):
+ # non-interesting line
+ continue
+ if long_line is not None:
+ # continuation of a wrapped line
+ long_line += line
+ if len(line) >= 511:
+ if long_line is None:
+ # start of a new long line
+ long_line = line
+ # else "middle" of a long line that keeps going to the next line
+ continue
+ # this line doesn't wrap over to the next, so we can
+ # print it
+ if long_line is not None:
+ line = long_line
+ long_line = None
+ if "UNEXPECTED-FAIL" in line or "panicked" in line:
+ self._errored = True
+ self.error(line)
+ else:
+ self.info(line)
+ self.info("=== end scraped logcat output ===")
+ self.info("(see logcat artifact for full logcat")
+
+ def setup_emulator(self):
+ # Running setup_avds will clobber the existing AVD and redownload it.
+ # For local testing that's kinda expensive, so we omit that if we
+ # already have that dir.
+ if not os.path.exists(self.query_abs_dirs()["abs_avds_dir"]):
+ self.setup_avds()
+
+ sdk_path = self.query_abs_dirs()["abs_sdk_dir"]
+ if not os.path.exists(sdk_path):
+ self.error("Unable to find android SDK at %s" % sdk_path)
+ return
+ if os.environ.get("MOZ_AUTOMATION", "0") == "1":
+ self.start_emulator()
+ else:
+ # Can't use start_emulator because it tries to download a non-public
+ # artifact. Instead we just manually run the launch.
+ self._launch_emulator()
+
+ def do_test(self):
+ if self.is_emulator:
+ self.setup_emulator()
+
+ self.verify_device()
+ self.info("Logging device properties...")
+ self.info(self.shell_output("getprop"))
+ self.info("Installing APK...")
+ self.install_apk(self.query_abs_dirs()["abs_apk_path"], replace=True)
+ self.info("Setting up SD card...")
+ self.setup_sdcard()
+ self.info("Running tests...")
+ self.run_tests()
+ self.info("Tests done; parsing logcat...")
+ self.logcat_stop()
+ self.scrape_logcat()
+ self.info("All done!")
+
+ def check_errors(self):
+ if self._errored:
+ self.info("Errors encountered, terminating with error code...")
+ exit(EXIT_STATUS_DICT[TBPL_FAILURE])
+
+
+if __name__ == "__main__":
+ test = AndroidWrench()
+ test.do_test()
+ test.check_errors()
diff --git a/testing/mozharness/scripts/awsy_script.py b/testing/mozharness/scripts/awsy_script.py
new file mode 100644
index 0000000000..288c17aae1
--- /dev/null
+++ b/testing/mozharness/scripts/awsy_script.py
@@ -0,0 +1,337 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""
+run awsy tests in a virtualenv
+"""
+
+from __future__ import absolute_import
+import copy
+import json
+import os
+import re
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+import mozinfo
+
+import mozharness
+
+from mozharness.base.script import PreScriptAction
+from mozharness.base.log import INFO, ERROR
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.tooltool import TooltoolMixin
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+
+# True when running under Python 2; selects the file mode used when writing
+# runtime-testvars.json.
+PY2 = sys.version_info.major == 2
+# Root of the mozharness scripts checkout, derived from the installed
+# mozharness package location; exported to mitmproxy via SCRIPTSPATH.
+scripts_path = os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__)))
+# Bundled external tools directory; exported via EXTERNALTOOLSPATH for tp6.
+external_tools_path = os.path.join(scripts_path, "external_tools")
+
+
+class AWSY(TestingMixin, MercurialScript, TooltoolMixin, CodeCoverageMixin):
+ config_options = (
+ [
+ [
+ ["--disable-e10s"],
+ {
+ "action": "store_false",
+ "dest": "e10s",
+ "default": True,
+ "help": "Run tests without multiple processes (e10s). (Desktop builds only)",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ [
+ ["--enable-webrender"],
+ {
+ "action": "store_true",
+ "dest": "enable_webrender",
+ "default": False,
+ "help": "Enable the WebRender compositor in Gecko.",
+ },
+ ],
+ [
+ ["--base"],
+ {
+ "action": "store_true",
+ "dest": "test_about_blank",
+ "default": False,
+ "help": "Runs the about:blank base case memory test.",
+ },
+ ],
+ [
+ ["--dmd"],
+ {
+ "action": "store_true",
+ "dest": "dmd",
+ "default": False,
+ "help": "Runs tests with DMD enabled.",
+ },
+ ],
+ [
+ ["--tp6"],
+ {
+ "action": "store_true",
+ "dest": "tp6",
+ "default": False,
+ "help": "Runs tests with the tp6 pageset.",
+ },
+ ],
+ ]
+ + testing_config_options
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
+ error_list = [
+ {"regex": re.compile(r"""(TEST-UNEXPECTED|PROCESS-CRASH)"""), "level": ERROR},
+ ]
+
+ def __init__(self, **kwargs):
+
+ kwargs.setdefault("config_options", self.config_options)
+ kwargs.setdefault(
+ "all_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault(
+ "default_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault("config", {})
+ super(AWSY, self).__init__(**kwargs)
+ self.installer_url = self.config.get("installer_url")
+ self.tests = None
+
+ self.testdir = self.query_abs_dirs()["abs_test_install_dir"]
+ self.awsy_path = os.path.join(self.testdir, "awsy")
+ self.awsy_libdir = os.path.join(self.awsy_path, "awsy")
+ self.webroot_dir = os.path.join(self.testdir, "html")
+ self.results_dir = os.path.join(self.testdir, "results")
+ self.binary_path = self.config.get("binary_path")
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(AWSY, self).query_abs_dirs()
+
+ dirs = {}
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ abs_dirs.update(dirs)
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def download_and_extract(self, extract_dirs=None, suite_categories=None):
+ ret = super(AWSY, self).download_and_extract(
+ suite_categories=["common", "awsy"]
+ )
+ return ret
+
+ @PreScriptAction("create-virtualenv")
+ def _pre_create_virtualenv(self, action):
+ requirements_file = os.path.join(
+ self.testdir, "config", "marionette_requirements.txt"
+ )
+
+ # marionette_requirements.txt must use the legacy resolver until bug 1684969 is resolved.
+ self.register_virtualenv_module(
+ requirements=[requirements_file], two_pass=True, legacy_resolver=True
+ )
+
+ self.register_virtualenv_module("awsy", self.awsy_path)
+
+ def populate_webroot(self):
+ """Populate the production test machines' webroots"""
+ self.info("Downloading pageset with tooltool...")
+ manifest_file = os.path.join(self.awsy_path, "tp5n-pageset.manifest")
+ page_load_test_dir = os.path.join(self.webroot_dir, "page_load_test")
+ if not os.path.isdir(page_load_test_dir):
+ self.mkdir_p(page_load_test_dir)
+ self.tooltool_fetch(
+ manifest_file,
+ output_dir=page_load_test_dir,
+ cache=self.config.get("tooltool_cache"),
+ )
+ archive = os.path.join(page_load_test_dir, "tp5n.zip")
+ unzip = self.query_exe("unzip")
+ unzip_cmd = [unzip, "-q", "-o", archive, "-d", page_load_test_dir]
+ self.run_command(unzip_cmd, halt_on_failure=False)
+ self.run_command("ls %s" % page_load_test_dir)
+
+ def run_tests(self, args=None, **kw):
+ """
+ AWSY test should be implemented here
+ """
+ dirs = self.abs_dirs
+ env = {}
+ error_summary_file = os.path.join(
+ dirs["abs_blob_upload_dir"], "marionette_errorsummary.log"
+ )
+
+ runtime_testvars = {
+ "webRootDir": self.webroot_dir,
+ "resultsDir": self.results_dir,
+ "bin": self.binary_path,
+ }
+
+ # Check if this is a DMD build and if so enable it.
+ dmd_enabled = False
+ dmd_py_lib_dir = os.path.dirname(self.binary_path)
+ if mozinfo.os == "mac":
+ # On mac binary is in MacOS and dmd.py is in Resources, ie:
+ # Name.app/Contents/MacOS/libdmd.dylib
+ # Name.app/Contents/Resources/dmd.py
+ dmd_py_lib_dir = os.path.join(dmd_py_lib_dir, "../Resources/")
+
+ dmd_path = os.path.join(dmd_py_lib_dir, "dmd.py")
+ if self.config["dmd"] and os.path.isfile(dmd_path):
+ dmd_enabled = True
+ runtime_testvars["dmd"] = True
+
+ # Allow the child process to import dmd.py
+ python_path = os.environ.get("PYTHONPATH")
+
+ if python_path:
+ os.environ["PYTHONPATH"] = "%s%s%s" % (
+ python_path,
+ os.pathsep,
+ dmd_py_lib_dir,
+ )
+ else:
+ os.environ["PYTHONPATH"] = dmd_py_lib_dir
+
+ env["DMD"] = "--mode=dark-matter --stacks=full"
+
+ runtime_testvars["tp6"] = self.config["tp6"]
+ if self.config["tp6"]:
+ # mitmproxy needs path to mozharness when installing the cert, and tooltool
+ env["SCRIPTSPATH"] = scripts_path
+ env["EXTERNALTOOLSPATH"] = external_tools_path
+
+ runtime_testvars_path = os.path.join(self.awsy_path, "runtime-testvars.json")
+ runtime_testvars_file = open(runtime_testvars_path, "wb" if PY2 else "w")
+ runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2))
+ runtime_testvars_file.close()
+
+ cmd = ["marionette"]
+
+ test_vars_file = None
+ if self.config["test_about_blank"]:
+ test_vars_file = "base-testvars.json"
+ else:
+ if self.config["tp6"]:
+ test_vars_file = "tp6-testvars.json"
+ else:
+ test_vars_file = "testvars.json"
+
+ cmd.append(
+ "--testvars=%s" % os.path.join(self.awsy_path, "conf", test_vars_file)
+ )
+ cmd.append("--testvars=%s" % runtime_testvars_path)
+ cmd.append("--log-raw=-")
+ cmd.append("--log-errorsummary=%s" % error_summary_file)
+ cmd.append("--binary=%s" % self.binary_path)
+ cmd.append("--profile=%s" % (os.path.join(dirs["abs_work_dir"], "profile")))
+ if not self.config["e10s"]:
+ cmd.append("--disable-e10s")
+ cmd.extend(["--setpref={}".format(p) for p in self.config["extra_prefs"]])
+ cmd.append(
+ "--gecko-log=%s" % os.path.join(dirs["abs_blob_upload_dir"], "gecko.log")
+ )
+ # TestingMixin._download_and_extract_symbols() should set
+ # self.symbols_path
+ cmd.append("--symbols-path=%s" % self.symbols_path)
+
+ if self.config["test_about_blank"]:
+ test_file = os.path.join(self.awsy_libdir, "test_base_memory_usage.py")
+ prefs_file = "base-prefs.json"
+ else:
+ test_file = os.path.join(self.awsy_libdir, "test_memory_usage.py")
+ if self.config["tp6"]:
+ prefs_file = "tp6-prefs.json"
+ else:
+ prefs_file = "prefs.json"
+
+ cmd.append(
+ "--preferences=%s" % os.path.join(self.awsy_path, "conf", prefs_file)
+ )
+ if dmd_enabled:
+ cmd.append("--setpref=security.sandbox.content.level=0")
+ cmd.append(test_file)
+
+ if self.config["enable_webrender"]:
+ cmd.append("--enable-webrender")
+
+ env["STYLO_THREADS"] = "4"
+
+ env["MOZ_UPLOAD_DIR"] = dirs["abs_blob_upload_dir"]
+ if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
+ self.mkdir_p(env["MOZ_UPLOAD_DIR"])
+ if self.query_minidump_stackwalk():
+ env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
+ env["MINIDUMP_SAVE_PATH"] = dirs["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "1"
+ env = self.query_env(partial_env=env)
+ parser = StructuredOutputParser(
+ config=self.config,
+ log_obj=self.log_obj,
+ error_list=self.error_list,
+ strict=False,
+ )
+ return_code = self.run_command(
+ command=cmd,
+ cwd=self.awsy_path,
+ output_timeout=self.config.get("cmd_timeout"),
+ env=env,
+ output_parser=parser,
+ )
+
+ level = INFO
+ tbpl_status, log_level, summary = parser.evaluate_parser(
+ return_code=return_code
+ )
+
+ self.log(
+ "AWSY exited with return code %s: %s" % (return_code, tbpl_status),
+ level=level,
+ )
+ self.record_status(tbpl_status)
+
+
+if __name__ == "__main__":
+ awsy_test = AWSY()
+ awsy_test.run_and_exit()
diff --git a/testing/mozharness/scripts/configtest.py b/testing/mozharness/scripts/configtest.py
new file mode 100755
index 0000000000..b551bbe30c
--- /dev/null
+++ b/testing/mozharness/scripts/configtest.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""configtest.py
+
+Verify the .json and .py files in the configs/ directory are well-formed.
+Further tests to verify validity would be desirable.
+
+This is also a good example script to look at to understand mozharness.
+"""
+
+from __future__ import absolute_import
+import os
+import pprint
+import sys
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.script import BaseScript
+
+
+# ConfigTest {{{1
class ConfigTest(BaseScript):
    """Sanity-check the config files shipped with mozharness.

    Walks (or is told about) a set of ``.json``/``.py`` config files and
    verifies that each one parses and, for python configs, that executing
    the file produces a ``config`` dictionary.
    """

    config_options = [
        [
            [
                "--test-file",
            ],
            {
                "action": "extend",
                "dest": "test_files",
                "help": "Specify which config files to test",
            },
        ]
    ]

    def __init__(self, require_config_file=False):
        # Cache for query_config_files(); filled on first use.
        self.config_files = []
        actions = [
            "list-config-files",
            "test-json-configs",
            "test-python-configs",
            "summary",
        ]
        BaseScript.__init__(
            self,
            config_options=self.config_options,
            all_actions=actions,
            # Everything except the demo "list-config-files" action runs
            # by default.
            default_actions=actions[1:],
            require_config_file=require_config_file,
        )

    def query_config_files(self):
        """This query method, much like others, caches its runtime
        settings in self.VAR so we don't have to figure out config_files
        multiple times.
        """
        if self.config_files:
            return self.config_files
        cfg = self.config
        if "test_files" in cfg:
            self.config_files = cfg["test_files"]
            return self.config_files
        self.debug(
            "No --test-file(s) specified; defaulting to crawling the configs/ directory."
        )
        found = []
        configs_root = os.path.join(sys.path[0], "..", "configs")
        for root, _subdirs, names in os.walk(configs_root):
            for fname in names:
                # Only .json and .py files are config candidates; the
                # test_malformed* fixtures are intentionally broken.
                if not (fname.endswith(".json") or fname.endswith(".py")):
                    continue
                if fname.startswith("test_malformed"):
                    continue
                found.append(os.path.join(root, fname))
        self.config_files = found
        return self.config_files

    def list_config_files(self):
        """Non-default action that is mainly here to demonstrate how
        non-default actions work in a mozharness script.
        """
        for path in self.query_config_files():
            self.info(path)

    def test_json_configs(self):
        """Currently only "is this well-formed json?" """
        total = 0
        good = 0
        for path in self.query_config_files():
            if not path.endswith(".json"):
                continue
            total += 1
            self.info("Testing %s." % path)
            raw = self.read_from_file(path, verbose=False)
            try:
                json.loads(raw)
            except ValueError:
                self.add_summary("%s is invalid json." % path, level="error")
                self.error(pprint.pformat(sys.exc_info()[1]))
            else:
                self.info("Good.")
                good += 1
        if total:
            self.add_summary(
                "%d of %d json config files were good." % (good, total)
            )
        else:
            self.add_summary("No json config files to test.")

    def test_python_configs(self):
        """Currently only "will this give me a config dictionary?" """
        total = 0
        good = 0
        for path in self.query_config_files():
            if not path.endswith(".py"):
                continue
            total += 1
            self.info("Testing %s." % path)
            global_scope = {}
            local_scope = {}
            try:
                with open(path, "r") as fh:
                    exec(fh.read(), global_scope, local_scope)
            except Exception:
                self.add_summary(
                    "%s is invalid python." % path, level="error"
                )
                self.error(pprint.pformat(sys.exc_info()[1]))
            else:
                # A valid config module must define a dict named "config".
                if "config" in local_scope and isinstance(
                    local_scope["config"], dict
                ):
                    self.info("Good.")
                    good += 1
                else:
                    self.add_summary(
                        "%s is valid python, "
                        "but doesn't create a config dictionary." % path,
                        level="error",
                    )
        if total:
            self.add_summary(
                "%d of %d python config files were good." % (good, total)
            )
        else:
            self.add_summary("No python config files to test.")
+
+
+# __main__ {{{1
if __name__ == "__main__":
    # Script entry point: instantiate the checker and run its actions.
    ConfigTest().run_and_exit()
diff --git a/testing/mozharness/scripts/desktop_l10n.py b/testing/mozharness/scripts/desktop_l10n.py
new file mode 100755
index 0000000000..2311eaf28f
--- /dev/null
+++ b/testing/mozharness/scripts/desktop_l10n.py
@@ -0,0 +1,495 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""desktop_l10n.py
+
+This script manages Desktop repacks for nightly builds.
+"""
+from __future__ import absolute_import
+import os
+import glob
+import sys
+import shlex
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0])) # noqa
+
+from mozharness.base.errors import MakefileErrorList
+from mozharness.base.script import BaseScript
+from mozharness.base.vcs.vcsbase import VCSMixin
+from mozharness.mozilla.automation import AutomationMixin
+from mozharness.mozilla.building.buildbase import (
+ MakeUploadOutputParser,
+ get_mozconfig_path,
+)
+from mozharness.mozilla.l10n.locales import LocalesMixin
+
+try:
+ import simplejson as json
+
+ assert json
+except ImportError:
+ import json
+
+
# needed by _map
# Return codes shared by the repack helpers below; _map() counts an item
# as successful only when the wrapped function returns SUCCESS (0).
SUCCESS = 0
FAILURE = 1

# Human-readable counterparts of the return codes above.
SUCCESS_STR = "Success"
FAILURE_STR = "Failed"
+
+
+# DesktopSingleLocale {{{1
class DesktopSingleLocale(LocalesMixin, AutomationMixin, VCSMixin, BaseScript):
    """Manages desktop repacks.

    Clones the l10n repositories, configures the tree, downloads and
    unpacks the en-US build, then builds, uploads and (optionally)
    renames one localized installer per requested locale.
    """

    config_options = [
        [
            [
                "--locale",
            ],
            {
                "action": "extend",
                "dest": "locales",
                "type": "string",
                "help": "Specify the locale(s) to sign and update. Optionally pass"
                " revision separated by colon, en-GB:default.",
            },
        ],
        [
            [
                "--tag-override",
            ],
            {
                "action": "store",
                "dest": "tag_override",
                "type": "string",
                "help": "Override the tags set for all repos",
            },
        ],
        [
            [
                "--en-us-installer-url",
            ],
            {
                "action": "store",
                "dest": "en_us_installer_url",
                "type": "string",
                "help": "Specify the url of the en-us binary",
            },
        ],
    ]

    def __init__(self, require_config_file=True):
        # fxbuild style:
        buildscript_kwargs = {
            "all_actions": [
                "clone-locales",
                "list-locales",
                "setup",
                "repack",
                "summary",
            ],
            "config": {
                "ignore_locales": ["en-US"],
                "locales_dir": "browser/locales",
                "log_name": "single_locale",
                "hg_l10n_base": "https://hg.mozilla.org/l10n-central",
            },
        }

        LocalesMixin.__init__(self)
        BaseScript.__init__(
            self,
            config_options=self.config_options,
            require_config_file=require_config_file,
            **buildscript_kwargs
        )

        # Caches filled lazily by the query_* helpers below.
        self.bootstrap_env = None
        self.upload_env = None
        self.upload_urls = {}
        self.pushdate = None
        # upload_files is a dictionary of files to upload, keyed by locale.
        self.upload_files = {}

    # Helper methods {{{2
    def query_bootstrap_env(self):
        """returns the env for repacks"""
        if self.bootstrap_env:
            return self.bootstrap_env
        config = self.config
        abs_dirs = self.query_abs_dirs()

        bootstrap_env = self.query_env(
            partial_env=config.get("bootstrap_env"), replace_dict=abs_dirs
        )

        bootstrap_env["L10NBASEDIR"] = abs_dirs["abs_l10n_dir"]
        if self.query_is_nightly():
            # we might set update_channel explicitly
            if config.get("update_channel"):
                update_channel = config["update_channel"]
            else:  # Let's just give the generic channel based on branch.
                update_channel = "nightly-%s" % (config["branch"],)
            # NOTE(review): encoding the channel to bytes looks like a py2
            # leftover — confirm query_env/run_command accept bytes values.
            if not isinstance(update_channel, bytes):
                update_channel = update_channel.encode("utf-8")
            bootstrap_env["MOZ_UPDATE_CHANNEL"] = update_channel
            self.info(
                "Update channel set to: {}".format(bootstrap_env["MOZ_UPDATE_CHANNEL"])
            )
        self.bootstrap_env = bootstrap_env
        return self.bootstrap_env

    def _query_upload_env(self):
        """returns the environment used for the upload step"""
        if self.upload_env:
            return self.upload_env
        config = self.config

        upload_env = self.query_env(partial_env=config.get("upload_env"))
        # check if there are any extra option from the platform configuration
        # and append them to the env
        if "upload_env_extra" in config:
            for extra in config["upload_env_extra"]:
                upload_env[extra] = config["upload_env_extra"][extra]

        self.upload_env = upload_env
        return self.upload_env

    def query_l10n_env(self):
        """Returns the merged upload + bootstrap environment used for
        locale-specific make targets."""
        l10n_env = self._query_upload_env().copy()
        l10n_env.update(self.query_bootstrap_env())
        return l10n_env

    def _query_make_variable(self, variable, make_args=None):
        """returns the value of make echo-variable-<variable>
        it accepts extra make arguements (make_args)
        """
        dirs = self.query_abs_dirs()
        make_args = make_args or []
        target = ["echo-variable-%s" % variable] + make_args
        cwd = dirs["abs_locales_dir"]
        raw_output = self._get_output_from_make(
            target, cwd=cwd, env=self.query_bootstrap_env()
        )
        # we want to log all the messages from make
        output = []
        for line in raw_output.split("\n"):
            output.append(line.strip())
        output = " ".join(output).strip()
        self.info("echo-variable-%s: %s" % (variable, output))
        return output

    def _map(self, func, items):
        """runs func for any item in items, calls the add_failure() for each
        error. It assumes that function returns 0 when successful.
        returns a two element tuple with (success_count, total_count)"""
        success_count = 0
        total_count = len(items)
        name = func.__name__
        for item in items:
            result = func(item)
            if result == SUCCESS:
                # success!
                success_count += 1
            else:
                # func failed...
                message = "failure: %s(%s)" % (name, item)
                self.add_failure(item, message)
        return (success_count, total_count)

    # Actions {{{2
    def clone_locales(self):
        """Action: pull the l10n source repositories (LocalesMixin)."""
        self.pull_locale_source()

    def setup(self):
        """setup step"""
        self._run_tooltool()
        self._copy_mozconfig()
        self._mach_configure()
        self._run_make_in_config_dir()
        self.make_wget_en_US()
        self.make_unpack_en_US()

    def _run_make_in_config_dir(self):
        """this step creates nsinstall, needed my make_wget_en_US()"""
        dirs = self.query_abs_dirs()
        config_dir = os.path.join(dirs["abs_obj_dir"], "config")
        env = self.query_bootstrap_env()
        return self._make(target=["export"], cwd=config_dir, env=env)

    def _copy_mozconfig(self):
        """copies the mozconfig file into abs_src_dir/.mozconfig
        and logs the content
        """
        config = self.config
        dirs = self.query_abs_dirs()
        src = get_mozconfig_path(self, config, dirs)
        dst = os.path.join(dirs["abs_src_dir"], ".mozconfig")
        self.copyfile(src, dst)
        self.read_from_file(dst, verbose=True)

    def _mach(self, target, env, halt_on_failure=True, output_parser=None):
        """Runs ``mach <target>`` from the source dir and returns the
        exit code.

        Bug fix: ``halt_on_failure`` and ``output_parser`` were accepted
        but ignored (the call hard-coded ``halt_on_failure=True`` and
        ``output_parser=None``); they are now forwarded to run_command().
        Existing callers all rely on the defaults, which match the old
        hard-coded values, so behavior is unchanged for them.
        """
        dirs = self.query_abs_dirs()
        mach = self._get_mach_executable()
        return self.run_command(
            mach + target,
            halt_on_failure=halt_on_failure,
            env=env,
            cwd=dirs["abs_src_dir"],
            output_parser=output_parser,
        )

    def _mach_configure(self):
        """calls mach configure"""
        env = self.query_bootstrap_env()
        target = ["configure"]
        return self._mach(target=target, env=env)

    def _get_mach_executable(self):
        """Returns the command list used to invoke mach."""
        return [sys.executable, "mach"]

    def _get_make_executable(self):
        """Returns the command list used to invoke make (mozmake on
        windows when enabled)."""
        config = self.config
        dirs = self.query_abs_dirs()
        if config.get("enable_mozmake"):  # e.g. windows
            make = r"/".join([dirs["abs_src_dir"], "mozmake.exe"])
            # mysterious subprocess errors, let's try to fix this path...
            make = make.replace("\\", "/")
            make = [make]
        else:
            make = ["make"]
        return make

    def _make(
        self,
        target,
        cwd,
        env,
        error_list=MakefileErrorList,
        halt_on_failure=True,
        output_parser=None,
    ):
        """Runs make. Returns the exit code"""
        make = self._get_make_executable()
        if target:
            make = make + target
        return self.run_command(
            make,
            cwd=cwd,
            env=env,
            error_list=error_list,
            halt_on_failure=halt_on_failure,
            output_parser=output_parser,
        )

    def _get_output_from_make(
        self, target, cwd, env, halt_on_failure=True, ignore_errors=False
    ):
        """runs make and returns the output of the command"""
        make = self._get_make_executable()
        return self.get_output_from_command(
            make + target,
            cwd=cwd,
            env=env,
            silent=True,
            halt_on_failure=halt_on_failure,
            ignore_errors=ignore_errors,
        )

    def make_unpack_en_US(self):
        """wrapper for make unpack"""
        config = self.config
        dirs = self.query_abs_dirs()
        env = self.query_bootstrap_env()
        cwd = os.path.join(dirs["abs_obj_dir"], config["locales_dir"])
        return self._make(target=["unpack"], cwd=cwd, env=env)

    def make_wget_en_US(self):
        """wrapper for make wget-en-US"""
        env = self.query_bootstrap_env()
        dirs = self.query_abs_dirs()
        cwd = dirs["abs_locales_dir"]
        return self._make(target=["wget-en-US"], cwd=cwd, env=env)

    def make_upload(self, locale):
        """wrapper for make upload command

        Returns SUCCESS/FAILURE; on success it may also rename the
        uploaded artifacts to SIMPLE_NAME form (target.<ext>) when
        ``simple_name_move`` is configured.
        """
        env = self.query_l10n_env()
        dirs = self.query_abs_dirs()
        target = ["upload", "AB_CD=%s" % (locale)]
        cwd = dirs["abs_locales_dir"]
        parser = MakeUploadOutputParser(config=self.config, log_obj=self.log_obj)
        retval = self._make(
            target=target, cwd=cwd, env=env, halt_on_failure=False, output_parser=parser
        )
        if retval == SUCCESS:
            self.info("Upload successful (%s)" % locale)
            ret = SUCCESS
        else:
            self.error("failed to upload %s" % locale)
            ret = FAILURE

        if ret == FAILURE:
            # If we failed above, we shouldn't even attempt a SIMPLE_NAME move
            # even if we are configured to do so
            return ret

        # XXX Move the files to a SIMPLE_NAME format until we can enable
        # Simple names in the build system
        if self.config.get("simple_name_move"):
            # Assume an UPLOAD PATH
            upload_target = self.config["upload_env"]["UPLOAD_PATH"]
            target_path = os.path.join(upload_target, locale)
            self.mkdir_p(target_path)
            glob_name = "*.%s.*" % locale
            matches = (
                glob.glob(os.path.join(upload_target, glob_name))
                + glob.glob(os.path.join(upload_target, "update", glob_name))
                + glob.glob(os.path.join(upload_target, "*", "xpi", glob_name))
                + glob.glob(os.path.join(upload_target, "install", "sea", glob_name))
                + glob.glob(os.path.join(upload_target, "setup.exe"))
                + glob.glob(os.path.join(upload_target, "setup-stub.exe"))
            )
            targets_exts = [
                "tar.bz2",
                "dmg",
                "langpack.xpi",
                "checksums",
                "zip",
                "installer.exe",
                "installer-stub.exe",
            ]
            targets = [(".%s" % (ext,), "target.%s" % (ext,)) for ext in targets_exts]
            targets.extend([(f, f) for f in ("setup.exe", "setup-stub.exe")])
            for f in matches:
                possible_targets = [
                    (tail, target_file)
                    for (tail, target_file) in targets
                    if f.endswith(tail)
                ]
                if len(possible_targets) == 1:
                    _, target_file = possible_targets[0]
                    # Remove from list of available options for this locale
                    targets.remove(possible_targets[0])
                else:
                    # wasn't valid (or already matched)
                    raise RuntimeError(
                        "Unexpected matching file name encountered: %s" % f
                    )
                self.move(os.path.join(f), os.path.join(target_path, target_file))
            self.log("Converted uploads for %s to simple names" % locale)
        return ret

    def set_upload_files(self, locale):
        # The tree doesn't have a good way of exporting the list of files
        # created during locale generation, but we can grab them by echoing the
        # UPLOAD_FILES variable for each locale.
        env = self.query_l10n_env()
        target = [
            "echo-variable-UPLOAD_FILES",
            "echo-variable-CHECKSUM_FILES",
            "AB_CD=%s" % locale,
        ]
        dirs = self.query_abs_dirs()
        cwd = dirs["abs_locales_dir"]
        # Bug 1242771 - echo-variable-UPLOAD_FILES via mozharness fails when stderr is found
        # we should ignore stderr as unfortunately it's expected when parsing for values
        output = self._get_output_from_make(
            target=target, cwd=cwd, env=env, ignore_errors=True
        )
        self.info('UPLOAD_FILES is "%s"' % output)
        files = shlex.split(output)
        if not files:
            self.error("failed to get upload file list for locale %s" % locale)
            return FAILURE

        self.upload_files[locale] = [
            os.path.abspath(os.path.join(cwd, f)) for f in files
        ]
        return SUCCESS

    def make_installers(self, locale):
        """wrapper for make installers-(locale)"""
        env = self.query_l10n_env()
        env["PYTHONIOENCODING"] = "utf-8"
        self._copy_mozconfig()
        dirs = self.query_abs_dirs()
        cwd = os.path.join(dirs["abs_locales_dir"])
        target = [
            "installers-%s" % locale,
        ]
        return self._make(target=target, cwd=cwd, env=env, halt_on_failure=False)

    def repack_locale(self, locale):
        """wraps the logic for make installers and generating
        complete updates.

        Returns SUCCESS (0) or FAILURE (1) so it can be driven by _map().
        """
        # run make installers
        if self.make_installers(locale) != SUCCESS:
            self.error("make installers-%s failed" % (locale))
            return FAILURE

        # now try to upload the artifacts; any non-zero value is a failure
        if self.make_upload(locale):
            self.error("make upload for locale %s failed!" % (locale))
            return FAILURE

        # set_upload_files() should be called after make upload, to make sure
        # we have all files in place (checksums, etc)
        if self.set_upload_files(locale):
            self.error("failed to get list of files to upload for locale %s" % locale)
            return FAILURE

        return SUCCESS

    def repack(self):
        """creates the repacks and udpates"""
        self._map(self.repack_locale, self.query_locales())

    def _run_tooltool(self):
        """Fetches the toolchain artifacts listed in the tooltool manifest
        (from $TOOLTOOL_MANIFEST or config) via `mach artifact toolchain`.
        No-op when no manifest is configured."""
        env = self.query_bootstrap_env()
        config = self.config
        dirs = self.query_abs_dirs()
        manifest_src = os.environ.get("TOOLTOOL_MANIFEST")
        if not manifest_src:
            manifest_src = config.get("tooltool_manifest_src")
        if not manifest_src:
            return
        python = sys.executable

        cmd = [
            python,
            "-u",
            os.path.join(dirs["abs_src_dir"], "mach"),
            "artifact",
            "toolchain",
            "-v",
            "--retry",
            "4",
            "--artifact-manifest",
            os.path.join(dirs["abs_src_dir"], "toolchains.json"),
        ]
        # manifest_src is always truthy here (we returned early otherwise).
        if manifest_src:
            cmd.extend(
                [
                    "--tooltool-manifest",
                    os.path.join(dirs["abs_src_dir"], manifest_src),
                ]
            )
        cache = config["bootstrap_env"].get("TOOLTOOL_CACHE")
        if cache:
            cmd.extend(["--cache-dir", cache])
        self.info(str(cmd))
        self.run_command(cmd, cwd=dirs["abs_src_dir"], halt_on_failure=True, env=env)
+
+
+# main {{{
if __name__ == "__main__":
    # Script entry point: run the single-locale repack actions.
    DesktopSingleLocale().run_and_exit()
diff --git a/testing/mozharness/scripts/desktop_partner_repacks.py b/testing/mozharness/scripts/desktop_partner_repacks.py
new file mode 100755
index 0000000000..003b446cd7
--- /dev/null
+++ b/testing/mozharness/scripts/desktop_partner_repacks.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""desktop_partner_repacks.py
+
+This script manages Desktop partner repacks for beta/release builds.
+"""
+from __future__ import absolute_import
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.script import BaseScript
+from mozharness.mozilla.automation import AutomationMixin
+from mozharness.mozilla.secrets import SecretsMixin
+from mozharness.base.python import VirtualenvMixin
+from mozharness.base.log import FATAL
+
+
+# DesktopPartnerRepacks {{{1
class DesktopPartnerRepacks(AutomationMixin, BaseScript, VirtualenvMixin, SecretsMixin):
    """Manages desktop partner repacks.

    Fetches the google `repo` tool, syncs the partner repack manifests,
    then drives python/mozrelease/mozrelease/partner_repack.py.
    """

    actions = [
        "get-secrets",
        "setup",
        "repack",
        "summary",
    ]
    config_options = [
        [
            ["--version", "-v"],
            {
                "dest": "version",
                "help": "Version of Firefox to repack",
            },
        ],
        [
            ["--build-number", "-n"],
            {
                "dest": "build_number",
                "help": "Build number of Firefox to repack",
            },
        ],
        [
            ["--platform"],
            {
                "dest": "platform",
                "help": "Platform to repack (e.g. linux64, macosx64, ...)",
            },
        ],
        [
            ["--partner", "-p"],
            {
                "dest": "partner",
                "help": "Limit repackaging to partners matching this string",
            },
        ],
        [
            ["--taskid", "-t"],
            {
                "dest": "taskIds",
                "action": "extend",
                "help": "taskId(s) of upstream tasks for vanilla Firefox artifacts",
            },
        ],
        [
            ["--limit-locale", "-l"],
            {
                "dest": "limitLocales",
                "action": "append",
                "help": "Limit repackaging to the specified locale(s)",
            },
        ],
    ]

    def __init__(self):
        # fxbuild style:
        buildscript_kwargs = {
            "all_actions": DesktopPartnerRepacks.actions,
            "default_actions": DesktopPartnerRepacks.actions,
            "config": {
                "log_name": "partner-repacks",
                "hashType": "sha512",
                "workdir": "partner-repacks",
            },
        }

        BaseScript.__init__(
            self, config_options=self.config_options, **buildscript_kwargs
        )

    def _pre_config_lock(self, rw_config):
        """Apply env-var overrides and validate required settings before
        the config is frozen; fatals on anything missing."""
        if os.getenv("REPACK_MANIFESTS_URL"):
            self.info(
                "Overriding repack_manifests_url to %s"
                % os.getenv("REPACK_MANIFESTS_URL")
            )
            self.config["repack_manifests_url"] = os.getenv("REPACK_MANIFESTS_URL")
        if os.getenv("UPSTREAM_TASKIDS"):
            self.info("Overriding taskIds with %s" % os.getenv("UPSTREAM_TASKIDS"))
            self.config["taskIds"] = os.getenv("UPSTREAM_TASKIDS").split()

        if "version" not in self.config:
            self.fatal("Version (-v) not supplied.")
        if "build_number" not in self.config:
            self.fatal("Build number (-n) not supplied.")
        if "repo_file" not in self.config:
            self.fatal("repo_file not supplied.")
        if "repack_manifests_url" not in self.config:
            self.fatal(
                "repack_manifests_url not supplied in config or via REPACK_MANIFESTS_URL"
            )
        if "taskIds" not in self.config:
            self.fatal("Need upstream taskIds from command line or in UPSTREAM_TASKIDS")

    def query_abs_dirs(self):
        """Returns the cached abs-dir map, extended with the .repo and
        partners directories."""
        if self.abs_dirs:
            return self.abs_dirs
        abs_dirs = super(DesktopPartnerRepacks, self).query_abs_dirs()
        # (removed a no-op loop here that reassigned every entry of
        # abs_dirs to itself)
        dirs = {}
        dirs["abs_repo_dir"] = os.path.join(abs_dirs["abs_work_dir"], ".repo")
        dirs["abs_partners_dir"] = os.path.join(abs_dirs["abs_work_dir"], "partners")
        for key in dirs.keys():
            if key not in abs_dirs:
                abs_dirs[key] = dirs[key]
        self.abs_dirs = abs_dirs
        return self.abs_dirs

    # Actions {{{
    def _repo_cleanup(self):
        """Removes the repo checkout state so a retry starts clean."""
        self.rmtree(self.query_abs_dirs()["abs_repo_dir"])
        self.rmtree(self.query_abs_dirs()["abs_partners_dir"])

    def _repo_init(self, repo):
        """Runs `repo init` + `repo sync` against the manifests url.
        Returns the first non-zero status, or the sync status."""
        partial_env = {
            "GIT_SSH_COMMAND": "ssh -oIdentityFile={}".format(self.config["ssh_key"])
        }
        status = self.run_command(
            [
                repo,
                "init",
                "--no-repo-verify",
                "-u",
                self.config["repack_manifests_url"],
            ],
            cwd=self.query_abs_dirs()["abs_work_dir"],
            partial_env=partial_env,
        )
        if status:
            return status
        return self.run_command(
            [repo, "sync", "--current-branch", "--no-tags"],
            cwd=self.query_abs_dirs()["abs_work_dir"],
            partial_env=partial_env,
        )

    def setup(self):
        """setup step"""
        repo = self.download_file(
            self.config["repo_file"],
            file_name="repo",
            parent_dir=self.query_abs_dirs()["abs_work_dir"],
            error_level=FATAL,
        )
        if not os.path.exists(repo):
            self.fatal("Unable to download repo tool.")
        self.chmod(repo, 0o755)
        self.retry(
            self._repo_init,
            args=(repo,),
            error_level=FATAL,
            # Bug fix: pass the cleanup callable itself. The previous
            # ``cleanup=self._repo_cleanup()`` invoked cleanup immediately
            # and handed retry() its None return value, so failed attempts
            # were never cleaned up between retries.
            cleanup=self._repo_cleanup,
            good_statuses=[0],
            sleeptime=5,
        )

    def repack(self):
        """creates the repacks"""
        repack_cmd = [
            "./mach",
            "python",
            "python/mozrelease/mozrelease/partner_repack.py",
            "-v",
            self.config["version"],
            "-n",
            str(self.config["build_number"]),
        ]
        if self.config.get("platform"):
            repack_cmd.extend(["--platform", self.config["platform"]])
        if self.config.get("partner"):
            repack_cmd.extend(["--partner", self.config["partner"]])
        if self.config.get("taskIds"):
            for taskId in self.config["taskIds"]:
                repack_cmd.extend(["--taskid", taskId])
        if self.config.get("limitLocales"):
            for locale in self.config["limitLocales"]:
                repack_cmd.extend(["--limit-locale", locale])

        self.run_command(repack_cmd, cwd=os.environ["GECKO_PATH"], halt_on_failure=True)
+
+
+# main {{{
if __name__ == "__main__":
    # Script entry point: run the partner-repack actions.
    DesktopPartnerRepacks().run_and_exit()
diff --git a/testing/mozharness/scripts/desktop_unittest.py b/testing/mozharness/scripts/desktop_unittest.py
new file mode 100755
index 0000000000..ff87d7cd52
--- /dev/null
+++ b/testing/mozharness/scripts/desktop_unittest.py
@@ -0,0 +1,1220 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""desktop_unittest.py
+
+author: Jordan Lund
+"""
+
+from __future__ import absolute_import
+import json
+import os
+import re
+import sys
+import copy
+import shutil
+import glob
+import imp
+import platform
+
+from datetime import datetime, timedelta
+
+# load modules from parent dir
+here = os.path.abspath(os.path.dirname(__file__))
+sys.path.insert(1, os.path.dirname(here))
+
+from mozharness.base.errors import BaseErrorList
+from mozharness.base.log import INFO, WARNING
+from mozharness.base.script import PreScriptAction
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.automation import TBPL_EXCEPTION, TBPL_RETRY
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.errors import HarnessErrorList
+from mozharness.mozilla.testing.unittest import DesktopUnittestOutputParser
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+
# True when running under Python 2.
PY2 = sys.version_info.major == 2
# All suite categories this script knows; each has a matching
# --<category>-suite command-line option declared below.
SUITE_CATEGORIES = [
    "gtest",
    "cppunittest",
    "jittest",
    "mochitest",
    "reftest",
    "xpcshell",
]
# presumably: suites that run with e10s by default / never run with e10s /
# honour --repeat — used later in the class body, outside this chunk; verify.
SUITE_DEFAULT_E10S = ["mochitest", "reftest"]
SUITE_NO_E10S = ["xpcshell"]
SUITE_REPEATABLE = ["mochitest", "reftest"]
+
+
+# DesktopUnittest {{{1
+class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, CodeCoverageMixin):
+ config_options = (
+ [
+ [
+ [
+ "--mochitest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_mochitest_suites",
+ "type": "string",
+ "help": "Specify which mochi suite to run. "
+ "Suites are defined in the config file.\n"
+ "Examples: 'all', 'plain1', 'plain5', 'chrome', or 'a11y'",
+ },
+ ],
+ [
+ [
+ "--reftest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_reftest_suites",
+ "type": "string",
+ "help": "Specify which reftest suite to run. "
+ "Suites are defined in the config file.\n"
+ "Examples: 'all', 'crashplan', or 'jsreftest'",
+ },
+ ],
+ [
+ [
+ "--xpcshell-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_xpcshell_suites",
+ "type": "string",
+ "help": "Specify which xpcshell suite to run. "
+ "Suites are defined in the config file\n."
+ "Examples: 'xpcshell'",
+ },
+ ],
+ [
+ [
+ "--cppunittest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_cppunittest_suites",
+ "type": "string",
+ "help": "Specify which cpp unittest suite to run. "
+ "Suites are defined in the config file\n."
+ "Examples: 'cppunittest'",
+ },
+ ],
+ [
+ [
+ "--gtest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_gtest_suites",
+ "type": "string",
+ "help": "Specify which gtest suite to run. "
+ "Suites are defined in the config file\n."
+ "Examples: 'gtest'",
+ },
+ ],
+ [
+ [
+ "--jittest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_jittest_suites",
+ "type": "string",
+ "help": "Specify which jit-test suite to run. "
+ "Suites are defined in the config file\n."
+ "Examples: 'jittest'",
+ },
+ ],
+ [
+ [
+ "--run-all-suites",
+ ],
+ {
+ "action": "store_true",
+ "dest": "run_all_suites",
+ "default": False,
+ "help": "This will run all suites that are specified "
+ "in the config file. You do not need to specify "
+ "any other suites.\nBeware, this may take a while ;)",
+ },
+ ],
+ [
+ [
+ "--disable-e10s",
+ ],
+ {
+ "action": "store_false",
+ "dest": "e10s",
+ "default": True,
+ "help": "Run tests without multiple processes (e10s).",
+ },
+ ],
+ [
+ [
+ "--headless",
+ ],
+ {
+ "action": "store_true",
+ "dest": "headless",
+ "default": False,
+ "help": "Run tests in headless mode.",
+ },
+ ],
+ [
+ [
+ "--no-random",
+ ],
+ {
+ "action": "store_true",
+ "dest": "no_random",
+ "default": False,
+ "help": "Run tests with no random intermittents and bisect in case of real failure.", # NOQA: E501
+ },
+ ],
+ [
+ ["--total-chunks"],
+ {
+ "action": "store",
+ "dest": "total_chunks",
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--this-chunk"],
+ {
+ "action": "store",
+ "dest": "this_chunk",
+ "help": "Number of this chunk",
+ },
+ ],
+ [
+ ["--allow-software-gl-layers"],
+ {
+ "action": "store_true",
+ "dest": "allow_software_gl_layers",
+ "default": False,
+ "help": "Permits a software GL implementation (such as LLVMPipe) to use "
+ "the GL compositor.",
+ },
+ ],
+ [
+ ["--enable-webrender"],
+ {
+ "action": "store_true",
+ "dest": "enable_webrender",
+ "default": False,
+ "help": "Enable the WebRender compositor in Gecko.",
+ },
+ ],
+ [
+ ["--gpu-required"],
+ {
+ "action": "store_true",
+ "dest": "gpu_required",
+ "default": False,
+ "help": "Run additional verification on modified tests using gpu instances.",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Defines an extra user preference.",
+ },
+ ],
+ [
+ [
+ "--repeat",
+ ],
+ {
+ "action": "store",
+ "type": "int",
+ "dest": "repeat",
+ "default": 0,
+ "help": "Repeat the tests the given number of times. Supported "
+ "by mochitest, reftest, crashtest, ignored otherwise.",
+ },
+ ],
+ [
+ ["--enable-xorigin-tests"],
+ {
+ "action": "store_true",
+ "dest": "enable_xorigin_tests",
+ "default": False,
+ "help": "Run tests in a cross origin iframe.",
+ },
+ ],
+ [
+ ["--enable-a11y-checks"],
+ {
+ "action": "store_true",
+ "default": False,
+ "dest": "a11y_checks",
+ "help": "Run tests with accessibility checks disabled.",
+ },
+ ],
+ [
+ ["--run-failures"],
+ {
+ "action": "store",
+ "default": "",
+ "type": "string",
+ "dest": "run_failures",
+ "help": "Run only failures matching keyword. "
+ "Examples: 'apple_silicon'",
+ },
+ ],
+ ]
+ + copy.deepcopy(testing_config_options)
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
def __init__(self, require_config_file=True):
    """Configure actions, read test/installer settings from config, and
    build the identifier used to tag Perfherder resource-monitoring data
    for this particular invocation."""
    # abs_dirs defined already in BaseScript but is here to make pylint happy
    self.abs_dirs = None
    super(DesktopUnittest, self).__init__(
        config_options=self.config_options,
        all_actions=[
            "clobber",
            "download-and-extract",
            "create-virtualenv",
            "start-pulseaudio",
            "install",
            "stage-files",
            "run-tests",
        ],
        require_config_file=require_config_file,
        config={"require_test_zip": True},
    )

    c = self.config
    self.global_test_options = []
    self.installer_url = c.get("installer_url")
    self.test_url = c.get("test_url")
    self.test_packages_url = c.get("test_packages_url")
    self.symbols_url = c.get("symbols_url")
    # this is so mozinstall in install() doesn't bug out if we don't run
    # the download_and_extract action
    self.installer_path = c.get("installer_path")
    self.binary_path = c.get("binary_path")
    # Lazily resolved by query_abs_app_dir()/query_abs_res_dir(); they
    # depend on the installed binary location, which is OS-specific.
    self.abs_app_dir = None
    self.abs_res_dir = None

    # Construct an identifier to be used to identify Perfherder data
    # for resource monitoring recording. This attempts to uniquely
    # identify this test invocation configuration.
    perfherder_parts = []
    perfherder_options = []
    suites = (
        ("specified_mochitest_suites", "mochitest"),
        ("specified_reftest_suites", "reftest"),
        ("specified_xpcshell_suites", "xpcshell"),
        ("specified_cppunittest_suites", "cppunit"),
        ("specified_gtest_suites", "gtest"),
        ("specified_jittest_suites", "jittest"),
    )
    for s, prefix in suites:
        if s in c:
            perfherder_parts.append(prefix)
            perfherder_parts.extend(c[s])

    if "this_chunk" in c:
        perfherder_parts.append(c["this_chunk"])

    if c["e10s"]:
        perfherder_options.append("e10s")

    self.resource_monitor_perfherder_id = (
        ".".join(perfherder_parts),
        perfherder_options,
    )
+
+ # helper methods {{{2
def _pre_config_lock(self, rw_config):
    """Reject configs that combine --run-all-suites with an explicit
    per-suite selection; the two options are mutually exclusive."""
    super(DesktopUnittest, self)._pre_config_lock(rw_config)
    cfg = self.config
    if not cfg.get("run_all_suites"):
        # No cross-check needed when the flag is absent.
        return
    for cat in SUITE_CATEGORIES:
        selected = cfg.get("specified_%s_suites" % (cat))
        if selected and selected != "all":
            self.fatal(
                "Config options are not valid. Please ensure"
                " that if the '--run-all-suites' flag was enabled,"
                " then do not specify to run only specific suites "
                "like:\n '--mochitest-suite browser-chrome'"
            )
+
def query_abs_dirs(self):
    """Return (and memoize) the absolute-directory layout used by this
    script: install dirs, per-harness test dirs, and the blob upload dir."""
    if self.abs_dirs:
        return self.abs_dirs
    abs_dirs = super(DesktopUnittest, self).query_abs_dirs()

    cfg = self.config
    work = abs_dirs["abs_work_dir"]
    test_install = os.path.join(work, "tests")
    test_bin = os.path.join(test_install, "bin")
    extra = {
        "abs_work_dir": work,
        "abs_app_install_dir": os.path.join(work, "application"),
        "abs_test_install_dir": test_install,
        "abs_test_extensions_dir": os.path.join(test_install, "extensions"),
        "abs_test_bin_dir": test_bin,
        "abs_test_bin_plugins_dir": os.path.join(test_bin, "plugins"),
        "abs_test_bin_components_dir": os.path.join(test_bin, "components"),
        "abs_mochitest_dir": os.path.join(test_install, "mochitest"),
        "abs_reftest_dir": os.path.join(test_install, "reftest"),
        "abs_xpcshell_dir": os.path.join(test_install, "xpcshell"),
        "abs_cppunittest_dir": os.path.join(test_install, "cppunittest"),
        "abs_gtest_dir": os.path.join(test_install, "gtest"),
        "abs_blob_upload_dir": os.path.join(work, "blobber_upload_dir"),
        "abs_jittest_dir": os.path.join(test_install, "jit-test", "jit-test"),
    }

    # virtualenv_path may be absolute or relative to the work dir.
    venv = cfg["virtualenv_path"]
    extra["abs_virtualenv_dir"] = (
        venv if os.path.isabs(venv) else os.path.join(work, venv)
    )

    abs_dirs.update(extra)
    self.abs_dirs = abs_dirs
    return self.abs_dirs
+
def query_abs_app_dir(self):
    """Return the directory containing the application binary.

    This cannot be computed in advance: OSX install directories change
    depending on branding and opt/debug. Fatal when binary_path is unset.
    """
    if not self.abs_app_dir:
        if not self.binary_path:
            self.fatal("Can't determine abs_app_dir (binary_path not set!)")
        self.abs_app_dir = os.path.dirname(self.binary_path)
    return self.abs_app_dir
+
def query_abs_res_dir(self):
    """Return the resources directory (Contents/Resources on OSX; the app
    dir itself everywhere else).

    As with the app dir, this cannot be set in advance because OSX
    install directories change depending on branding and opt/debug.
    """
    if not self.abs_res_dir:
        app_dir = self.query_abs_app_dir()
        if self._is_darwin():
            subdir = self.config.get("mac_res_subdir", "Resources")
            self.abs_res_dir = os.path.join(os.path.dirname(app_dir), subdir)
        else:
            self.abs_res_dir = app_dir
    return self.abs_res_dir
+
@PreScriptAction("create-virtualenv")
def _pre_create_virtualenv(self, action):
    """Register the Python packages the harnesses need before the
    virtualenv is created."""
    dirs = self.query_abs_dirs()

    for module_name in ("mock", "simplejson"):
        self.register_virtualenv_module(name=module_name)

    # marionette_requirements.txt must use the legacy resolver until bug 1684969 is resolved.
    marionette_requirements = os.path.join(
        dirs["abs_test_install_dir"], "config", "marionette_requirements.txt"
    )
    self.register_virtualenv_module(
        requirements=[marionette_requirements],
        two_pass=True,
        legacy_resolver=True,
    )

    if self._query_specified_suites("mochitest") is not None:
        # mochitest is the only thing that needs the websocket process
        # bridge; pick the requirements file matching the Python major
        # version.
        wspb_requirements = (
            "websocketprocessbridge_requirements.txt"
            if PY2
            else "websocketprocessbridge_requirements_3.txt"
        )
        self.register_virtualenv_module(
            requirements=[
                os.path.join(
                    dirs["abs_mochitest_dir"],
                    "websocketprocessbridge",
                    wspb_requirements,
                )
            ],
            two_pass=True,
        )
+
def _query_symbols_url(self):
    """query the full symbols URL based upon binary URL"""
    # may break with name convention changes but is one less 'input' for script
    if self.symbols_url:
        return self.symbols_url

    symbols_url = None
    self.info("finding symbols_url based upon self.installer_url")
    if self.installer_url:
        # Derive the symbols archive name by swapping the installer
        # extension for the crashreporter-symbols suffix.
        for ext in [".zip", ".dmg", ".tar.bz2"]:
            if ext in self.installer_url:
                symbols_url = self.installer_url.replace(
                    ext, ".crashreporter-symbols.zip"
                )
        if not symbols_url:
            self.fatal(
                "self.installer_url was found but symbols_url could \
                not be determined"
            )
    else:
        self.fatal("self.installer_url was not found in self.config")
    self.info("setting symbols_url as %s" % (symbols_url))
    self.symbols_url = symbols_url
    return self.symbols_url
+
+ def _get_mozharness_test_paths(self, suite_category, suite):
+ test_paths = json.loads(os.environ.get("MOZHARNESS_TEST_PATHS", '""'))
+
+ if "-coverage" in suite:
+ suite = suite[: suite.index("-coverage")]
+
+ if not test_paths or suite not in test_paths:
+ return None
+
+ suite_test_paths = test_paths[suite]
+
+ if suite_category == "reftest":
+ dirs = self.query_abs_dirs()
+ suite_test_paths = [
+ os.path.join(dirs["abs_reftest_dir"], "tests", p)
+ for p in suite_test_paths
+ ]
+
+ return suite_test_paths
+
def _query_abs_base_cmd(self, suite_category, suite):
    """Build the base harness command line for *suite* of *suite_category*.

    Combines the per-category runner script, flags derived from config
    (chunking, e10s, webrender, prefs, ...), and the options template from
    config['suite_definitions']. Fatals when binary_path is unset or the
    category has no suite definition.
    """
    if self.binary_path:
        c = self.config
        dirs = self.query_abs_dirs()
        run_file = c["run_file_names"][suite_category]
        base_cmd = [self.query_python_path("python"), "-u"]
        base_cmd.append(os.path.join(dirs["abs_%s_dir" % suite_category], run_file))
        abs_app_dir = self.query_abs_app_dir()
        abs_res_dir = self.query_abs_res_dir()

        raw_log_file, error_summary_file = self.get_indexed_logs(
            dirs["abs_blob_upload_dir"], suite
        )

        # Values substituted into the %-templates from suite_definitions.
        str_format_values = {
            "binary_path": self.binary_path,
            "symbols_path": self._query_symbols_url(),
            "abs_work_dir": dirs["abs_work_dir"],
            "abs_app_dir": abs_app_dir,
            "abs_res_dir": abs_res_dir,
            "raw_log_file": raw_log_file,
            "error_summary_file": error_summary_file,
            "gtest_dir": os.path.join(dirs["abs_test_install_dir"], "gtest"),
        }

        # TestingMixin._download_and_extract_symbols() will set
        # self.symbols_path when downloading/extracting.
        if self.symbols_path:
            str_format_values["symbols_path"] = self.symbols_path

        if suite_category not in SUITE_NO_E10S:
            if suite_category in SUITE_DEFAULT_E10S and not c["e10s"]:
                base_cmd.append("--disable-e10s")
            elif suite_category not in SUITE_DEFAULT_E10S and c["e10s"]:
                base_cmd.append("--e10s")
        if c.get("repeat"):
            if suite_category in SUITE_REPEATABLE:
                base_cmd.extend(["--repeat=%s" % c.get("repeat")])
            else:
                self.log(
                    "--repeat not supported in {}".format(suite_category),
                    level=WARNING,
                )

        # Ignore chunking if we have user specified test paths
        if not (self.verify_enabled or self.per_test_coverage):
            test_paths = self._get_mozharness_test_paths(suite_category, suite)
            if test_paths:
                base_cmd.extend(test_paths)
            elif c.get("total_chunks") and c.get("this_chunk"):
                base_cmd.extend(
                    [
                        "--total-chunks",
                        c["total_chunks"],
                        "--this-chunk",
                        c["this_chunk"],
                    ]
                )

        if c["no_random"]:
            if suite_category == "mochitest":
                base_cmd.append("--bisect-chunk=default")
            else:
                self.warning(
                    "--no-random does not currently work with suites other than "
                    "mochitest."
                )

        if c["headless"]:
            base_cmd.append("--headless")

        if c["enable_webrender"]:
            base_cmd.append("--enable-webrender")

        if c["enable_xorigin_tests"]:
            base_cmd.append("--enable-xorigin-tests")

        if c["extra_prefs"]:
            base_cmd.extend(["--setpref={}".format(p) for p in c["extra_prefs"]])

        if c["a11y_checks"]:
            base_cmd.append("--enable-a11y-checks")

        if c["run_failures"]:
            base_cmd.extend(["--run-failures={}".format(c["run_failures"])])

        # set pluginsPath
        abs_res_plugins_dir = os.path.join(abs_res_dir, "plugins")
        str_format_values["test_plugin_path"] = abs_res_plugins_dir

        if suite_category not in c["suite_definitions"]:
            # Bug fix: the original passed a bare '%s' placeholder to
            # fatal() without interpolating suite_category.
            self.fatal("'%s' not defined in the config!" % suite_category)

        if suite in (
            "browser-chrome-coverage",
            "xpcshell-coverage",
            "mochitest-devtools-chrome-coverage",
            "plain-coverage",
        ):
            base_cmd.append("--jscov-dir-prefix=%s" % dirs["abs_blob_upload_dir"])

        options = c["suite_definitions"][suite_category]["options"]
        if options:
            for option in options:
                option = option % str_format_values
                # Options whose template value resolved to None are dropped.
                if not option.endswith("None"):
                    base_cmd.append(option)
            if self.structured_output(
                suite_category, self._query_try_flavor(suite_category, suite)
            ):
                base_cmd.append("--log-raw=-")
            return base_cmd
        else:
            self.warning(
                "Suite options for %s could not be determined."
                "\nIf you meant to have options for this suite, "
                "please make sure they are specified in your "
                "config under %s_options" % (suite_category, suite_category)
            )

        return base_cmd
    else:
        self.fatal(
            "'binary_path' could not be determined.\n This should "
            "be like '/path/build/application/firefox/firefox'"
            "\nIf you are running this script without the 'install' "
            "action (where binary_path is set), please ensure you are"
            " either:\n(1) specifying it in the config file under "
            "binary_path\n(2) specifying it on command line with the"
            " '--binary-path' flag"
        )
+
def _query_specified_suites(self, category):
    """Resolve which suites of *category* should run.

    Returns a dict mapping the suite name to its definition, or None when
    nothing in this category was requested. At most one suite may be
    selected explicitly (bug 1603842); selecting a nonexistent suite is
    fatal.
    """
    cfg = self.config
    all_suites = cfg.get("all_{}_suites".format(category), None)
    selected = cfg.get("specified_{}_suites".format(category), None)

    if selected is None:
        # Path taken by test-verify.
        return self.query_per_test_category_suites(category, all_suites)

    # Bug 1603842 - disallow selection of more than 1 suite at a time.
    if selected and len(selected) > 1:
        self.fatal(
            """Selection of multiple suites is not permitted. \
                   Please select at most 1 test suite."""
        )
        return

    # Normal path taken by most test suites as only one suite is specified.
    suite = selected[0]
    if suite not in all_suites:
        self.fatal("""Selected suite does not exist!""")
    return {suite: all_suites[suite]}
+
+ def _query_try_flavor(self, category, suite):
+ flavors = {
+ "mochitest": [
+ ("plain.*", "mochitest"),
+ ("browser-chrome.*", "browser-chrome"),
+ ("mochitest-devtools-chrome.*", "devtools-chrome"),
+ ("chrome", "chrome"),
+ ],
+ "xpcshell": [("xpcshell", "xpcshell")],
+ "reftest": [("reftest", "reftest"), ("crashtest", "crashtest")],
+ }
+ for suite_pattern, flavor in flavors.get(category, []):
+ if re.compile(suite_pattern).match(suite):
+ return flavor
+
def structured_output(self, suite_category, flavor=None):
    """Return True when the harness emits structured (raw) logs for this
    category/flavor, False when output is unstructured."""
    unstructured = self.config.get("unstructured_flavors")
    if not unstructured or suite_category not in unstructured:
        return True
    flavors = unstructured.get(suite_category)
    # An empty/false flavor list marks the whole category unstructured.
    if not flavors or flavor in flavors:
        return False
    return True
+
def get_test_output_parser(self, suite_category, flavor=None, strict=False, **kwargs):
    """Pick the output parser for the suite: the structured (raw-log)
    parser where supported, otherwise the legacy unstructured parser."""
    if self.structured_output(suite_category, flavor):
        self.info("Structured output parser in use for %s." % suite_category)
        return StructuredOutputParser(
            suite_category=suite_category, strict=strict, **kwargs
        )
    return DesktopUnittestOutputParser(suite_category=suite_category, **kwargs)
+
+ # Actions {{{2
+
+ # clobber defined in BaseScript, deletes mozharness/build if exists
+ # preflight_download_and_extract is in TestingMixin.
+ # create_virtualenv is in VirtualenvMixin.
+ # preflight_install is in TestingMixin.
+ # install is in TestingMixin.
+
@PreScriptAction("download-and-extract")
def _pre_download_and_extract(self, action):
    """Abort if --artifact try syntax is used with compiled-code tests"""
    upload_dir = self.query_abs_dirs()["abs_blob_upload_dir"]
    self.mkdir_p(upload_dir)

    if not self.try_message_has_flag("artifact"):
        return
    self.info("Artifact build requested in try syntax.")
    # These suites exercise compiled code, which artifact builds lack.
    compiled_code_prefixes = (
        "cppunit",
        "gtest",
        "jittest",
    )
    rejected = []
    for category in SUITE_CATEGORIES:
        for suite in self._query_specified_suites(category) or []:
            if suite.startswith(compiled_code_prefixes):
                rejected.append(suite)
                break
    if rejected:
        self.record_status(TBPL_EXCEPTION)
        self.fatal(
            "There are specified suites that are incompatible with "
            "--artifact try syntax flag: {}".format(", ".join(rejected)),
            exit_code=self.return_code,
        )
+
def download_and_extract(self):
    """
    download and extract test zip / download installer
    optimizes which subfolders to extract from tests archive
    """
    cfg = self.config
    if cfg.get("run_all_suites"):
        categories = SUITE_CATEGORIES
    else:
        # Only fetch archives for categories that actually have suites
        # selected.
        categories = [
            cat
            for cat in SUITE_CATEGORIES
            if self._query_specified_suites(cat) is not None
        ]
    super(DesktopUnittest, self).download_and_extract(
        extract_dirs=None, suite_categories=categories
    )
+
def start_pulseaudio(self):
    """Start a pulseaudio daemon and load module-null-sink.

    Gated on the NEED_PULSEAUDIO environment variable, which implies the
    underlying system is Linux.
    """
    if os.environ.get("NEED_PULSEAUDIO") != "true":
        return

    daemon_cmd = [
        "pulseaudio",
        "--daemonize",
        "--log-level=4",
        "--log-time=1",
        "-vvvvv",
        "--exit-idle-time=-1",
    ]

    # Only run the initialization for Debian.
    # Ubuntu appears to have an alternate method of starting pulseaudio.
    if self._is_debian():
        self._kill_named_proc("pulseaudio")
        self.run_command(daemon_cmd)

    # All Linux systems need module-null-sink to be loaded, otherwise
    # media tests fail.
    self.run_command("pactl load-module module-null-sink")
    self.run_command("pactl list modules short")
+
def stage_files(self):
    """Run the per-category staging hook (_stage_<category>) for every
    category that has suites selected and defines one."""
    for category in SUITE_CATEGORIES:
        suites = self._query_specified_suites(category)
        handler = getattr(self, "_stage_{}".format(category), None)
        if suites and handler:
            handler(suites)
+
def _stage_files(self, bin_name=None, fail_if_not_exists=True):
    """Copy harness support files (optional binary, components, plugins,
    extensions) from the test package into the installed application.

    Raises OSError when *bin_name* is given but missing, unless
    *fail_if_not_exists* is False.
    """
    dirs = self.query_abs_dirs()
    abs_app_dir = self.query_abs_app_dir()

    # For mac these directories are in Contents/Resources; on other
    # platforms abs_res_dir points at abs_app_dir.
    abs_res_dir = self.query_abs_res_dir()

    if bin_name:
        src = os.path.join(dirs["abs_test_bin_dir"], bin_name)
        dest = os.path.join(abs_app_dir, bin_name)
        if os.path.exists(src):
            self.info("copying %s to %s" % (src, dest))
            shutil.copy2(src, dest)
        elif fail_if_not_exists:
            raise OSError("File %s not found" % src)

    self.copytree(
        dirs["abs_test_bin_components_dir"],
        os.path.join(abs_res_dir, "components"),
        overwrite="overwrite_if_exists",
    )

    plugins_dest = os.path.join(abs_res_dir, "plugins")
    self.mkdir_p(plugins_dest)
    self.copytree(
        dirs["abs_test_bin_plugins_dir"],
        plugins_dest,
        overwrite="overwrite_if_exists",
    )

    if os.path.isdir(dirs["abs_test_extensions_dir"]):
        extensions_dest = os.path.join(abs_res_dir, "extensions")
        self.mkdir_p(extensions_dest)
        self.copytree(
            dirs["abs_test_extensions_dir"],
            extensions_dest,
            overwrite="overwrite_if_exists",
        )
+
def _stage_xpcshell(self, suites):
    """Stage the xpcshell binary, plus http3server when configured."""
    self._stage_files(self.config["xpcshell_name"])
    # http3server isn't built for Windows tests or Linux asan/tsan
    # builds. Only stage if the `http3server_name` config is set and if
    # the file actually exists.
    http3server = self.config.get("http3server_name")
    if http3server:
        self._stage_files(http3server, fail_if_not_exists=False)
+
def _stage_cppunittest(self, suites):
    """Move cppunittest manifests and support JS files into the resources
    directory, where the tests expect to find them."""
    abs_res_dir = self.query_abs_res_dir()
    cpp_dir = self.query_abs_dirs()["abs_cppunittest_dir"]
    # .js files first, then .manifest files, matching the original order.
    for pattern in ("*.js", "*.manifest"):
        for path in glob.glob(os.path.join(cpp_dir, pattern)):
            self.move(path, abs_res_dir)
+
def _stage_gtest(self, suites):
    """Stage gtest support files: gmp plugins and the dependentlibs list
    into the resources dir, and the gtest binaries next to the app."""
    abs_res_dir = self.query_abs_res_dir()
    abs_app_dir = self.query_abs_app_dir()
    dirs = self.query_abs_dirs()
    gtest_dir = dirs["abs_gtest_dir"]
    dirs["abs_test_bin_dir"] = os.path.join(dirs["abs_test_install_dir"], "bin")

    to_move = glob.glob(os.path.join(dirs["abs_test_bin_plugins_dir"], "gmp-*"))
    to_move.append(os.path.join(gtest_dir, "dependentlibs.list.gtest"))
    for path in to_move:
        self.move(path, abs_res_dir)

    self.copytree(os.path.join(gtest_dir, "gtest_bin"), os.path.join(abs_app_dir))
+
def _kill_proc_tree(self, pid):
    """Send SIGTERM to *pid* and all of its descendants.

    Returns a (gone, alive) pair of psutil.Process lists after waiting up
    to 60 seconds, (None, None) when asked to kill the current process,
    or None when psutil is unavailable or any error occurs (logged, not
    raised).
    """
    # Kill a process tree (including grandchildren) with signal.SIGTERM
    try:
        import signal
        import psutil

        # Refuse to kill ourselves.
        if pid == os.getpid():
            return (None, None)

        parent = psutil.Process(pid)
        children = parent.children(recursive=True)
        children.append(parent)

        for p in children:
            p.send_signal(signal.SIGTERM)

        # allow for 60 seconds to kill procs
        timeout = 60
        gone, alive = psutil.wait_procs(children, timeout=timeout)
        for p in gone:
            self.info("psutil found pid %s dead" % p.pid)
        for p in alive:
            self.error("failed to kill pid %d after %d" % (p.pid, timeout))

        return (gone, alive)
    except Exception as e:
        self.error("Exception while trying to kill process tree: %s" % str(e))
+
+ def _kill_named_proc(self, pname):
+ try:
+ import psutil
+ except Exception as e:
+ self.info(
+ "Error importing psutil, not killing process %s: %s" % pname, str(e)
+ )
+ return
+
+ for proc in psutil.process_iter():
+ try:
+ if proc.name() == pname:
+ procd = proc.as_dict(attrs=["pid", "ppid", "name", "username"])
+ self.info("in _kill_named_proc, killing %s" % procd)
+ self._kill_proc_tree(proc.pid)
+ except Exception as e:
+ self.info("Warning: Unable to kill process %s: %s" % (pname, str(e)))
+ # may not be able to access process info for all processes
+ continue
+
+ def _remove_xen_clipboard(self):
+ """
+ When running on a Windows 7 VM, we have XenDPriv.exe running which
+ interferes with the clipboard, lets terminate this process and remove
+ the binary so it doesn't restart
+ """
+ if not self._is_windows():
+ return
+
+ self._kill_named_proc("XenDPriv.exe")
+ xenpath = os.path.join(
+ os.environ["ProgramFiles"], "Citrix", "XenTools", "XenDPriv.exe"
+ )
+ try:
+ if os.path.isfile(xenpath):
+ os.remove(xenpath)
+ except Exception as e:
+ self.error("Error: Failure to remove file %s: %s" % (xenpath, str(e)))
+
def _report_system_info(self):
    """
    Create the system-info.log artifact file, containing a variety of
    system information that might be useful in diagnosing test failures.
    """
    # Best-effort: every section (and the whole function) is wrapped in
    # try/except because psutil throws intermittent exceptions.
    try:
        import psutil

        path = os.path.join(
            self.query_abs_dirs()["abs_blob_upload_dir"], "system-info.log"
        )
        with open(path, "w") as f:
            f.write("System info collected at %s\n\n" % datetime.now())
            f.write("\nBoot time %s\n" % datetime.fromtimestamp(psutil.boot_time()))
            f.write("\nVirtual memory: %s\n" % str(psutil.virtual_memory()))
            f.write("\nDisk partitions: %s\n" % str(psutil.disk_partitions()))
            f.write("\nDisk usage (/): %s\n" % str(psutil.disk_usage(os.path.sep)))
            if not self._is_windows():
                # bug 1417189: frequent errors querying users on Windows
                f.write("\nUsers: %s\n" % str(psutil.users()))
            f.write("\nNetwork connections:\n")
            try:
                for nc in psutil.net_connections():
                    f.write("  %s\n" % str(nc))
            except Exception:
                f.write("Exception getting network info: %s\n" % sys.exc_info()[0])
            f.write("\nProcesses:\n")
            try:
                for p in psutil.process_iter():
                    ctime = str(datetime.fromtimestamp(p.create_time()))
                    f.write(
                        "  PID %d %s %s created at %s\n"
                        % (p.pid, p.name(), str(p.cmdline()), ctime)
                    )
            except Exception:
                f.write("Exception getting process info: %s\n" % sys.exc_info()[0])
    except Exception:
        # psutil throws a variety of intermittent exceptions
        self.info("Unable to complete system-info.log: %s" % sys.exc_info()[0])
+
+ # pull defined in VCSScript.
+ # preflight_run_tests defined in TestingMixin.
+
def run_tests(self):
    """Main test action: clean up the environment, record system info,
    then run each suite category until one signals a stop."""
    self._remove_xen_clipboard()
    self._report_system_info()
    self.start_time = datetime.now()
    for category in SUITE_CATEGORIES:
        # _run_category_suites returns False to abort the remaining
        # categories (time budget exceeded, RETRY status, ...).
        keep_going = self._run_category_suites(category)
        if not keep_going:
            break
+
def get_timeout_for_category(self, suite_category):
    """Return the output timeout (seconds) to use for a suite category."""
    if suite_category == "cppunittest":
        # cppunittests get a larger fixed budget.
        return 2500
    suite_definition = self.config["suite_definitions"][suite_category]
    return suite_definition.get("run_timeout", 1000)
+
def _run_category_suites(self, suite_category):
    """run suite(s) to a specific category"""
    # Returns True when the caller may continue with the next category,
    # False when the overall run should stop (per-test time budget
    # exceeded, too many modified tests, or a RETRY status).
    dirs = self.query_abs_dirs()
    suites = self._query_specified_suites(suite_category)
    abs_app_dir = self.query_abs_app_dir()
    abs_res_dir = self.query_abs_res_dir()

    # Budgets that only apply in per-test modes (test-verify /
    # per-test coverage).
    max_per_test_time = timedelta(minutes=60)
    max_per_test_tests = 10
    if self.per_test_coverage:
        max_per_test_tests = 30
    executed_tests = 0
    executed_too_many_tests = False

    if suites:
        self.info("#### Running %s suites" % suite_category)
        for suite in suites:
            if executed_too_many_tests and not self.per_test_coverage:
                return False

            replace_dict = {
                "abs_app_dir": abs_app_dir,
                # Mac specific, but points to abs_app_dir on other
                # platforms.
                "abs_res_dir": abs_res_dir,
            }
            options_list = []
            env = {"TEST_SUITE": suite}
            if isinstance(suites[suite], dict):
                options_list = suites[suite].get("options", [])
                if (
                    self.verify_enabled
                    or self.per_test_coverage
                    or self._get_mozharness_test_paths(suite_category, suite)
                ):
                    # Ignore tests list in modes where we are running specific tests.
                    tests_list = []
                else:
                    tests_list = suites[suite].get("tests", [])
                env = copy.deepcopy(suites[suite].get("env", {}))
            else:
                options_list = suites[suite]
                tests_list = []

            flavor = self._query_try_flavor(suite_category, suite)
            try_options, try_tests = self.try_args(flavor)

            suite_name = suite_category + "-" + suite
            tbpl_status, log_level = None, None
            error_list = BaseErrorList + HarnessErrorList
            parser = self.get_test_output_parser(
                suite_category,
                flavor=flavor,
                config=self.config,
                error_list=error_list,
                log_obj=self.log_obj,
            )

            if suite_category == "reftest":
                # reftest ships its own log formatter; load it straight
                # from the harness directory.
                ref_formatter = imp.load_source(
                    "ReftestFormatter",
                    os.path.abspath(
                        os.path.join(dirs["abs_reftest_dir"], "output.py")
                    ),
                )
                parser.formatter = ref_formatter.ReftestFormatter()

            if self.query_minidump_stackwalk():
                env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
            if self.config["nodejs_path"]:
                env["MOZ_NODE_PATH"] = self.config["nodejs_path"]
            env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
            env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
            env["RUST_BACKTRACE"] = "full"
            if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
                self.mkdir_p(env["MOZ_UPLOAD_DIR"])

            if self.config["allow_software_gl_layers"]:
                env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"

            env["STYLO_THREADS"] = "4"

            env = self.query_env(partial_env=env, log_level=INFO)
            cmd_timeout = self.get_timeout_for_category(suite_category)

            summary = {}
            # query_args yields one argument list per run; in normal mode
            # this is a single (possibly empty) list, in per-test modes
            # one list per test.
            for per_test_args in self.query_args(suite):
                # Make sure baseline code coverage tests are never
                # skipped and that having them run has no influence
                # on the max number of actual tests that are to be run.
                is_baseline_test = (
                    "baselinecoverage" in per_test_args[-1]
                    if self.per_test_coverage
                    else False
                )
                if executed_too_many_tests and not is_baseline_test:
                    continue

                if not is_baseline_test:
                    if (datetime.now() - self.start_time) > max_per_test_time:
                        # Running tests has run out of time. That is okay! Stop running
                        # them so that a task timeout is not triggered, and so that
                        # (partial) results are made available in a timely manner.
                        self.info(
                            "TinderboxPrint: Running tests took too long: Not all tests "
                            "were executed.<br/>"
                        )
                        # Signal per-test time exceeded, to break out of suites and
                        # suite categories loops also.
                        return False
                    if executed_tests >= max_per_test_tests:
                        # When changesets are merged between trees or many tests are
                        # otherwise updated at once, there probably is not enough time
                        # to run all tests, and attempting to do so may cause other
                        # problems, such as generating too much log output.
                        self.info(
                            "TinderboxPrint: Too many modified tests: Not all tests "
                            "were executed.<br/>"
                        )
                        executed_too_many_tests = True

                    executed_tests = executed_tests + 1

                abs_base_cmd = self._query_abs_base_cmd(suite_category, suite)
                cmd = abs_base_cmd[:]
                cmd.extend(
                    self.query_options(
                        options_list, try_options, str_format_values=replace_dict
                    )
                )
                cmd.extend(
                    self.query_tests_args(
                        tests_list, try_tests, str_format_values=replace_dict
                    )
                )

                final_cmd = copy.copy(cmd)
                final_cmd.extend(per_test_args)

                final_env = copy.copy(env)

                if self.per_test_coverage:
                    self.set_coverage_env(final_env)

                return_code = self.run_command(
                    final_cmd,
                    cwd=dirs["abs_work_dir"],
                    output_timeout=cmd_timeout,
                    output_parser=parser,
                    env=final_env,
                )

                if self.per_test_coverage:
                    self.add_per_test_coverage_report(
                        final_env, suite, per_test_args[-1]
                    )

                # mochitest, reftest, and xpcshell suites do not return
                # appropriate return codes. Therefore, we must parse the output
                # to determine what the tbpl_status and worst_log_level must
                # be. We do this by:
                # 1) checking to see if our mozharness script ran into any
                #    errors itself with 'num_errors' <- OutputParser
                # 2) if num_errors is 0 then we look in the subclassed 'parser'
                #    findings for harness/suite errors <- DesktopUnittestOutputParser
                # 3) checking to see if the return code is in success_codes

                success_codes = None
                if (
                    suite_category == "reftest"
                    and "32bit" in platform.architecture()
                    and platform.system() == "Windows"
                ):
                    # see bug 1120644, 1526777, 1531499
                    success_codes = [1]

                tbpl_status, log_level, summary = parser.evaluate_parser(
                    return_code, success_codes, summary
                )
                parser.append_tinderboxprint_line(suite_name)

                self.record_status(tbpl_status, level=log_level)
                if len(per_test_args) > 0:
                    self.log_per_test_status(
                        per_test_args[-1], tbpl_status, log_level
                    )
                    if tbpl_status == TBPL_RETRY:
                        self.info("Per-test run abandoned due to RETRY status")
                        return False
                else:
                    self.log(
                        "The %s suite: %s ran with return status: %s"
                        % (suite_category, suite, tbpl_status),
                        level=log_level,
                    )

            if executed_too_many_tests:
                return False
    else:
        self.debug("There were no suites to run for %s" % suite_category)
    return True
+
+
# main {{{1
# Script entry point: construct the harness and run the configured actions.
if __name__ == "__main__":
    desktop_unittest = DesktopUnittest()
    desktop_unittest.run_and_exit()
diff --git a/testing/mozharness/scripts/firefox_ui_tests/functional.py b/testing/mozharness/scripts/firefox_ui_tests/functional.py
new file mode 100755
index 0000000000..6cd941b33e
--- /dev/null
+++ b/testing/mozharness/scripts/firefox_ui_tests/functional.py
@@ -0,0 +1,21 @@
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****

# Thin command-line entry point for the Firefox UI functional test suite;
# all logic lives in FirefoxUIFunctionalTests.

from __future__ import absolute_import
import os
import sys

# load modules from parent dir
sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))

from mozharness.mozilla.testing.firefox_ui_tests import FirefoxUIFunctionalTests


if __name__ == "__main__":
    myScript = FirefoxUIFunctionalTests()
    myScript.run_and_exit()
diff --git a/testing/mozharness/scripts/firefox_ui_tests/update.py b/testing/mozharness/scripts/firefox_ui_tests/update.py
new file mode 100755
index 0000000000..d7e71bc1cd
--- /dev/null
+++ b/testing/mozharness/scripts/firefox_ui_tests/update.py
@@ -0,0 +1,21 @@
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****

# Thin command-line entry point for the Firefox UI update test suite;
# all logic lives in FirefoxUIUpdateTests.

from __future__ import absolute_import
import os
import sys

# load modules from parent dir
sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))

from mozharness.mozilla.testing.firefox_ui_tests import FirefoxUIUpdateTests


if __name__ == "__main__":
    myScript = FirefoxUIUpdateTests()
    myScript.run_and_exit()
diff --git a/testing/mozharness/scripts/firefox_ui_tests/update_release.py b/testing/mozharness/scripts/firefox_ui_tests/update_release.py
new file mode 100755
index 0000000000..ddab94c5c8
--- /dev/null
+++ b/testing/mozharness/scripts/firefox_ui_tests/update_release.py
@@ -0,0 +1,371 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+
+from __future__ import absolute_import
+import copy
+import os
+import pprint
+import sys
+
+from six.moves.urllib.parse import quote
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.python import PreScriptAction
+from mozharness.mozilla.automation import TBPL_SUCCESS, TBPL_WARNING, EXIT_STATUS_DICT
+from mozharness.mozilla.testing.firefox_ui_tests import (
+ FirefoxUIUpdateTests,
+ firefox_ui_update_config_options,
+)
+
# Command line arguments for release update tests.  These extend (via a deep
# copy, so the base list is never mutated) the generic update test options.
firefox_ui_update_release_config_options = [
    [
        ["--build-number"],
        {
            "dest": "build_number",
            "help": "Build number of release, eg: 2",
        },
    ],
    [
        ["--limit-locales"],
        {
            "dest": "limit_locales",
            "default": -1,
            "type": int,
            "help": "Limit the number of locales to run.",
        },
    ],
    [
        ["--release-update-config"],
        {
            "dest": "release_update_config",
            "help": "Name of the release update verification config file to use.",
        },
    ],
    [
        ["--this-chunk"],
        {
            "dest": "this_chunk",
            "default": 1,
            "help": "What chunk of locales to process.",
        },
    ],
    [
        ["--tools-repo"],
        {
            "dest": "tools_repo",
            "default": "http://hg.mozilla.org/build/tools",
            "help": "Which tools repo to check out",
        },
    ],
    [
        ["--tools-tag"],
        {
            "dest": "tools_tag",
            "help": "Which revision/tag to use for the tools repository.",
        },
    ],
    [
        ["--total-chunks"],
        {
            "dest": "total_chunks",
            "default": 1,
            # Fixed typo: "dive" -> "divide".
            "help": "Total chunks to divide the locales into.",
        },
    ],
] + copy.deepcopy(firefox_ui_update_config_options)
+
+
+class ReleaseFirefoxUIUpdateTests(FirefoxUIUpdateTests):
+ def __init__(self):
+ all_actions = [
+ "clobber",
+ "checkout",
+ "create-virtualenv",
+ "query_minidump_stackwalk",
+ "read-release-update-config",
+ "run-tests",
+ ]
+
+ super(ReleaseFirefoxUIUpdateTests, self).__init__(
+ all_actions=all_actions,
+ default_actions=all_actions,
+ config_options=firefox_ui_update_release_config_options,
+ append_env_variables_from_configs=True,
+ )
+
+ self.tools_repo = self.config.get("tools_repo")
+ self.tools_tag = self.config.get("tools_tag")
+
+ assert (
+ self.tools_repo and self.tools_tag
+ ), "Without the \"--tools-tag\" we can't clone the releng's tools repository."
+
+ self.limit_locales = int(self.config.get("limit_locales"))
+
+ # This will be a list containing one item per release based on configs
+ # from tools/release/updates/*cfg
+ self.releases = None
+
    def checkout(self):
        """
        We checkout the tools repository and update to the right branch
        for it.
        """
        dirs = self.query_abs_dirs()

        # The firefox-ui test checkout itself is handled by the parent class.
        super(ReleaseFirefoxUIUpdateTests, self).checkout()

        # Additionally clone releng's tools repository, which provides the
        # update-verify config parser used by read_release_update_config().
        self.vcs_checkout(
            repo=self.tools_repo,
            dest=dirs["abs_tools_dir"],
            branch=self.tools_tag,
            vcs="hg",
        )
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+
+ abs_dirs = super(ReleaseFirefoxUIUpdateTests, self).query_abs_dirs()
+ dirs = {
+ "abs_tools_dir": os.path.join(abs_dirs["abs_work_dir"], "tools"),
+ }
+
+ for key in dirs:
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+
+ return self.abs_dirs
+
    def read_release_update_config(self):
        """
        Builds a testing matrix based on an update verification configuration
        file under the tools repository (release/updates/*.cfg).

        Each release info line of the update verification files look similar to the following.

        NOTE: This shows each pair of information as a new line but in reality
        there is one white space separating them. We only show the values we care for.

        release="38.0"
        platform="Linux_x86_64-gcc3"
        build_id="20150429135941"
        locales="ach af ... zh-TW"
        channel="beta-localtest"
        from="/firefox/releases/38.0b9/linux-x86_64/%locale%/firefox-38.0b9.tar.bz2"
        ftp_server_from="http://archive.mozilla.org/pub"

        We will store this information in self.releases as a list of releases.

        NOTE: We will talk of full and quick releases. Full release info normally contains a subset
        of all locales (except for the most recent releases). A quick release has all locales,
        however, it misses the fields 'from' and 'ftp_server_from'.
        Both pairs of information complement each other but differ in such manner.
        """
        dirs = self.query_abs_dirs()
        assert os.path.exists(
            dirs["abs_tools_dir"]
        ), "Without the tools/ checkout we can't use releng's config parser."

        if self.config.get("release_update_config"):
            # The config file is part of the tools repository. Make sure that if specified
            # we force a revision of that repository to be set.
            if self.tools_tag is None:
                self.fatal("Make sure to specify the --tools-tag")

            self.release_update_config = self.config["release_update_config"]

            # Import the config parser
            sys.path.insert(1, os.path.join(dirs["abs_tools_dir"], "lib", "python"))
            from release.updates.verify import UpdateVerifyConfig

            uvc = UpdateVerifyConfig()
            config_file = os.path.join(
                dirs["abs_tools_dir"],
                "release",
                "updates",
                self.config["release_update_config"],
            )
            uvc.read(config_file)
            # Keep any channel that was set earlier (e.g. via config).
            if not hasattr(self, "update_channel"):
                self.update_channel = uvc.channel

            # Filter out any releases that are less than Gecko 38
            uvc.releases = [
                r for r in uvc.releases if int(r["release"].split(".")[0]) >= 38
            ]

            temp_releases = []
            for rel_info in uvc.releases:
                # This is the full release info
                if "from" in rel_info and rel_info["from"] is not None:
                    # Let's find the associated quick release which contains the remaining locales
                    # for all releases except for the most recent release which contain all locales
                    quick_release = uvc.getRelease(
                        build_id=rel_info["build_id"], from_path=None
                    )
                    if quick_release != {}:
                        rel_info["locales"] = sorted(
                            rel_info["locales"] + quick_release["locales"]
                        )
                    temp_releases.append(rel_info)

            uvc.releases = temp_releases
            # Only process this worker's share of the locales.
            chunked_config = uvc.getChunk(
                chunks=int(self.config["total_chunks"]),
                thisChunk=int(self.config["this_chunk"]),
            )

            self.releases = chunked_config.releases
+
+ @PreScriptAction("run-tests")
+ def _pre_run_tests(self, action):
+ assert (
+ "release_update_config" in self.config
+ or self.installer_url
+ or self.installer_path
+ ), "Either specify --update-verify-config, --installer-url or --installer-path."
+
    def run_tests(self):
        """Run the update tests for every release and locale in the matrix.

        Iterates self.releases (built by read_release_update_config), and for
        each locale downloads the installer, installs it, runs the update
        test, and records the return code. Sets self.return_code to a TBPL
        warning status if any locale failed.
        """
        dirs = self.query_abs_dirs()

        # We don't want multiple outputs of the same environment information. To prevent
        # that, we can't make it an argument of run_command and have to print it on our own.
        self.info("Using env: {}".format(pprint.pformat(self.query_env())))

        # results[build_id][locale] -> test return code
        results = {}

        locales_counter = 0
        for rel_info in sorted(self.releases, key=lambda release: release["build_id"]):
            build_id = rel_info["build_id"]
            results[build_id] = {}

            self.info(
                "About to run {buildid} {path} - {num_locales} locales".format(
                    buildid=build_id,
                    path=rel_info["from"],
                    num_locales=len(rel_info["locales"]),
                )
            )

            # Each locale gets a fresh port to avoid address in use errors in case of
            # tests that time out unexpectedly.
            marionette_port = 2827
            for locale in rel_info["locales"]:
                locales_counter += 1
                self.info(
                    "Running {buildid} {locale}".format(buildid=build_id, locale=locale)
                )

                if self.limit_locales > -1 and locales_counter > self.limit_locales:
                    self.info(
                        "We have reached the limit of locales we were intending to run"
                    )
                    break

                # In dry-run mode we only log what would be run.
                if self.config["dry_run"]:
                    continue

                # Determine from where to download the file
                installer_url = "{server}/{fragment}".format(
                    server=rel_info["ftp_server_from"],
                    fragment=quote(rel_info["from"].replace("%locale%", locale)),
                )
                installer_path = self.download_file(
                    url=installer_url, parent_dir=dirs["abs_work_dir"]
                )

                binary_path = self.install_app(
                    app=self.config.get("application"), installer_path=installer_path
                )

                marionette_port += 1

                retcode = self.run_test(
                    binary_path=binary_path,
                    env=self.query_env(avoid_host_env=True),
                    marionette_port=marionette_port,
                )

                self.uninstall_app()

                # Remove installer which is not needed anymore
                self.info("Removing {}".format(installer_path))
                os.remove(installer_path)

                if retcode:
                    self.warning("FAIL: {} has failed.".format(sys.argv[0]))

                # NOTE: the reproduction command below is built and logged for
                # every locale, not only failing ones.
                base_cmd = (
                    "python {command} --firefox-ui-branch {branch} "
                    "--release-update-config {config} --tools-tag {tag}".format(
                        command=sys.argv[0],
                        branch=self.firefox_ui_branch,
                        config=self.release_update_config,
                        tag=self.tools_tag,
                    )
                )

                for config in self.config["config_files"]:
                    base_cmd += " --cfg {}".format(config)

                if self.symbols_url:
                    base_cmd += " --symbols-path {}".format(self.symbols_url)

                base_cmd += " --installer-url {}".format(installer_url)

                self.info(
                    "You can run the *specific* locale on the same machine with:"
                )
                self.info(base_cmd)

                self.info(
                    "You can run the *specific* locale on *your* machine with:"
                )
                self.info("{} --cfg developer_config.py".format(base_cmd))

                results[build_id][locale] = retcode

                self.info(
                    "Completed {buildid} {locale} with return code: {retcode}".format(
                        buildid=build_id, locale=locale, retcode=retcode
                    )
                )

            if self.limit_locales > -1 and locales_counter > self.limit_locales:
                break

        # Determine which locales have failed and set scripts exit code
        exit_status = TBPL_SUCCESS
        for build_id in sorted(results.keys()):
            failed_locales = []
            for locale in sorted(results[build_id].keys()):
                if results[build_id][locale] != 0:
                    failed_locales.append(locale)

            if failed_locales:
                # Print the summary header only once, on the first failure.
                if exit_status == TBPL_SUCCESS:
                    self.info(
                        "\nSUMMARY - Failed locales for {}:".format(self.cli_script)
                    )
                    self.info("====================================================")
                    exit_status = TBPL_WARNING

                self.info(build_id)
                self.info(" {}".format(", ".join(failed_locales)))

        self.return_code = EXIT_STATUS_DICT[exit_status]
+
+
# Script entry point: run the release update test harness and exit.
if __name__ == "__main__":
    release_update_tests = ReleaseFirefoxUIUpdateTests()
    release_update_tests.run_and_exit()
diff --git a/testing/mozharness/scripts/fx_desktop_build.py b/testing/mozharness/scripts/fx_desktop_build.py
new file mode 100755
index 0000000000..7d0f90cd71
--- /dev/null
+++ b/testing/mozharness/scripts/fx_desktop_build.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""fx_desktop_build.py.
+
+script harness to build nightly firefox within Mozilla's build environment
+and developer machines alike
+
+author: Jordan Lund
+
+"""
+
+from __future__ import absolute_import
+import sys
+import os
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+import mozharness.base.script as script
+from mozharness.mozilla.building.buildbase import (
+ BUILD_BASE_CONFIG_OPTIONS,
+ BuildingConfig,
+ BuildScript,
+)
+
+
class FxDesktopBuild(BuildScript, object):
    """Mozharness build script for Firefox desktop builds."""

    def __init__(self):
        # Defaults below may be overridden by --cfg config files and command
        # line options handled by BuildScript.
        buildscript_kwargs = {
            "config_options": BUILD_BASE_CONFIG_OPTIONS,
            "all_actions": [
                "get-secrets",
                "clobber",
                "build",
                "static-analysis-autotest",
                "valgrind-test",
                "multi-l10n",
                "package-source",
            ],
            "require_config_file": True,
            # Default configuration
            "config": {
                "is_automation": True,
                "debug_build": False,
                # nightly stuff
                "nightly_build": False,
                # Seed all clones with mozilla-unified. This ensures subsequent
                # jobs have a minimal `hg pull`.
                "clone_upstream_url": "https://hg.mozilla.org/mozilla-unified",
                "repo_base": "https://hg.mozilla.org",
                "build_resources_path": "%(upload_path)s/build_resources.json",
                "nightly_promotion_branches": ["mozilla-central", "mozilla-aurora"],
                # try will overwrite these
                "clone_with_purge": False,
                "clone_by_revision": False,
                "virtualenv_modules": [
                    "requests==2.8.1",
                ],
                "virtualenv_path": "venv",
            },
            "ConfigClass": BuildingConfig,
        }
        super(FxDesktopBuild, self).__init__(**buildscript_kwargs)

    def query_abs_dirs(self):
        """Return the directory map, adding the objdir and upload path."""
        if self.abs_dirs:
            return self.abs_dirs
        abs_dirs = super(FxDesktopBuild, self).query_abs_dirs()

        dirs = {
            # BuildFactories in factory.py refer to a 'build' dir on the slave.
            # This contains all the source code/objdir to compile. However,
            # there is already a build dir in mozharness for every mh run. The
            # 'build' that factory refers to I named: 'src' so
            # there is a separation in mh. for example, rather than having
            # '{mozharness_repo}/build/build/', I have '{
            # mozharness_repo}/build/src/'
            "abs_obj_dir": os.path.join(abs_dirs["abs_work_dir"], self._query_objdir()),
            "upload_path": self.config["upload_env"]["UPLOAD_PATH"],
        }
        abs_dirs.update(dirs)
        self.abs_dirs = abs_dirs
        return self.abs_dirs

    # Actions {{{2

    @script.PreScriptRun
    def suppress_windows_modal_dialogs(self, *args, **kwargs):
        """Runs before any action; keeps automation from hanging on Windows."""
        if self._is_windows():
            # Suppress Windows modal dialogs to avoid hangs
            import ctypes

            ctypes.windll.kernel32.SetErrorMode(0x8001)
+
+
# Script entry point: run the desktop build and exit with its return code.
if __name__ == "__main__":
    desktop_build = FxDesktopBuild()
    desktop_build.run_and_exit()
diff --git a/testing/mozharness/scripts/l10n_bumper.py b/testing/mozharness/scripts/l10n_bumper.py
new file mode 100755
index 0000000000..ed8baaae1e
--- /dev/null
+++ b/testing/mozharness/scripts/l10n_bumper.py
@@ -0,0 +1,381 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+""" l10n_bumper.py
+
+ Updates a gecko repo with up to date changesets from l10n.mozilla.org.
+
+ Specifically, it updates l10n-changesets.json which is used by mobile releases.
+
+ This is to allow for `mach taskgraph` to reference specific l10n revisions
+ without having to resort to task.extra or commandline base64 json hacks.
+"""
+from __future__ import absolute_import
+import codecs
+import os
+import pprint
+import sys
+import time
+
+try:
+ import simplejson as json
+
+ assert json
+except ImportError:
+ import json
+
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.errors import HgErrorList
+from mozharness.base.vcs.vcsbase import VCSScript
+from mozharness.base.log import FATAL
+
+
+class L10nBumper(VCSScript):
    # Extra command line options on top of those provided by VCSScript.
    config_options = [
        [
            [
                "--ignore-closed-tree",
            ],
            {
                "action": "store_true",
                "dest": "ignore_closed_tree",
                "default": False,
                "help": "Bump l10n changesets on a closed tree.",
            },
        ],
        [
            [
                "--build",
            ],
            # store_false: passing --build clears "dontbuild", so the
            # resulting push will trigger new builds.
            {
                "action": "store_false",
                "dest": "dontbuild",
                "default": True,
                "help": "Trigger new builds on push.",
            },
        ],
    ]
+
    def __init__(self, require_config_file=True):
        """Set up actions and default configuration for the bumper.

        The default action is push-loop, which drives check/checkout/bump/push
        with retries; the individual actions remain available for debugging
        single steps.
        """
        super(L10nBumper, self).__init__(
            all_actions=[
                "clobber",
                "check-treestatus",
                "checkout-gecko",
                "bump-changesets",
                "push",
                "push-loop",
            ],
            default_actions=[
                "push-loop",
            ],
            require_config_file=require_config_file,
            config_options=self.config_options,
            # Default config options
            config={
                "treestatus_base_url": "https://treestatus.mozilla-releng.net",
                "log_max_rotate": 99,
            },
        )
+
+ # Helper methods {{{1
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+
+ abs_dirs = super(L10nBumper, self).query_abs_dirs()
+
+ abs_dirs.update(
+ {
+ "gecko_local_dir": os.path.join(
+ abs_dirs["abs_work_dir"],
+ self.config.get(
+ "gecko_local_dir",
+ os.path.basename(self.config["gecko_pull_url"]),
+ ),
+ ),
+ }
+ )
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
    def hg_commit(self, path, repo_path, message):
        """
        Commits changes in repo_path, with specified user and commit message
        """
        # The hg username comes from the script config, not the local hgrc.
        user = self.config["hg_user"]
        hg = self.query_exe("hg", return_type="list")
        # Force a UTF-8 locale so hg accepts non-ascii commit messages.
        env = self.query_env(partial_env={"LANG": "en_US.UTF-8"})
        cmd = hg + ["add", path]
        self.run_command(cmd, cwd=repo_path, env=env)
        cmd = hg + ["commit", "-u", user, "-m", message]
        self.run_command(cmd, cwd=repo_path, env=env)
+
    def hg_push(self, repo_path):
        """Push the current commit; roll back local state on failure.

        Returns True on a successful push. On failure, strips the outgoing
        commits and purges the working directory so the next attempt starts
        from a pristine state, then returns False.
        """
        hg = self.query_exe("hg", return_type="list")
        command = hg + [
            "push",
            "-e",
            # Push over ssh with the configured deploy key and user.
            "ssh -oIdentityFile=%s -l %s"
            % (
                self.config["ssh_key"],
                self.config["ssh_user"],
            ),
            "-r",
            ".",
            self.config["gecko_push_url"],
        ]
        status = self.run_command(command, cwd=repo_path, error_list=HgErrorList)
        if status != 0:
            # We failed; get back to a known state so we can either retry
            # or fail out and continue later.
            self.run_command(
                hg
                + ["--config", "extensions.mq=", "strip", "--no-backup", "outgoing()"],
                cwd=repo_path,
            )
            self.run_command(hg + ["up", "-C"], cwd=repo_path)
            self.run_command(
                hg + ["--config", "extensions.purge=", "purge", "--all"], cwd=repo_path
            )
            return False
        return True
+
    def _read_json(self, path):
        """Parse the file at *path* as JSON and return the result."""
        contents = self.read_from_file(path)
        try:
            json_contents = json.loads(contents)
            return json_contents
        except ValueError:
            # NOTE(review): error() presumably logs without raising, so an
            # invalid file makes this method return None — confirm callers
            # can cope with that.
            self.error("%s is invalid json!" % path)
+
+ def _read_version(self, path):
+ contents = self.read_from_file(path).split("\n")[0]
+ return contents.split(".")
+
+ def _build_locale_map(self, old_contents, new_contents):
+ locale_map = {}
+ for key in old_contents:
+ if key not in new_contents:
+ locale_map[key] = "removed"
+ for k, v in new_contents.items():
+ if old_contents.get(k, {}).get("revision") != v["revision"]:
+ locale_map[k] = v["revision"]
+ elif old_contents.get(k, {}).get("platforms") != v["platforms"]:
+ locale_map[k] = v["platforms"]
+ return locale_map
+
    def _build_platform_dict(self, bump_config):
        """Map each locale to the sorted list of platforms that ship it.

        Reads each platform_configs entry's locales file from the gecko
        checkout; platforms accumulate across files, minus any platforms
        listed for the locale in ignore_config.
        """
        dirs = self.query_abs_dirs()
        repo_path = dirs["gecko_local_dir"]
        platform_dict = {}
        ignore_config = bump_config.get("ignore_config", {})
        for platform_config in bump_config["platform_configs"]:
            path = os.path.join(repo_path, platform_config["path"])
            self.info(
                "Reading %s for %s locales..." % (path, platform_config["platforms"])
            )
            contents = self.read_from_file(path)
            for locale in contents.splitlines():
                # locale is 1st word in line in shipped-locales
                if platform_config.get("format") == "shipped-locales":
                    locale = locale.split(" ")[0]
                existing_platforms = set(
                    platform_dict.get(locale, {}).get("platforms", [])
                )
                platforms = set(platform_config["platforms"])
                ignore_platforms = set(ignore_config.get(locale, []))
                platforms = (platforms | existing_platforms) - ignore_platforms
                platform_dict[locale] = {"platforms": sorted(list(platforms))}
        self.info("Built platform_dict:\n%s" % pprint.pformat(platform_dict))
        return platform_dict
+
    def _build_revision_dict(self, bump_config, version_list):
        """Attach an l10n revision to every locale in the platform dict.

        If revision_url is configured, a "locale revision" listing is
        downloaded and those revisions are used (locales absent from the
        platform dict are dropped); otherwise every locale is pinned to
        the "default" revision.
        """
        self.info("Building revision dict...")
        platform_dict = self._build_platform_dict(bump_config)
        revision_dict = {}
        if bump_config.get("revision_url"):
            repl_dict = {
                "MAJOR_VERSION": version_list[0],
                # Sum of the first two version components as an int.
                "COMBINED_MAJOR_VERSION": str(
                    int(version_list[0]) + int(version_list[1])
                ),
            }

            url = bump_config["revision_url"] % repl_dict
            path = self.download_file(url, error_level=FATAL)
            revision_info = self.read_from_file(path)
            self.info("Got %s" % revision_info)
            for line in revision_info.splitlines():
                # Each line is expected to be "<locale> <revision>".
                locale, revision = line.split(" ")
                if locale in platform_dict:
                    revision_dict[locale] = platform_dict[locale]
                    revision_dict[locale]["revision"] = revision
        else:
            for k, v in platform_dict.items():
                v["revision"] = "default"
                revision_dict[k] = v
        self.info("revision_dict:\n%s" % pprint.pformat(revision_dict))
        return revision_dict
+
+ def build_commit_message(self, name, locale_map):
+ comments = ""
+ approval_str = "r=release a=l10n-bump"
+ for locale, revision in sorted(locale_map.items()):
+ comments += "%s -> %s\n" % (locale, revision)
+ if self.config["dontbuild"]:
+ approval_str += " DONTBUILD"
+ if self.config["ignore_closed_tree"]:
+ approval_str += " CLOSED TREE"
+ message = "no bug - Bumping %s %s\n\n" % (name, approval_str)
+ message += comments
+ message = message.encode("utf-8")
+ return message
+
    def query_treestatus(self):
        "Return True if we can land based on treestatus"
        c = self.config
        dirs = self.query_abs_dirs()
        # The tree name defaults to the basename of the pull URL.
        tree = c.get(
            "treestatus_tree", os.path.basename(c["gecko_pull_url"].rstrip("/"))
        )
        treestatus_url = "%s/trees/%s" % (c["treestatus_base_url"], tree)
        treestatus_json = os.path.join(dirs["abs_work_dir"], "treestatus.json")
        if not os.path.exists(dirs["abs_work_dir"]):
            self.mkdir_p(dirs["abs_work_dir"])
        # Always fetch a fresh copy; a stale file could report the wrong state.
        self.rmtree(treestatus_json)

        self.run_command(
            ["curl", "--retry", "4", "-o", treestatus_json, treestatus_url],
            throw_exception=True,
        )

        treestatus = self._read_json(treestatus_json)
        # Anything that is not explicitly "closed" is treated as landable.
        if treestatus["result"]["status"] != "closed":
            self.info(
                "treestatus is %s - assuming we can land"
                % repr(treestatus["result"]["status"])
            )
            return True

        return False
+
+ # Actions {{{1
+ def check_treestatus(self):
+ if not self.config["ignore_closed_tree"] and not self.query_treestatus():
+ self.info("breaking early since treestatus is closed")
+ sys.exit(0)
+
+ def checkout_gecko(self):
+ c = self.config
+ dirs = self.query_abs_dirs()
+ dest = dirs["gecko_local_dir"]
+ repos = [
+ {
+ "repo": c["gecko_pull_url"],
+ "tag": c.get("gecko_tag", "default"),
+ "dest": dest,
+ "vcs": "hg",
+ }
+ ]
+ self.vcs_checkout_repos(repos)
+
    def bump_changesets(self):
        """Regenerate each changesets file and commit it if it changed.

        Returns True if at least one bump_config produced a commit.
        """
        dirs = self.query_abs_dirs()
        repo_path = dirs["gecko_local_dir"]
        version_path = os.path.join(repo_path, self.config["version_path"])
        changes = False
        version_list = self._read_version(version_path)
        for bump_config in self.config["bump_configs"]:
            path = os.path.join(repo_path, bump_config["path"])
            # For now, assume format == 'json'. When we add desktop support,
            # we may need to add flatfile support
            if os.path.exists(path):
                old_contents = self._read_json(path)
            else:
                old_contents = {}

            new_contents = self._build_revision_dict(bump_config, version_list)

            if new_contents == old_contents:
                continue
            # super basic sanity check
            if not isinstance(new_contents, dict) or len(new_contents) < 5:
                self.error(
                    "Cowardly refusing to land a broken-seeming changesets file!"
                )
                continue

            # Write to disk
            content_string = json.dumps(
                new_contents,
                sort_keys=True,
                indent=4,
                separators=(",", ": "),
            )
            fh = codecs.open(path, encoding="utf-8", mode="w+")
            fh.write(content_string + "\n")
            fh.close()

            locale_map = self._build_locale_map(old_contents, new_contents)

            # Commit
            message = self.build_commit_message(bump_config["name"], locale_map)
            self.hg_commit(path, repo_path, message)
            changes = True
        return changes
+
+ def push(self):
+ dirs = self.query_abs_dirs()
+ repo_path = dirs["gecko_local_dir"]
+ return self.hg_push(repo_path)
+
    def push_loop(self):
        """Main action: retry checkout/bump/push until success or retries run out.

        Exits early if the tree is closed (unless ignore_closed_tree) or if
        nothing changed; touches the nagios status file on completion.
        """
        max_retries = 5
        for _ in range(max_retries):
            changed = False
            if not self.config["ignore_closed_tree"] and not self.query_treestatus():
                # Tree is closed; exit early to avoid a bunch of wasted time
                self.info("breaking early since treestatus is closed")
                break

            self.checkout_gecko()
            if self.bump_changesets():
                changed = True

            if not changed:
                # Nothing changed, we're all done
                self.info("No changes - all done")
                break

            if self.push():
                # We did it! Hurray!
                self.info("Great success!")
                break
            # If we're here, then the push failed. It also stripped any
            # outgoing commits, so we should be in a pristine state again
            # Empty our local cache of manifests so they get loaded again next
            # time through this loop. This makes sure we get fresh upstream
            # manifests, and avoids problems like bug 979080
            self.device_manifests = {}

            # Sleep before trying again
            self.info("Sleeping 60 before trying again")
            time.sleep(60)
        else:
            # for/else: only reached when the loop exhausted max_retries
            # without a break, i.e. every push attempt failed.
            self.fatal("Didn't complete successfully (hit max_retries)")

        # touch status file for nagios
        dirs = self.query_abs_dirs()
        status_path = os.path.join(dirs["base_work_dir"], self.config["status_path"])
        self._touch_file(status_path)
+
+
+# __main__ {{{1
if __name__ == "__main__":
    l10n_bumper = L10nBumper()
    l10n_bumper.run_and_exit()
diff --git a/testing/mozharness/scripts/marionette.py b/testing/mozharness/scripts/marionette.py
new file mode 100755
index 0000000000..9524983d1d
--- /dev/null
+++ b/testing/mozharness/scripts/marionette.py
@@ -0,0 +1,468 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+from __future__ import absolute_import
+import copy
+import json
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.errors import BaseErrorList, TarErrorList
+from mozharness.base.log import INFO
+from mozharness.base.script import PreScriptAction
+from mozharness.base.transfer import TransferMixin
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.testing.errors import LogcatErrorList
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.testing.unittest import TestSummaryOutputParserHelper
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.errors import HarnessErrorList
+
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+
+
+class MarionetteTest(TestingMixin, MercurialScript, TransferMixin, CodeCoverageMixin):
    # Marionette-specific command line options, extended with the shared
    # testing and code-coverage option lists (deep-copied so the shared
    # lists are never mutated).
    config_options = (
        [
            [
                ["--application"],
                {
                    "action": "store",
                    "dest": "application",
                    "default": None,
                    "help": "application name of binary",
                },
            ],
            [
                ["--app-arg"],
                {
                    "action": "store",
                    "dest": "app_arg",
                    "default": None,
                    "help": "Optional command-line argument to pass to the browser",
                },
            ],
            [
                ["--marionette-address"],
                {
                    "action": "store",
                    "dest": "marionette_address",
                    "default": None,
                    "help": "The host:port of the Marionette server running inside Gecko. "
                    "Unused for emulator testing",
                },
            ],
            [
                ["--emulator"],
                {
                    "action": "store",
                    "type": "choice",
                    "choices": ["arm", "x86"],
                    "dest": "emulator",
                    "default": None,
                    "help": "Use an emulator for testing",
                },
            ],
            [
                ["--test-manifest"],
                {
                    "action": "store",
                    "dest": "test_manifest",
                    "default": "unit-tests.ini",
                    "help": "Path to test manifest to run relative to the Marionette "
                    "tests directory",
                },
            ],
            [
                ["--total-chunks"],
                {
                    "action": "store",
                    "dest": "total_chunks",
                    "help": "Number of total chunks",
                },
            ],
            [
                ["--this-chunk"],
                {
                    "action": "store",
                    "dest": "this_chunk",
                    "help": "Number of this chunk",
                },
            ],
            [
                ["--setpref"],
                {
                    "action": "append",
                    "metavar": "PREF=VALUE",
                    "dest": "extra_prefs",
                    "default": [],
                    "help": "Extra user prefs.",
                },
            ],
            [
                ["--headless"],
                {
                    "action": "store_true",
                    "dest": "headless",
                    "default": False,
                    "help": "Run tests in headless mode.",
                },
            ],
            [
                ["--headless-width"],
                {
                    "action": "store",
                    "dest": "headless_width",
                    "default": "1600",
                    "help": "Specify headless virtual screen width (default: 1600).",
                },
            ],
            [
                ["--headless-height"],
                {
                    "action": "store",
                    "dest": "headless_height",
                    "default": "1200",
                    "help": "Specify headless virtual screen height (default: 1200).",
                },
            ],
            [
                ["--allow-software-gl-layers"],
                {
                    "action": "store_true",
                    "dest": "allow_software_gl_layers",
                    "default": False,
                    "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor.",  # NOQA: E501
                },
            ],
            [
                ["--disable-actors"],
                {
                    "action": "store_true",
                    "dest": "disable_actors",
                    "default": False,
                    "help": "Disable the usage of JSWindowActors in Marionette.",
                },
            ],
            [
                ["--enable-webrender"],
                {
                    "action": "store_true",
                    "dest": "enable_webrender",
                    "default": False,
                    "help": "Enable the WebRender compositor in Gecko.",
                },
            ],
        ]
        + copy.deepcopy(testing_config_options)
        + copy.deepcopy(code_coverage_config_options)
    )

    # No extra repositories need to be checked out for this script.
    repos = []
+
    def __init__(self, require_config_file=False):
        """Set up the Marionette test script.

        Configures the standard download/install/run action pipeline and
        caches the installer/test locations from the config.
        """
        super(MarionetteTest, self).__init__(
            config_options=self.config_options,
            all_actions=[
                "clobber",
                "pull",
                "download-and-extract",
                "create-virtualenv",
                "install",
                "run-tests",
            ],
            default_actions=[
                "clobber",
                "pull",
                "download-and-extract",
                "create-virtualenv",
                "install",
                "run-tests",
            ],
            require_config_file=require_config_file,
            config={"require_test_zip": True},
        )

        # these are necessary since self.config is read only
        c = self.config
        self.installer_url = c.get("installer_url")
        self.installer_path = c.get("installer_path")
        self.binary_path = c.get("binary_path")
        self.test_url = c.get("test_url")
        self.test_packages_url = c.get("test_packages_url")

        # The suite key must exist in the config's suite_definitions,
        # otherwise we cannot build the test command later.
        self.test_suite = self._get_test_suite(c.get("emulator"))
        if self.test_suite not in self.config["suite_definitions"]:
            self.fatal("{} is not defined in the config!".format(self.test_suite))

        # Structured (mozlog) output vs. plain summary parsing.
        if c.get("structured_output"):
            self.parser_class = StructuredOutputParser
        else:
            self.parser_class = TestSummaryOutputParserHelper
+
+ def _pre_config_lock(self, rw_config):
+ super(MarionetteTest, self)._pre_config_lock(rw_config)
+ if not self.config.get("emulator") and not self.config.get(
+ "marionette_address"
+ ):
+ self.fatal(
+ "You need to specify a --marionette-address for non-emulator tests! "
+ "(Try --marionette-address localhost:2828 )"
+ )
+
+ def _query_tests_dir(self):
+ dirs = self.query_abs_dirs()
+ test_dir = self.config["suite_definitions"][self.test_suite]["testsdir"]
+
+ return os.path.join(dirs["abs_test_install_dir"], test_dir)
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(MarionetteTest, self).query_abs_dirs()
+ dirs = {}
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ dirs["abs_marionette_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "marionette", "harness", "marionette_harness"
+ )
+ dirs["abs_marionette_tests_dir"] = os.path.join(
+ dirs["abs_test_install_dir"],
+ "marionette",
+ "tests",
+ "testing",
+ "marionette",
+ "harness",
+ "marionette_harness",
+ "tests",
+ )
+ dirs["abs_gecko_dir"] = os.path.join(abs_dirs["abs_work_dir"], "gecko")
+ dirs["abs_emulator_dir"] = os.path.join(abs_dirs["abs_work_dir"], "emulator")
+
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ @PreScriptAction("create-virtualenv")
+ def _configure_marionette_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+ requirements = os.path.join(
+ dirs["abs_test_install_dir"], "config", "marionette_requirements.txt"
+ )
+ if not os.path.isfile(requirements):
+ self.fatal(
+ "Could not find marionette requirements file: {}".format(requirements)
+ )
+
+ # marionette_requirements.txt must use the legacy resolver until bug 1684969 is resolved.
+ self.register_virtualenv_module(
+ requirements=[requirements], two_pass=True, legacy_resolver=True
+ )
+
+ def _get_test_suite(self, is_emulator):
+ """
+ Determine which in tree options group to use and return the
+ appropriate key.
+ """
+ platform = "emulator" if is_emulator else "desktop"
+ # Currently running marionette on an emulator means webapi
+ # tests. This method will need to change if this does.
+ testsuite = "webapi" if is_emulator else "marionette"
+ return "{}_{}".format(testsuite, platform)
+
+ def download_and_extract(self):
+ super(MarionetteTest, self).download_and_extract()
+
+ if self.config.get("emulator"):
+ dirs = self.query_abs_dirs()
+
+ self.mkdir_p(dirs["abs_emulator_dir"])
+ tar = self.query_exe("tar", return_type="list")
+ self.run_command(
+ tar + ["zxf", self.installer_path],
+ cwd=dirs["abs_emulator_dir"],
+ error_list=TarErrorList,
+ halt_on_failure=True,
+ fatal_exit_code=3,
+ )
+
+ def install(self):
+ if self.config.get("emulator"):
+ self.info("Emulator tests; skipping.")
+ else:
+ super(MarionetteTest, self).install()
+
    def run_tests(self):
        """
        Run the Marionette tests

        Builds the runtests.py command line from the suite definition and
        config, sets up the environment (minidump/upload/headless vars),
        runs the harness under an output parser, dumps diagnostic logs on
        failure, and records the resulting TBPL status.
        """
        dirs = self.query_abs_dirs()

        # All harness logs land in the blobber upload dir so CI archives them.
        raw_log_file = os.path.join(dirs["abs_blob_upload_dir"], "marionette_raw.log")
        error_summary_file = os.path.join(
            dirs["abs_blob_upload_dir"], "marionette_errorsummary.log"
        )
        html_report_file = os.path.join(dirs["abs_blob_upload_dir"], "report.html")

        # Values interpolated into the suite definition's %-style option
        # templates below.
        config_fmt_args = {
            # emulator builds require a longer timeout
            "timeout": 60000 if self.config.get("emulator") else 10000,
            "profile": os.path.join(dirs["abs_work_dir"], "profile"),
            "xml_output": os.path.join(dirs["abs_work_dir"], "output.xml"),
            "html_output": os.path.join(dirs["abs_blob_upload_dir"], "output.html"),
            "logcat_dir": dirs["abs_work_dir"],
            # NOTE(review): hard-coded to "arm" — confirm this is still valid
            # for non-arm emulator images.
            "emulator": "arm",
            "symbols_path": self.symbols_path,
            "binary": self.binary_path,
            "address": self.config.get("marionette_address"),
            "raw_log_file": raw_log_file,
            "error_summary_file": error_summary_file,
            "html_report_file": html_report_file,
            "gecko_log": dirs["abs_blob_upload_dir"],
            "this_chunk": self.config.get("this_chunk", 1),
            "total_chunks": self.config.get("total_chunks", 1),
        }

        self.info("The emulator type: %s" % config_fmt_args["emulator"])
        # build the marionette command arguments
        python = self.query_python_path("python")

        cmd = [python, "-u", os.path.join(dirs["abs_marionette_dir"], "runtests.py")]

        manifest = os.path.join(
            dirs["abs_marionette_tests_dir"], self.config["test_manifest"]
        )

        if self.config.get("app_arg"):
            config_fmt_args["app_arg"] = self.config["app_arg"]

        if self.config["disable_actors"]:
            cmd.append("--disable-actors")

        if self.config["enable_webrender"]:
            cmd.append("--enable-webrender")

        # Forward any extra prefs requested by the task configuration.
        cmd.extend(["--setpref={}".format(p) for p in self.config["extra_prefs"]])

        cmd.append("--gecko-log=-")

        if self.config.get("structured_output"):
            cmd.append("--log-raw=-")

        # Suite-specific options are %-formatted with config_fmt_args above.
        for arg in self.config["suite_definitions"][self.test_suite]["options"]:
            cmd.append(arg % config_fmt_args)

        if self.mkdir_p(dirs["abs_blob_upload_dir"]) == -1:
            # Make sure that the logging directory exists
            self.fatal("Could not create blobber upload directory")

        # The scheduler can pin specific test paths via this env var
        # (JSON-encoded mapping of suite -> list of paths).
        test_paths = json.loads(os.environ.get("MOZHARNESS_TEST_PATHS", '""'))

        if test_paths and "marionette" in test_paths:
            paths = [
                os.path.join(dirs["abs_test_install_dir"], "marionette", "tests", p)
                for p in test_paths["marionette"]
            ]
            cmd.extend(paths)
        else:
            cmd.append(manifest)

        try_options, try_tests = self.try_args("marionette")
        cmd.extend(self.query_tests_args(try_tests, str_format_values=config_fmt_args))

        # Environment for the harness process: crash-symbolication and
        # upload-dir settings, plus optional GL / headless toggles.
        env = {}
        if self.query_minidump_stackwalk():
            env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
        env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
        env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
        env["RUST_BACKTRACE"] = "full"

        if self.config["allow_software_gl_layers"]:
            env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"

        if self.config["headless"]:
            env["MOZ_HEADLESS"] = "1"
            env["MOZ_HEADLESS_WIDTH"] = self.config["headless_width"]
            env["MOZ_HEADLESS_HEIGHT"] = self.config["headless_height"]

        if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
            self.mkdir_p(env["MOZ_UPLOAD_DIR"])
        env = self.query_env(partial_env=env)

        try:
            cwd = self._query_tests_dir()
        except Exception as e:
            self.fatal(
                "Don't know how to run --test-suite '{0}': {1}!".format(
                    self.test_suite, e
                )
            )

        marionette_parser = self.parser_class(
            config=self.config,
            log_obj=self.log_obj,
            error_list=BaseErrorList + HarnessErrorList,
            strict=False,
        )
        return_code = self.run_command(
            cmd, cwd=cwd, output_timeout=1000, output_parser=marionette_parser, env=env
        )
        level = INFO
        tbpl_status, log_level, summary = marionette_parser.evaluate_parser(
            return_code=return_code
        )
        marionette_parser.append_tinderboxprint_line("marionette")

        # Archive the emulator's qemu log if one was produced.
        qemu = os.path.join(dirs["abs_work_dir"], "qemu.log")
        if os.path.isfile(qemu):
            self.copyfile(qemu, os.path.join(dirs["abs_blob_upload_dir"], "qemu.log"))

        # dump logcat output if there were failures
        if self.config.get("emulator"):
            if (
                marionette_parser.failed != "0"
                or "T-FAIL" in marionette_parser.tsummary
            ):
                logcat = os.path.join(dirs["abs_work_dir"], "emulator-5554.log")
                if os.access(logcat, os.F_OK):
                    self.info("dumping logcat")
                    self.run_command(["cat", logcat], error_list=LogcatErrorList)
                else:
                    self.info("no logcat file found")
        else:
            # .. or gecko.log if it exists
            gecko_log = os.path.join(self.config["base_work_dir"], "gecko.log")
            if os.access(gecko_log, os.F_OK):
                self.info("dumping gecko.log")
                self.run_command(["cat", gecko_log])
                # Remove so a later run can't pick up a stale log.
                self.rmtree(gecko_log)
            else:
                self.info("gecko.log not found")

        marionette_parser.print_summary("marionette")

        self.log(
            "Marionette exited with return code %s: %s" % (return_code, tbpl_status),
            level=level,
        )
        self.record_status(tbpl_status)
+
+
if __name__ == "__main__":
    # Script entry point: run the configured actions and exit with a status.
    MarionetteTest().run_and_exit()
diff --git a/testing/mozharness/scripts/merge_day/gecko_migration.py b/testing/mozharness/scripts/merge_day/gecko_migration.py
new file mode 100755
index 0000000000..2c92ccb63d
--- /dev/null
+++ b/testing/mozharness/scripts/merge_day/gecko_migration.py
@@ -0,0 +1,566 @@
+#!/usr/bin/env python
+# lint_ignore=E501
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+""" gecko_migration.py
+
+Merge day script for gecko (mozilla-central -> mozilla-beta,
+mozilla-beta -> mozilla-release).
+
+Ported largely from
+http://hg.mozilla.org/build/tools/file/084bc4e2fc76/release/beta2release.py
+and
+http://hg.mozilla.org/build/tools/file/084bc4e2fc76/release/merge_helper.py
+"""
+
+from __future__ import absolute_import
+import os
+import pprint
+import subprocess
+import sys
+
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.errors import HgErrorList
+from mozharness.base.python import VirtualenvMixin, virtualenv_config_options
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.automation import AutomationMixin
+from mozharness.mozilla.repo_manipulation import MercurialRepoManipulationMixin
+
# The merge-day workflows this script knows how to drive.  The configured
# "migration_behavior" must be one of these (enforced by run_sanity_check);
# each name matches a method on GeckoMigration.
VALID_MIGRATION_BEHAVIORS = (
    "beta_to_release",
    "central_to_beta",
    "release_to_esr",
    "bump_second_digit",
    "bump_and_tag_central",
)
+
+
+# GeckoMigration {{{1
class GeckoMigration(
    MercurialScript, VirtualenvMixin, AutomationMixin, MercurialRepoManipulationMixin
):
    """Drive a merge-day migration between two gecko repositories.

    The configured ``migration_behavior`` (one of VALID_MIGRATION_BEHAVIORS)
    selects the branch-specific workflow method invoked by the ``migrate``
    action (or run directly as its own action for the bump-only behaviors).
    """

    config_options = [
        [
            [
                "--hg-user",
            ],
            {
                "action": "store",
                "dest": "hg_user",
                "type": "string",
                "default": "ffxbld <release@mozilla.com>",
                "help": "Specify what user to use to commit to hg.",
            },
        ],
        [
            [
                "--ssh-user",
            ],
            {
                "action": "store",
                "dest": "ssh_user",
                "type": "string",
                "default": "ffxbld-merge",
                "help": "The user to push to hg.mozilla.org as.",
            },
        ],
        [
            [
                "--remove-locale",
            ],
            {
                "action": "extend",
                "dest": "remove_locales",
                "type": "string",
                "help": "Comma separated list of locales to remove from the 'to' repo.",
            },
        ],
    ]
    # Lazily-built list of repo dicts (see query_repos).
    gecko_repos = None

    def __init__(self, require_config_file=True):
        super(GeckoMigration, self).__init__(
            config_options=virtualenv_config_options + self.config_options,
            all_actions=[
                "clobber",
                "create-virtualenv",
                "clean-repos",
                "pull",
                "set_push_to_ssh",
                "migrate",
                "bump_second_digit",
                "bump_and_tag_central",
                "commit-changes",
                "push",
            ],
            default_actions=[
                "clean-repos",
                "pull",
                "set_push_to_ssh",
                "migrate",
            ],
            require_config_file=require_config_file,
        )
        self.run_sanity_check()

    # Helper methods {{{1
    def run_sanity_check(self):
        """Verify the configs look sane before proceeding."""
        message = ""
        if self.config["migration_behavior"] not in VALID_MIGRATION_BEHAVIORS:
            message += "%s must be one of %s!\n" % (
                self.config["migration_behavior"],
                VALID_MIGRATION_BEHAVIORS,
            )
        if self.config["migration_behavior"] == "beta_to_release":
            if (
                self.config.get("require_remove_locales")
                and not self.config.get("remove_locales")
                and "migrate" in self.actions
            ):
                message += "You must specify --remove-locale!\n"
        else:
            if self.config.get("require_remove_locales") or self.config.get(
                "remove_locales"
            ):
                self.warning(
                    "--remove-locale isn't valid unless you're using beta_to_release "
                    "migration_behavior!\n"
                )
        if message:
            self.fatal(message)

    def query_abs_dirs(self):
        """Allow for abs_from_dir and abs_to_dir"""
        if self.abs_dirs:
            return self.abs_dirs
        dirs = super(GeckoMigration, self).query_abs_dirs()
        for k in ("from", "to"):
            url = self.config.get("%s_repo_url" % k)
            if url:
                dir_name = self.get_filename_from_url(url)
                self.info("adding %s" % dir_name)
                self.abs_dirs["abs_%s_dir" % k] = os.path.join(
                    dirs["abs_work_dir"], dir_name
                )
        return self.abs_dirs

    def query_repos(self):
        """Build a list of repos to clone."""
        if self.gecko_repos:
            return self.gecko_repos
        self.info("Building gecko_repos list...")
        dirs = self.query_abs_dirs()
        self.gecko_repos = []
        for k in ("from", "to"):
            repo_key = "%s_repo_url" % k
            url = self.config.get(repo_key)
            if url:
                self.gecko_repos.append(
                    {
                        "repo": url,
                        "branch": self.config.get("%s_repo_branch" % (k,), "default"),
                        "dest": dirs["abs_%s_dir" % k],
                        "vcs": "hg",
                        # "hg" vcs uses robustcheckout extension requires the use of a share
                        # but having a share breaks migration logic when merging repos.
                        # Solution: tell hg vcs to create a unique share directory for each
                        # gecko repo. see mozharness/base/vcs/mercurial.py for implementation
                        "use_vcs_unique_share": True,
                    }
                )
            else:
                self.warning("Skipping %s" % repo_key)
        self.info(pprint.pformat(self.gecko_repos))
        return self.gecko_repos

    def query_commit_dirs(self):
        """Only the 'to' repo receives config-update commits."""
        dirs = self.query_abs_dirs()
        commit_dirs = [dirs["abs_to_dir"]]
        return commit_dirs

    def query_commit_message(self):
        return "Update configs. IGNORE BROKEN CHANGESETS CLOSED TREE NO BUG a=release ba=release"

    def query_push_dirs(self):
        dirs = self.query_abs_dirs()
        return dirs.get("abs_from_dir"), dirs.get("abs_to_dir")

    def query_push_args(self, cwd):
        # beta_to_release pushes a new named branch to the 'to' repo.
        if (
            cwd == self.query_abs_dirs()["abs_to_dir"]
            and self.config["migration_behavior"] == "beta_to_release"
        ):
            return ["--new-branch", "-r", "."]
        else:
            return ["-r", "."]

    def set_push_to_ssh(self):
        """Rewrite each repo's default-push path to ssh:// with ssh_user."""
        push_dirs = [d for d in self.query_push_dirs() if d is not None]
        for cwd in push_dirs:
            repo_url = self.read_repo_hg_rc(cwd).get("paths", "default")
            username = self.config.get("ssh_user", "")
            # Add a trailing @ to the username if it exists, otherwise it gets
            # mushed up with the hostname.
            if username:
                username += "@"
            push_dest = repo_url.replace("https://", "ssh://" + username)

            if not push_dest.startswith("ssh://"):
                # Bug fix: interpolate the offending path into the message;
                # the original raised the literal "{}" placeholder.
                raise Exception(
                    'Warning: path "{}" is not supported. Protocol must be ssh'.format(
                        push_dest
                    )
                )

            self.edit_repo_hg_rc(cwd, "paths", "default-push", push_dest)

    def query_from_revision(self):
        """Shortcut to get the revision for the from repo"""
        dirs = self.query_abs_dirs()
        return self.query_hg_revision(dirs["abs_from_dir"])

    def query_to_revision(self):
        """Shortcut to get the revision for the to repo"""
        dirs = self.query_abs_dirs()
        return self.query_hg_revision(dirs["abs_to_dir"])

    def hg_merge_via_debugsetparents(
        self, cwd, old_head, new_head, preserve_tags=True, user=None
    ):
        """Merge 2 heads avoiding non-fastforward commits"""
        hg = self.query_exe("hg", return_type="list")
        cmd = hg + ["debugsetparents", new_head, old_head]
        self.run_command(cmd, cwd=cwd, error_list=HgErrorList, halt_on_failure=True)
        self.hg_commit(
            cwd,
            message="Merge old head via |hg debugsetparents %s %s|. "
            "CLOSED TREE DONTBUILD a=release" % (new_head, old_head),
            user=user,
        )
        if preserve_tags:
            # I don't know how to do this elegantly.
            # I'm reverting .hgtags to old_head, then appending the new tags
            # from new_head to .hgtags, and hoping nothing goes wrong.
            # I'd rather not write patch files from scratch, so this seems
            # like a slightly more complex but less objectionable method?
            self.info("Trying to preserve tags from before debugsetparents...")
            dirs = self.query_abs_dirs()
            patch_file = os.path.join(dirs["abs_work_dir"], "patch_file")
            # Shell redirection requires running through list2cmdline.
            self.run_command(
                subprocess.list2cmdline(
                    hg + ["diff", "-r", old_head, ".hgtags", "-U9", ">", patch_file]
                ),
                cwd=cwd,
            )
            self.run_command(
                ["patch", "-R", "-p1", "-i", patch_file],
                cwd=cwd,
                halt_on_failure=True,
            )
            tag_diff = self.read_from_file(patch_file)
            with self.opened(os.path.join(cwd, ".hgtags"), open_mode="a") as (fh, err):
                if err:
                    self.fatal("Can't append to .hgtags!")
                for n, line in enumerate(tag_diff.splitlines()):
                    # The first 4 lines of a patch are headers, so we ignore them.
                    # NOTE(review): n < 5 actually skips five lines (0-4);
                    # confirm the extra skipped line is never tag content.
                    if n < 5:
                        continue
                    # Even after that, the only lines we really care about are
                    # additions to the file.
                    # TODO: why do we only care about additions? I couldn't
                    # figure that out by reading this code.
                    if not line.startswith("+"):
                        continue
                    # NOTE(review): this strips every "+" in the line, not just
                    # the leading diff marker — safe only while tag names and
                    # changesets never contain "+".
                    line = line.replace("+", "")
                    (changeset, tag) = line.split(" ")
                    if len(changeset) != 40:
                        continue
                    fh.write("%s\n" % line)
            out = self.get_output_from_command(["hg", "status", ".hgtags"], cwd=cwd)
            if out:
                self.hg_commit(
                    cwd,
                    message="Preserve old tags after debugsetparents. "
                    "CLOSED TREE DONTBUILD a=release",
                    user=user,
                )
            else:
                self.info(".hgtags file is identical, no need to commit")

    def remove_locales(self, file_name, locales):
        """Remove locales from shipped-locales (m-r only)"""
        contents = self.read_from_file(file_name)
        new_contents = ""
        for line in contents.splitlines():
            # NOTE(review): a blank line here would raise IndexError;
            # shipped-locales has none in practice.
            locale = line.split()[0]
            if locale not in locales:
                new_contents += "%s\n" % line
            else:
                self.info("Removed locale: %s" % locale)
        self.write_to_file(file_name, new_contents)

    def touch_clobber_file(self, cwd):
        """Rewrite CLOBBER, keeping comments/blank lines, to force a clobber build."""
        clobber_file = os.path.join(cwd, "CLOBBER")
        contents = self.read_from_file(clobber_file)
        new_contents = ""
        for line in contents.splitlines():
            line = line.strip()
            if line.startswith("#") or line == "":
                new_contents += "%s\n" % line
        new_contents += "Merge day clobber"
        self.write_to_file(clobber_file, new_contents)

    def bump_version(
        self,
        cwd,
        curr_version,
        next_version,
        curr_suffix,
        next_suffix,
        bump_major=False,
        use_config_suffix=False,
    ):
        """Bump versions (m-c, m-b).

        At some point we may want to unhardcode these filenames into config
        """
        curr_weave_version = str(int(curr_version) + 2)
        next_weave_version = str(int(curr_weave_version) + 1)
        for f in self.config["version_files"]:
            from_ = "%s.0%s" % (curr_version, curr_suffix)
            if use_config_suffix:
                to = "%s.0%s%s" % (next_version, next_suffix, f["suffix"])
            else:
                to = "%s.0%s" % (next_version, next_suffix)
            self.replace(os.path.join(cwd, f["file"]), from_, to)

        # only applicable for m-c
        if bump_major:
            self.replace(
                os.path.join(cwd, "xpcom/components/Module.h"),
                "static const unsigned int kVersion = %s;" % curr_version,
                "static const unsigned int kVersion = %s;" % next_version,
            )
            self.replace(
                os.path.join(cwd, "services/sync/modules/constants.js"),
                'WEAVE_VERSION: "1.%s.0"' % curr_weave_version,
                'WEAVE_VERSION: "1.%s.0"' % next_weave_version,
            )

    # Branch-specific workflow helper methods {{{1
    # Consistency fix: accept *args/**kwargs like the sibling behaviors so
    # migrate()'s getattr(...)(end_tag=end_tag) dispatch cannot TypeError.
    def bump_and_tag_central(self, *args, **kwargs):
        """No migrating. Just tag, bump version, and clobber mozilla-central.

        Like bump_esr logic, to_dir is the target repo. In this case: mozilla-central. It's
        needed due to the way this script is designed. There is no "from_dir" that we are
        migrating from.
        """
        dirs = self.query_abs_dirs()
        curr_mc_version = self.get_version(dirs["abs_to_dir"])[0]
        next_mc_version = str(int(curr_mc_version) + 1)
        to_fx_major_version = self.get_version(dirs["abs_to_dir"])[0]
        end_tag = self.config["end_tag"] % {"major_version": to_fx_major_version}
        base_to_rev = self.query_to_revision()

        # tag m-c again since there are csets between tagging during m-c->m-b merge
        # e.g.
        # m-c tag during m-c->m-b migration: FIREFOX_BETA_60_BASE
        # m-c tag we are doing in this method now: FIREFOX_NIGHTLY_60_END
        # context: https://bugzilla.mozilla.org/show_bug.cgi?id=1431363#c14
        self.hg_tag(
            dirs["abs_to_dir"],
            end_tag,
            user=self.config["hg_user"],
            revision=base_to_rev,
            force=True,
        )
        self.bump_version(
            dirs["abs_to_dir"],
            curr_mc_version,
            next_mc_version,
            "a1",
            "a1",
            bump_major=True,
            use_config_suffix=False,
        )
        # touch clobber files
        self.touch_clobber_file(dirs["abs_to_dir"])

    def central_to_beta(self, end_tag):
        """mozilla-central -> mozilla-beta behavior.

        We could have all of these individually toggled by flags, but
        by separating into workflow methods we can be more precise about
        what happens in each workflow, while allowing for things like
        staging beta user repo migrations.
        """
        dirs = self.query_abs_dirs()
        next_mb_version = self.get_version(dirs["abs_to_dir"])[0]
        self.bump_version(
            dirs["abs_to_dir"],
            next_mb_version,
            next_mb_version,
            "a1",
            "",
            use_config_suffix=True,
        )
        self.apply_replacements()
        # touch clobber files
        self.touch_clobber_file(dirs["abs_to_dir"])

    def beta_to_release(self, *args, **kwargs):
        """mozilla-beta -> mozilla-release behavior.

        We could have all of these individually toggled by flags, but
        by separating into workflow methods we can be more precise about
        what happens in each workflow, while allowing for things like
        staging beta user repo migrations.
        """
        dirs = self.query_abs_dirs()
        # Reset display_version.txt
        for f in self.config["copy_files"]:
            self.copyfile(
                os.path.join(dirs["abs_to_dir"], f["src"]),
                os.path.join(dirs["abs_to_dir"], f["dst"]),
            )

        self.apply_replacements()
        if self.config.get("remove_locales"):
            self.remove_locales(
                os.path.join(dirs["abs_to_dir"], "browser/locales/shipped-locales"),
                self.config["remove_locales"],
            )
        self.touch_clobber_file(dirs["abs_to_dir"])

    def release_to_esr(self, *args, **kwargs):
        """mozilla-release -> mozilla-esrNN behavior."""
        dirs = self.query_abs_dirs()
        self.apply_replacements()
        self.touch_clobber_file(dirs["abs_to_dir"])
        next_esr_version = self.get_version(dirs["abs_to_dir"])[0]
        self.bump_version(
            dirs["abs_to_dir"],
            next_esr_version,
            next_esr_version,
            "",
            "",
            use_config_suffix=True,
        )

    def apply_replacements(self):
        """Apply the configured (file, from, to) string replacements in the 'to' repo."""
        dirs = self.query_abs_dirs()
        for f, from_, to in self.config["replacements"]:
            self.replace(os.path.join(dirs["abs_to_dir"], f), from_, to)

    def pull_from_repo(self, from_dir, to_dir, revision=None, branch=None):
        """Pull from one repo to another."""
        hg = self.query_exe("hg", return_type="list")
        cmd = hg + ["pull"]
        if revision:
            cmd.extend(["-r", revision])
        cmd.append(from_dir)
        self.run_command(
            cmd,
            cwd=to_dir,
            error_list=HgErrorList,
            halt_on_failure=True,
        )
        cmd = hg + ["update", "-C"]
        if branch or revision:
            cmd.extend(["-r", branch or revision])
        self.run_command(
            cmd,
            cwd=to_dir,
            error_list=HgErrorList,
            halt_on_failure=True,
        )

    # Actions {{{1
    def bump_second_digit(self, *args, **kwargs):
        """Bump second digit.

        ESR need only the second digit bumped as a part of merge day."""
        dirs = self.query_abs_dirs()
        version = self.get_version(dirs["abs_to_dir"])
        curr_version = ".".join(version)
        next_version = list(version)
        # bump the second digit
        next_version[1] = str(int(next_version[1]) + 1)
        # Take major+minor and append '0' according to Firefox version schema.
        # 52.0 will become 52.1.0, not 52.1
        next_version = ".".join(next_version[:2] + ["0"])
        for f in self.config["version_files"]:
            self.replace(
                os.path.join(dirs["abs_to_dir"], f["file"]),
                curr_version,
                next_version + f["suffix"],
            )
        self.touch_clobber_file(dirs["abs_to_dir"])

    def pull(self):
        """Clone the gecko repos"""
        repos = self.query_repos()
        super(GeckoMigration, self).pull(repos=repos)

    def migrate(self):
        """Perform the migration."""
        dirs = self.query_abs_dirs()
        from_fx_major_version = self.get_version(dirs["abs_from_dir"])[0]
        to_fx_major_version = self.get_version(dirs["abs_to_dir"])[0]
        base_from_rev = self.query_from_revision()
        base_to_rev = self.query_to_revision()
        base_tag = self.config["base_tag"] % {"major_version": from_fx_major_version}
        self.hg_tag(  # tag the base of the from repo
            dirs["abs_from_dir"],
            base_tag,
            user=self.config["hg_user"],
            revision=base_from_rev,
        )
        new_from_rev = self.query_from_revision()
        self.info("New revision %s" % new_from_rev)
        pull_revision = None
        if not self.config.get("pull_all_branches"):
            pull_revision = new_from_rev
        self.pull_from_repo(
            dirs["abs_from_dir"],
            dirs["abs_to_dir"],
            revision=pull_revision,
            branch="default",
        )
        if self.config.get("requires_head_merge") is not False:
            self.hg_merge_via_debugsetparents(
                dirs["abs_to_dir"],
                old_head=base_to_rev,
                new_head=new_from_rev,
                user=self.config["hg_user"],
            )

        end_tag = self.config.get("end_tag")  # tag the end of the to repo
        if end_tag:
            end_tag = end_tag % {"major_version": to_fx_major_version}
            self.hg_tag(
                dirs["abs_to_dir"],
                end_tag,
                user=self.config["hg_user"],
                revision=base_to_rev,
                force=True,
            )
        # Call beta_to_release etc.
        if not hasattr(self, self.config["migration_behavior"]):
            self.fatal(
                "Don't know how to proceed with migration_behavior %s !"
                % self.config["migration_behavior"]
            )
        getattr(self, self.config["migration_behavior"])(end_tag=end_tag)
        self.info(
            "Verify the diff, and apply any manual changes, such as disabling features, "
            "and --commit-changes"
        )
+
+
+# __main__ {{{1
if __name__ == "__main__":
    # Script entry point.
    gecko_migration = GeckoMigration()
    gecko_migration.run_and_exit()
diff --git a/testing/mozharness/scripts/multil10n.py b/testing/mozharness/scripts/multil10n.py
new file mode 100755
index 0000000000..d221ad5b3e
--- /dev/null
+++ b/testing/mozharness/scripts/multil10n.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""multil10n.py
+
+"""
+
+from __future__ import absolute_import
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.mozilla.l10n.multi_locale_build import MultiLocaleBuild
+
if __name__ == "__main__":
    # Script entry point: run the multi-locale build actions and exit.
    MultiLocaleBuild().run_and_exit()
diff --git a/testing/mozharness/scripts/openh264_build.py b/testing/mozharness/scripts/openh264_build.py
new file mode 100755
index 0000000000..4fa5501a0a
--- /dev/null
+++ b/testing/mozharness/scripts/openh264_build.py
@@ -0,0 +1,490 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+from __future__ import absolute_import
+import sys
+import os
+import glob
+import re
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+# import the guts
+import mozharness
+from mozharness.base.vcs.vcsbase import VCSScript
+from mozharness.base.log import ERROR, DEBUG
+from mozharness.base.transfer import TransferMixin
+from mozharness.mozilla.tooltool import TooltoolMixin
+
+
# Absolute path to mozharness' bundled external tools, resolved relative to
# the installed mozharness package location.
external_tools_path = os.path.join(
    os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
    "external_tools",
)
+
+
class OpenH264Build(TransferMixin, VCSScript, TooltoolMixin):
    """Check out, build, test and package the OpenH264 GMP plugin."""

    # Every action this script knows how to run (selectable from the CLI).
    all_actions = [
        "clobber",
        "get-tooltool",
        "checkout-sources",
        "build",
        "test",
        "package",
        "dump-symbols",
    ]

    # Actions run when none are requested explicitly; "clobber" and "test"
    # are opt-in.
    default_actions = [
        "get-tooltool",
        "checkout-sources",
        "build",
        "package",
        "dump-symbols",
    ]

    # Command-line options merged into self.config.
    config_options = [
        [
            ["--repo"],
            {
                "dest": "repo",
                "help": "OpenH264 repository to use",
                "default": "https://github.com/dminor/openh264.git",
            },
        ],
        [
            ["--rev"],
            {"dest": "revision", "help": "revision to checkout", "default": "master"},
        ],
        [
            ["--debug"],
            {
                "dest": "debug_build",
                "action": "store_true",
                "help": "Do a debug build",
            },
        ],
        [
            ["--arch"],
            {
                "dest": "arch",
                "help": "Arch type to use (x64, x86, arm, or aarch64)",
            },
        ],
        [
            ["--os"],
            {
                "dest": "operating_system",
                "help": "Specify the operating system to build for",
            },
        ],
        [
            ["--use-yasm"],
            {
                "dest": "use_yasm",
                "help": "use yasm instead of nasm",
                "action": "store_true",
                "default": False,
            },
        ],
        [
            ["--avoid-avx2"],
            {
                "dest": "avoid_avx2",
                "help": "Pass HAVE_AVX2='false' through to Make to support older nasm",
                "action": "store_true",
                "default": False,
            },
        ],
        # The two options below are accepted for CLI compatibility with other
        # mozharness scripts but are otherwise unused here.
        [
            ["--branch"],
            {
                "dest": "branch",
                "help": "dummy option",
            },
        ],
        [
            ["--build-pool"],
            {
                "dest": "build_pool",
                "help": "dummy option",
            },
        ],
    ]
+
+ def __init__(
+ self,
+ require_config_file=False,
+ config={},
+ all_actions=all_actions,
+ default_actions=default_actions,
+ ):
+
+ # Default configuration
+ default_config = {
+ "debug_build": False,
+ "upload_ssh_key": "~/.ssh/ffxbld_rsa",
+ "upload_ssh_user": "ffxbld",
+ "upload_ssh_host": "upload.ffxbld.productdelivery.prod.mozaws.net",
+ "upload_path_base": "/tmp/openh264",
+ "use_yasm": False,
+ }
+ default_config.update(config)
+
+ VCSScript.__init__(
+ self,
+ config_options=self.config_options,
+ require_config_file=require_config_file,
+ config=default_config,
+ all_actions=all_actions,
+ default_actions=default_actions,
+ )
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ dirs = super(OpenH264Build, self).query_abs_dirs()
+ dirs["abs_upload_dir"] = os.path.join(dirs["abs_work_dir"], "upload")
+ self.abs_dirs = dirs
+ return self.abs_dirs
+
+ def get_tooltool(self):
+ c = self.config
+ if not c.get("tooltool_manifest_file"):
+ self.info("Skipping tooltool fetching since no tooltool manifest")
+ return
+ dirs = self.query_abs_dirs()
+ self.mkdir_p(dirs["abs_work_dir"])
+ manifest = os.path.join(
+ dirs["abs_src_dir"],
+ "testing",
+ "mozharness",
+ "configs",
+ "openh264",
+ "tooltool-manifests",
+ c["tooltool_manifest_file"],
+ )
+ self.info("Getting tooltool files from manifest (%s)" % manifest)
+ try:
+ self.tooltool_fetch(
+ manifest=manifest,
+ output_dir=os.path.join(dirs["abs_work_dir"]),
+ cache=c.get("tooltool_cache"),
+ )
+ except KeyError:
+ self.error("missing a required key.")
+
+ def query_package_name(self):
+ if self.config["arch"] in ("x64", "aarch64"):
+ bits = "64"
+ else:
+ bits = "32"
+ version = self.config["revision"]
+
+ if sys.platform in ("linux2", "linux"):
+ if self.config.get("operating_system") == "android":
+ return "openh264-android-{arch}-{version}.zip".format(
+ version=version, arch=self.config["arch"]
+ )
+ elif self.config.get("operating_system") == "darwin":
+ suffix = ""
+ if self.config["arch"] != "x64":
+ suffix = "-" + self.config["arch"]
+ return "openh264-macosx{bits}{suffix}-{version}.zip".format(
+ version=version, bits=bits, suffix=suffix
+ )
+ else:
+ return "openh264-linux{bits}-{version}.zip".format(
+ version=version, bits=bits
+ )
+ elif sys.platform == "win32":
+ if self.config["arch"] == "aarch64":
+ return "openh264-win64-aarch64-{version}.zip".format(version=version)
+ else:
+ return "openh264-win{bits}-{version}.zip".format(
+ version=version, bits=bits
+ )
+ self.fatal("can't determine platform")
+
+ def query_make_params(self):
+ retval = []
+ if self.config["debug_build"]:
+ retval.append("BUILDTYPE=Debug")
+
+ if self.config["avoid_avx2"]:
+ retval.append("HAVE_AVX2=false")
+
+ if self.config["arch"] in ("x64", "aarch64"):
+ retval.append("ENABLE64BIT=Yes")
+ else:
+ retval.append("ENABLE64BIT=No")
+
+ if self.config["arch"] == "x86":
+ retval.append("ARCH=x86")
+ elif self.config["arch"] == "x64":
+ retval.append("ARCH=x86_64")
+ elif self.config["arch"] == "aarch64":
+ retval.append("ARCH=arm64")
+ else:
+ self.fatal("Unknown arch: {}".format(self.config["arch"]))
+
+ if "operating_system" in self.config:
+ retval.append("OS=%s" % self.config["operating_system"])
+ if self.config["operating_system"] == "android":
+ retval.append("TARGET=invalid")
+ retval.append("NDKLEVEL=%s" % self.config["min_sdk"])
+ retval.append("NDKROOT=%s/android-ndk" % os.environ["MOZ_FETCHES_DIR"])
+ retval.append("NDK_TOOLCHAIN_VERSION=clang")
+ if self.config["operating_system"] == "darwin":
+ retval.append("OS=darwin")
+
+ if self.config["use_yasm"]:
+ retval.append("ASM=yasm")
+
+ if self._is_windows():
+ retval.append("OS=msvc")
+ retval.append("CC=clang-cl")
+ retval.append("CXX=clang-cl")
+ if self.config["arch"] == "x86":
+ retval.append("CFLAGS=-m32")
+ elif self.config["arch"] == "aarch64":
+ retval.append("CFLAGS=--target=aarch64-windows-msvc")
+ retval.append("CXX_LINK_O=-nologo --target=aarch64-windows-msvc -Fe$@")
+ else:
+ retval.append("CC=clang")
+ retval.append("CXX=clang++")
+
+ return retval
+
    def query_upload_ssh_key(self):
        # Path to the private key used for uploads (see default config).
        return self.config["upload_ssh_key"]

    def query_upload_ssh_host(self):
        # Hostname of the upload server.
        return self.config["upload_ssh_host"]

    def query_upload_ssh_user(self):
        # Account used to log in to the upload host.
        return self.config["upload_ssh_user"]

    def query_upload_ssh_path(self):
        # Remote destination directory: <upload_path_base>/<revision>.
        return "%s/%s" % (self.config["upload_path_base"], self.config["revision"])
+
+ def run_make(self, target, capture_output=False):
+ cmd = ["make", target] + self.query_make_params()
+ dirs = self.query_abs_dirs()
+ repo_dir = os.path.join(dirs["abs_work_dir"], "openh264")
+ env = None
+ if self.config.get("partial_env"):
+ env = self.query_env(self.config["partial_env"])
+ kwargs = dict(cwd=repo_dir, env=env)
+ if capture_output:
+ return self.get_output_from_command(cmd, **kwargs)
+ else:
+ return self.run_command(cmd, **kwargs)
+
+ def checkout_sources(self):
+ repo = self.config["repo"]
+ rev = self.config["revision"]
+
+ dirs = self.query_abs_dirs()
+ repo_dir = os.path.join(dirs["abs_work_dir"], "openh264")
+
+ if self._is_windows():
+ # We don't have git on our windows builders, so download a zip
+ # package instead.
+ path = repo.replace(".git", "/archive/") + rev + ".zip"
+ self.download_file(path)
+ self.unzip(rev + ".zip", dirs["abs_work_dir"])
+ self.move(
+ os.path.join(dirs["abs_work_dir"], "openh264-" + rev),
+ os.path.join(dirs["abs_work_dir"], "openh264"),
+ )
+
+ # Retrieve in-tree version of gmp-api
+ self.copytree(
+ os.path.join(dirs["abs_src_dir"], "dom", "media", "gmp", "gmp-api"),
+ os.path.join(repo_dir, "gmp-api"),
+ )
+
+ # We need gas-preprocessor.pl for arm64 builds
+ if self.config["arch"] == "aarch64":
+ openh264_dir = os.path.join(dirs["abs_work_dir"], "openh264")
+ self.download_file(
+ (
+ "https://raw.githubusercontent.com/libav/"
+ "gas-preprocessor/c2bc63c96678d9739509e58"
+ "7aa30c94bdc0e636d/gas-preprocessor.pl"
+ ),
+ parent_dir=openh264_dir,
+ )
+ self.chmod(os.path.join(openh264_dir, "gas-preprocessor.pl"), 744)
+
+ # gas-preprocessor.pl expects cpp to exist
+ # os.symlink is not available on Windows until we switch to
+ # Python 3.
+ os.system(
+ "ln -s %s %s"
+ % (
+ os.path.join(
+ os.environ["MOZ_FETCHES_DIR"], "clang", "bin", "clang.exe"
+ ),
+ os.path.join(openh264_dir, "cpp"),
+ )
+ )
+ return 0
+
+ repos = [
+ {"vcs": "gittool", "repo": repo, "dest": repo_dir, "revision": rev},
+ ]
+
+ # self.vcs_checkout already retries, so no need to wrap it in
+ # self.retry. We set the error_level to ERROR to prevent it going fatal
+ # so we can do our own handling here.
+ retval = self.vcs_checkout_repos(repos, error_level=ERROR)
+ if not retval:
+ self.rmtree(repo_dir)
+ self.fatal("Automation Error: couldn't clone repo", exit_code=4)
+
+ # Checkout gmp-api
+ # TODO: Nothing here updates it yet, or enforces versions!
+ if not os.path.exists(os.path.join(repo_dir, "gmp-api")):
+ retval = self.run_make("gmp-bootstrap")
+ if retval != 0:
+ self.fatal("couldn't bootstrap gmp")
+ else:
+ self.info("skipping gmp bootstrap - we have it locally")
+
+ # Checkout gtest
+ # TODO: Requires svn!
+ if not os.path.exists(os.path.join(repo_dir, "gtest")):
+ retval = self.run_make("gtest-bootstrap")
+ if retval != 0:
+ self.fatal("couldn't bootstrap gtest")
+ else:
+ self.info("skipping gtest bootstrap - we have it locally")
+
+ return retval
+
+ def build(self):
+ retval = self.run_make("plugin")
+ if retval != 0:
+ self.fatal("couldn't build plugin")
+
+ def package(self):
+ dirs = self.query_abs_dirs()
+ srcdir = os.path.join(dirs["abs_work_dir"], "openh264")
+ package_name = self.query_package_name()
+ package_file = os.path.join(dirs["abs_work_dir"], package_name)
+ if os.path.exists(package_file):
+ os.unlink(package_file)
+ to_package = []
+ for f in glob.glob(os.path.join(srcdir, "*gmpopenh264*")):
+ if not re.search(
+ "(?:lib)?gmpopenh264(?!\.\d)\.(?:dylib|so|dll|info)(?!\.\d)", f
+ ):
+ # Don't package unnecessary zip bloat
+ # Blocks things like libgmpopenh264.2.dylib and libgmpopenh264.so.1
+ self.log("Skipping packaging of {package}".format(package=f))
+ continue
+ to_package.append(os.path.basename(f))
+ self.log("Packaging files %s" % to_package)
+ cmd = ["zip", package_file] + to_package
+ retval = self.run_command(cmd, cwd=srcdir)
+ if retval != 0:
+ self.fatal("couldn't make package")
+ self.copy_to_upload_dir(
+ package_file, dest=os.path.join(srcdir, "artifacts", package_name)
+ )
+
+ # Taskcluster expects this path to exist, but we don't use it
+ # because our builds are private.
+ path = os.path.join(
+ self.query_abs_dirs()["abs_work_dir"], "..", "public", "build"
+ )
+ self.mkdir_p(path)
+
    def dump_symbols(self):
        """Generate and package Breakpad symbols for the built plugin.

        Runs the configured dump_syms binary over the plugin binary via the
        external packagesymbols.py helper, writing the result to
        artifacts/<package base>.symbols.zip next to the regular package.
        """
        dirs = self.query_abs_dirs()
        c = self.config
        srcdir = os.path.join(dirs["abs_work_dir"], "openh264")
        # The make target prints the plugin's file name; capture it so we
        # know which binary to dump symbols from.
        package_name = self.run_make("echo-plugin-name", capture_output=True)
        if not package_name:
            self.fatal("failure running make")
        zip_package_name = self.query_package_name()
        if not zip_package_name[-4:] == ".zip":
            self.fatal("Unexpected zip_package_name")
        # foo.zip -> foo.symbols.zip
        symbol_package_name = "{base}.symbols.zip".format(base=zip_package_name[:-4])
        symbol_zip_path = os.path.join(srcdir, "artifacts", symbol_package_name)
        repo_dir = os.path.join(dirs["abs_work_dir"], "openh264")
        # Same partial-env handling as run_make: only build a custom env
        # when the config asks for one.
        env = None
        if self.config.get("partial_env"):
            env = self.query_env(self.config["partial_env"])
        kwargs = dict(cwd=repo_dir, env=env)
        dump_syms = os.path.join(dirs["abs_work_dir"], c["dump_syms_binary"])
        self.chmod(dump_syms, 0o755)
        # NOTE(review): hardcodes python2.7 for packagesymbols.py -- confirm
        # the builders still provide it.
        python = self.query_exe("python2.7")
        cmd = [
            python,
            os.path.join(external_tools_path, "packagesymbols.py"),
            "--symbol-zip",
            symbol_zip_path,
            dump_syms,
            os.path.join(srcdir, package_name),
        ]
        self.run_command(cmd, **kwargs)
+
    def test(self):
        # Run the openh264 "make test" suite; abort the script on failures.
        retval = self.run_make("test")
        if retval != 0:
            self.fatal("test failures")
+
+ def copy_to_upload_dir(
+ self,
+ target,
+ dest=None,
+ log_level=DEBUG,
+ error_level=ERROR,
+ compress=False,
+ upload_dir=None,
+ ):
+ """Copy target file to upload_dir/dest.
+
+ Potentially update a manifest in the future if we go that route.
+
+ Currently only copies a single file; would be nice to allow for
+ recursive copying; that would probably done by creating a helper
+ _copy_file_to_upload_dir().
+ """
+ dest_filename_given = dest is not None
+ if upload_dir is None:
+ upload_dir = self.query_abs_dirs()["abs_upload_dir"]
+ if dest is None:
+ dest = os.path.basename(target)
+ if dest.endswith("/"):
+ dest_file = os.path.basename(target)
+ dest_dir = os.path.join(upload_dir, dest)
+ dest_filename_given = False
+ else:
+ dest_file = os.path.basename(dest)
+ dest_dir = os.path.join(upload_dir, os.path.dirname(dest))
+ if compress and not dest_filename_given:
+ dest_file += ".gz"
+ dest = os.path.join(dest_dir, dest_file)
+ if not os.path.exists(target):
+ self.log("%s doesn't exist!" % target, level=error_level)
+ return None
+ self.mkdir_p(dest_dir)
+ self.copyfile(target, dest, log_level=log_level, compress=compress)
+ if os.path.exists(dest):
+ return dest
+ else:
+ self.log("%s doesn't exist after copy!" % dest, level=error_level)
+ return None
+
+
# main {{{1
if __name__ == "__main__":
    OpenH264Build().run_and_exit()
diff --git a/testing/mozharness/scripts/raptor_script.py b/testing/mozharness/scripts/raptor_script.py
new file mode 100644
index 0000000000..5e3ce2c2a0
--- /dev/null
+++ b/testing/mozharness/scripts/raptor_script.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""raptor
+
+"""
+
+from __future__ import absolute_import
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.mozilla.testing.raptor import Raptor
+
if __name__ == "__main__":
    Raptor().run_and_exit()
diff --git a/testing/mozharness/scripts/release/bouncer_check.py b/testing/mozharness/scripts/release/bouncer_check.py
new file mode 100644
index 0000000000..c0ec784735
--- /dev/null
+++ b/testing/mozharness/scripts/release/bouncer_check.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python
+# lint_ignore=E501
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+""" bouncer_check.py
+
+A script to check HTTP statuses of Bouncer products to be shipped.
+"""
+
+from __future__ import absolute_import
+import os
+import sys
+
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.script import BaseScript
+from mozharness.mozilla.automation import EXIT_STATUS_DICT, TBPL_FAILURE
+
# Query-string template for a single bouncer download check.
BOUNCER_URL_PATTERN = "{bouncer_prefix}?product={product}&os={os}&lang={lang}"
+
+
class BouncerCheck(BaseScript):
    """Check that Bouncer responds correctly for every release download.

    Builds one bouncer URL per product/platform/locale (and per partial /
    previous-version combination), requests them in parallel, and fails
    when a request errors, the final redirect target is not https, or the
    final host is not in the ``cdn_urls`` allow-list.
    """

    config_options = [
        [
            ["--version"],
            {
                "dest": "version",
                "help": "Version of release, eg: 39.0b5",
            },
        ],
        [
            ["--product-field"],
            {
                "dest": "product_field",
                "help": "Version field of release from product details, eg: LATEST_FIREFOX_VERSION",  # NOQA: E501
            },
        ],
        [
            ["--products-url"],
            {
                "dest": "products_url",
                "help": "The URL of the current Firefox product versions",
                "type": str,
                "default": "https://product-details.mozilla.org/1.0/firefox_versions.json",
            },
        ],
        [
            ["--previous-version"],
            {
                "dest": "prev_versions",
                "action": "extend",
                "help": "Previous version(s)",
            },
        ],
        [
            ["--locale"],
            {
                "dest": "locales",
                # Intentionally limited for several reasons:
                # 1) faster to check
                # 2) do not need to deal with situation when a new locale
                # introduced and we do not have partials for it yet
                # 3) it mimics the old Sentry behaviour that worked for ages
                # 4) no need to handle ja-JP-mac
                "default": ["en-US", "de", "it", "zh-TW"],
                "action": "append",
                "help": "List of locales to check.",
            },
        ],
        [
            ["-j", "--parallelization"],
            {
                "dest": "parallelization",
                "default": 20,
                "type": int,
                "help": "Number of HTTP sessions running in parallel",
            },
        ],
    ]

    def __init__(self, require_config_file=True):
        """Set up the script; ``cdn_urls`` is the allow-list of hosts a
        bouncer redirect may legitimately land on."""
        super(BouncerCheck, self).__init__(
            config_options=self.config_options,
            require_config_file=require_config_file,
            config={
                "cdn_urls": [
                    "download-installer.cdn.mozilla.net",
                    "download.cdn.mozilla.net",
                    "download.mozilla.org",
                    "archive.mozilla.org",
                ],
            },
            all_actions=[
                "check-bouncer",
            ],
            default_actions=[
                "check-bouncer",
            ],
        )

    def _pre_config_lock(self, rw_config):
        """Resolve ``version`` from product details when only
        ``product_field`` (e.g. LATEST_FIREFOX_VERSION) was provided."""
        super(BouncerCheck, self)._pre_config_lock(rw_config)

        if "product_field" not in self.config:
            return

        firefox_versions = self.load_json_url(self.config["products_url"])

        if self.config["product_field"] not in firefox_versions:
            self.fatal("Unknown Firefox label: {}".format(self.config["product_field"]))
        self.config["version"] = firefox_versions[self.config["product_field"]]
        self.log("Set Firefox version {}".format(self.config["version"]))

    def check_url(self, session, url):
        """HEAD ``url`` (with retries), following redirects, and record a
        TBPL failure when the request errors, the final URL is not https,
        or its host is not in the ``cdn_urls`` allow-list."""
        from redo import retry
        from requests.exceptions import HTTPError

        try:
            from urllib.parse import urlparse
        except ImportError:
            # Python 2
            from urlparse import urlparse

        def do_check_url():
            self.log("Checking {}".format(url))
            r = session.head(url, verify=True, timeout=10, allow_redirects=True)
            try:
                r.raise_for_status()
            except HTTPError:
                self.error("FAIL: {}, status: {}".format(url, r.status_code))
                raise

            final_url = urlparse(r.url)
            if final_url.scheme != "https":
                self.error("FAIL: URL scheme is not https: {}".format(r.url))
                self.return_code = EXIT_STATUS_DICT[TBPL_FAILURE]

            if final_url.netloc not in self.config["cdn_urls"]:
                self.error("FAIL: host not in allowed locations: {}".format(r.url))
                self.return_code = EXIT_STATUS_DICT[TBPL_FAILURE]

        try:
            retry(do_check_url, sleeptime=3, max_sleeptime=10, attempts=3)
        except HTTPError:
            # The error was already logged above.
            self.return_code = EXIT_STATUS_DICT[TBPL_FAILURE]
            return

    def get_urls(self):
        """Yield every bouncer URL to verify: full products first, then
        partial-update products against each previous version."""
        for product in self.config["products"].values():
            if not product["check_uptake"]:
                continue
            product_name = product["product-name"] % {"version": self.config["version"]}
            for bouncer_platform in product["platforms"]:
                for locale in self.config["locales"]:
                    url = BOUNCER_URL_PATTERN.format(
                        bouncer_prefix=self.config["bouncer_prefix"],
                        product=product_name,
                        os=bouncer_platform,
                        lang=locale,
                    )
                    yield url

        for product in self.config.get("partials", {}).values():
            if not product["check_uptake"]:
                continue
            for prev_version in self.config.get("prev_versions", []):
                product_name = product["product-name"] % {
                    "version": self.config["version"],
                    "prev_version": prev_version,
                }
                for bouncer_platform in product["platforms"]:
                    for locale in self.config["locales"]:
                        url = BOUNCER_URL_PATTERN.format(
                            bouncer_prefix=self.config["bouncer_prefix"],
                            product=product_name,
                            os=bouncer_platform,
                            lang=locale,
                        )
                        yield url

    def check_bouncer(self):
        """Check all URLs from get_urls() over a shared requests session,
        fanned out across ``parallelization`` worker threads."""
        import requests
        import concurrent.futures as futures

        session = requests.Session()
        http_adapter = requests.adapters.HTTPAdapter(
            pool_connections=self.config["parallelization"],
            pool_maxsize=self.config["parallelization"],
        )
        session.mount("https://", http_adapter)
        session.mount("http://", http_adapter)

        with futures.ThreadPoolExecutor(self.config["parallelization"]) as e:
            fs = []
            for url in self.get_urls():
                fs.append(e.submit(self.check_url, session, url))
            # Re-raise any exception a worker hit.
            for f in futures.as_completed(fs):
                f.result()
+
+
if __name__ == "__main__":
    bouncer_check = BouncerCheck()
    bouncer_check.run_and_exit()
diff --git a/testing/mozharness/scripts/release/generate-checksums.py b/testing/mozharness/scripts/release/generate-checksums.py
new file mode 100644
index 0000000000..81a781951b
--- /dev/null
+++ b/testing/mozharness/scripts/release/generate-checksums.py
@@ -0,0 +1,264 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import
+import binascii
+import hashlib
+import os
+import re
+import sys
+from multiprocessing.pool import ThreadPool
+
+import six
+
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.python import VirtualenvMixin, virtualenv_config_options
+from mozharness.base.script import BaseScript
+from mozharness.mozilla.checksums import parse_checksums_file
+from mozharness.mozilla.merkle import MerkleTree
+
+
class ChecksumsGenerator(BaseScript, VirtualenvMixin):
    """Aggregate a release candidate's per-upload checksums into big
    <FMT>SUMS files plus a Merkle-tree <FMT>SUMMARY for each hash format.

    Individual ``*.checksums`` / ``*.beet`` files are downloaded
    (anonymously, via boto) from the release's S3 candidates directory.
    """

    config_options = [
        [
            ["--stage-product"],
            {
                "dest": "stage_product",
                "help": "Name of product used in file server's directory structure, "
                "e.g.: firefox, mobile",
            },
        ],
        [
            ["--version"],
            {
                "dest": "version",
                "help": "Version of release, e.g.: 59.0b5",
            },
        ],
        [
            ["--build-number"],
            {
                "dest": "build_number",
                "help": "Build number of release, e.g.: 2",
            },
        ],
        [
            ["--bucket-name"],
            {
                "dest": "bucket_name",
                "help": "Full bucket name e.g.: net-mozaws-prod-delivery-{firefox,archive}.",
            },
        ],
        [
            ["-j", "--parallelization"],
            {
                "dest": "parallelization",
                "default": 20,
                "type": int,
                "help": "Number of checksums file to download concurrently",
            },
        ],
        [
            ["--branch"],
            {
                "dest": "branch",
                "help": "dummy option",
            },
        ],
        [
            ["--build-pool"],
            {
                "dest": "build_pool",
                "help": "dummy option",
            },
        ],
    ] + virtualenv_config_options

    def __init__(self):
        """Set up the script; boto is installed into a virtualenv because
        it is only needed at run time."""
        BaseScript.__init__(
            self,
            config_options=self.config_options,
            require_config_file=False,
            config={
                "virtualenv_modules": [
                    "boto",
                ],
                "virtualenv_path": "venv",
            },
            all_actions=[
                "create-virtualenv",
                "collect-individual-checksums",
                "create-big-checksums",
                "create-summary",
            ],
            default_actions=[
                "create-virtualenv",
                "collect-individual-checksums",
                "create-big-checksums",
                "create-summary",
            ],
        )

        # filename -> {"hashes": {format: digest}, ...}, filled in by
        # collect_individual_checksums().
        self.checksums = {}
        self.file_prefix = self._get_file_prefix()

    def _pre_config_lock(self, rw_config):
        """Apply default formats/includes that cannot live in the config."""
        super(ChecksumsGenerator, self)._pre_config_lock(rw_config)

        # These defaults are set here rather in the config because default
        # lists cannot be completely overidden, only appended to.
        if not self.config.get("formats"):
            self.config["formats"] = ["sha512", "sha256"]

        if not self.config.get("includes"):
            self.config["includes"] = [
                r"^.*\.tar\.bz2$",
                r"^.*\.tar\.xz$",
                r"^.*\.snap$",
                r"^.*\.dmg$",
                r"^.*\.pkg$",
                r"^.*\.bundle$",
                r"^.*\.mar$",
                r"^.*Setup.*\.exe$",
                r"^.*Installer\.exe$",
                r"^.*\.msi$",
                r"^.*\.xpi$",
                r"^.*fennec.*\.apk$",
                r"^.*/jsshell.*$",
            ]

    def _get_file_prefix(self):
        """Return the candidates-directory key prefix for this release."""
        return "pub/{}/candidates/{}-candidates/build{}/".format(
            self.config["stage_product"],
            self.config["version"],
            self.config["build_number"],
        )

    def _get_sums_filename(self, format_):
        # e.g. "sha512" -> "SHA512SUMS"
        return "{}SUMS".format(format_.upper())

    def _get_summary_filename(self, format_):
        # e.g. "sha512" -> "SHA512SUMMARY"
        return "{}SUMMARY".format(format_.upper())

    def _get_hash_function(self, format_):
        """Return the hashlib constructor for ``format_``; fatal otherwise."""
        if format_ in ("sha256", "sha384", "sha512"):
            return getattr(hashlib, format_)
        else:
            self.fatal("Unsupported format {}".format(format_))

    def _get_bucket(self):
        """Connect anonymously to S3 and return the configured bucket."""
        self.activate_virtualenv()
        from boto import connect_s3

        self.info("Connecting to S3")
        conn = connect_s3(anon=True)
        self.info("Connecting to bucket {}".format(self.config["bucket_name"]))
        self.bucket = conn.get_bucket(self.config["bucket_name"])
        return self.bucket

    def collect_individual_checksums(self):
        """This step grabs all of the small checksums files for the release,
        filters out any unwanted files from within them, and adds the remainder
        to self.checksums for subsequent steps to use."""
        bucket = self._get_bucket()
        self.info("File prefix is: {}".format(self.file_prefix))

        # temporary holding place for checksums
        raw_checksums = []

        def worker(item):
            # Runs on a ThreadPool thread; list.append is safe here.
            self.debug("Downloading {}".format(item))
            sums = bucket.get_key(item).get_contents_as_string()
            raw_checksums.append(sums)

        def find_checksums_files():
            # Prefer .beet files when present, else fall back to .checksums.
            self.info("Getting key names from bucket")
            checksum_files = {"beets": [], "checksums": []}
            for key in bucket.list(prefix=self.file_prefix):
                if key.key.endswith(".checksums"):
                    self.debug("Found checksums file: {}".format(key.key))
                    checksum_files["checksums"].append(key.key)
                elif key.key.endswith(".beet"):
                    self.debug("Found beet file: {}".format(key.key))
                    checksum_files["beets"].append(key.key)
                else:
                    self.debug("Ignoring non-checksums file: {}".format(key.key))
            if checksum_files["beets"]:
                self.log("Using beet format")
                return checksum_files["beets"]
            else:
                self.log("Using checksums format")
                return checksum_files["checksums"]

        pool = ThreadPool(self.config["parallelization"])
        pool.map(worker, find_checksums_files())

        for c in raw_checksums:
            for f, info in six.iteritems(parse_checksums_file(c)):
                # Only keep files matching one of the include patterns;
                # duplicate entries must agree, and every requested hash
                # format must be present.
                for pattern in self.config["includes"]:
                    if re.search(pattern, f):
                        if f in self.checksums:
                            if info == self.checksums[f]:
                                self.debug(
                                    "Duplicate checksum for file {}"
                                    " but the data matches;"
                                    " continuing...".format(f)
                                )
                                continue
                            self.fatal(
                                "Found duplicate checksum entry for {}, "
                                "don't know which one to pick.".format(f)
                            )
                        if not set(self.config["formats"]) <= set(info["hashes"]):
                            self.fatal("Missing necessary format for file {}".format(f))
                        self.debug("Adding checksums for file: {}".format(f))
                        self.checksums[f] = info
                        break
                else:
                    self.debug("Ignoring checksums for file: {}".format(f))

    def create_summary(self):
        """
        This step computes a Merkle tree over the checksums for each format
        and writes a file containing the head of the tree and inclusion proofs
        for each file.
        """
        for fmt in self.config["formats"]:
            hash_fn = self._get_hash_function(fmt)
            files = [fn for fn in sorted(self.checksums)]
            data = [self.checksums[fn]["hashes"][fmt] for fn in files]

            tree = MerkleTree(hash_fn, data)
            head = binascii.hexlify(tree.head())
            proofs = [
                binascii.hexlify(tree.inclusion_proof(i).to_rfc6962_bis())
                for i in range(len(files))
            ]

            summary = self._get_summary_filename(fmt)
            self.info("Creating summary file: {}".format(summary))

            content = "{} TREE_HEAD\n".format(head.decode("ascii"))
            for i in range(len(files)):
                content += "{} {}\n".format(proofs[i].decode("ascii"), files[i])

            self.write_to_file(summary, content)

    def create_big_checksums(self):
        """Write one <FMT>SUMS file per format: "<digest> <filename>" lines
        sorted by filename."""
        for fmt in self.config["formats"]:
            sums = self._get_sums_filename(fmt)
            self.info("Creating big checksums file: {}".format(sums))
            with open(sums, "w+") as output_file:
                for fn in sorted(self.checksums):
                    output_file.write(
                        "{} {}\n".format(
                            self.checksums[fn]["hashes"][fmt].decode("ascii"), fn
                        )
                    )
+
+
if __name__ == "__main__":
    ChecksumsGenerator().run_and_exit()
diff --git a/testing/mozharness/scripts/release/update-verify-config-creator.py b/testing/mozharness/scripts/release/update-verify-config-creator.py
new file mode 100644
index 0000000000..18dd27a068
--- /dev/null
+++ b/testing/mozharness/scripts/release/update-verify-config-creator.py
@@ -0,0 +1,623 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, division
+from distutils.version import LooseVersion
+import json
+import math
+import os
+import pprint
+import re
+import sys
+from six.moves.urllib.parse import urljoin
+
+from mozilla_version.gecko import GeckoVersion
+from mozilla_version.version import VersionType
+
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.log import DEBUG, INFO, FATAL, WARNING
+from mozharness.base.script import BaseScript
+
+
def is_triangualar(x):
    """Check if a number is triangular (0, 1, 3, 6, 10, 15, ...)
    see: https://en.wikipedia.org/wiki/Triangular_number#Triangular_roots_and_tests_for_triangular_numbers # noqa

    >>> is_triangualar(0)
    True
    >>> is_triangualar(1)
    True
    >>> is_triangualar(2)
    False
    >>> is_triangualar(3)
    True
    >>> is_triangualar(4)
    False
    >>> all(is_triangualar(x) for x in [0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105])
    True
    >>> all(not is_triangualar(x) for x in [4, 5, 8, 9, 11, 17, 25, 29, 39, 44, 59, 61, 72, 98, 112])
    True
    """
    # x is triangular iff its "triangular root" (sqrt(8x + 1) - 1) / 2
    # is a whole number.
    # pylint --py3k W1619
    root = (math.sqrt(8 * x + 1) - 1) / 2
    return root.is_integer()
+
+
+class UpdateVerifyConfigCreator(BaseScript):
    # mozharness option declarations: each entry is [cli flags, optparse
    # kwargs].  These feed self.config for the actions below.
    config_options = [
        [
            ["--product"],
            {
                "dest": "product",
                "help": "Product being tested, as used in the update URL and filenames. Eg: firefox",  # NOQA: E501
            },
        ],
        [
            ["--stage-product"],
            {
                "dest": "stage_product",
                "help": "Product being tested, as used in stage directories and ship it"
                "If not passed this is assumed to be the same as product.",
            },
        ],
        [
            ["--app-name"],
            {
                "dest": "app_name",
                "help": "App name being tested. Eg: browser",
            },
        ],
        [
            ["--branch-prefix"],
            {
                "dest": "branch_prefix",
                "help": "Prefix of release branch names. Eg: mozilla, comm",
            },
        ],
        [
            ["--channel"],
            {
                "dest": "channel",
                "help": "Channel to run update verify against",
            },
        ],
        [
            ["--aus-server"],
            {
                "dest": "aus_server",
                "default": "https://aus5.mozilla.org",
                "help": "AUS server to run update verify against",
            },
        ],
        [
            ["--to-version"],
            {
                "dest": "to_version",
                "help": "The version of the release being updated to. Eg: 59.0b5",
            },
        ],
        [
            ["--to-app-version"],
            {
                "dest": "to_app_version",
                "help": "The in-app version of the release being updated to. Eg: 59.0",
            },
        ],
        [
            ["--to-display-version"],
            {
                "dest": "to_display_version",
                "help": "The human-readable version of the release being updated to. Eg: 59.0 Beta 9",  # NOQA: E501
            },
        ],
        [
            ["--to-build-number"],
            {
                "dest": "to_build_number",
                "help": "The build number of the release being updated to",
            },
        ],
        [
            ["--to-buildid"],
            {
                "dest": "to_buildid",
                "help": "The buildid of the release being updated to",
            },
        ],
        [
            ["--to-revision"],
            {
                "dest": "to_revision",
                "help": "The revision that the release being updated to was built against",
            },
        ],
        [
            ["--partial-version"],
            {
                "dest": "partial_versions",
                "default": [],
                "action": "append",
                "help": "A previous release version that is expected to receive a partial update. "
                "Eg: 59.0b4. May be specified multiple times.",
            },
        ],
        [
            ["--last-watershed"],
            {
                "dest": "last_watershed",
                "help": "The earliest version to include in the update verify config. Eg: 57.0b10",
            },
        ],
        [
            ["--include-version"],
            {
                "dest": "include_versions",
                "default": [],
                "action": "append",
                "help": "Only include versions that match one of these regexes. "
                "May be passed multiple times",
            },
        ],
        [
            ["--mar-channel-id-override"],
            {
                "dest": "mar_channel_id_options",
                "default": [],
                "action": "append",
                "help": "A version regex and channel id string to override those versions with."
                "Eg: ^\\d+\\.\\d+(\\.\\d+)?$,firefox-mozilla-beta,firefox-mozilla-release "
                "will set accepted mar channel ids to 'firefox-mozilla-beta' and "
                "'firefox-mozilla-release for x.y and x.y.z versions. "
                "May be passed multiple times",
            },
        ],
        [
            ["--override-certs"],
            {
                "dest": "override_certs",
                "default": None,
                "help": "Certs to override the updater with prior to running update verify."
                "If passed, should be one of: dep, nightly, release"
                "If not passed, no certificate overriding will be configured",
            },
        ],
        [
            ["--platform"],
            {
                "dest": "platform",
                "help": "The platform to generate the update verify config for, in FTP-style",
            },
        ],
        [
            ["--updater-platform"],
            {
                "dest": "updater_platform",
                "help": "The platform to run the updater on, in FTP-style."
                "If not specified, this is assumed to be the same as platform",
            },
        ],
        [
            ["--archive-prefix"],
            {
                "dest": "archive_prefix",
                "help": "The server/path to pull the current release from. "
                "Eg: https://archive.mozilla.org/pub",
            },
        ],
        [
            ["--previous-archive-prefix"],
            {
                "dest": "previous_archive_prefix",
                "help": "The server/path to pull the previous releases from"
                "If not specified, this is assumed to be the same as --archive-prefix",
            },
        ],
        [
            ["--repo-path"],
            {
                "dest": "repo_path",
                "help": (
                    "The repository (relative to the hg server root) that the current "
                    "release was built from Eg: releases/mozilla-beta"
                ),
            },
        ],
        [
            ["--output-file"],
            {
                "dest": "output_file",
                "help": "Where to write the update verify config to",
            },
        ],
        [
            ["--product-details-server"],
            {
                "dest": "product_details_server",
                "default": "https://product-details.mozilla.org",
                "help": "Product Details server to pull previous release info from. "
                "Using anything other than the production server is likely to "
                "cause issues with update verify.",
            },
        ],
        [
            ["--hg-server"],
            {
                "dest": "hg_server",
                "default": "https://hg.mozilla.org",
                "help": "Mercurial server to pull various previous and current version info from",
            },
        ],
        [
            ["--full-check-locale"],
            {
                "dest": "full_check_locales",
                "default": ["de", "en-US", "ru"],
                "action": "append",
                "help": "A list of locales to generate full update verify checks for",
            },
        ],
    ]
+
+ def __init__(self):
+ BaseScript.__init__(
+ self,
+ config_options=self.config_options,
+ config={},
+ all_actions=[
+ "gather-info",
+ "create-config",
+ "write-config",
+ ],
+ default_actions=[
+ "gather-info",
+ "create-config",
+ "write-config",
+ ],
+ )
+
+ def _pre_config_lock(self, rw_config):
+ super(UpdateVerifyConfigCreator, self)._pre_config_lock(rw_config)
+
+ if "updater_platform" not in self.config:
+ self.config["updater_platform"] = self.config["platform"]
+ if "stage_product" not in self.config:
+ self.config["stage_product"] = self.config["product"]
+ if "previous_archive_prefix" not in self.config:
+ self.config["previous_archive_prefix"] = self.config["archive_prefix"]
+ self.config["archive_prefix"].rstrip("/")
+ self.config["previous_archive_prefix"].rstrip("/")
+ self.config["mar_channel_id_overrides"] = {}
+ for override in self.config["mar_channel_id_options"]:
+ pattern, override_str = override.split(",", 1)
+ self.config["mar_channel_id_overrides"][pattern] = override_str
+
+ def _get_branch_url(self, branch_prefix, version):
+ version = GeckoVersion.parse(version)
+ branch = None
+ if version.version_type == VersionType.BETA:
+ branch = "releases/{}-beta".format(branch_prefix)
+ elif version.version_type == VersionType.ESR:
+ branch = "releases/{}-esr{}".format(branch_prefix, version.major_number)
+ elif version.version_type == VersionType.RELEASE:
+ if branch_prefix == "comm":
+ # Thunderbird does not have ESR releases, regular releases
+ # go in an ESR branch
+ branch = "releases/{}-esr{}".format(branch_prefix, version.major_number)
+ else:
+ branch = "releases/{}-release".format(branch_prefix)
+ if not branch:
+ raise Exception("Cannot determine branch, cannot continue!")
+
+ return branch
+
    def _get_update_paths(self):
        """Build ``self.update_paths``: {version: {appVersion, locales,
        buildID[, marChannelIds]}} for every prior release updates must be
        verified from.

        Candidate releases come from product details and are filtered by
        the include_versions patterns, product name, last_watershed and
        to_version; each survivor's buildid is read from its candidates
        info file and its shipped locales / app version from hg.
        """
        from mozrelease.l10n import getPlatformLocales
        from mozrelease.paths import getCandidatesDir
        from mozrelease.platforms import ftp2infoFile
        from mozrelease.versions import MozillaVersion

        self.update_paths = {}

        ret = self._retry_download(
            "{}/1.0/{}.json".format(
                self.config["product_details_server"],
                self.config["stage_product"],
            ),
            "WARNING",
        )
        releases = json.load(ret)["releases"]
        # Iterate newest-first by version.
        for release_name, release_info in reversed(
            sorted(releases.items(), key=lambda x: MozillaVersion(x[1]["version"]))
        ):
            # we need to use releases_name instead of release_info since esr
            # string is included in the name. later we rely on this.
            product, version = release_name.split("-", 1)
            tag = "{}_{}_RELEASE".format(product.upper(), version.replace(".", "_"))

            # Exclude any releases that don't match one of our include version
            # regexes. This is generally to avoid including versions from other
            # channels. Eg: including betas when testing releases
            for v in self.config["include_versions"]:
                if re.match(v, version):
                    break
            else:
                self.log(
                    "Skipping release whose version doesn't match any "
                    "include_version pattern: %s" % release_name,
                    level=INFO,
                )
                continue

            # We also have to trim out previous releases that aren't in the same
            # product line, too old, etc.
            if self.config["stage_product"] != product:
                self.log(
                    "Skipping release that doesn't match product name: %s"
                    % release_name,
                    level=INFO,
                )
                continue
            if MozillaVersion(version) < MozillaVersion(self.config["last_watershed"]):
                self.log(
                    "Skipping release that's behind the last watershed: %s"
                    % release_name,
                    level=INFO,
                )
                continue
            if version == self.config["to_version"]:
                self.log(
                    "Skipping release that is the same as to version: %s"
                    % release_name,
                    level=INFO,
                )
                continue
            if MozillaVersion(version) > MozillaVersion(self.config["to_version"]):
                self.log(
                    "Skipping release that's newer than to version: %s" % release_name,
                    level=INFO,
                )
                continue

            if version in self.update_paths:
                raise Exception("Found duplicate release for version: %s", version)

            # This is a crappy place to get buildids from, but we don't have a better one.
            # This will start to fail if old info files are deleted.
            info_file_url = "{}{}/{}_info.txt".format(
                self.config["previous_archive_prefix"],
                getCandidatesDir(
                    self.config["stage_product"],
                    version,
                    release_info["build_number"],
                ),
                ftp2infoFile(self.config["platform"]),
            )
            self.log(
                "Retrieving buildid from info file: %s" % info_file_url, level=DEBUG
            )
            ret = self._retry_download(info_file_url, "WARNING")
            # Info file content looks like "buildID=<id>".
            buildID = ret.read().split(b"=")[1].strip().decode("utf-8")

            branch = self._get_branch_url(self.config["branch_prefix"], version)

            shipped_locales_url = urljoin(
                self.config["hg_server"],
                "{}/raw-file/{}/{}/locales/shipped-locales".format(
                    branch,
                    tag,
                    self.config["app_name"],
                ),
            )
            ret = self._retry_download(shipped_locales_url, "WARNING")
            shipped_locales = ret.read().strip().decode("utf-8")

            app_version_url = urljoin(
                self.config["hg_server"],
                "{}/raw-file/{}/{}/config/version.txt".format(
                    branch,
                    tag,
                    self.config["app_name"],
                ),
            )
            app_version = (
                self._retry_download(app_version_url, "WARNING")
                .read()
                .strip()
                .decode("utf-8")
            )

            self.log("Adding {} to update paths".format(version), level=INFO)
            self.update_paths[version] = {
                "appVersion": app_version,
                "locales": getPlatformLocales(shipped_locales, self.config["platform"]),
                "buildID": buildID,
            }
            # Apply any configured mar channel id overrides whose version
            # regex matches this release.
            for pattern, mar_channel_ids in self.config[
                "mar_channel_id_overrides"
            ].items():
                if re.match(pattern, version):
                    self.update_paths[version]["marChannelIds"] = mar_channel_ids
+
+ def gather_info(self):
+ from mozilla_version.gecko import GeckoVersion
+
+ self._get_update_paths()
+ if self.update_paths:
+ self.log("Found update paths:", level=DEBUG)
+ self.log(pprint.pformat(self.update_paths), level=DEBUG)
+ elif GeckoVersion.parse(self.config["to_version"]) <= GeckoVersion.parse(
+ self.config["last_watershed"]
+ ):
+ self.log(
+ "Didn't find any update paths, but to_version {} is before the last_"
+ "watershed {}, generating empty config".format(
+ self.config["to_version"],
+ self.config["last_watershed"],
+ ),
+ level=WARNING,
+ )
+ else:
+ self.log("Didn't find any update paths, cannot continue", level=FATAL)
+
    def create_config(self):
        """Build the in-memory update verify config.

        Creates an UpdateVerifyConfig describing the target ("to") release,
        then adds one entry per source ("from") version collected by
        gather_info, choosing between partial+complete, full-check, and
        quick (no-download) verification per version/locale.
        """
        from mozrelease.l10n import getPlatformLocales
        from mozrelease.platforms import ftp2updatePlatforms
        from mozrelease.update_verify import UpdateVerifyConfig
        from mozrelease.paths import (
            getCandidatesDir,
            getReleasesDir,
            getReleaseInstallerPath,
        )
        from mozrelease.versions import getPrettyVersion

        # The "to" build is staged in the candidates directory (not yet released).
        candidates_dir = getCandidatesDir(
            self.config["stage_product"],
            self.config["to_version"],
            self.config["to_build_number"],
        )
        to_ = getReleaseInstallerPath(
            self.config["product"],
            self.config["product"].title(),
            self.config["to_version"],
            self.config["platform"],
            locale="%locale%",
        )
        to_path = "{}/{}".format(candidates_dir, to_)

        to_display_version = self.config.get("to_display_version")
        if not to_display_version:
            to_display_version = getPrettyVersion(self.config["to_version"])

        self.update_verify_config = UpdateVerifyConfig(
            product=self.config["product"].title(),
            channel=self.config["channel"],
            aus_server=self.config["aus_server"],
            to=to_path,
            to_build_id=self.config["to_buildid"],
            to_app_version=self.config["to_app_version"],
            to_display_version=to_display_version,
            override_certs=self.config.get("override_certs"),
        )

        to_shipped_locales_url = urljoin(
            self.config["hg_server"],
            "{}/raw-file/{}/{}/locales/shipped-locales".format(
                self.config["repo_path"],
                self.config["to_revision"],
                self.config["app_name"],
            ),
        )
        to_shipped_locales = (
            self._retry_download(to_shipped_locales_url, "WARNING")
            .read()
            .strip()
            .decode("utf-8")
        )
        to_locales = set(
            getPlatformLocales(to_shipped_locales, self.config["platform"])
        )

        completes_only_index = 0
        # Walk versions newest-to-oldest; completes_only_index feeds
        # is_triangualar() so full checks are sampled across versions.
        for fromVersion in reversed(sorted(self.update_paths, key=LooseVersion)):
            from_ = self.update_paths[fromVersion]
            # Only verify locales shipped on both sides of the update.
            locales = sorted(list(set(from_["locales"]).intersection(to_locales)))
            appVersion = from_["appVersion"]
            build_id = from_["buildID"]
            mar_channel_IDs = from_.get("marChannelIds")

            # Use new build targets for Windows, but only on compatible
            # versions (42+). See bug 1185456 for additional context.
            if self.config["platform"] not in ("win32", "win64") or LooseVersion(
                fromVersion
            ) < LooseVersion("42.0"):
                update_platform = ftp2updatePlatforms(self.config["platform"])[0]
            else:
                update_platform = ftp2updatePlatforms(self.config["platform"])[1]

            release_dir = getReleasesDir(self.config["stage_product"], fromVersion)
            path_ = getReleaseInstallerPath(
                self.config["product"],
                self.config["product"].title(),
                fromVersion,
                self.config["platform"],
                locale="%locale%",
            )
            from_path = "{}/{}".format(release_dir, path_)

            # The updater binary may come from a different platform directory
            # (config key "updater_platform") than the installers themselves.
            updater_package = "{}/{}".format(
                release_dir,
                getReleaseInstallerPath(
                    self.config["product"],
                    self.config["product"].title(),
                    fromVersion,
                    self.config["updater_platform"],
                    locale="%locale%",
                ),
            )

            # Exclude locales being full checked
            quick_check_locales = [
                l for l in locales if l not in self.config["full_check_locales"]
            ]
            # Get the intersection of from and to full_check_locales
            this_full_check_locales = [
                l for l in self.config["full_check_locales"] if l in locales
            ]

            if fromVersion in self.config["partial_versions"]:
                self.info(
                    "Generating configs for partial update checks for %s" % fromVersion
                )
                self.update_verify_config.addRelease(
                    release=appVersion,
                    build_id=build_id,
                    locales=locales,
                    patch_types=["complete", "partial"],
                    from_path=from_path,
                    ftp_server_from=self.config["previous_archive_prefix"],
                    ftp_server_to=self.config["archive_prefix"],
                    mar_channel_IDs=mar_channel_IDs,
                    platform=update_platform,
                    updater_package=updater_package,
                )
            else:
                if this_full_check_locales and is_triangualar(completes_only_index):
                    self.info("Generating full check configs for %s" % fromVersion)
                    self.update_verify_config.addRelease(
                        release=appVersion,
                        build_id=build_id,
                        locales=this_full_check_locales,
                        from_path=from_path,
                        ftp_server_from=self.config["previous_archive_prefix"],
                        ftp_server_to=self.config["archive_prefix"],
                        mar_channel_IDs=mar_channel_IDs,
                        platform=update_platform,
                        updater_package=updater_package,
                    )
                # Quick test for other locales, no download
                if len(quick_check_locales) > 0:
                    self.info("Generating quick check configs for %s" % fromVersion)
                    if not is_triangualar(completes_only_index):
                        # Assuming we skipped full check locales, using all locales
                        _locales = locales
                    else:
                        # Excluding full check locales from the quick check
                        _locales = quick_check_locales
                    self.update_verify_config.addRelease(
                        release=appVersion,
                        build_id=build_id,
                        locales=_locales,
                        platform=update_platform,
                    )
                completes_only_index += 1
+
+ def write_config(self):
+ # Needs to be opened in "bytes" mode because we perform relative seeks on it
+ with open(self.config["output_file"], "wb+") as fh:
+ self.update_verify_config.write(fh)
+
+
# Script entry point: run the requested actions and exit with their status.
if __name__ == "__main__":
    UpdateVerifyConfigCreator().run_and_exit()
diff --git a/testing/mozharness/scripts/repackage.py b/testing/mozharness/scripts/repackage.py
new file mode 100644
index 0000000000..41496159a2
--- /dev/null
+++ b/testing/mozharness/scripts/repackage.py
@@ -0,0 +1,176 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import
+import os
+import sys
+
+sys.path.insert(1, os.path.dirname(sys.path[0])) # noqa - don't warn about imports
+
+from mozharness.base.log import FATAL
+from mozharness.base.script import BaseScript
+
+
class Repackage(BaseScript):
    """Mozharness script that repackages an existing build via ``mach repackage``.

    Actions:
        setup     -- fetch toolchain artifacts, make the ``mar`` tool
                     executable and (optionally) run configure.
        repackage -- invoke ``mach repackage`` once per entry in the
                     ``repackage_config`` config list.
    """

    def __init__(self, require_config_file=False):
        script_kwargs = {
            "all_actions": [
                "setup",
                "repackage",
            ],
        }
        BaseScript.__init__(
            self, require_config_file=require_config_file, **script_kwargs
        )

    def setup(self):
        """Prepare the workspace before repackaging.

        Runs the tooltool/toolchain fetch, ensures the fetched ``mar``
        binary is executable, and — unless ``run_configure`` is disabled —
        installs the mozconfig and runs configure.
        """
        dirs = self.query_abs_dirs()

        self._run_tooltool()

        mar_path = os.path.join(dirs["abs_input_dir"], "mar")
        if self._is_windows():
            mar_path += ".exe"
        # The fetched artifact may not carry the executable bit.
        # (mar_path is always a non-empty string here, so only the
        # existence check is needed.)
        if os.path.exists(mar_path):
            self.chmod(mar_path, 0o755)
        if self.config.get("run_configure", True):
            self._get_mozconfig()
            self._run_configure()

    def query_abs_dirs(self):
        """Return (and cache) the dict of absolute directories.

        Adds ``abs_input_dir`` (the fetches dir) and ``abs_output_dir``
        (suffixed by locale and/or repack_id so parallel repacks don't
        collide).
        """
        if self.abs_dirs:
            return self.abs_dirs
        abs_dirs = super(Repackage, self).query_abs_dirs()
        config = self.config

        dirs = {}
        dirs["abs_input_dir"] = os.path.join(abs_dirs["base_work_dir"], "fetches")
        output_dir_suffix = []
        if config.get("locale"):
            output_dir_suffix.append(config["locale"])
        if config.get("repack_id"):
            output_dir_suffix.append(config["repack_id"])
        dirs["abs_output_dir"] = os.path.join(
            abs_dirs["abs_work_dir"], "outputs", *output_dir_suffix
        )
        # Only add keys the base class hasn't already defined.
        for key in dirs.keys():
            if key not in abs_dirs:
                abs_dirs[key] = dirs[key]
        self.abs_dirs = abs_dirs
        return self.abs_dirs

    def repackage(self):
        """Run ``mach repackage`` for every configured repackage job."""
        config = self.config
        dirs = self.query_abs_dirs()

        # Substitution values available to the arg templates in
        # repackage_config entries (plus all absolute dirs below).
        subst = {
            "package-name": config["package-name"],
            # sfx-stub is only defined for Windows targets
            "sfx-stub": config.get("sfx-stub"),
            "installer-tag": config["installer-tag"],
            "stub-installer-tag": config["stub-installer-tag"],
            "wsx-stub": config["wsx-stub"],
        }
        subst.update(dirs)
        if config.get("fetch-dir"):
            subst.update({"fetch-dir": os.path.abspath(config["fetch-dir"])})

        # Make sure the upload dir is around.
        self.mkdir_p(dirs["abs_output_dir"])

        for repack_config in config["repackage_config"]:
            command = [sys.executable, "mach", "--log-no-times", "repackage"]
            command.extend([arg.format(**subst) for arg in repack_config["args"]])
            for arg, filename in repack_config["inputs"].items():
                command.extend(
                    [
                        "--{}".format(arg),
                        os.path.join(dirs["abs_input_dir"], filename),
                    ]
                )
            command.extend(
                [
                    "--output",
                    os.path.join(dirs["abs_output_dir"], repack_config["output"]),
                ]
            )
            self.run_command(
                command=command,
                cwd=dirs["abs_src_dir"],
                halt_on_failure=True,
                env=self.query_env(),
            )

    def _run_tooltool(self):
        """Fetch toolchain artifacts named by the tooltool manifest.

        The manifest comes from $TOOLTOOL_MANIFEST or the
        ``tooltool_manifest_src`` config key; when neither is set this is a
        no-op.
        """
        config = self.config
        dirs = self.query_abs_dirs()
        manifest_src = os.environ.get("TOOLTOOL_MANIFEST")
        if not manifest_src:
            manifest_src = config.get("tooltool_manifest_src")
        if not manifest_src:
            return

        cmd = [
            sys.executable,
            "-u",
            os.path.join(dirs["abs_src_dir"], "mach"),
            "artifact",
            "toolchain",
            "-v",
            "--retry",
            "4",
            "--artifact-manifest",
            os.path.join(dirs["abs_src_dir"], "toolchains.json"),
        ]
        # manifest_src is guaranteed truthy past the early return above,
        # so the flag is always passed.
        cmd.extend(
            [
                "--tooltool-manifest",
                os.path.join(dirs["abs_src_dir"], manifest_src),
            ]
        )
        cache = config.get("tooltool_cache")
        if cache:
            cmd.extend(["--cache-dir", cache])
        self.info(str(cmd))
        self.run_command(cmd, cwd=dirs["abs_src_dir"], halt_on_failure=True)

    def _get_mozconfig(self):
        """Locate the in-tree mozconfig, log its contents, and copy it to
        the path ``mach build`` expects (.mozconfig in the source dir).

        Fatals when ``src_mozconfig`` is missing from the config.
        """
        c = self.config
        dirs = self.query_abs_dirs()
        abs_mozconfig_path = ""

        # first determine the mozconfig path
        if c.get("src_mozconfig"):
            self.info("Using in-tree mozconfig")
            abs_mozconfig_path = os.path.join(dirs["abs_src_dir"], c["src_mozconfig"])
        else:
            self.fatal(
                "'src_mozconfig' must be in the config "
                "in order to determine the mozconfig."
            )

        # print its contents
        self.read_from_file(abs_mozconfig_path, error_level=FATAL)

        # finally, copy the mozconfig to a path that 'mach build' expects it to be
        self.copyfile(
            abs_mozconfig_path, os.path.join(dirs["abs_src_dir"], ".mozconfig")
        )

    def _run_configure(self):
        """Run ``mach configure`` in the source dir; halts the script on failure."""
        dirs = self.query_abs_dirs()
        command = [sys.executable, "mach", "--log-no-times", "configure"]
        return self.run_command(
            command=command,
            cwd=dirs["abs_src_dir"],
            output_timeout=60 * 3,
            halt_on_failure=True,
        )
+
+
# Script entry point: run the Repackage actions and exit with their status.
if __name__ == "__main__":
    repack = Repackage()
    repack.run_and_exit()
diff --git a/testing/mozharness/scripts/talos_script.py b/testing/mozharness/scripts/talos_script.py
new file mode 100755
index 0000000000..0ce4792133
--- /dev/null
+++ b/testing/mozharness/scripts/talos_script.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""talos
+
+"""
+
+from __future__ import absolute_import
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.mozilla.testing.talos import Talos
+
# Script entry point: run the Talos harness actions and exit with their status.
if __name__ == "__main__":
    talos = Talos()
    talos.run_and_exit()
diff --git a/testing/mozharness/scripts/telemetry/telemetry_client.py b/testing/mozharness/scripts/telemetry/telemetry_client.py
new file mode 100755
index 0000000000..4f6dfa588d
--- /dev/null
+++ b/testing/mozharness/scripts/telemetry/telemetry_client.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+
+from __future__ import absolute_import
+import copy
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.python import PreScriptAction
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.testbase import (
+ TestingMixin,
+ testing_config_options,
+)
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.vcstools import VCSToolsScript
+
+# General command line arguments for Firefox ui tests
# Options specific to the telemetry harness, merged with the shared testing
# and code-coverage option groups (deep-copied so this module can't mutate
# the shared lists).
telemetry_tests_config_options = (
    [
        [
            ["--allow-software-gl-layers"],
            {
                "action": "store_true",
                "dest": "allow_software_gl_layers",
                "default": False,
                "help": "Permits a software GL implementation (such as LLVMPipe) "
                "to use the GL compositor.",
            },
        ],
        [
            ["--enable-webrender"],
            {
                "action": "store_true",
                "dest": "enable_webrender",
                "default": False,
                "help": "Enable the WebRender compositor in Gecko.",
            },
        ],
        [
            ["--dry-run"],
            {
                # NOTE(review): no "action" is specified, so the parser's
                # default action applies rather than store_true — confirm
                # this flag behaves as the boolean the help text implies.
                "dest": "dry_run",
                "default": False,
                "help": "Only show what was going to be tested.",
            },
        ],
        [
            ["--disable-e10s"],
            {
                "dest": "e10s",
                "action": "store_false",
                "default": True,
                "help": "Disable multi-process (e10s) mode when running tests.",
            },
        ],
        [
            ["--setpref"],
            {
                "dest": "extra_prefs",
                "action": "append",
                "default": [],
                "help": "Extra user prefs.",
            },
        ],
        [
            # NOTE(review): option string embeds "=SYMBOLS_PATH"; this form
            # appears in other mozharness scripts but verify the option
            # parser treats it as intended.
            ["--symbols-path=SYMBOLS_PATH"],
            {
                "dest": "symbols_path",
                "help": "absolute path to directory containing breakpad "
                "symbols, or the url of a zip file containing symbols.",
            },
        ],
        [
            ["--tag=TAG"],
            {
                "dest": "tag",
                "help": "Subset of tests to run (local, remote).",
            },
        ],
    ]
    + copy.deepcopy(testing_config_options)
    + copy.deepcopy(code_coverage_config_options)
)
+
+
class TelemetryTests(TestingMixin, VCSToolsScript, CodeCoverageMixin):
    """Mozharness script that runs the Firefox telemetry harness tests.

    Subclasses provide ``cli_script`` (harness entry point) and
    ``default_tests`` (manifests to run).
    """

    def __init__(
        self,
        config_options=None,
        all_actions=None,
        default_actions=None,
        *args,
        **kwargs
    ):
        config_options = config_options or telemetry_tests_config_options
        actions = [
            "clobber",
            "download-and-extract",
            "create-virtualenv",
            "install",
            "run-tests",
            "uninstall",
        ]

        super(TelemetryTests, self).__init__(
            config_options=config_options,
            all_actions=all_actions or actions,
            default_actions=default_actions or actions,
            *args,
            **kwargs
        )

        # Code which runs in automation has to include the following properties
        self.binary_path = self.config.get("binary_path")
        self.installer_path = self.config.get("installer_path")
        self.installer_url = self.config.get("installer_url")
        self.test_packages_url = self.config.get("test_packages_url")
        self.test_url = self.config.get("test_url")

        if not self.test_url and not self.test_packages_url:
            self.fatal("You must use --test-url, or --test-packages-url")

    @PreScriptAction("create-virtualenv")
    def _pre_create_virtualenv(self, action):
        """Register the telemetry test requirements with the virtualenv."""
        abs_dirs = self.query_abs_dirs()

        requirements = os.path.join(
            abs_dirs["abs_test_install_dir"],
            "config",
            "telemetry_tests_requirements.txt",
        )
        self.register_virtualenv_module(requirements=[requirements], two_pass=True)

    def query_abs_dirs(self):
        """Return (and cache) the dict of absolute working directories."""
        if self.abs_dirs:
            return self.abs_dirs

        abs_dirs = super(TelemetryTests, self).query_abs_dirs()

        abs_test_install_dir = os.path.join(abs_dirs["abs_work_dir"], "tests")

        dirs = {
            "abs_test_install_dir": abs_test_install_dir,
            "abs_telemetry_dir": os.path.join(
                abs_test_install_dir, "telemetry", "marionette"
            ),
            "abs_blob_upload_dir": os.path.join(
                abs_dirs["abs_work_dir"], "blobber_upload_dir"
            ),
        }

        # Only add keys the base class hasn't already defined.
        for key in dirs:
            if key not in abs_dirs:
                abs_dirs[key] = dirs[key]

        self.abs_dirs = abs_dirs

        return self.abs_dirs

    def run_test(self, binary_path, env=None, marionette_port=2828):
        """All required steps for running the tests against an installer."""
        dirs = self.query_abs_dirs()

        # Import the harness to retrieve the location of the cli scripts
        import telemetry_harness

        cmd = [
            self.query_python_path(),
            os.path.join(os.path.dirname(telemetry_harness.__file__), self.cli_script),
            "--binary",
            binary_path,
            "--address",
            "localhost:{}".format(marionette_port),
            # Resource files to serve via local webserver
            "--server-root",
            os.path.join(dirs["abs_telemetry_dir"], "harness", "www"),
            # Use the work dir to get temporary data stored
            "--workspace",
            dirs["abs_work_dir"],
            # logging options
            "--gecko-log=-",  # output from the gecko process redirected to stdout
            "--log-raw=-",  # structured log for output parser redirected to stdout
            # additional reports helpful for Jenkins and inspection via Treeherder
            "--log-html",
            os.path.join(dirs["abs_blob_upload_dir"], "report.html"),
            "--log-xunit",
            os.path.join(dirs["abs_blob_upload_dir"], "report.xml"),
            # Enable tracing output to log transmission protocol
            "-vv",
        ]

        if self.config["enable_webrender"]:
            cmd.extend(["--enable-webrender"])

        cmd.extend(["--setpref={}".format(p) for p in self.config["extra_prefs"]])

        if not self.config["e10s"]:
            cmd.append("--disable-e10s")

        parser = StructuredOutputParser(
            config=self.config, log_obj=self.log_obj, strict=False
        )

        # Add the default tests to run
        tests = [
            os.path.join(dirs["abs_telemetry_dir"], "tests", test)
            for test in self.default_tests
        ]
        cmd.extend(tests)

        # Set further environment settings
        env = env or self.query_env()
        env.update({"MINIDUMP_SAVE_PATH": dirs["abs_blob_upload_dir"]})
        if self.query_minidump_stackwalk():
            env.update({"MINIDUMP_STACKWALK": self.minidump_stackwalk_path})
        env["RUST_BACKTRACE"] = "1"
        env["MOZ_IGNORE_NSS_SHUTDOWN_LEAKS"] = "1"

        # If code coverage is enabled, set GCOV_PREFIX env variable
        if self.config.get("code_coverage"):
            env["GCOV_PREFIX"] = self.gcov_dir

        return_code = self.run_command(
            cmd,
            cwd=dirs["abs_work_dir"],
            output_timeout=300,
            output_parser=parser,
            env=env,
        )

        # Translate the harness exit code into a TBPL status for Treeherder.
        tbpl_status, log_level, _ = parser.evaluate_parser(return_code)
        self.record_status(tbpl_status, level=log_level)

        return return_code

    @PreScriptAction("run-tests")
    def _pre_run_tests(self, action):
        """Abort early when no installer was provided to test against."""
        if not self.installer_path and not self.installer_url:
            self.critical(
                "Please specify an installer via --installer-path or --installer-url."
            )
            sys.exit(1)

    def run_tests(self):
        """Run all the tests"""
        return self.run_test(
            binary_path=self.binary_path,
            env=self.query_env(),
        )
+
+
class TelemetryClientTests(TelemetryTests):
    """Concrete runner for the telemetry client and unit test suites."""

    # Harness entry script, resolved relative to the telemetry_harness package.
    cli_script = "runtests.py"
    # Manifests executed by default.
    default_tests = [
        os.path.join("client", "manifest.ini"),
        os.path.join("unit", "manifest.ini"),
    ]
+
+
# Script entry point: run the telemetry client test actions and exit.
if __name__ == "__main__":
    myScript = TelemetryClientTests()
    myScript.run_and_exit()
diff --git a/testing/mozharness/scripts/web_platform_tests.py b/testing/mozharness/scripts/web_platform_tests.py
new file mode 100755
index 0000000000..d0fcf54c04
--- /dev/null
+++ b/testing/mozharness/scripts/web_platform_tests.py
@@ -0,0 +1,668 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+from __future__ import absolute_import
+import copy
+import gzip
+import json
+import os
+import sys
+
+from datetime import datetime, timedelta
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+import mozinfo
+
+from mozharness.base.errors import BaseErrorList
+from mozharness.base.script import PreScriptAction
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.automation import TBPL_RETRY
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.errors import WptHarnessErrorList
+
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.base.log import INFO
+
+
+class WebPlatformTest(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin):
    # Command line options accepted by this script, merged with the generic
    # testing and code-coverage option groups (deep-copied so this class
    # can't mutate the shared lists).
    config_options = (
        [
            [
                ["--test-type"],
                {
                    "action": "extend",
                    "dest": "test_type",
                    "help": "Specify the test types to run.",
                },
            ],
            [
                ["--disable-e10s"],
                {
                    "action": "store_false",
                    "dest": "e10s",
                    "default": True,
                    "help": "Run without e10s enabled",
                },
            ],
            [
                ["--total-chunks"],
                {
                    "action": "store",
                    "dest": "total_chunks",
                    "help": "Number of total chunks",
                },
            ],
            [
                ["--this-chunk"],
                {
                    "action": "store",
                    "dest": "this_chunk",
                    "help": "Number of this chunk",
                },
            ],
            [
                ["--allow-software-gl-layers"],
                {
                    "action": "store_true",
                    "dest": "allow_software_gl_layers",
                    "default": False,
                    "help": "Permits a software GL implementation (such as LLVMPipe) "
                    "to use the GL compositor.",
                },
            ],
            [
                ["--enable-webrender"],
                {
                    "action": "store_true",
                    "dest": "enable_webrender",
                    "default": False,
                    "help": "Enable the WebRender compositor in Gecko.",
                },
            ],
            [
                ["--headless"],
                {
                    "action": "store_true",
                    "dest": "headless",
                    "default": False,
                    "help": "Run tests in headless mode.",
                },
            ],
            [
                ["--headless-width"],
                {
                    "action": "store",
                    "dest": "headless_width",
                    "default": "1600",
                    "help": "Specify headless virtual screen width (default: 1600).",
                },
            ],
            [
                ["--headless-height"],
                {
                    "action": "store",
                    "dest": "headless_height",
                    "default": "1200",
                    "help": "Specify headless virtual screen height (default: 1200).",
                },
            ],
            [
                ["--setpref"],
                {
                    "action": "append",
                    "metavar": "PREF=VALUE",
                    "dest": "extra_prefs",
                    "default": [],
                    "help": "Defines an extra user preference.",
                },
            ],
            [
                ["--skip-implementation-status"],
                {
                    "action": "extend",
                    "dest": "skip_implementation_status",
                    "default": [],
                    "help": "Defines a way to not run a specific implementation status "
                    " (i.e. not implemented).",
                },
            ],
            [
                ["--backlog"],
                {
                    "action": "store_true",
                    "dest": "backlog",
                    "default": False,
                    "help": "Defines if test category is backlog.",
                },
            ],
            [
                ["--skip-timeout"],
                {
                    "action": "store_true",
                    "dest": "skip_timeout",
                    "default": False,
                    "help": "Ignore tests that are expected status of TIMEOUT",
                },
            ],
            [
                ["--include"],
                {
                    "action": "store",
                    "dest": "include",
                    "default": None,
                    "help": "URL prefix to include.",
                },
            ],
        ]
        + copy.deepcopy(testing_config_options)
        + copy.deepcopy(code_coverage_config_options)
    )
+
    def __init__(self, require_config_file=True):
        """Register all actions and mirror install/test locations from config."""
        super(WebPlatformTest, self).__init__(
            config_options=self.config_options,
            all_actions=[
                "clobber",
                "setup-avds",
                "download-and-extract",
                "download-and-process-manifest",
                "create-virtualenv",
                "pull",
                "start-emulator",
                "verify-device",
                "install",
                "run-tests",
            ],
            require_config_file=require_config_file,
            config={"require_test_zip": True},
        )

        # Surely this should be in the superclass
        c = self.config
        self.installer_url = c.get("installer_url")
        self.test_url = c.get("test_url")
        self.test_packages_url = c.get("test_packages_url")
        self.installer_path = c.get("installer_path")
        self.binary_path = c.get("binary_path")
        self.abs_app_dir = None
        self.xre_path = None
        if self.is_emulator:
            # Fixed serial for the emulator this script starts itself
            # (see the start-emulator action).
            self.device_serial = "emulator-5554"
+
+ def query_abs_app_dir(self):
+ """We can't set this in advance, because OSX install directories
+ change depending on branding and opt/debug.
+ """
+ if self.abs_app_dir:
+ return self.abs_app_dir
+ if not self.binary_path:
+ self.fatal("Can't determine abs_app_dir (binary_path not set!)")
+ self.abs_app_dir = os.path.dirname(self.binary_path)
+ return self.abs_app_dir
+
    def query_abs_dirs(self):
        """Return (and cache) the dict of absolute working directories.

        Adds the install/test/blob-upload dirs, plus Android-specific dirs
        (hostutils, AVDs, SDK) when running on Android/emulator.
        """
        if self.abs_dirs:
            return self.abs_dirs
        abs_dirs = super(WebPlatformTest, self).query_abs_dirs()

        dirs = {}
        dirs["abs_app_install_dir"] = os.path.join(
            abs_dirs["abs_work_dir"], "application"
        )
        dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
        dirs["abs_test_bin_dir"] = os.path.join(dirs["abs_test_install_dir"], "bin")
        dirs["abs_wpttest_dir"] = os.path.join(
            dirs["abs_test_install_dir"], "web-platform"
        )
        dirs["abs_blob_upload_dir"] = os.path.join(
            abs_dirs["abs_work_dir"], "blobber_upload_dir"
        )
        dirs["abs_test_extensions_dir"] = os.path.join(
            dirs["abs_test_install_dir"], "extensions"
        )
        if self.is_android:
            dirs["abs_xre_dir"] = os.path.join(abs_dirs["abs_work_dir"], "hostutils")
        if self.is_emulator:
            dirs["abs_avds_dir"] = os.path.join(abs_dirs["abs_work_dir"], ".android")
            # Prefer the SDK provided via MOZ_FETCHES_DIR when available.
            fetches_dir = os.environ.get("MOZ_FETCHES_DIR")
            if fetches_dir:
                dirs["abs_sdk_dir"] = os.path.join(fetches_dir, "android-sdk-linux")
            else:
                dirs["abs_sdk_dir"] = os.path.join(
                    abs_dirs["abs_work_dir"], "android-sdk-linux"
                )
            if self.config["enable_webrender"]:
                # AndroidMixin uses this when launching the emulator. We only want
                # GLES3 if we're running WebRender
                self.use_gles3 = True

        abs_dirs.update(dirs)
        self.abs_dirs = abs_dirs

        return self.abs_dirs
+
+ @PreScriptAction("create-virtualenv")
+ def _pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+
+ requirements = os.path.join(
+ dirs["abs_test_install_dir"], "config", "marionette_requirements.txt"
+ )
+
+ # marionette_requirements.txt must use the legacy resolver until bug 1684969 is resolved.
+ self.register_virtualenv_module(
+ requirements=[requirements], two_pass=True, legacy_resolver=True
+ )
+
+ def _query_geckodriver(self):
+ path = None
+ c = self.config
+ dirs = self.query_abs_dirs()
+ repl_dict = {}
+ repl_dict.update(dirs)
+ path = c.get("geckodriver", "geckodriver")
+ if path:
+ path = path % repl_dict
+ return path
+
    def _query_cmd(self, test_types):
        """Assemble the full command line for the wpt runtests.py harness.

        Combines logging destinations, platform/process tweaks, chunking or
        test-group selection, per-test-type try options, and any explicit
        test paths from MOZHARNESS_TEST_PATHS. Fatals when binary_path is
        unset or a needed geckodriver binary is missing.
        """
        if not self.binary_path:
            self.fatal("Binary path could not be determined")
            # And exit

        c = self.config
        run_file_name = "runtests.py"

        dirs = self.query_abs_dirs()
        abs_app_dir = self.query_abs_app_dir()
        # Values usable in %(...)s templates of configured options/tests.
        str_format_values = {
            "binary_path": self.binary_path,
            "test_path": dirs["abs_wpttest_dir"],
            "test_install_path": dirs["abs_test_install_dir"],
            "abs_app_dir": abs_app_dir,
            "abs_work_dir": dirs["abs_work_dir"],
            "xre_path": self.xre_path,
        }

        cmd = [self.query_python_path("python"), "-u"]
        cmd.append(os.path.join(dirs["abs_wpttest_dir"], run_file_name))

        # Populate mozinfo (os, debug, asan, ...) from the test package.
        mozinfo.find_and_update_from_json(dirs["abs_test_install_dir"])

        raw_log_file, error_summary_file = self.get_indexed_logs(
            dirs["abs_blob_upload_dir"], "wpt"
        )

        cmd += [
            "--log-raw=-",
            "--log-raw=%s" % raw_log_file,
            "--log-wptreport=%s"
            % os.path.join(dirs["abs_blob_upload_dir"], "wptreport.json"),
            "--log-errorsummary=%s" % error_summary_file,
            "--binary=%s" % self.binary_path,
            "--symbols-path=%s" % self.symbols_path,
            "--stackwalk-binary=%s" % self.query_minidump_stackwalk(),
            "--stackfix-dir=%s" % os.path.join(dirs["abs_test_install_dir"], "bin"),
            "--no-pause-after-test",
            "--instrument-to-file=%s"
            % os.path.join(dirs["abs_blob_upload_dir"], "wpt_instruments.txt"),
            "--specialpowers-path=%s"
            % os.path.join(
                dirs["abs_test_extensions_dir"], "specialpowers@mozilla.org.xpi"
            ),
        ]

        is_windows_7 = (
            mozinfo.info["os"] == "win" and mozinfo.info["os_version"] == "6.1"
        )

        # Single-process on Android, wdspec, Fission, and Win7 debug;
        # two content processes everywhere else.
        if (
            self.is_android
            or "wdspec" in test_types
            or "fission.autostart=true" in c["extra_prefs"]
            or
            # Bug 1392106 - skia error 0x80070005: Access is denied.
            is_windows_7
            and mozinfo.info["debug"]
        ):
            processes = 1
        else:
            processes = 2
        cmd.append("--processes=%s" % processes)

        if self.is_android:
            cmd += [
                "--device-serial=%s" % self.device_serial,
                "--package-name=%s" % self.query_package_name(),
            ]

        if is_windows_7:
            # On Windows 7 --install-fonts fails, so fall back to a Firefox-specific codepath
            self._install_fonts()
        else:
            cmd += ["--install-fonts"]

        for test_type in test_types:
            cmd.append("--test-type=%s" % test_type)

        if c["extra_prefs"]:
            cmd.extend(["--setpref={}".format(p) for p in c["extra_prefs"]])

        if not c["e10s"]:
            cmd.append("--disable-e10s")

        if c["enable_webrender"]:
            cmd.append("--enable-webrender")

        if c["skip_timeout"]:
            cmd.append("--skip-timeout")

        for implementation_status in c["skip_implementation_status"]:
            cmd.append("--skip-implementation-status=%s" % implementation_status)

        # Bug 1643177 - reduce timeout multiplier for web-platform-tests backlog
        if c["backlog"]:
            cmd.append("--timeout-multiplier=0.25")

        test_paths = set()
        if not (self.verify_enabled or self.per_test_coverage):
            mozharness_test_paths = json.loads(
                os.environ.get("MOZHARNESS_TEST_PATHS", '""')
            )
            if mozharness_test_paths:
                # Use the group manifest prepared by download-and-process-manifest.
                path = os.path.join(dirs["abs_fetches_dir"], "wpt_tests_by_group.json")

                if not os.path.exists(path):
                    self.critical("Unable to locate web-platform-test groups file.")

                cmd.append("--test-groups={}".format(path))

                for key in mozharness_test_paths.keys():
                    paths = mozharness_test_paths.get(key, [])
                    for path in paths:
                        if not path.startswith("/"):
                            # Assume this is a filesystem path rather than a test id
                            path = os.path.relpath(path, "testing/web-platform")
                            if ".." in path:
                                self.fatal("Invalid WPT path: {}".format(path))
                            path = os.path.join(dirs["abs_wpttest_dir"], path)
                        test_paths.add(path)
            else:
                # As per WPT harness, the --run-by-dir flag is incompatible with
                # the --test-groups flag.
                cmd.append("--run-by-dir=%i" % (3 if not mozinfo.info["asan"] else 0))
                for opt in ["total_chunks", "this_chunk"]:
                    val = c.get(opt)
                    if val:
                        cmd.append("--%s=%s" % (opt.replace("_", "-"), val))

        options = list(c.get("options", []))

        if "wdspec" in test_types:
            geckodriver_path = self._query_geckodriver()
            if not geckodriver_path or not os.path.isfile(geckodriver_path):
                self.fatal(
                    "Unable to find geckodriver binary "
                    "in common test package: %s" % str(geckodriver_path)
                )
            cmd.append("--webdriver-binary=%s" % geckodriver_path)
            cmd.append("--webdriver-arg=-vv")  # enable trace logs

        test_type_suite = {
            "testharness": "web-platform-tests",
            "crashtest": "web-platform-tests-crashtest",
            "print-reftest": "web-platform-tests-print-reftest",
            "reftest": "web-platform-tests-reftest",
            "wdspec": "web-platform-tests-wdspec",
        }
        # Merge in per-suite try-syntax options and tests.
        for test_type in test_types:
            try_options, try_tests = self.try_args(test_type_suite[test_type])

            cmd.extend(
                self.query_options(
                    options, try_options, str_format_values=str_format_values
                )
            )
            cmd.extend(
                self.query_tests_args(try_tests, str_format_values=str_format_values)
            )
        if "include" in c and c["include"]:
            cmd.append("--include=%s" % c["include"])

        cmd.extend(test_paths)

        return cmd
+
    def download_and_extract(self):
        """Fetch and unpack the build plus the web-platform test archives,
        then prepare host utilities (Android) and the blob upload dir."""
        super(WebPlatformTest, self).download_and_extract(
            extract_dirs=[
                "mach",
                "bin/*",
                "config/*",
                "extensions/*",
                "mozbase/*",
                "marionette/*",
                "tools/*",
                "web-platform/*",
                "mozpack/*",
                "mozbuild/*",
            ],
            suite_categories=["web-platform"],
        )
        dirs = self.query_abs_dirs()
        if self.is_android:
            # Host utilities are required to drive tests against the device.
            self.xre_path = self.download_hostutils(dirs["abs_xre_dir"])
        # Make sure that the logging directory exists
        if self.mkdir_p(dirs["abs_blob_upload_dir"]) == -1:
            self.fatal("Could not create blobber upload directory")
            # Exit
+
+ def download_and_process_manifest(self):
+ """Downloads the tests-by-manifest JSON mapping generated by the decision task.
+
+ web-platform-tests are chunked in the decision task as of Bug 1608837
+ and this means tests are resolved by the TestResolver as part of this process.
+
+ The manifest file contains tests keyed by the groups generated in
+ TestResolver.get_wpt_group().
+
+ Upon successful call, a JSON file containing only the web-platform test
+ groups are saved in the fetch directory.
+
+ Bug:
+ 1634554
+ """
+ dirs = self.query_abs_dirs()
+ url = os.environ.get("TESTS_BY_MANIFEST_URL", "")
+ if not url:
+ self.fatal("TESTS_BY_MANIFEST_URL not defined.")
+
+ artifact_name = url.split("/")[-1]
+
+ # Save file to the MOZ_FETCHES dir.
+ self.download_file(
+ url, file_name=artifact_name, parent_dir=dirs["abs_fetches_dir"]
+ )
+
+ with gzip.open(os.path.join(dirs["abs_fetches_dir"], artifact_name), "r") as f:
+ tests_by_manifest = json.loads(f.read())
+
+ # We need to filter out non-web-platform-tests without knowing what the
+ # groups are. Fortunately, all web-platform test 'manifests' begin with a
+ # forward slash.
+ test_groups = {
+ key: tests_by_manifest[key]
+ for key in tests_by_manifest.keys()
+ if key.startswith("/")
+ }
+
+ outfile = os.path.join(dirs["abs_fetches_dir"], "wpt_tests_by_group.json")
+ with open(outfile, "w+") as f:
+ json.dump(test_groups, f, indent=2, sort_keys=True)
+
+ def install(self):
+ if self.is_android:
+ self.install_apk(self.installer_path)
+ else:
+ super(WebPlatformTest, self).install()
+
+ def _install_fonts(self):
+ if self.is_android:
+ return
+ # Ensure the Ahem font is available
+ dirs = self.query_abs_dirs()
+
+ if not sys.platform.startswith("darwin"):
+ font_path = os.path.join(os.path.dirname(self.binary_path), "fonts")
+ else:
+ font_path = os.path.join(
+ os.path.dirname(self.binary_path),
+ os.pardir,
+ "Resources",
+ "res",
+ "fonts",
+ )
+ if not os.path.exists(font_path):
+ os.makedirs(font_path)
+ ahem_src = os.path.join(dirs["abs_wpttest_dir"], "tests", "fonts", "Ahem.ttf")
+ ahem_dest = os.path.join(font_path, "Ahem.ttf")
+ with open(ahem_src, "rb") as src, open(ahem_dest, "wb") as dest:
+ dest.write(src.read())
+
    def run_tests(self):
        """Build the wpt command line(s) and execute the harness.

        In normal mode this runs a single harness invocation covering the
        configured test types.  In verify / per-test-coverage mode, each
        modified test gets its own invocation (per suite category), subject
        to a wall-clock budget and a cap on the number of tests so the task
        cannot hit its own timeout.
        """
        dirs = self.query_abs_dirs()

        # Structured-log parser: converts harness output into a TBPL status
        # and an error summary; crashes are allowed (reported, not fatal).
        parser = StructuredOutputParser(
            config=self.config,
            log_obj=self.log_obj,
            log_compact=True,
            error_list=BaseErrorList + WptHarnessErrorList,
            allow_crashes=True,
        )

        # Minidumps written here become task artifacts.
        env = {"MINIDUMP_SAVE_PATH": dirs["abs_blob_upload_dir"]}
        env["RUST_BACKTRACE"] = "full"

        if self.config["allow_software_gl_layers"]:
            env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"
        if self.config["headless"]:
            env["MOZ_HEADLESS"] = "1"
            env["MOZ_HEADLESS_WIDTH"] = self.config["headless_width"]
            env["MOZ_HEADLESS_HEIGHT"] = self.config["headless_height"]

        env["STYLO_THREADS"] = "4"

        if self.is_android:
            env["ADB_PATH"] = self.adb_path

        env = self.query_env(partial_env=env, log_level=INFO)

        # Per-test (verify) mode budget: stop scheduling new tests once
        # either the wall-clock limit or the test-count limit is reached.
        start_time = datetime.now()
        max_per_test_time = timedelta(minutes=60)
        max_per_test_tests = 10
        if self.per_test_coverage:
            max_per_test_tests = 30
        executed_tests = 0
        executed_too_many_tests = False

        if self.per_test_coverage or self.verify_enabled:
            suites = self.query_per_test_category_suites(None, None)
            if "wdspec" in suites:
                # geckodriver is required for wdspec, but not always available
                geckodriver_path = self._query_geckodriver()
                if not geckodriver_path or not os.path.isfile(geckodriver_path):
                    suites.remove("wdspec")
                    self.info("Skipping 'wdspec' tests - no geckodriver")
        else:
            # Normal mode: one pass over the configured test types.
            test_types = self.config.get("test_type", [])
            suites = [None]
        for suite in suites:
            if executed_too_many_tests and not self.per_test_coverage:
                continue

            if suite:
                test_types = [suite]

            # Summary is accumulated across invocations within a suite.
            summary = {}
            for per_test_args in self.query_args(suite):
                # Make sure baseline code coverage tests are never
                # skipped and that having them run has no influence
                # on the max number of actual tests that are to be run.
                is_baseline_test = (
                    "baselinecoverage" in per_test_args[-1]
                    if self.per_test_coverage
                    else False
                )
                if executed_too_many_tests and not is_baseline_test:
                    continue

                if not is_baseline_test:
                    if (datetime.now() - start_time) > max_per_test_time:
                        # Running tests has run out of time. That is okay! Stop running
                        # them so that a task timeout is not triggered, and so that
                        # (partial) results are made available in a timely manner.
                        self.info(
                            "TinderboxPrint: Running tests took too long: Not all tests "
                            "were executed.<br/>"
                        )
                        return
                    if executed_tests >= max_per_test_tests:
                        # When changesets are merged between trees or many tests are
                        # otherwise updated at once, there probably is not enough time
                        # to run all tests, and attempting to do so may cause other
                        # problems, such as generating too much log output.
                        self.info(
                            "TinderboxPrint: Too many modified tests: Not all tests "
                            "were executed.<br/>"
                        )
                        executed_too_many_tests = True

                    executed_tests = executed_tests + 1

                cmd = self._query_cmd(test_types)
                cmd.extend(per_test_args)

                # Copy the env per invocation so coverage settings do not
                # leak between runs.
                final_env = copy.copy(env)

                if self.per_test_coverage:
                    self.set_coverage_env(final_env, is_baseline_test)

                return_code = self.run_command(
                    cmd,
                    cwd=dirs["abs_work_dir"],
                    output_timeout=1000,
                    output_parser=parser,
                    env=final_env,
                )

                if self.per_test_coverage:
                    self.add_per_test_coverage_report(
                        final_env, suite, per_test_args[-1]
                    )

                # Fold this invocation's result into the running summary and
                # record the aggregated TBPL status.
                tbpl_status, log_level, summary = parser.evaluate_parser(
                    return_code, previous_summary=summary
                )
                self.record_status(tbpl_status, level=log_level)

                if len(per_test_args) > 0:
                    self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
                    if tbpl_status == TBPL_RETRY:
                        self.info("Per-test run abandoned due to RETRY status")
                        return
+
+
# main {{{1
if __name__ == "__main__":
    # Script entry point: construct the harness wrapper and run it.
    WebPlatformTest().run_and_exit()