author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
commit     43a97878ce14b72f0981164f87f2e35e14151312
tree       620249daf56c0258faa40cbdcf9cfba06de2a846  /testing/mozharness
parent     Initial commit.
Adding upstream version 110.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r--  testing/mozharness/LICENSE | 373
-rw-r--r--  testing/mozharness/README.txt | 32
-rw-r--r--  testing/mozharness/configs/android/android-x86_64-profile-generation.py | 50
-rw-r--r--  testing/mozharness/configs/android/android-x86_64.py | 47
-rw-r--r--  testing/mozharness/configs/android/android_common.py | 349
-rw-r--r--  testing/mozharness/configs/android/android_hw.py | 28
-rw-r--r--  testing/mozharness/configs/android/android_pgo.py | 20
-rw-r--r--  testing/mozharness/configs/android/wrench.py | 41
-rw-r--r--  testing/mozharness/configs/awsy/linux_config.py | 30
-rw-r--r--  testing/mozharness/configs/awsy/macosx_config.py | 28
-rw-r--r--  testing/mozharness/configs/awsy/taskcluster_windows_config.py | 30
-rw-r--r--  testing/mozharness/configs/balrog/production.py | 35
-rw-r--r--  testing/mozharness/configs/balrog/staging.py | 18
-rw-r--r--  testing/mozharness/configs/builds/build_pool_specifics.py | 14
-rw-r--r--  testing/mozharness/configs/builds/releng_base_android_64_builds.py | 58
-rw-r--r--  testing/mozharness/configs/builds/releng_base_firefox.py | 7
-rw-r--r--  testing/mozharness/configs/builds/releng_base_linux_32_builds.py | 60
-rw-r--r--  testing/mozharness/configs/builds/releng_base_linux_64_builds.py | 59
-rw-r--r--  testing/mozharness/configs/builds/releng_base_mac_64_cross_builds.py | 55
-rw-r--r--  testing/mozharness/configs/builds/releng_base_windows_32_mingw_builds.py | 53
-rw-r--r--  testing/mozharness/configs/builds/releng_base_windows_64_mingw_builds.py | 54
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_beta.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_beta_debug.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_debug.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_debug_lite.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_lite.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_arm.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_beta.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_beta_debug.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug_ccov.py | 15
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug_lite.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug_searchfox.py | 12
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_gradle_dependencies.py | 16
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_lite.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_partner_sample1.py | 10
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_partner_sample1_lite.py | 10
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_profile_generate_lite.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_fuzzing_asan_tc.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_fuzzing_debug.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_geckoview_docs.py | 26
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_beta.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_beta_debug.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_debug.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_debug_isolated_process.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_debug_lite.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_lite.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_profile_generate.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_beta.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_beta_debug.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_debug.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_debug_lite.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_lite.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_profile_generate.py | 8
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/32_debug.py | 26
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/32_fuzzing_asan_tc.py | 27
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/32_fuzzing_debug.py | 26
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/32_rusttests.py | 28
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/32_rusttests_debug.py | 28
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_add-on-devel.py | 24
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan.py | 30
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_and_debug.py | 31
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_reporter_tc.py | 27
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_tc.py | 25
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_tc_and_debug.py | 26
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_code_coverage_debug.py | 25
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_code_coverage_opt.py | 25
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_debug.py | 26
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_fuzzing_asan_tc.py | 27
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_fuzzing_debug.py | 27
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_fuzzing_tsan_tc.py | 25
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_noopt_debug.py | 24
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_rusttests.py | 27
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_rusttests_debug.py | 26
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_searchfox_and_debug.py | 40
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_source.py | 15
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_stat_and_debug.py | 37
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_tsan_tc.py | 25
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_linux_configs/64_valgrind.py | 30
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_mac_configs/64_add-on-devel.py | 25
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_mac_configs/64_code_coverage_debug.py | 26
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_mac_configs/64_code_coverage_opt.py | 25
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_debug.py | 26
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_debug_searchfox.py | 33
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_fuzzing_asan.py | 27
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_fuzzing_debug.py | 28
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_noopt_debug.py | 26
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_mac_configs/64_debug.py | 32
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_mac_configs/64_stat_and_debug.py | 33
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_mac_configs/rusttests.py | 12
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_windows_configs/32_add-on-devel.py | 27
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_windows_configs/32_debug.py | 28
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_windows_configs/32_mingwclang.py | 9
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_windows_configs/32_stat_and_debug.py | 27
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_windows_configs/64_add-on-devel.py | 26
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_windows_configs/64_debug.py | 27
-rw-r--r--  testing/mozharness/configs/builds/releng_sub_windows_configs/64_mingwclang.py | 9
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_base_macosx.py | 44
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_base_win32.py | 8
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_base_win64.py | 8
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_base_windows.py | 46
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_sub_win32/debug.py | 11
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_sub_win32/noopt_debug.py | 11
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_sub_win64/asan_debug.py | 11
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_sub_win64/asan_reporter_opt.py | 8
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_sub_win64/ccov_opt.py | 10
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_sub_win64/debug.py | 11
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_sub_win64/noopt_debug.py | 11
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_sub_win64/plain_opt.py | 12
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_sub_win64/rusttests_opt.py | 15
-rw-r--r--  testing/mozharness/configs/builds/taskcluster_sub_win64/searchfox_debug.py | 15
-rw-r--r--  testing/mozharness/configs/developer_config.py | 44
-rw-r--r--  testing/mozharness/configs/firefox_ui_tests/qa_jenkins.py | 13
-rw-r--r--  testing/mozharness/configs/firefox_ui_tests/releng_release.py | 29
-rw-r--r--  testing/mozharness/configs/firefox_ui_tests/taskcluster.py | 11
-rw-r--r--  testing/mozharness/configs/firefox_ui_tests/taskcluster_mac.py | 8
-rw-r--r--  testing/mozharness/configs/firefox_ui_tests/taskcluster_windows.py | 17
-rw-r--r--  testing/mozharness/configs/l10n_bumper/jamun.py | 83
-rw-r--r--  testing/mozharness/configs/l10n_bumper/mozilla-beta.py | 87
-rw-r--r--  testing/mozharness/configs/l10n_bumper/mozilla-central.py | 88
-rw-r--r--  testing/mozharness/configs/l10n_bumper/mozilla-esr68.py | 46
-rw-r--r--  testing/mozharness/configs/marionette/mac_taskcluster_config.py | 39
-rw-r--r--  testing/mozharness/configs/marionette/prod_config.py | 68
-rw-r--r--  testing/mozharness/configs/marionette/test_config.py | 29
-rw-r--r--  testing/mozharness/configs/marionette/windows_config.py | 38
-rw-r--r--  testing/mozharness/configs/marionette/windows_taskcluster_config.py | 148
-rw-r--r--  testing/mozharness/configs/multi_locale/android-mozharness-build.json | 11
-rw-r--r--  testing/mozharness/configs/openh264/linux32.py | 41
-rw-r--r--  testing/mozharness/configs/openh264/linux64.py | 41
-rw-r--r--  testing/mozharness/configs/openh264/macosx64-aarch64.py | 41
-rw-r--r--  testing/mozharness/configs/openh264/macosx64.py | 45
-rw-r--r--  testing/mozharness/configs/openh264/tooltool-manifests/win.manifest | 10
-rw-r--r--  testing/mozharness/configs/openh264/tooltool-manifests/win64-aarch64.manifest | 11
-rw-r--r--  testing/mozharness/configs/openh264/win32.py | 52
-rw-r--r--  testing/mozharness/configs/openh264/win64-aarch64.py | 53
-rw-r--r--  testing/mozharness/configs/openh264/win64.py | 51
-rw-r--r--  testing/mozharness/configs/partner_repacks/release_mozilla-release_desktop.py | 19
-rw-r--r--  testing/mozharness/configs/partner_repacks/release_mozilla-release_desktop_EME-free.py | 19
-rw-r--r--  testing/mozharness/configs/partner_repacks/staging_release_mozilla-release_desktop.py | 19
-rw-r--r--  testing/mozharness/configs/raptor/android_hw_config.py | 27
-rw-r--r--  testing/mozharness/configs/raptor/linux64_config_taskcluster.py | 32
-rw-r--r--  testing/mozharness/configs/raptor/linux_config.py | 24
-rw-r--r--  testing/mozharness/configs/raptor/mac_config.py | 27
-rw-r--r--  testing/mozharness/configs/raptor/windows_config.py | 84
-rw-r--r--  testing/mozharness/configs/raptor/windows_vm_config.py | 53
-rw-r--r--  testing/mozharness/configs/releases/bouncer_firefox_beta.py | 152
-rw-r--r--  testing/mozharness/configs/releases/bouncer_firefox_devedition.py | 124
-rw-r--r--  testing/mozharness/configs/releases/bouncer_firefox_esr.py | 156
-rw-r--r--  testing/mozharness/configs/releases/bouncer_firefox_nightly.py | 80
-rw-r--r--  testing/mozharness/configs/releases/bouncer_firefox_release.py | 152
-rw-r--r--  testing/mozharness/configs/releases/dev_postrelease_fennec_beta.py | 24
-rw-r--r--  testing/mozharness/configs/releases/dev_postrelease_fennec_release.py | 26
-rw-r--r--  testing/mozharness/configs/releases/dev_postrelease_firefox_beta.py | 24
-rw-r--r--  testing/mozharness/configs/releases/dev_postrelease_firefox_release.py | 26
-rw-r--r--  testing/mozharness/configs/releases/dev_postrelease_firefox_release_birch.py | 26
-rw-r--r--  testing/mozharness/configs/releases/dev_updates_firefox_beta.py | 42
-rw-r--r--  testing/mozharness/configs/releases/dev_updates_firefox_devedition.py | 45
-rw-r--r--  testing/mozharness/configs/releases/dev_updates_firefox_release.py | 55
-rw-r--r--  testing/mozharness/configs/releases/dev_updates_firefox_release_birch.py | 55
-rw-r--r--  testing/mozharness/configs/releases/updates_firefox_beta.py | 38
-rw-r--r--  testing/mozharness/configs/releases/updates_firefox_devedition.py | 42
-rw-r--r--  testing/mozharness/configs/releases/updates_firefox_release.py | 52
-rw-r--r--  testing/mozharness/configs/remove_executables.py | 7
-rw-r--r--  testing/mozharness/configs/repackage/base.py | 13
-rw-r--r--  testing/mozharness/configs/repackage/linux32_signed.py | 14
-rw-r--r--  testing/mozharness/configs/repackage/linux64_signed.py | 14
-rw-r--r--  testing/mozharness/configs/repackage/osx_partner.py | 12
-rw-r--r--  testing/mozharness/configs/repackage/osx_signed.py | 13
-rw-r--r--  testing/mozharness/configs/repackage/win32_partner.py | 15
-rw-r--r--  testing/mozharness/configs/repackage/win32_sfx_stub.py | 7
-rw-r--r--  testing/mozharness/configs/repackage/win32_signed.py | 15
-rw-r--r--  testing/mozharness/configs/repackage/win64-aarch64_sfx_stub.py | 7
-rw-r--r--  testing/mozharness/configs/repackage/win64_partner.py | 15
-rw-r--r--  testing/mozharness/configs/repackage/win64_signed.py | 15
-rw-r--r--  testing/mozharness/configs/servo/mac.py | 7
-rw-r--r--  testing/mozharness/configs/single_locale/devedition.py | 9
-rw-r--r--  testing/mozharness/configs/single_locale/firefox.py | 9
-rw-r--r--  testing/mozharness/configs/single_locale/linux32.py | 11
-rw-r--r--  testing/mozharness/configs/single_locale/linux64.py | 10
-rw-r--r--  testing/mozharness/configs/single_locale/macosx64.py | 12
-rw-r--r--  testing/mozharness/configs/single_locale/tc_android-arm.py | 35
-rw-r--r--  testing/mozharness/configs/single_locale/tc_common.py | 11
-rw-r--r--  testing/mozharness/configs/single_locale/tc_linux32.py | 16
-rw-r--r--  testing/mozharness/configs/single_locale/tc_linux_common.py | 16
-rw-r--r--  testing/mozharness/configs/single_locale/tc_macosx64.py | 16
-rw-r--r--  testing/mozharness/configs/single_locale/tc_win32.py | 17
-rw-r--r--  testing/mozharness/configs/single_locale/tc_win64.py | 17
-rw-r--r--  testing/mozharness/configs/single_locale/win32.py | 10
-rw-r--r--  testing/mozharness/configs/single_locale/win64-aarch64.py | 9
-rw-r--r--  testing/mozharness/configs/single_locale/win64.py | 9
-rw-r--r--  testing/mozharness/configs/talos/linux64_config_taskcluster.py | 32
-rw-r--r--  testing/mozharness/configs/talos/linux_config.py | 23
-rw-r--r--  testing/mozharness/configs/talos/mac_config.py | 26
-rw-r--r--  testing/mozharness/configs/talos/windows_config.py | 62
-rw-r--r--  testing/mozharness/configs/talos/windows_taskcluster_config.py | 29
-rw-r--r--  testing/mozharness/configs/talos/windows_vm_config.py | 31
-rw-r--r--  testing/mozharness/configs/taskcluster_nightly.py | 8
-rw-r--r--  testing/mozharness/configs/test/example_config1.json | 5
-rw-r--r--  testing/mozharness/configs/test/example_config2.py | 5
-rw-r--r--  testing/mozharness/configs/test/test.illegal_suffix | 20
-rw-r--r--  testing/mozharness/configs/test/test.json | 20
-rw-r--r--  testing/mozharness/configs/test/test.py | 13
-rw-r--r--  testing/mozharness/configs/test/test_malformed.json | 20
-rw-r--r--  testing/mozharness/configs/test/test_malformed.py | 22
-rw-r--r--  testing/mozharness/configs/test/test_optional.py | 4
-rw-r--r--  testing/mozharness/configs/test/test_override.py | 7
-rw-r--r--  testing/mozharness/configs/test/test_override2.py | 6
-rw-r--r--  testing/mozharness/configs/unittests/linux_unittest.py | 292
-rw-r--r--  testing/mozharness/configs/unittests/mac_unittest.py | 231
-rw-r--r--  testing/mozharness/configs/unittests/win_unittest.py | 341
-rw-r--r--  testing/mozharness/configs/web_platform_tests/prod_config.py | 50
-rw-r--r--  testing/mozharness/configs/web_platform_tests/prod_config_android.py | 25
-rw-r--r--  testing/mozharness/configs/web_platform_tests/prod_config_mac.py | 50
-rw-r--r--  testing/mozharness/configs/web_platform_tests/prod_config_windows.py | 58
-rw-r--r--  testing/mozharness/configs/web_platform_tests/prod_config_windows_taskcluster.py | 134
-rw-r--r--  testing/mozharness/configs/web_platform_tests/test_config.py | 24
-rw-r--r--  testing/mozharness/configs/web_platform_tests/test_config_windows.py | 30
-rw-r--r--  testing/mozharness/docs/Makefile | 177
-rw-r--r--  testing/mozharness/docs/android_emulator_build.rst | 7
-rw-r--r--  testing/mozharness/docs/android_emulator_unittest.rst | 7
-rw-r--r--  testing/mozharness/docs/bouncer_submitter.rst | 8
-rw-r--r--  testing/mozharness/docs/conf.py | 268
-rw-r--r--  testing/mozharness/docs/configtest.rst | 7
-rw-r--r--  testing/mozharness/docs/desktop_l10n.rst | 7
-rw-r--r--  testing/mozharness/docs/desktop_unittest.rst | 7
-rw-r--r--  testing/mozharness/docs/fx_desktop_build.rst | 7
-rw-r--r--  testing/mozharness/docs/index.rst | 23
-rw-r--r--  testing/mozharness/docs/marionette.rst | 7
-rw-r--r--  testing/mozharness/docs/mobile_partner_repack.rst | 7
-rw-r--r--  testing/mozharness/docs/modules.rst | 13
-rw-r--r--  testing/mozharness/docs/mozharness.base.rst | 85
-rw-r--r--  testing/mozharness/docs/mozharness.base.vcs.rst | 37
-rw-r--r--  testing/mozharness/docs/mozharness.mozilla.building.rst | 22
-rw-r--r--  testing/mozharness/docs/mozharness.mozilla.l10n.rst | 30
-rw-r--r--  testing/mozharness/docs/mozharness.mozilla.rst | 63
-rw-r--r--  testing/mozharness/docs/mozharness.mozilla.testing.rst | 46
-rw-r--r--  testing/mozharness/docs/mozharness.rst | 18
-rw-r--r--  testing/mozharness/docs/multil10n.rst | 7
-rw-r--r--  testing/mozharness/docs/scripts.rst | 16
-rw-r--r--  testing/mozharness/docs/talos_script.rst | 7
-rw-r--r--  testing/mozharness/docs/web_platform_tests.rst | 7
-rwxr-xr-x  testing/mozharness/examples/action_config_script.py | 158
-rwxr-xr-x  testing/mozharness/examples/silent_script.py | 26
-rwxr-xr-x  testing/mozharness/examples/venv.py | 51
-rwxr-xr-x  testing/mozharness/examples/verbose_script.py | 70
-rw-r--r--  testing/mozharness/external_tools/__init__.py | 0
-rwxr-xr-x  testing/mozharness/external_tools/gittool.py | 139
-rw-r--r--  testing/mozharness/external_tools/machine-configuration.json | 32
-rwxr-xr-x  testing/mozharness/external_tools/mouse_and_screen_resolution.py | 191
-rw-r--r--  testing/mozharness/external_tools/packagesymbols.py | 81
-rw-r--r--  testing/mozharness/external_tools/performance-artifact-schema.json | 233
-rw-r--r--  testing/mozharness/external_tools/robustcheckout.py | 832
-rwxr-xr-x  testing/mozharness/external_tools/tooltool.py | 1679
-rw-r--r--  testing/mozharness/mach_commands.py | 226
-rw-r--r--  testing/mozharness/moz.build | 8
-rw-r--r--  testing/mozharness/mozharness/__init__.py | 6
-rw-r--r--  testing/mozharness/mozharness/base/__init__.py | 0
-rw-r--r--  testing/mozharness/mozharness/base/config.py | 693
-rw-r--r--  testing/mozharness/mozharness/base/diskutils.py | 169
-rwxr-xr-x  testing/mozharness/mozharness/base/errors.py | 164
-rwxr-xr-x  testing/mozharness/mozharness/base/log.py | 783
-rwxr-xr-x  testing/mozharness/mozharness/base/parallel.py | 35
-rw-r--r--  testing/mozharness/mozharness/base/python.py | 1186
-rw-r--r--  testing/mozharness/mozharness/base/script.py | 2513
-rwxr-xr-x  testing/mozharness/mozharness/base/transfer.py | 41
-rw-r--r--  testing/mozharness/mozharness/base/vcs/__init__.py | 0
-rw-r--r--  testing/mozharness/mozharness/base/vcs/gittool.py | 107
-rwxr-xr-x  testing/mozharness/mozharness/base/vcs/mercurial.py | 478
-rwxr-xr-x  testing/mozharness/mozharness/base/vcs/vcsbase.py | 149
-rw-r--r--  testing/mozharness/mozharness/lib/__init__.py | 0
-rw-r--r--  testing/mozharness/mozharness/lib/python/__init__.py | 0
-rw-r--r--  testing/mozharness/mozharness/lib/python/authentication.py | 60
-rw-r--r--  testing/mozharness/mozharness/mozilla/__init__.py | 0
-rw-r--r--  testing/mozharness/mozharness/mozilla/automation.py | 82
-rw-r--r--  testing/mozharness/mozharness/mozilla/bouncer/__init__.py | 0
-rw-r--r--  testing/mozharness/mozharness/mozilla/bouncer/submitter.py | 134
-rw-r--r--  testing/mozharness/mozharness/mozilla/building/__init__.py | 0
-rwxr-xr-x  testing/mozharness/mozharness/mozilla/building/buildbase.py | 1522
-rw-r--r--  testing/mozharness/mozharness/mozilla/checksums.py | 41
-rw-r--r--  testing/mozharness/mozharness/mozilla/firefox/__init__.py | 0
-rw-r--r--  testing/mozharness/mozharness/mozilla/firefox/autoconfig.py | 72
-rw-r--r--  testing/mozharness/mozharness/mozilla/l10n/__init__.py | 0
-rwxr-xr-x  testing/mozharness/mozharness/mozilla/l10n/locales.py | 174
-rwxr-xr-x  testing/mozharness/mozharness/mozilla/l10n/multi_locale_build.py | 122
-rw-r--r--  testing/mozharness/mozharness/mozilla/merkle.py | 190
-rw-r--r--  testing/mozharness/mozharness/mozilla/mozbase.py | 32
-rw-r--r--  testing/mozharness/mozharness/mozilla/repo_manipulation.py | 222
-rw-r--r--  testing/mozharness/mozharness/mozilla/secrets.py | 82
-rw-r--r--  testing/mozharness/mozharness/mozilla/structuredlog.py | 309
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/__init__.py | 0
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/android.py | 723
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/codecoverage.py | 663
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/errors.py | 177
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/per_test_base.py | 541
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/raptor.py | 1415
-rwxr-xr-x  testing/mozharness/mozharness/mozilla/testing/talos.py | 896
-rwxr-xr-x  testing/mozharness/mozharness/mozilla/testing/testbase.py | 772
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/try_tools.py | 245
-rwxr-xr-x  testing/mozharness/mozharness/mozilla/testing/unittest.py | 255
-rw-r--r--  testing/mozharness/mozharness/mozilla/testing/verify_tools.py | 69
-rw-r--r--  testing/mozharness/mozharness/mozilla/tooltool.py | 86
-rw-r--r--  testing/mozharness/mozharness/mozilla/vcstools.py | 60
-rw-r--r--  testing/mozharness/requirements.txt | 26
-rw-r--r--  testing/mozharness/scripts/android_emulator_pgo.py | 331
-rw-r--r--  testing/mozharness/scripts/android_emulator_unittest.py | 550
-rw-r--r--  testing/mozharness/scripts/android_hardware_unittest.py | 477
-rw-r--r--  testing/mozharness/scripts/android_wrench.py | 283
-rw-r--r--  testing/mozharness/scripts/awsy_script.py | 322
-rwxr-xr-x  testing/mozharness/scripts/configtest.py | 160
-rwxr-xr-x  testing/mozharness/scripts/desktop_l10n.py | 481
-rwxr-xr-x  testing/mozharness/scripts/desktop_partner_repacks.py | 213
-rwxr-xr-x  testing/mozharness/scripts/desktop_unittest.py | 1316
-rwxr-xr-x  testing/mozharness/scripts/does_it_crash.py | 146
-rw-r--r--  testing/mozharness/scripts/firefox_ui_tests.py | 299
-rwxr-xr-x  testing/mozharness/scripts/fx_desktop_build.py | 101
-rwxr-xr-x  testing/mozharness/scripts/l10n_bumper.py | 380
-rwxr-xr-x  testing/mozharness/scripts/marionette.py | 455
-rwxr-xr-x  testing/mozharness/scripts/multil10n.py | 21
-rwxr-xr-x  testing/mozharness/scripts/openh264_build.py | 470
-rw-r--r--  testing/mozharness/scripts/raptor_script.py | 20
-rw-r--r--  testing/mozharness/scripts/release/bouncer_check.py | 202
-rw-r--r--  testing/mozharness/scripts/release/generate-checksums.py | 263
-rw-r--r--  testing/mozharness/scripts/release/update-verify-config-creator.py | 642
-rw-r--r--  testing/mozharness/scripts/repackage.py | 175
-rwxr-xr-x  testing/mozharness/scripts/talos_script.py | 21
-rwxr-xr-x  testing/mozharness/scripts/telemetry/telemetry_client.py | 277
-rwxr-xr-x  testing/mozharness/scripts/web_platform_tests.py | 656
-rw-r--r--  testing/mozharness/setup.cfg | 2
-rw-r--r--  testing/mozharness/setup.py | 44
-rw-r--r--  testing/mozharness/test/README | 2
-rw-r--r--  testing/mozharness/test/helper_files/.noserc | 2
-rw-r--r--  testing/mozharness/test/helper_files/archives/archive.tar | bin 0 -> 10240 bytes
-rw-r--r--  testing/mozharness/test/helper_files/archives/archive.tar.bz2 | bin 0 -> 256 bytes
-rw-r--r--  testing/mozharness/test/helper_files/archives/archive.tar.gz | bin 0 -> 260 bytes
-rw-r--r--  testing/mozharness/test/helper_files/archives/archive.zip | bin 0 -> 517 bytes
-rw-r--r--  testing/mozharness/test/helper_files/archives/archive_invalid_filename.zip | bin 0 -> 166 bytes
-rwxr-xr-x  testing/mozharness/test/helper_files/archives/reference/bin/script.sh | 3
-rw-r--r--  testing/mozharness/test/helper_files/archives/reference/lorem.txt | 1
-rwxr-xr-x  testing/mozharness/test/helper_files/create_archives.sh | 11
-rwxr-xr-x  testing/mozharness/test/helper_files/init_hgrepo.sh | 23
-rw-r--r--  testing/mozharness/test/helper_files/locales.json | 18
-rw-r--r--  testing/mozharness/test/helper_files/locales.txt | 4
-rw-r--r--  testing/mozharness/test/helper_files/mozconfig_manifest.json | 3
-rw-r--r--  testing/mozharness/test/hgrc | 9
-rw-r--r--  testing/mozharness/test/pip-freeze.example.txt | 19
-rw-r--r--  testing/mozharness/test/test_base_config.py | 376
-rw-r--r--  testing/mozharness/test/test_base_diskutils.py | 89
-rw-r--r--  testing/mozharness/test/test_base_log.py | 43
-rw-r--r--  testing/mozharness/test/test_base_parallel.py | 28
-rw-r--r--  testing/mozharness/test/test_base_python.py | 39
-rw-r--r--  testing/mozharness/test/test_base_script.py | 960
-rw-r--r--  testing/mozharness/test/test_base_vcs_mercurial.py | 396
-rw-r--r--  testing/mozharness/test/test_l10n_locales.py | 118
-rw-r--r--  testing/mozharness/test/test_mozilla_automation.py | 45
-rw-r--r--  testing/mozharness/test/test_mozilla_building_buildbase.py | 146
-rw-r--r--  testing/mozharness/test/test_mozilla_merkle.py | 134
-rw-r--r--  testing/mozharness/test/test_mozilla_structured.py | 68
-rw-r--r--  testing/mozharness/tox.ini | 26
-rwxr-xr-x  testing/mozharness/unit.sh | 85
362 files changed, 38442 insertions, 0 deletions
diff --git a/testing/mozharness/LICENSE b/testing/mozharness/LICENSE
new file mode 100644
index 0000000000..a612ad9813
--- /dev/null
+++ b/testing/mozharness/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/testing/mozharness/README.txt b/testing/mozharness/README.txt
new file mode 100644
index 0000000000..d2a2ce60a4
--- /dev/null
+++ b/testing/mozharness/README.txt
@@ -0,0 +1,32 @@
+# Mozharness
+
+## Docs
+* https://developer.mozilla.org/en-US/docs/Mozharness_FAQ
+* https://wiki.mozilla.org/ReleaseEngineering/Mozharness
+* http://moz-releng-mozharness.readthedocs.org/en/latest/mozharness.mozilla.html
+* http://moz-releng-docs.readthedocs.org/en/latest/software.html#mozharness
+
+## Submitting changes
+Like any Gecko change, please create a patch, or submit it to MozReview, and
+open a Bugzilla ticket under the Mozharness component:
+https://bugzilla.mozilla.org/enter_bug.cgi?product=Release%20Engineering&component=Mozharness
+
+This bug will then be triaged by Release Engineering.
+
+## Run unit tests
+To run the mozharness unit tests, the `tox` package needs to be installed:
+
+```
+pip install tox
+```
+
+There are various ways to run the unit tests. Just make sure you are within the `$gecko_repo/testing/mozharness` directory before running one of the commands below:
+
+```
+tox # run all unit tests
+tox -- -x # run all unit tests but stop after first failure
+tox -- test/test_base_log.py # only run the base log unit test
+```
+
+Happy contributing! =)
+
diff --git a/testing/mozharness/configs/android/android-x86_64-profile-generation.py b/testing/mozharness/configs/android/android-x86_64-profile-generation.py
new file mode 100644
index 0000000000..72a328051c
--- /dev/null
+++ b/testing/mozharness/configs/android/android-x86_64-profile-generation.py
@@ -0,0 +1,50 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# mozharness configuration for Android x86/x86_64 7.0 profile-generation runs
+#
+# This configuration should be combined with suite definitions and other
+# mozharness configuration from android_common.py, or similar.
+
+config = {
+ "emulator_avd_name": "mozemulator-x86_64",
+ "emulator_process_name": "qemu-system-x86_64",
+ "emulator_extra_args": [
+ "-gpu",
+ "on",
+ "-skip-adb-auth",
+ "-verbose",
+ "-show-kernel",
+ "-ranchu",
+ "-selinux",
+ "permissive",
+ "-memory",
+ "3072",
+ "-cores",
+ "4",
+ "-skin",
+ "800x1280",
+        # Build machines cannot use KVM (it requires a privileged Docker
+        # container), so we run profile generation with -no-accel, which
+        # disables hardware acceleration.
+ "-no-accel",
+ "-no-snapstorage",
+ "-no-snapshot",
+ # Disables first-run dialogs
+ "-prop",
+ "ro.test_harness=true",
+ ],
+ "exes": {
+ "adb": "%(abs_sdk_dir)s/platform-tools/adb",
+ },
+ "env": {
+ "DISPLAY": ":0.0",
+ "PATH": "%(PATH)s:%(abs_sdk_dir)s/emulator:%(abs_sdk_dir)s/tools:%(abs_sdk_dir)s/tools/bin:%(abs_sdk_dir)s/platform-tools",
+ # "LIBGL_DEBUG": "verbose"
+ },
+ "bogomips_minimum": 3000,
+ "android_version": 24,
+ "is_fennec": False,
+ "is_emulator": True,
+}
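The `emulator_avd_name` and `emulator_extra_args` values above are what the harness uses when it boots the AVD. A rough, hedged sketch of such a launch (not mozharness's actual code), assuming the Android SDK `emulator` binary is on `PATH`:

```python
# Rough sketch of launching the AVD configured above; assumes the Android SDK
# "emulator" binary is on PATH. Illustrative only, not mozharness's code.
import subprocess

config = {
    "emulator_avd_name": "mozemulator-x86_64",
    "emulator_extra_args": ["-no-accel", "-no-snapshot"],  # abbreviated list
}

cmd = ["emulator", "-avd", config["emulator_avd_name"]]
cmd += config["emulator_extra_args"]

# The emulator runs in the foreground; a harness would poll adb until the
# device reports boot completion before installing and running tests.
proc = subprocess.Popen(cmd)
```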
diff --git a/testing/mozharness/configs/android/android-x86_64.py b/testing/mozharness/configs/android/android-x86_64.py
new file mode 100644
index 0000000000..676c93ccfc
--- /dev/null
+++ b/testing/mozharness/configs/android/android-x86_64.py
@@ -0,0 +1,47 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# mozharness configuration for Android x86/x86_64 7.0 unit tests
+#
+# This configuration should be combined with suite definitions and other
+# mozharness configuration from android_common.py, or similar.
+
+config = {
+ "emulator_avd_name": "mozemulator-x86_64",
+ "emulator_process_name": "qemu-system-x86_64",
+ "emulator_extra_args": [
+ "-gpu",
+ "on",
+ "-skip-adb-auth",
+ "-verbose",
+ "-show-kernel",
+ "-ranchu",
+ "-selinux",
+ "permissive",
+ "-memory",
+ "3072",
+ "-cores",
+ "4",
+ "-skin",
+ "800x1280",
+ "-no-snapstorage",
+ "-no-snapshot",
+ # Skips first-run dialogs
+ "-prop",
+ "ro.test_harness=true",
+ ],
+ "exes": {
+ "adb": "%(abs_sdk_dir)s/platform-tools/adb",
+ },
+ "env": {
+ "DISPLAY": ":0.0",
+ "PATH": "%(PATH)s:%(abs_sdk_dir)s/emulator:%(abs_sdk_dir)s/tools:%(abs_sdk_dir)s/platform-tools",
+ # "LIBGL_DEBUG": "verbose"
+ },
+ "bogomips_minimum": 3000,
+ # in support of test-verify
+ "android_version": 24,
+ "is_fennec": False,
+ "is_emulator": True,
+}
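A note on the `%(abs_sdk_dir)s` and `%(PATH)s` placeholders in `exes` and `env`: these are old-style Python format specifiers that mozharness expands against runtime values. A minimal sketch of that expansion, where the SDK path is a hypothetical stand-in and only the mechanism is the point:

```python
# Minimal sketch of expanding the %-style placeholders used in "exes"/"env".
# The SDK path below is an assumed value, not mozharness's actual one.
import os

subst = {
    "abs_sdk_dir": "/builds/worker/android-sdk-linux",  # assumed value
    "PATH": os.environ.get("PATH", ""),
}

adb = "%(abs_sdk_dir)s/platform-tools/adb" % subst
path = "%(PATH)s:%(abs_sdk_dir)s/emulator:%(abs_sdk_dir)s/platform-tools" % subst

print(adb)   # .../android-sdk-linux/platform-tools/adb
print(path)  # the existing PATH with the SDK tool directories appended
```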
diff --git a/testing/mozharness/configs/android/android_common.py b/testing/mozharness/configs/android/android_common.py
new file mode 100644
index 0000000000..91711e08ed
--- /dev/null
+++ b/testing/mozharness/configs/android/android_common.py
@@ -0,0 +1,349 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Shared/common mozharness configuration for Android unit tests.
+#
+# This configuration should be combined with platform-specific mozharness
+# configuration such as android-x86_64.py, android_hw, or similar.
+
+import os
+
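+# MOZ_FETCHES_DIR is set in CI and points at the artifacts fetched for the
+# task; when it is present, the Node.js binary fetched there is exposed to
+# the test harnesses via the "nodejs_path" key below.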
+NODEJS_PATH = None
+if "MOZ_FETCHES_DIR" in os.environ:
+ NODEJS_PATH = os.path.join(os.environ["MOZ_FETCHES_DIR"], "node/bin/node")
+
+
+def WebglSuite(name):
+ return {
+ "run_filename": "runtestsremote.py",
+ "testsdir": "mochitest",
+ "options": [
+ "--app=%(app)s",
+ "--remote-webserver=%(remote_webserver)s",
+ "--xre-path=%(xre_path)s",
+ "--utility-path=%(utility_path)s",
+ "--http-port=%(http_port)s",
+ "--ssl-port=%(ssl_port)s",
+ "--certificate-path=%(certs_path)s",
+ "--symbols-path=%(symbols_path)s",
+ "--quiet",
+ "--log-raw=%(raw_log_file)s",
+ "--log-raw-level=%(log_raw_level)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-tbpl-level=%(log_tbpl_level)s",
+ "--screenshot-on-fail",
+ "--subsuite=" + name,
+ "--deviceSerial=%(device_serial)s",
+ ],
+ }
+
+
+config = {
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "start-emulator",
+ "verify-device",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": os.environ.get("TOOLTOOL_CACHE"),
+ "hostutils_manifest_path": "testing/config/tooltool-manifests/linux64/hostutils.manifest",
+ # "log_format": "%(levelname)8s - %(message)s",
+ "log_tbpl_level": "info",
+ "log_raw_level": "info",
+ # To take device screenshots at timed intervals (each time in seconds, relative
+ # to the start of the run-tests step) specify screenshot_times. For example, to
+ # take 4 screenshots at one minute intervals you could specify:
+ # "screenshot_times": [60, 120, 180, 240],
+ "nodejs_path": NODEJS_PATH,
+ "suite_definitions": {
+ "mochitest-plain": {
+ "run_filename": "runtestsremote.py",
+ "testsdir": "mochitest",
+ "options": [
+ "--app=%(app)s",
+ "--remote-webserver=%(remote_webserver)s",
+ "--xre-path=%(xre_path)s",
+ "--utility-path=%(utility_path)s",
+ "--http-port=%(http_port)s",
+ "--ssl-port=%(ssl_port)s",
+ "--certificate-path=%(certs_path)s",
+ "--symbols-path=%(symbols_path)s",
+ "--quiet",
+ "--log-raw=%(raw_log_file)s",
+ "--log-raw-level=%(log_raw_level)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-tbpl-level=%(log_tbpl_level)s",
+ "--extra-profile-file=fonts",
+ "--extra-profile-file=hyphenation",
+ "--screenshot-on-fail",
+ "--deviceSerial=%(device_serial)s",
+ ],
+ },
+ "mochitest-webgl1-core": WebglSuite("webgl1-core"),
+ "mochitest-webgl2-core": WebglSuite("webgl2-core"),
+ "mochitest-webgl1-ext": WebglSuite("webgl1-ext"),
+ "mochitest-webgl2-ext": WebglSuite("webgl2-ext"),
+ "mochitest-webgl2-deqp": WebglSuite("webgl2-deqp"),
+ "mochitest-webgpu": WebglSuite("webgpu"),
+ "mochitest-plain-gpu": {
+ "run_filename": "runtestsremote.py",
+ "testsdir": "mochitest",
+ "options": [
+ "--app=%(app)s",
+ "--remote-webserver=%(remote_webserver)s",
+ "--xre-path=%(xre_path)s",
+ "--utility-path=%(utility_path)s",
+ "--http-port=%(http_port)s",
+ "--ssl-port=%(ssl_port)s",
+ "--certificate-path=%(certs_path)s",
+ "--symbols-path=%(symbols_path)s",
+ "--quiet",
+ "--log-raw=%(raw_log_file)s",
+ "--log-raw-level=%(log_raw_level)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-tbpl-level=%(log_tbpl_level)s",
+ "--screenshot-on-fail",
+ "--subsuite=gpu",
+ "--deviceSerial=%(device_serial)s",
+ ],
+ },
+ "mochitest-media": {
+ "run_filename": "runtestsremote.py",
+ "testsdir": "mochitest",
+ "options": [
+ "--app=%(app)s",
+ "--remote-webserver=%(remote_webserver)s",
+ "--xre-path=%(xre_path)s",
+ "--utility-path=%(utility_path)s",
+ "--http-port=%(http_port)s",
+ "--ssl-port=%(ssl_port)s",
+ "--certificate-path=%(certs_path)s",
+ "--symbols-path=%(symbols_path)s",
+ "--quiet",
+ "--log-raw=%(raw_log_file)s",
+ "--log-raw-level=%(log_raw_level)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-tbpl-level=%(log_tbpl_level)s",
+ "--screenshot-on-fail",
+ "--chunk-by-runtime",
+ "--subsuite=media",
+ "--deviceSerial=%(device_serial)s",
+ ],
+ },
+ "reftest": {
+ "run_filename": "remotereftest.py",
+ "testsdir": "reftest",
+ "options": [
+ "--app=%(app)s",
+ "--ignore-window-size",
+ "--remote-webserver=%(remote_webserver)s",
+ "--xre-path=%(xre_path)s",
+ "--utility-path=%(utility_path)s",
+ "--http-port=%(http_port)s",
+ "--ssl-port=%(ssl_port)s",
+ "--httpd-path",
+ "%(modules_dir)s",
+ "--symbols-path=%(symbols_path)s",
+ "--extra-profile-file=fonts",
+ "--extra-profile-file=hyphenation",
+ "--suite=reftest",
+ "--log-raw=%(raw_log_file)s",
+ "--log-raw-level=%(log_raw_level)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-tbpl-level=%(log_tbpl_level)s",
+ "--deviceSerial=%(device_serial)s",
+ "--topsrcdir=tests",
+ ],
+ "tests": [
+ "tests/layout/reftests/reftest.list",
+ ],
+ },
+ "reftest-qr": {
+ "run_filename": "remotereftest.py",
+ "testsdir": "reftest",
+ "options": [
+ "--app=%(app)s",
+ "--ignore-window-size",
+ "--remote-webserver=%(remote_webserver)s",
+ "--xre-path=%(xre_path)s",
+ "--utility-path=%(utility_path)s",
+ "--http-port=%(http_port)s",
+ "--ssl-port=%(ssl_port)s",
+ "--httpd-path",
+ "%(modules_dir)s",
+ "--symbols-path=%(symbols_path)s",
+ "--extra-profile-file=fonts",
+ "--extra-profile-file=hyphenation",
+ "--suite=reftest",
+ "--log-raw=%(raw_log_file)s",
+ "--log-raw-level=%(log_raw_level)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-tbpl-level=%(log_tbpl_level)s",
+ "--deviceSerial=%(device_serial)s",
+ "--topsrcdir=tests",
+ ],
+ "tests": [
+ "tests/layout/reftests/reftest-qr.list",
+ ],
+ },
+ "crashtest": {
+ "run_filename": "remotereftest.py",
+ "testsdir": "reftest",
+ "options": [
+ "--app=%(app)s",
+ "--ignore-window-size",
+ "--remote-webserver=%(remote_webserver)s",
+ "--xre-path=%(xre_path)s",
+ "--utility-path=%(utility_path)s",
+ "--http-port=%(http_port)s",
+ "--ssl-port=%(ssl_port)s",
+ "--httpd-path",
+ "%(modules_dir)s",
+ "--symbols-path=%(symbols_path)s",
+ "--suite=crashtest",
+ "--log-raw=%(raw_log_file)s",
+ "--log-raw-level=%(log_raw_level)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-tbpl-level=%(log_tbpl_level)s",
+ "--deviceSerial=%(device_serial)s",
+ "--topsrcdir=tests",
+ ],
+ "tests": [
+ "tests/testing/crashtest/crashtests.list",
+ ],
+ },
+ "crashtest-qr": {
+ "run_filename": "remotereftest.py",
+ "testsdir": "reftest",
+ "options": [
+ "--app=%(app)s",
+ "--ignore-window-size",
+ "--remote-webserver=%(remote_webserver)s",
+ "--xre-path=%(xre_path)s",
+ "--utility-path=%(utility_path)s",
+ "--http-port=%(http_port)s",
+ "--ssl-port=%(ssl_port)s",
+ "--httpd-path",
+ "%(modules_dir)s",
+ "--symbols-path=%(symbols_path)s",
+ "--suite=crashtest",
+ "--log-raw=%(raw_log_file)s",
+ "--log-raw-level=%(log_raw_level)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-tbpl-level=%(log_tbpl_level)s",
+ "--deviceSerial=%(device_serial)s",
+ "--topsrcdir=tests",
+ ],
+ "tests": [
+ "tests/testing/crashtest/crashtests-qr.list",
+ ],
+ },
+ "jittest": {
+ "run_filename": "jit_test.py",
+ "testsdir": "jit-test/jit-test",
+ "options": [
+ "../../bin/js",
+ "--remote",
+ "-j",
+ "1",
+ "--localLib=../../bin",
+ "--no-slow",
+ "--no-progress",
+ "--format=automation",
+ "--jitflags=%(jittest_flags)s",
+ "--deviceSerial=%(device_serial)s",
+ ],
+ },
+ "jsreftest": {
+ "run_filename": "remotereftest.py",
+ "testsdir": "reftest",
+ "options": [
+ "--app=%(app)s",
+ "--ignore-window-size",
+ "--remote-webserver=%(remote_webserver)s",
+ "--xre-path=%(xre_path)s",
+ "--utility-path=%(utility_path)s",
+ "--http-port=%(http_port)s",
+ "--ssl-port=%(ssl_port)s",
+ "--httpd-path",
+ "%(modules_dir)s",
+ "--symbols-path=%(symbols_path)s",
+ "--extra-profile-file=jsreftest/tests/js/src/tests/user.js",
+ "--suite=jstestbrowser",
+ "--log-raw=%(raw_log_file)s",
+ "--log-raw-level=%(log_raw_level)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-tbpl-level=%(log_tbpl_level)s",
+ "--deviceSerial=%(device_serial)s",
+ "--topsrcdir=../jsreftest/tests",
+ ],
+ "tests": [
+ "../jsreftest/tests/js/src/tests/jstests.list",
+ ],
+ },
+ "xpcshell": {
+ "run_filename": "remotexpcshelltests.py",
+ "testsdir": "xpcshell",
+ "options": [
+ "--xre-path=%(xre_path)s",
+ "--testing-modules-dir=%(modules_dir)s",
+ "--apk=%(installer_path)s",
+ "--no-logfiles",
+ "--symbols-path=%(symbols_path)s",
+ "--manifest=tests/xpcshell.ini",
+ "--log-raw=%(raw_log_file)s",
+ "--log-raw-level=%(log_raw_level)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-tbpl-level=%(log_tbpl_level)s",
+ "--threads=4",
+ "--deviceSerial=%(device_serial)s",
+ "%(xpcshell_extra)s",
+ ],
+ },
+ "cppunittest": {
+ "run_filename": "remotecppunittests.py",
+ "testsdir": "cppunittest",
+ "install": False,
+ "options": [
+ "--symbols-path=%(symbols_path)s",
+ "--xre-path=%(xre_path)s",
+ "--localBinDir=../bin",
+ "--apk=%(installer_path)s",
+ ".",
+ "--deviceSerial=%(device_serial)s",
+ "--log-raw=%(raw_log_file)s",
+ "--log-raw-level=%(log_raw_level)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ ],
+ },
+ "geckoview-junit": {
+ "run_filename": "runjunit.py",
+ "testsdir": "mochitest",
+ "options": [
+ "--certificate-path=%(certs_path)s",
+ "--remote-webserver=%(remote_webserver)s",
+ "--symbols-path=%(symbols_path)s",
+ "--utility-path=%(utility_path)s",
+ "--deviceSerial=%(device_serial)s",
+ "--log-raw=%(raw_log_file)s",
+ "--log-raw-level=%(log_raw_level)s",
+ ],
+ },
+ "gtest": {
+ "run_filename": "remotegtests.py",
+ "testsdir": "gtest",
+ "install": True,
+ "options": [
+ "--symbols-path=%(symbols_path)s",
+ "--tests-path=%(gtest_dir)s",
+ "--libxul=%(gtest_dir)s/gtest_bin/gtest/libxul.so",
+ "--package=%(app)s",
+ "--deviceSerial=%(device_serial)s",
+ ],
+ },
+ }, # end suite_definitions
+}
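
Note: the option strings in these suite definitions use Python %-style named placeholders (e.g. %(symbols_path)s) that the harness substitutes at run time. A minimal sketch of that interpolation, with invented values for the keys:

    # Sketch only: the values below are invented examples, not the real
    # values the harness computes at run time.
    options = [
        "--app=%(app)s",
        "--symbols-path=%(symbols_path)s",
        "--deviceSerial=%(device_serial)s",
    ]
    values = {
        "app": "org.mozilla.geckoview.test_runner",
        "symbols_path": "/builds/worker/symbols",
        "device_serial": "emulator-5554",
    }
    resolved = [opt % values for opt in options]
    # ['--app=org.mozilla.geckoview.test_runner',
    #  '--symbols-path=/builds/worker/symbols',
    #  '--deviceSerial=emulator-5554']
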
diff --git a/testing/mozharness/configs/android/android_hw.py b/testing/mozharness/configs/android/android_hw.py
new file mode 100644
index 0000000000..f88568b43b
--- /dev/null
+++ b/testing/mozharness/configs/android/android_hw.py
@@ -0,0 +1,28 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# mozharness configuration for Android hardware unit tests
+#
+# This configuration should be combined with suite definitions and other
+# mozharness configuration from android_common.py, or similar.
+
+config = {
+ "exes": {},
+ "env": {
+ "DISPLAY": ":0.0",
+ "PATH": "%(PATH)s",
+ },
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "verify-device",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": "/builds/tooltool_cache",
+ # from android_common.py
+ "download_tooltool": True,
+ "xpcshell_extra": "--remoteTestRoot=/data/local/tmp/test_root",
+}
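
Note: as the header comment says, this file is meant to be combined with android_common.py. mozharness config files are plain Python modules that each define a `config` dict; when a script is given several config files, their dicts are merged in order and later files override earlier keys. A minimal sketch of that layering (the loader below is illustrative, not mozharness's actual implementation):

    # Illustrative config layering: later files win on key conflicts.
    def load_config(path):
        namespace = {}
        with open(path) as f:
            exec(f.read(), namespace)  # each config file defines `config`
        return namespace["config"]

    merged = {}
    for path in ("configs/android/android_common.py",
                 "configs/android/android_hw.py"):
        merged.update(load_config(path))
    # Keys set here, such as "xpcshell_extra", now override or extend
    # whatever android_common.py provided.
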
diff --git a/testing/mozharness/configs/android/android_pgo.py b/testing/mozharness/configs/android/android_pgo.py
new file mode 100644
index 0000000000..a118ea6740
--- /dev/null
+++ b/testing/mozharness/configs/android/android_pgo.py
@@ -0,0 +1,20 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Mozharness configuration for Android PGO.
+#
+# This configuration should be combined with platform-specific mozharness
+# configuration such as androidarm.py, or similar.
+
+config = {
+ "default_actions": [
+ "download",
+ "create-virtualenv",
+ "start-emulator",
+ "verify-device",
+ "install",
+ "run-tests",
+ ],
+ "output_directory": "/sdcard/pgo_profile",
+}
diff --git a/testing/mozharness/configs/android/wrench.py b/testing/mozharness/configs/android/wrench.py
new file mode 100644
index 0000000000..6df1190131
--- /dev/null
+++ b/testing/mozharness/configs/android/wrench.py
@@ -0,0 +1,41 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# mozharness configuration for Android x86 7.0 wrench tests
+#
+# This configuration should be combined with suite definitions and other
+# mozharness configuration from android_common.py, or similar.
+
+config = {
+ "emulator_avd_name": "mozemulator-x86_64",
+ "emulator_process_name": "qemu-system-x86_64",
+ "emulator_extra_args": [
+ "-gpu",
+ "on",
+ "-skip-adb-auth",
+ "-verbose",
+ "-show-kernel",
+ "-ranchu",
+ "-selinux",
+ "permissive",
+ "-memory",
+ "3072",
+ "-cores",
+ "4",
+ "-skin",
+ "800x1280",
+ "-no-snapstorage",
+ "-no-snapshot",
+ # Skips first-run dialogs
+ "-prop",
+ "ro.test_harness=true",
+ ],
+ "exes": {
+ "adb": "%(abs_sdk_dir)s/platform-tools/adb",
+ },
+ "env": {
+ "DISPLAY": ":0.0",
+ "PATH": "%(PATH)s:%(abs_sdk_dir)s/emulator:%(abs_sdk_dir)s/tools:%(abs_sdk_dir)s/platform-tools",
+ },
+}
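
Note: emulator_extra_args is a flat list of flag/value pairs appended to the emulator command line, and the "exes" and "env" values are %-interpolated against the harness's directory layout. A hedged sketch of how the invocation could be assembled (the emulator binary name and the -avd flag are assumptions, not taken from this config):

    import subprocess

    # Sketch: build the emulator command line from the `config` dict above.
    cmd = ["emulator", "-avd", config["emulator_avd_name"]]
    cmd += config["emulator_extra_args"]  # e.g. ["-gpu", "on", "-skip-adb-auth", ...]
    proc = subprocess.Popen(cmd)          # left running while tests execute
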
diff --git a/testing/mozharness/configs/awsy/linux_config.py b/testing/mozharness/configs/awsy/linux_config.py
new file mode 100644
index 0000000000..0786dfd286
--- /dev/null
+++ b/testing/mozharness/configs/awsy/linux_config.py
@@ -0,0 +1,30 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+PYTHON = "/usr/bin/env python"
+VENV_PATH = "%s/build/venv" % os.getcwd()
+ABS_WORK_DIR = os.path.join(os.getcwd(), "build")
+BINARY_PATH = os.path.join(ABS_WORK_DIR, "application", "firefox", "firefox-bin")
+INSTALLER_PATH = os.path.join(ABS_WORK_DIR, "installer.tar.bz2")
+
+config = {
+ "log_name": "awsy",
+ "binary_path": BINARY_PATH,
+ "installer_path": INSTALLER_PATH,
+ "virtualenv_path": VENV_PATH,
+ "cmd_timeout": 6500,
+ "exes": {},
+ "title": os.uname()[1].lower().split(".")[0],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": os.path.join(os.getcwd(), "tooltool_cache"),
+}
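
Note: the "title" value is the host's short name: os.uname()[1] returns the full host name, which is lowercased and truncated at the first dot. For example (host name invented):

    import os

    hostname = os.uname()[1]                # e.g. "awsy-linux-07.example.com"
    title = hostname.lower().split(".")[0]  # -> "awsy-linux-07"
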
diff --git a/testing/mozharness/configs/awsy/macosx_config.py b/testing/mozharness/configs/awsy/macosx_config.py
new file mode 100644
index 0000000000..f765d0e3f9
--- /dev/null
+++ b/testing/mozharness/configs/awsy/macosx_config.py
@@ -0,0 +1,28 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+PYTHON = "/usr/bin/env python"
+VENV_PATH = "%s/build/venv" % os.getcwd()
+ABS_WORK_DIR = os.path.join(os.getcwd(), "build")
+INSTALLER_PATH = os.path.join(ABS_WORK_DIR, "installer.dmg")
+
+config = {
+ "log_name": "awsy",
+ "installer_path": INSTALLER_PATH,
+ "virtualenv_path": VENV_PATH,
+ "cmd_timeout": 6500,
+ "exes": {},
+ "title": os.uname()[1].lower().split(".")[0],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": os.path.join(os.getcwd(), "tooltool_cache"),
+}
diff --git a/testing/mozharness/configs/awsy/taskcluster_windows_config.py b/testing/mozharness/configs/awsy/taskcluster_windows_config.py
new file mode 100644
index 0000000000..7d69594f67
--- /dev/null
+++ b/testing/mozharness/configs/awsy/taskcluster_windows_config.py
@@ -0,0 +1,30 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import sys
+
+import mozharness
+
+external_tools_path = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
+ "external_tools",
+)
+
+config = {
+ "virtualenv_path": "venv",
+ "exes": {
+ "python": sys.executable,
+ "hg": os.path.join(os.environ["PROGRAMFILES"], "Mercurial", "hg"),
+ },
+ "download_symbols": "ondemand",
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+}
diff --git a/testing/mozharness/configs/balrog/production.py b/testing/mozharness/configs/balrog/production.py
new file mode 100644
index 0000000000..fbbdf53e0e
--- /dev/null
+++ b/testing/mozharness/configs/balrog/production.py
@@ -0,0 +1,35 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "balrog_servers": [
+ {
+ "balrog_api_root": "https://aus4-admin.mozilla.org/api",
+ "ignore_failures": False,
+ "url_replacements": [
+ (
+ "http://archive.mozilla.org/pub",
+ "http://download.cdn.mozilla.net/pub",
+ ),
+ ],
+ "balrog_usernames": {
+ "firefox": "balrog-ffxbld",
+ "thunderbird": "balrog-tbirdbld",
+ "mobile": "balrog-ffxbld",
+ "Fennec": "balrog-ffxbld",
+ },
+ },
+ # Bug 1261346 - temporarily disable staging balrog submissions
+ # {
+ # 'balrog_api_root': 'https://aus4-admin-dev.allizom.org/api',
+ # 'ignore_failures': True,
+ # 'balrog_usernames': {
+ # 'firefox': 'stage-ffxbld',
+ # 'thunderbird': 'stage-tbirdbld',
+ # 'mobile': 'stage-ffxbld',
+ # 'Fennec': 'stage-ffxbld',
+ # }
+ # }
+ ]
+}
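
Note: each url_replacements entry is an (old, new) prefix pair used to rewrite archive.mozilla.org artifact URLs to their CDN equivalents before submission to Balrog. A minimal sketch of that substitution (the artifact URL is an invented example):

    def apply_replacements(url, replacements):
        # Apply each (old, new) pair in order via plain string replacement.
        for old, new in replacements:
            url = url.replace(old, new)
        return url

    url = "http://archive.mozilla.org/pub/firefox/nightly/example.tar.bz2"
    cdn_url = apply_replacements(url, [
        ("http://archive.mozilla.org/pub", "http://download.cdn.mozilla.net/pub"),
    ])
    # -> "http://download.cdn.mozilla.net/pub/firefox/nightly/example.tar.bz2"
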
diff --git a/testing/mozharness/configs/balrog/staging.py b/testing/mozharness/configs/balrog/staging.py
new file mode 100644
index 0000000000..7329a4d128
--- /dev/null
+++ b/testing/mozharness/configs/balrog/staging.py
@@ -0,0 +1,18 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "balrog_servers": [
+ {
+ "balrog_api_root": "https://balrog-admin.stage.mozaws.net/api",
+ "ignore_failures": False,
+ "balrog_usernames": {
+ "firefox": "balrog-stage-ffxbld",
+ "thunderbird": "balrog-stage-tbirdbld",
+ "mobile": "balrog-stage-ffxbld",
+ "Fennec": "balrog-stage-ffxbld",
+ },
+ }
+ ]
+}
diff --git a/testing/mozharness/configs/builds/build_pool_specifics.py b/testing/mozharness/configs/builds/build_pool_specifics.py
new file mode 100644
index 0000000000..837060b649
--- /dev/null
+++ b/testing/mozharness/configs/builds/build_pool_specifics.py
@@ -0,0 +1,14 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This is a dict of pool-specific keys/values. As this fills up and more
+# fx build factories are ported, we might deal with this differently.
+
+config = {
+ "taskcluster": {
+ "upload_env": {
+ "UPLOAD_PATH": "/builds/worker/artifacts",
+ },
+ },
+}
diff --git a/testing/mozharness/configs/builds/releng_base_android_64_builds.py b/testing/mozharness/configs/builds/releng_base_android_64_builds.py
new file mode 100644
index 0000000000..33ae870d02
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_base_android_64_builds.py
@@ -0,0 +1,58 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ #########################################################################
+ ######## ANDROID GENERIC CONFIG KEYS/VALUES
+ # note: overridden by MOZHARNESS_ACTIONS in TaskCluster tasks
+ "default_actions": [
+ "build",
+ "multi-l10n",
+ ],
+ "max_build_output_timeout": 0,
+ "secret_files": [
+ {
+ "filename": "/builds/gls-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/gls-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/sb-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/sb-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/mozilla-fennec-geoloc-api.key",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/mozilla-fennec-geoloc-api.key",
+ "min_scm_level": 2,
+ "default": "try-build-has-no-secrets",
+ },
+ ],
+ "vcs_share_base": "/builds/hg-shared",
+ "multi_locale": True,
+ #########################################################################
+ #########################################################################
+ "platform": "android",
+ "stage_platform": "android",
+ "enable_max_vsize": False,
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/worker/tooltool-cache",
+ "TOOLTOOL_HOME": "/builds",
+ "LC_ALL": "C",
+ "PATH": "/usr/local/bin:/bin:/usr/bin",
+ "SHIP_LICENSED_FONTS": "1",
+ },
+ "src_mozconfig": "mobile/android/config/mozconfigs/android/nightly",
+ # Bug 1583594: GeckoView doesn't (yet) produce a package file
+ # from which to extract package metrics.
+ "disable_package_metrics": True,
+ #########################################################################
+}
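
Note: each secret_files entry names a Taskcluster secret (with an %(scm-level)s placeholder), the file it should be written to, and a minimum SCM level; the "default" value suggests that builds below that level get a placeholder instead of the real secret. A hedged sketch of that resolution, with a hypothetical stand-in for the secrets-service call:

    def fetch_secret(name):
        # Hypothetical stand-in for the real Taskcluster secrets lookup.
        raise NotImplementedError(name)

    def resolve_secret(entry, scm_level):
        if scm_level < entry["min_scm_level"]:
            return entry.get("default")  # e.g. "try-build-has-no-secrets"
        return fetch_secret(entry["secret_name"] % {"scm-level": scm_level})
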
diff --git a/testing/mozharness/configs/builds/releng_base_firefox.py b/testing/mozharness/configs/builds/releng_base_firefox.py
new file mode 100644
index 0000000000..b634aff677
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_base_firefox.py
@@ -0,0 +1,7 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "app_name": "browser",
+}
diff --git a/testing/mozharness/configs/builds/releng_base_linux_32_builds.py b/testing/mozharness/configs/builds/releng_base_linux_32_builds.py
new file mode 100644
index 0000000000..03c333a5f9
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_base_linux_32_builds.py
@@ -0,0 +1,60 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ #########################################################################
+ ######## LINUX GENERIC CONFIG KEYS/VALUES
+ # If you are updating this with custom 32 bit keys/values, please add them
+ # below under the '32 bit specific' code block; otherwise, update in this
+ # code block and also make sure it is synced with
+ # releng_base_linux_64_builds.py
+ # note: overridden by MOZHARNESS_ACTIONS in TaskCluster tasks
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "secret_files": [
+ {
+ "filename": "/builds/gls-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/gls-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/sb-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/sb-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/mozilla-desktop-geoloc-api.key",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/mozilla-desktop-geoloc-api.key",
+ "min_scm_level": 2,
+ "default": "try-build-has-no-secrets",
+ },
+ ],
+ "vcs_share_base": "/builds/hg-shared",
+ #########################################################################
+ #########################################################################
+ ###### 32 bit specific ######
+ "platform": "linux",
+ "stage_platform": "linux",
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/worker/tooltool-cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ # 32 bit specific
+ "PATH": "/usr/local/bin:\
+/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ "mozconfig_platform": "linux32",
+ "mozconfig_variant": "nightly",
+ #########################################################################
+}
diff --git a/testing/mozharness/configs/builds/releng_base_linux_64_builds.py b/testing/mozharness/configs/builds/releng_base_linux_64_builds.py
new file mode 100644
index 0000000000..8904c9ca9e
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_base_linux_64_builds.py
@@ -0,0 +1,59 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ #########################################################################
+ ######## LINUX GENERIC CONFIG KEYS/VALUES
+ # If you are updating this with custom 64 bit keys/values, please add them
+ # below under the '64 bit specific' code block; otherwise, update in this
+ # code block and also make sure it is synced with
+ # releng_base_linux_32_builds.py
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "secret_files": [
+ {
+ "filename": "/builds/gls-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/gls-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/sb-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/sb-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/mozilla-desktop-geoloc-api.key",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/mozilla-desktop-geoloc-api.key",
+ "min_scm_level": 2,
+ "default": "try-build-has-no-secrets",
+ },
+ ],
+ "vcs_share_base": "/builds/hg-shared",
+ #########################################################################
+ #########################################################################
+ ###### 64 bit specific ######
+ "platform": "linux64",
+ "stage_platform": "linux64",
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/worker/tooltool-cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:" "/usr/bin:/usr/local/sbin:/usr/sbin:" "/sbin"
+ ##
+ },
+ "mozconfig_platform": "linux64",
+ "mozconfig_variant": "nightly",
+ #########################################################################
+}
diff --git a/testing/mozharness/configs/builds/releng_base_mac_64_cross_builds.py b/testing/mozharness/configs/builds/releng_base_mac_64_cross_builds.py
new file mode 100644
index 0000000000..fbbddf1c3d
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_base_mac_64_cross_builds.py
@@ -0,0 +1,55 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ #########################################################################
+ ######## MACOSX CROSS GENERIC CONFIG KEYS/VALUES
+ # note: overridden by MOZHARNESS_ACTIONS in TaskCluster tasks
+ "default_actions": [
+ "build",
+ ],
+ "secret_files": [
+ {
+ "filename": "/builds/gls-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/gls-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/sb-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/sb-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/mozilla-desktop-geoloc-api.key",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/mozilla-desktop-geoloc-api.key",
+ "min_scm_level": 2,
+ "default": "try-build-has-no-secrets",
+ },
+ ],
+ "enable_check_test": False,
+ "vcs_share_base": "/builds/hg-shared",
+ #########################################################################
+ #########################################################################
+ ###### 64 bit specific ######
+ "platform": "macosx64",
+ "stage_platform": "macosx64",
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/worker/tooltool-cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:" "/usr/bin:/usr/local/sbin:/usr/sbin:/sbin"
+ ##
+ },
+ "mozconfig_platform": "macosx64",
+ "mozconfig_variant": "nightly",
+ #########################################################################
+}
diff --git a/testing/mozharness/configs/builds/releng_base_windows_32_mingw_builds.py b/testing/mozharness/configs/builds/releng_base_windows_32_mingw_builds.py
new file mode 100644
index 0000000000..0c1ede6454
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_base_windows_32_mingw_builds.py
@@ -0,0 +1,53 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ #########################################################################
+ ######## WIN32 MINGW GENERIC CONFIG KEYS/VALUES
+ # If you are updating this with custom 32 bit keys/values, please add them
+ # below under the '32 bit specific' code block; otherwise, update in this
+ # code block and also make sure it is synced with
+ # releng_base_windows_64_mingw_builds.py
+ "secret_files": [
+ {
+ "filename": "/builds/gls-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/gls-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/sb-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/sb-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/mozilla-desktop-geoloc-api.key",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/mozilla-desktop-geoloc-api.key",
+ "min_scm_level": 2,
+ "default": "try-build-has-no-secrets",
+ },
+ ],
+ "vcs_share_base": "/builds/hg-shared",
+ #########################################################################
+ #########################################################################
+ ###### 32 bit specific ######
+ "platform": "win32-mingw32",
+ "stage_platform": "win32-mingw32",
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/worker/tooltool-cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ # 32 bit specific
+ "PATH": "/usr/local/bin:/bin:/usr/bin",
+ },
+ "mozconfig_platform": "win32",
+ #########################################################################
+}
diff --git a/testing/mozharness/configs/builds/releng_base_windows_64_mingw_builds.py b/testing/mozharness/configs/builds/releng_base_windows_64_mingw_builds.py
new file mode 100644
index 0000000000..3019dec3c9
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_base_windows_64_mingw_builds.py
@@ -0,0 +1,54 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ #########################################################################
+ ######## WIN64 MINGW GENERIC CONFIG KEYS/VALUES
+ # If you are updating this with custom 64 bit keys/values, please add them
+ # below under the '64 bit specific' code block; otherwise, update in this
+ # code block and also make sure it is synced with
+ # releng_base_windows_32_mingw_builds.py
+ "secret_files": [
+ {
+ "filename": "/builds/gls-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/gls-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/sb-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/sb-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/mozilla-desktop-geoloc-api.key",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/mozilla-desktop-geoloc-api.key",
+ "min_scm_level": 2,
+ "default": "try-build-has-no-secrets",
+ },
+ ],
+ "vcs_share_base": "/builds/hg-shared",
+ #########################################################################
+ #########################################################################
+ ###### 64 bit specific ######
+ "platform": "win64-mingw32",
+ "stage_platform": "win64-mingw32",
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/worker/tooltool-cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ # 64 bit specific
+ "PATH": "/usr/local/bin:/bin:/usr/bin",
+ },
+ "mozconfig_platform": "win64",
+ "mozconfig_variant": "mingw32",
+ #########################################################################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64.py
new file mode 100644
index 0000000000..77879cae1b
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-aarch64",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-aarch64/nightly",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_beta.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_beta.py
new file mode 100644
index 0000000000..e340422568
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_beta.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-aarch64",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-aarch64/beta",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_beta_debug.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_beta_debug.py
new file mode 100644
index 0000000000..42462096cc
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_beta_debug.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-aarch64-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-aarch64/debug-beta",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_debug.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_debug.py
new file mode 100644
index 0000000000..21ccc11cf8
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_debug.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-aarch64-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-aarch64/debug",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_debug_lite.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_debug_lite.py
new file mode 100644
index 0000000000..6e3471c4f7
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_debug_lite.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-aarch64-lite-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-aarch64/debug-lite",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_lite.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_lite.py
new file mode 100644
index 0000000000..5e10364f01
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_aarch64_lite.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-aarch64-lite",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-aarch64/nightly-lite",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm.py
new file mode 100644
index 0000000000..cbe75b6f0f
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-arm",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-arm/nightly",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_beta.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_beta.py
new file mode 100644
index 0000000000..d611be21f1
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_beta.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-arm",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-arm/beta",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_beta_debug.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_beta_debug.py
new file mode 100644
index 0000000000..7df4c699f1
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_beta_debug.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-arm-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-arm/debug-beta",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug.py
new file mode 100644
index 0000000000..f0651edbf8
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-arm-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-arm/debug",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug_ccov.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug_ccov.py
new file mode 100644
index 0000000000..c41aeebaa4
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug_ccov.py
@@ -0,0 +1,15 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-arm-debug-ccov",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-arm/debug-ccov",
+ "debug_build": True,
+ "postflight_build_mach_commands": [
+ [
+ "android",
+ "archive-coverage-artifacts",
+ ],
+ ],
+}
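
Note: postflight_build_mach_commands appears to be a list of argument lists handed to mach after the build finishes. A hedged sketch of that dispatch (running ./mach from the source checkout is an assumption):

    import subprocess

    for args in config["postflight_build_mach_commands"]:
        # e.g. ./mach android archive-coverage-artifacts
        subprocess.check_call(["./mach"] + args)
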
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug_lite.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug_lite.py
new file mode 100644
index 0000000000..b6541a4f8e
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug_lite.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-arm-lite-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-arm/debug-lite",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug_searchfox.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug_searchfox.py
new file mode 100644
index 0000000000..7549364fb9
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_debug_searchfox.py
@@ -0,0 +1,12 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-arm-debug",
+ "env": {
+ "SCCACHE_DISABLE": "1",
+ },
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-arm/debug-searchfox",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_gradle_dependencies.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_gradle_dependencies.py
new file mode 100644
index 0000000000..00983637a6
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_gradle_dependencies.py
@@ -0,0 +1,16 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-arm-gradle-dependencies",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-arm-gradle-dependencies/nightly",
+ # gradle-dependencies doesn't produce a package, so don't collect package metrics.
+ "postflight_build_mach_commands": [
+ [
+ "android",
+ "gradle-dependencies",
+ ],
+ ],
+ "max_build_output_timeout": 0,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_lite.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_lite.py
new file mode 100644
index 0000000000..65d9c3ba4d
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_lite.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-arm-lite",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-arm/nightly-lite",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_partner_sample1.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_partner_sample1.py
new file mode 100644
index 0000000000..6ddace34da
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_partner_sample1.py
@@ -0,0 +1,10 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-arm-partner-sample1",
+ "src_mozconfig": None, # use manifest to determine mozconfig src
+ "src_mozconfig_manifest": "partner/mozconfigs/mozconfig1.json",
+ "tooltool_manifest_src": "mobile/android/config/tooltool-manifests/android/releng.manifest",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_partner_sample1_lite.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_partner_sample1_lite.py
new file mode 100644
index 0000000000..535ebca2da
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_partner_sample1_lite.py
@@ -0,0 +1,10 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-arm-partner-sample1-lite",
+ "src_mozconfig": None, # use manifest to determine mozconfig src
+ "src_mozconfig_manifest": "partner/mozconfigs/mozconfig1.json",
+ "tooltool_manifest_src": "mobile/android/config/tooltool-manifests/android/releng.manifest",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_profile_generate_lite.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_profile_generate_lite.py
new file mode 100644
index 0000000000..04a85025cd
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_arm_profile_generate_lite.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-arm-lite",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-arm/profile-generate-lite",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_fuzzing_asan_tc.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_fuzzing_asan_tc.py
new file mode 100644
index 0000000000..1095c18c68
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_fuzzing_asan_tc.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86_64-asan-fuzzing",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86_64/nightly-fuzzing-asan",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_fuzzing_debug.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_fuzzing_debug.py
new file mode 100644
index 0000000000..dc98ba0244
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_fuzzing_debug.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86_64-fuzzing-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86_64/debug-fuzzing",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_geckoview_docs.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_geckoview_docs.py
new file mode 100644
index 0000000000..55022fa99b
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_geckoview_docs.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-geckoview-docs",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-arm/nightly-android-lints",
+ # geckoview-docs doesn't produce a package, so don't collect package metrics.
+ "disable_package_metrics": True,
+ "postflight_build_mach_commands": [
+ [
+ "android",
+ "geckoview-docs",
+ "--archive",
+ "--upload",
+ "mozilla/geckoview",
+ "--upload-branch",
+ "gh-pages",
+ "--javadoc-path",
+ "javadoc/{project}",
+ "--upload-message",
+ "Update {project} documentation to rev {revision}",
+ ],
+ ],
+ "max_build_output_timeout": 0,
+}
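
Note: the {project} and {revision} fields in the --upload-message argument above are str.format-style placeholders, presumably filled in by the geckoview-docs mach command before it commits to the gh-pages branch. Illustration (values invented):

    msg = "Update {project} documentation to rev {revision}"
    formatted = msg.format(project="geckoview", revision="0123abcd")
    # -> "Update geckoview documentation to rev 0123abcd"
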
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86.py
new file mode 100644
index 0000000000..01fda10b32
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86/nightly",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64.py
new file mode 100644
index 0000000000..839779f71f
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86_64",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86_64/nightly",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_beta.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_beta.py
new file mode 100644
index 0000000000..838c76ec70
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_beta.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86_64",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86_64/beta",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_beta_debug.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_beta_debug.py
new file mode 100644
index 0000000000..4a345506b5
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_beta_debug.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86_64-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86_64/debug-beta",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_debug.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_debug.py
new file mode 100644
index 0000000000..3cc64c7257
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_debug.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86_64-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86_64/debug",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_debug_isolated_process.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_debug_isolated_process.py
new file mode 100644
index 0000000000..a026eb71c0
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_debug_isolated_process.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86_64-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86_64/debug-isolated-process",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_debug_lite.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_debug_lite.py
new file mode 100644
index 0000000000..e3dd2a72f7
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_debug_lite.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86_64-lite-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86_64/debug-lite",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_lite.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_lite.py
new file mode 100644
index 0000000000..2764ed2a16
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_lite.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86_64-lite",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86_64/nightly-lite",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_profile_generate.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_profile_generate.py
new file mode 100644
index 0000000000..9358021723
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_64_profile_generate.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86_64",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86_64/profile-generate",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_beta.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_beta.py
new file mode 100644
index 0000000000..1bf5078885
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_beta.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86/beta",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_beta_debug.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_beta_debug.py
new file mode 100644
index 0000000000..50cb83c454
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_beta_debug.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86/debug-beta",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_debug.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_debug.py
new file mode 100644
index 0000000000..eaa4c09ed0
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_debug.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86/debug",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_debug_lite.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_debug_lite.py
new file mode 100644
index 0000000000..ce49ad8e1d
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_debug_lite.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86-lite-debug",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86/debug-lite",
+ "debug_build": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_lite.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_lite.py
new file mode 100644
index 0000000000..1dba75afe0
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_lite.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86-lite",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86/nightly-lite",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_profile_generate.py b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_profile_generate.py
new file mode 100644
index 0000000000..08ce1529c3
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_android_configs/64_x86_profile_generate.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "android-x86",
+ "src_mozconfig": "mobile/android/config/mozconfigs/android-x86/profile-generate",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/32_debug.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/32_debug.py
new file mode 100644
index 0000000000..ea0a6d17b0
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/32_debug.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "debug_build": True,
+ "stage_platform": "linux-debug",
+ #### 32 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ # 32 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:",
+ "LD_LIBRARY_PATH": "%(abs_obj_dir)s/dist/bin",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ "TINDERBOX_OUTPUT": "1",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/32_fuzzing_asan_tc.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/32_fuzzing_asan_tc.py
new file mode 100644
index 0000000000..6b288ccc0a
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/32_fuzzing_asan_tc.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux32-fuzzing-asan",
+ #### 32 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "MOZ_AUTOMATION": "1",
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "ASAN_OPTIONS": "detect_leaks=0",
+ ## 32 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/32_fuzzing_debug.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/32_fuzzing_debug.py
new file mode 100644
index 0000000000..41fe4cddcf
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/32_fuzzing_debug.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux-fuzzing-debug",
+ "debug_build": True,
+ #### 32 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ # 32 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ "LD_LIBRARY_PATH": "%(abs_obj_dir)s/dist/bin",
+ "TINDERBOX_OUTPUT": "1",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/32_rusttests.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/32_rusttests.py
new file mode 100644
index 0000000000..7aecddf896
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/32_rusttests.py
@@ -0,0 +1,28 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux-rusttests",
+ #### 32 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ # 32 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ "LD_LIBRARY_PATH": "%(abs_obj_dir)s/dist/bin",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ "TINDERBOX_OUTPUT": "1",
+ },
+ "app_name": "tools/rusttests",
+ "mozconfig_variant": "rusttests",
+ "disable_package_metrics": True,
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/32_rusttests_debug.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/32_rusttests_debug.py
new file mode 100644
index 0000000000..0f7537ee5d
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/32_rusttests_debug.py
@@ -0,0 +1,28 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "debug_build": True,
+ "stage_platform": "linux-rusttests-debug",
+ #### 32 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ # 32 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ "LD_LIBRARY_PATH": "%(abs_obj_dir)s/dist/bin",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ "TINDERBOX_OUTPUT": "1",
+ },
+ "app_name": "tools/rusttests",
+ "disable_package_metrics": True,
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_add-on-devel.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_add-on-devel.py
new file mode 100644
index 0000000000..ede8363c6a
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_add-on-devel.py
@@ -0,0 +1,24 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-add-on-devel",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/builds/worker/workspace/build/src/gcc/bin:/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan.py
new file mode 100644
index 0000000000..ac024b67bd
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan.py
@@ -0,0 +1,30 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "stage_platform": "linux64-asan",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ "mozconfig_variant": "nightly-asan",
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_and_debug.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_and_debug.py
new file mode 100644
index 0000000000..44d08e02af
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_and_debug.py
@@ -0,0 +1,31 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "stage_platform": "linux64-asan-debug",
+ "debug_build": True,
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ "mozconfig_variant": "debug-asan",
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_reporter_tc.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_reporter_tc.py
new file mode 100644
index 0000000000..9bc1010da0
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_reporter_tc.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-asan-reporter",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "MOZ_AUTOMATION": "1",
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "ASAN_OPTIONS": "detect_leaks=0",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_tc.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_tc.py
new file mode 100644
index 0000000000..d6751ccf4c
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_tc.py
@@ -0,0 +1,25 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-asan",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_tc_and_debug.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_tc_and_debug.py
new file mode 100644
index 0000000000..12ae638b3a
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_asan_tc_and_debug.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-asan-debug",
+ "debug_build": True,
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_code_coverage_debug.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_code_coverage_debug.py
new file mode 100644
index 0000000000..f82294e646
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_code_coverage_debug.py
@@ -0,0 +1,25 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-ccov",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_code_coverage_opt.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_code_coverage_opt.py
new file mode 100644
index 0000000000..f82294e646
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_code_coverage_opt.py
@@ -0,0 +1,25 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-ccov",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_debug.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_debug.py
new file mode 100644
index 0000000000..e8ad7d1cc1
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_debug.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-debug",
+ "debug_build": True,
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ # 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ "LD_LIBRARY_PATH": "%(abs_obj_dir)s/dist/bin",
+ "TINDERBOX_OUTPUT": "1",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_fuzzing_asan_tc.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_fuzzing_asan_tc.py
new file mode 100644
index 0000000000..025e0843c2
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_fuzzing_asan_tc.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-fuzzing-asan",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "MOZ_AUTOMATION": "1",
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "ASAN_OPTIONS": "detect_leaks=0",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_fuzzing_debug.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_fuzzing_debug.py
new file mode 100644
index 0000000000..101b09d635
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_fuzzing_debug.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-fuzzing-debug",
+ "debug_build": True,
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "MOZ_AUTOMATION": "1",
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ # 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ "LD_LIBRARY_PATH": "%(abs_obj_dir)s/dist/bin",
+ "TINDERBOX_OUTPUT": "1",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_fuzzing_tsan_tc.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_fuzzing_tsan_tc.py
new file mode 100644
index 0000000000..5e5b880740
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_fuzzing_tsan_tc.py
@@ -0,0 +1,25 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-fuzzing-tsan",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_noopt_debug.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_noopt_debug.py
new file mode 100644
index 0000000000..518ebed408
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_noopt_debug.py
@@ -0,0 +1,24 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-noopt-debug",
+ "debug_build": True,
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ # 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ "LD_LIBRARY_PATH": "%(abs_obj_dir)s/dist/bin",
+ "TINDERBOX_OUTPUT": "1",
+ },
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_rusttests.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_rusttests.py
new file mode 100644
index 0000000000..3d9eef33ea
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_rusttests.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-rusttests",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": ":/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ "app_name": "tools/rusttests",
+ "disable_package_metrics": True,
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_rusttests_debug.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_rusttests_debug.py
new file mode 100644
index 0000000000..b9b1f257d3
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_rusttests_debug.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-rusttests-debug",
+ "debug_build": True,
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ # 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ "LD_LIBRARY_PATH": "%(abs_obj_dir)s/dist/bin",
+ "TINDERBOX_OUTPUT": "1",
+ },
+ "app_name": "tools/rusttests",
+ "disable_package_metrics": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_searchfox_and_debug.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_searchfox_and_debug.py
new file mode 100644
index 0000000000..b962166517
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_searchfox_and_debug.py
@@ -0,0 +1,40 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ # note: overridden by MOZHARNESS_ACTIONS in TaskCluster tasks
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "vcs_share_base": "/builds/hg-shared",
+ #########################################################################
+ #########################################################################
+ ###### 64 bit specific ######
+ "platform": "linux64",
+ "stage_platform": "linux64-searchfox-opt",
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/worker/tooltool-cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+        # Disable sccache: files whose compilation sccache serves from its
+        # cache are never recompiled, so the indexer would never see them
+ "SCCACHE_DISABLE": "1",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ ##
+ },
+ # This doesn't actually inherit from anything.
+ "mozconfig_platform": "linux64",
+ "mozconfig_variant": "debug-searchfox-clang",
+}
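
Note: the "overridden by MOZHARNESS_ACTIONS" comment above refers to the
harness taking its action list from that environment variable when it is set,
instead of from `default_actions`. Roughly (a sketch, not the actual mozharness
code):

    import os

    default_actions = ["clobber", "build"]  # as in the config above
    raw = os.environ.get("MOZHARNESS_ACTIONS", "")
    actions = raw.split() if raw else default_actions
    print(actions)  # e.g. ["build"] when MOZHARNESS_ACTIONS="build"
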
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_source.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_source.py
new file mode 100644
index 0000000000..d728d917a2
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_source.py
@@ -0,0 +1,15 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "default_actions": ["package-source"],
+ "stage_platform": "source", # Not used, but required by the script
+ "env": {
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "TINDERBOX_OUTPUT": "1",
+ "LC_ALL": "C",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ },
+ "src_mozconfig": "browser/config/mozconfigs/linux64/source",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_stat_and_debug.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_stat_and_debug.py
new file mode 100644
index 0000000000..c9a43b1642
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_stat_and_debug.py
@@ -0,0 +1,37 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ # note: overridden by MOZHARNESS_ACTIONS in TaskCluster tasks
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "vcs_share_base": "/builds/hg-shared",
+ #########################################################################
+ #########################################################################
+ ###### 64 bit specific ######
+ "platform": "linux64",
+ "stage_platform": "linux64-st-an-opt",
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/worker/tooltool-cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ ##
+ },
+ # This doesn't actually inherit from anything.
+ "mozconfig_platform": "linux64",
+ "mozconfig_variant": "debug-static-analysis-clang",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_tsan_tc.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_tsan_tc.py
new file mode 100644
index 0000000000..6a749c8dd5
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_tsan_tc.py
@@ -0,0 +1,25 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "linux64-tsan",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_linux_configs/64_valgrind.py b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_valgrind.py
new file mode 100644
index 0000000000..60ad7a4ce6
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_linux_configs/64_valgrind.py
@@ -0,0 +1,30 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "default_actions": [
+ "clobber",
+ "build",
+ "valgrind-test",
+ ],
+ "stage_platform": "linux64-valgrind",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+ "mozconfig_variant": "valgrind",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_mac_configs/64_add-on-devel.py b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_add-on-devel.py
new file mode 100644
index 0000000000..4255adb500
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_add-on-devel.py
@@ -0,0 +1,25 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "macosx64-add-on-devel",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/tools/python/bin:/opt/local/bin:/usr/bin:"
+ "/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin",
+ ##
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_mac_configs/64_code_coverage_debug.py b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_code_coverage_debug.py
new file mode 100644
index 0000000000..a94cbb75f7
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_code_coverage_debug.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "macosx64-ccov-debug",
+ "debug_build": True,
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/tools/python/bin:/opt/local/bin:/usr/bin:"
+ "/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin",
+ ##
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_mac_configs/64_code_coverage_opt.py b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_code_coverage_opt.py
new file mode 100644
index 0000000000..6a5a1cb15a
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_code_coverage_opt.py
@@ -0,0 +1,25 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "macosx64-ccov-opt",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ ## 64 bit specific
+ "PATH": "/tools/python/bin:/opt/local/bin:/usr/bin:"
+ "/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin",
+ ##
+ },
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_debug.py b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_debug.py
new file mode 100644
index 0000000000..82283ba2fb
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_debug.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "macosx64-debug",
+ "debug_build": True,
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ ## 64 bit specific
+ "PATH": "/tools/python/bin:/opt/local/bin:/usr/bin:"
+ "/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin",
+ ##
+ },
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_debug_searchfox.py b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_debug_searchfox.py
new file mode 100644
index 0000000000..f73b59db53
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_debug_searchfox.py
@@ -0,0 +1,33 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "stage_platform": "macosx64-searchfox-debug",
+ "debug_build": True,
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+        # Disable sccache: files whose compilation sccache serves from its
+        # cache are never recompiled, so the indexer would never see them
+ "SCCACHE_DISABLE": "1",
+ # 64 bit specific
+ "PATH": "/tools/python/bin:/opt/local/bin:/usr/bin:"
+ "/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin",
+ },
+ "mozconfig_variant": "debug-searchfox",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_fuzzing_asan.py b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_fuzzing_asan.py
new file mode 100644
index 0000000000..8f665762f2
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_fuzzing_asan.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "macosx64-fuzzing-asan",
+ "publish_nightly_en_US_routes": False,
+ "platform_supports_post_upload_to_latest": False,
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "DISPLAY": ":2",
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "ASAN_OPTIONS": "detect_leaks=0",
+ ## 64 bit specific
+ "PATH": "/usr/local/bin:/bin:\
+/usr/bin:/usr/local/sbin:/usr/sbin:/sbin",
+ },
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_fuzzing_debug.py b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_fuzzing_debug.py
new file mode 100644
index 0000000000..f88bac7001
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_fuzzing_debug.py
@@ -0,0 +1,28 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "macosx64-fuzzing-debug",
+ "debug_build": True,
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ ## 64 bit specific
+ "PATH": "/tools/python/bin:/opt/local/bin:/usr/bin:"
+ "/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin",
+ ##
+ },
+ #######################
+ "artifact_flag_build_variant_in_try": None,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_noopt_debug.py b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_noopt_debug.py
new file mode 100644
index 0000000000..e3cd50f218
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_cross_noopt_debug.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "stage_platform": "macosx64-noopt-debug",
+ "debug_build": True,
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ ## 64 bit specific
+ "PATH": "/tools/python/bin:/opt/local/bin:/usr/bin:"
+ "/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin",
+ ##
+ },
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_mac_configs/64_debug.py b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_debug.py
new file mode 100644
index 0000000000..12e7b84588
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_debug.py
@@ -0,0 +1,32 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "stage_platform": "macosx64-debug",
+ "debug_build": True,
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ ## 64 bit specific
+ "PATH": "/tools/python/bin:/opt/local/bin:/usr/bin:"
+ "/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin",
+ ##
+ },
+ "mozconfig_variant": "debug",
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_mac_configs/64_stat_and_debug.py b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_stat_and_debug.py
new file mode 100644
index 0000000000..72049b6ce1
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_mac_configs/64_stat_and_debug.py
@@ -0,0 +1,33 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "debug_build": True,
+ "stage_platform": "macosx64-st-an-debug",
+ "tooltool_manifest_src": "browser/config/tooltool-manifests/macosx64/\
+clang.manifest",
+ #### 64 bit build specific #####
+ "env": {
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "HG_SHARE_BASE_DIR": "/builds/hg-shared",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/builds",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "LC_ALL": "C",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ # 64 bit specific
+ "PATH": "/tools/python/bin:/opt/local/bin:/usr/bin:"
+ "/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin",
+ },
+ "mozconfig_variant": "debug-static-analysis",
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_mac_configs/rusttests.py b/testing/mozharness/configs/builds/releng_sub_mac_configs/rusttests.py
new file mode 100644
index 0000000000..b3cabc856c
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_mac_configs/rusttests.py
@@ -0,0 +1,12 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "default_actions": [
+ "build",
+ ],
+ "stage_platform": "macosx64-rusttests",
+ "app_name": "tools/rusttests",
+ "disable_package_metrics": True,
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_windows_configs/32_add-on-devel.py b/testing/mozharness/configs/builds/releng_sub_windows_configs/32_add-on-devel.py
new file mode 100644
index 0000000000..c617eea6e8
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_windows_configs/32_add-on-devel.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "stage_platform": "win32-add-on-devel",
+ #### 32 bit build specific #####
+ "env": {
+ "HG_SHARE_BASE_DIR": "C:/builds/hg-shared",
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "PATH": "C:/mozilla-build/python27;%s" % (os.environ.get("path")),
+ "TINDERBOX_OUTPUT": "1",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ "TOOLTOOL_CACHE": "c:/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/c/builds",
+ },
+ "mozconfig_variant": "add-on-devel",
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_windows_configs/32_debug.py b/testing/mozharness/configs/builds/releng_sub_windows_configs/32_debug.py
new file mode 100644
index 0000000000..49230527a5
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_windows_configs/32_debug.py
@@ -0,0 +1,28 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "stage_platform": "win32-debug",
+ "debug_build": True,
+ #### 32 bit build specific #####
+ "env": {
+ "HG_SHARE_BASE_DIR": "C:/builds/hg-shared",
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "PATH": "C:/mozilla-build/python27;%s" % (os.environ.get("path")),
+ "TINDERBOX_OUTPUT": "1",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ "TOOLTOOL_CACHE": "c:/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/c/builds",
+ },
+ "mozconfig_variant": "debug",
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_windows_configs/32_mingwclang.py b/testing/mozharness/configs/builds/releng_sub_windows_configs/32_mingwclang.py
new file mode 100644
index 0000000000..d201b89eb1
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_windows_configs/32_mingwclang.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "platform": "win32-mingwclang",
+ "stage_platform": "win32-mingwclang",
+ "mozconfig_platform": "win32",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_windows_configs/32_stat_and_debug.py b/testing/mozharness/configs/builds/releng_sub_windows_configs/32_stat_and_debug.py
new file mode 100644
index 0000000000..803f2f67ca
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_windows_configs/32_stat_and_debug.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "stage_platform": "win32-st-an-debug",
+ "debug_build": True,
+ #### 32 bit build specific #####
+ "env": {
+ "HG_SHARE_BASE_DIR": "C:/builds/hg-shared",
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "PATH": "C:/mozilla-build/python27;%s" % (os.environ.get("path")),
+ "TINDERBOX_OUTPUT": "1",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ "TOOLTOOL_CACHE": "c:/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/c/builds",
+ },
+ "mozconfig_variant": "debug-static-analysis",
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_windows_configs/64_add-on-devel.py b/testing/mozharness/configs/builds/releng_sub_windows_configs/64_add-on-devel.py
new file mode 100644
index 0000000000..71545b072c
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_windows_configs/64_add-on-devel.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "stage_platform": "win64-add-on-devel",
+ #### 64 bit build specific #####
+ "env": {
+ "HG_SHARE_BASE_DIR": "C:/builds/hg-shared",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "PATH": "C:/mozilla-build/python27;%s" % (os.environ.get("path")),
+ "TINDERBOX_OUTPUT": "1",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ "TOOLTOOL_CACHE": "c:/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/c/builds",
+ },
+ "mozconfig_variant": "add-on-devel",
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_windows_configs/64_debug.py b/testing/mozharness/configs/builds/releng_sub_windows_configs/64_debug.py
new file mode 100644
index 0000000000..c281f2379d
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_windows_configs/64_debug.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "default_actions": [
+ "clobber",
+ "build",
+ ],
+ "stage_platform": "win64-debug",
+ "debug_build": True,
+ #### 64 bit build specific #####
+ "env": {
+ "HG_SHARE_BASE_DIR": "C:/builds/hg-shared",
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "PATH": "C:/mozilla-build/python27;%s" % (os.environ.get("path")),
+ "TINDERBOX_OUTPUT": "1",
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ "TOOLTOOL_CACHE": "c:/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/c/builds",
+ },
+ "mozconfig_variant": "debug",
+ #######################
+}
diff --git a/testing/mozharness/configs/builds/releng_sub_windows_configs/64_mingwclang.py b/testing/mozharness/configs/builds/releng_sub_windows_configs/64_mingwclang.py
new file mode 100644
index 0000000000..7f41cdb793
--- /dev/null
+++ b/testing/mozharness/configs/builds/releng_sub_windows_configs/64_mingwclang.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "platform": "win64-mingwclang",
+ "stage_platform": "win64-mingwclang",
+ "mozconfig_platform": "win64",
+}
diff --git a/testing/mozharness/configs/builds/taskcluster_base_macosx.py b/testing/mozharness/configs/builds/taskcluster_base_macosx.py
new file mode 100644
index 0000000000..7831ad17f4
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_base_macosx.py
@@ -0,0 +1,44 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "default_actions": [
+ "get-secrets",
+ "build",
+ ],
+ "vcs_share_base": os.path.join(os.getcwd(), "checkouts", "hg-shared"),
+ "max_build_output_timeout": 60 * 80,
+ "env": {
+ "HG_SHARE_BASE_DIR": os.path.join(os.getcwd(), "checkouts", "hg-shared"),
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "WORKSPACE": "%(base_work_dir)s",
+ "PATH": "/usr/local/bin:/bin:/sbin:/usr/bin:/usr/sbin",
+ },
+ "upload_env": {
+ "UPLOAD_PATH": os.path.join(os.getcwd(), "public", "build"),
+ },
+ "secret_files": [
+ {
+ "filename": "gls-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/gls-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "sb-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/sb-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "mozilla-desktop-geoloc-api.key",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/mozilla-desktop-geoloc-api.key",
+ "min_scm_level": 2,
+ "default": "try-build-has-no-secrets",
+ },
+ ],
+}
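
Note: in the `secret_files` entries, "%(scm-level)s" is interpolated with the
task's SCM level, and `min_scm_level` gates whether the secret is fetched at
all; entries with a "default" fall back to that value on low-level (e.g. try)
builds. A sketch of the gating logic implied by those keys, with the fetch
itself stubbed out (the real code goes through the Taskcluster secrets
service):

    secret_files = [
        {"filename": "sb-gapi.data",
         "secret_name": "project/releng/gecko/build/level-%(scm-level)s/sb-gapi.data",
         "min_scm_level": 1},
        {"filename": "mozilla-desktop-geoloc-api.key",
         "secret_name": "project/releng/gecko/build/level-%(scm-level)s/mozilla-desktop-geoloc-api.key",
         "min_scm_level": 2,
         "default": "try-build-has-no-secrets"},
    ]

    scm_level = 1  # supplied by the CI environment; 1 is typical for try
    for secret in secret_files:
        if scm_level >= secret["min_scm_level"]:
            print("fetch", secret["secret_name"] % {"scm-level": scm_level})
        else:
            print("default for", secret["filename"], "->", secret.get("default"))
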
diff --git a/testing/mozharness/configs/builds/taskcluster_base_win32.py b/testing/mozharness/configs/builds/taskcluster_base_win32.py
new file mode 100644
index 0000000000..c18d21c96c
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_base_win32.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "platform": "win32",
+ "mozconfig_platform": "win32",
+}
diff --git a/testing/mozharness/configs/builds/taskcluster_base_win64.py b/testing/mozharness/configs/builds/taskcluster_base_win64.py
new file mode 100644
index 0000000000..013bc11b62
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_base_win64.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "platform": "win64",
+ "mozconfig_platform": "win64",
+}
diff --git a/testing/mozharness/configs/builds/taskcluster_base_windows.py b/testing/mozharness/configs/builds/taskcluster_base_windows.py
new file mode 100644
index 0000000000..8e5a5bc650
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_base_windows.py
@@ -0,0 +1,46 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "default_actions": [
+ "get-secrets",
+ "build",
+ ],
+ "vcs_share_base": os.path.join("y:", os.sep, "hg-shared"),
+ "max_build_output_timeout": 60 * 80,
+ "env": {
+ "HG_SHARE_BASE_DIR": os.path.join("y:", os.sep, "hg-shared"),
+ "MOZBUILD_STATE_PATH": os.path.join(os.getcwd(), ".mozbuild"),
+ "MOZ_CRASHREPORTER_NO_REPORT": "1",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "TINDERBOX_OUTPUT": "1",
+ "TOOLTOOL_CACHE": "c:/builds/tooltool_cache",
+ "TOOLTOOL_HOME": "/c/builds",
+ "MSYSTEM": "MINGW32",
+ "WORKSPACE": "%(base_work_dir)s",
+ },
+ "upload_env": {
+ "UPLOAD_PATH": os.path.join(os.getcwd(), "public", "build"),
+ },
+ "secret_files": [
+ {
+ "filename": "gls-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/gls-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "sb-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/sb-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "mozilla-desktop-geoloc-api.key",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/mozilla-desktop-geoloc-api.key",
+ "min_scm_level": 2,
+ "default": "try-build-has-no-secrets",
+ },
+ ],
+}
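
Note: the small per-variant files that follow (taskcluster_sub_win32/debug.py
and friends) only make sense layered on top of a base config like the one
above: the harness reads each --config-file in order and folds it into a single
dict, so a variant can add e.g. XPCOM_DEBUG_BREAK without restating the whole
environment. A rough sketch of that layering; the env merge shown here is a
simplification of what mozharness actually does:

    def layer(*cfgs):
        merged = {}
        for cfg in cfgs:
            for key, val in cfg.items():
                if key == "env" and isinstance(merged.get("env"), dict):
                    merged["env"] = {**merged["env"], **val}  # merge env key-by-key
                else:
                    merged[key] = val  # later config wins
        return merged

    base = {"platform": "win32", "env": {"TINDERBOX_OUTPUT": "1"}}
    sub = {"stage_platform": "win32-debug", "debug_build": True,
           "env": {"XPCOM_DEBUG_BREAK": "stack-and-abort"}}
    print(layer(base, sub))
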
diff --git a/testing/mozharness/configs/builds/taskcluster_sub_win32/debug.py b/testing/mozharness/configs/builds/taskcluster_sub_win32/debug.py
new file mode 100644
index 0000000000..dc8977dfc0
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_sub_win32/debug.py
@@ -0,0 +1,11 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "win32-debug",
+ "debug_build": True,
+ "env": {
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ },
+}
diff --git a/testing/mozharness/configs/builds/taskcluster_sub_win32/noopt_debug.py b/testing/mozharness/configs/builds/taskcluster_sub_win32/noopt_debug.py
new file mode 100644
index 0000000000..6189060639
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_sub_win32/noopt_debug.py
@@ -0,0 +1,11 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "win32-noopt-debug",
+ "debug_build": True,
+ "env": {
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ },
+}
diff --git a/testing/mozharness/configs/builds/taskcluster_sub_win64/asan_debug.py b/testing/mozharness/configs/builds/taskcluster_sub_win64/asan_debug.py
new file mode 100644
index 0000000000..da63c2724d
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_sub_win64/asan_debug.py
@@ -0,0 +1,11 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "win64-asan-debug",
+ "debug_build": True,
+ "env": {
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ },
+}
diff --git a/testing/mozharness/configs/builds/taskcluster_sub_win64/asan_reporter_opt.py b/testing/mozharness/configs/builds/taskcluster_sub_win64/asan_reporter_opt.py
new file mode 100644
index 0000000000..af47501c73
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_sub_win64/asan_reporter_opt.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "win64-asan-reporter",
+ "mozconfig_variant": "nightly-asan-reporter",
+}
diff --git a/testing/mozharness/configs/builds/taskcluster_sub_win64/ccov_opt.py b/testing/mozharness/configs/builds/taskcluster_sub_win64/ccov_opt.py
new file mode 100644
index 0000000000..c29a2d88bc
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_sub_win64/ccov_opt.py
@@ -0,0 +1,10 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "win64-ccov",
+ "env": {
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ },
+}
diff --git a/testing/mozharness/configs/builds/taskcluster_sub_win64/debug.py b/testing/mozharness/configs/builds/taskcluster_sub_win64/debug.py
new file mode 100644
index 0000000000..d40e77d7ae
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_sub_win64/debug.py
@@ -0,0 +1,11 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "win64-debug",
+ "debug_build": True,
+ "env": {
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ },
+}
diff --git a/testing/mozharness/configs/builds/taskcluster_sub_win64/noopt_debug.py b/testing/mozharness/configs/builds/taskcluster_sub_win64/noopt_debug.py
new file mode 100644
index 0000000000..1dd204a3e4
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_sub_win64/noopt_debug.py
@@ -0,0 +1,11 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "win64-noopt-debug",
+ "debug_build": True,
+ "env": {
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ },
+}
diff --git a/testing/mozharness/configs/builds/taskcluster_sub_win64/plain_opt.py b/testing/mozharness/configs/builds/taskcluster_sub_win64/plain_opt.py
new file mode 100644
index 0000000000..e567c8f123
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_sub_win64/plain_opt.py
@@ -0,0 +1,12 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "default_actions": [
+ "build",
+ ],
+ "disable_package_metrics": True,
+ "mozconfig_variant": "plain-opt",
+ "stage_platform": "win64",
+}
diff --git a/testing/mozharness/configs/builds/taskcluster_sub_win64/rusttests_opt.py b/testing/mozharness/configs/builds/taskcluster_sub_win64/rusttests_opt.py
new file mode 100644
index 0000000000..95ba433c29
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_sub_win64/rusttests_opt.py
@@ -0,0 +1,15 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "default_actions": [
+ "build",
+ ],
+ "stage_platform": "win64-rusttests",
+ "env": {
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+ },
+ "app_name": "tools/rusttests",
+ "disable_package_metrics": True,
+}
diff --git a/testing/mozharness/configs/builds/taskcluster_sub_win64/searchfox_debug.py b/testing/mozharness/configs/builds/taskcluster_sub_win64/searchfox_debug.py
new file mode 100644
index 0000000000..afcd3a696f
--- /dev/null
+++ b/testing/mozharness/configs/builds/taskcluster_sub_win64/searchfox_debug.py
@@ -0,0 +1,15 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "stage_platform": "win64-st-an-debug",
+ "debug_build": True,
+ "env": {
+ "XPCOM_DEBUG_BREAK": "stack-and-abort",
+        # Disable sccache: files whose compilation sccache serves from its
+        # cache are never recompiled, so the indexer would never see them
+ "SCCACHE_DISABLE": "1",
+ },
+ "mozconfig_variant": "debug-searchfox",
+}
diff --git a/testing/mozharness/configs/developer_config.py b/testing/mozharness/configs/developer_config.py
new file mode 100644
index 0000000000..563a0db9db
--- /dev/null
+++ b/testing/mozharness/configs/developer_config.py
@@ -0,0 +1,44 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+This config file can be appended to any other mozharness job
+running under treeherder. The purpose of this config is to
+override values that are specific to Release Engineering machines,
+which can reach certain hosts within their network.
+In other words, this config allows you to run any job
+outside of the Release Engineering network.
+
+Using this config file should be accompanied by using
+--test-url and --installer-url where appropriate.
+"""
+
+import os
+
+LOCAL_WORKDIR = os.path.expanduser("~/.mozilla/releng")
+
+config = {
+ # Developer mode values
+ "developer_mode": True,
+ "local_workdir": LOCAL_WORKDIR,
+ "replace_urls": [
+ ("http://pvtbuilds.pvt.build", "https://pvtbuilds"),
+ ],
+ # General local variable overwrite
+ "exes": {
+ "gittool.py": os.path.join(LOCAL_WORKDIR, "gittool.py"),
+ },
+ # Talos related
+ "python_webserver": True,
+ "virtualenv_path": "%s/build/venv" % os.getcwd(),
+ "preflight_run_cmd_suites": [],
+ "postflight_run_cmd_suites": [],
+ # Tooltool related
+ "tooltool_cache": os.path.join(LOCAL_WORKDIR, "builds/tooltool_cache"),
+ "tooltool_cache_path": os.path.join(LOCAL_WORKDIR, "builds/tooltool_cache"),
+ # VCS tools
+ "gittool.py": "http://hg.mozilla.org/build/puppet/raw-file/faaf5abd792e/modules/packages/files/gittool.py",
+ # Android related
+ "host_utils_url": "https://tooltool.mozilla-releng.net/sha512/372c89f9dccaf5ee3b9d35fd1cfeb089e1e5db3ff1c04e35aa3adc8800bc61a2ae10e321f37ae7bab20b56e60941f91bb003bcb22035902a73d70872e7bd3282",
+}
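
Note: per the docstring, this file is appended after a job's normal configs,
together with explicit artifact URLs. An invocation would look roughly like the
following; the script, first config, and URLs are placeholders:

    python scripts/desktop_unittest.py \
        --cfg unittests/linux_unittest.py \
        --cfg developer_config.py \
        --installer-url https://example.com/firefox.tar.bz2 \
        --test-url https://example.com/firefox.tests.zip
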
diff --git a/testing/mozharness/configs/firefox_ui_tests/qa_jenkins.py b/testing/mozharness/configs/firefox_ui_tests/qa_jenkins.py
new file mode 100644
index 0000000000..5e605bd0a0
--- /dev/null
+++ b/testing/mozharness/configs/firefox_ui_tests/qa_jenkins.py
@@ -0,0 +1,13 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Default configuration as used by Mozmill CI (Jenkins)
+
+
+config = {
+ # Tests run in mozmill-ci do not use RelEng infra
+ "developer_mode": True,
+ # mozcrash support
+ "download_symbols": "ondemand",
+}
diff --git a/testing/mozharness/configs/firefox_ui_tests/releng_release.py b/testing/mozharness/configs/firefox_ui_tests/releng_release.py
new file mode 100644
index 0000000000..ecd55de008
--- /dev/null
+++ b/testing/mozharness/configs/firefox_ui_tests/releng_release.py
@@ -0,0 +1,29 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Default configuration as used by Release Engineering for testing release/beta builds
+
+import os
+import sys
+
+import mozharness
+
+external_tools_path = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
+ "external_tools",
+)
+
+
+config = {
+ # General local variable overwrite
+ "exes": {
+ "gittool.py": [
+ # Bug 1227079 - Python executable needed to run the script on Windows
+ sys.executable,
+ os.path.join(external_tools_path, "gittool.py"),
+ ],
+ },
+ # mozcrash support
+ "download_symbols": "ondemand",
+}
diff --git a/testing/mozharness/configs/firefox_ui_tests/taskcluster.py b/testing/mozharness/configs/firefox_ui_tests/taskcluster.py
new file mode 100644
index 0000000000..72cd3d78dd
--- /dev/null
+++ b/testing/mozharness/configs/firefox_ui_tests/taskcluster.py
@@ -0,0 +1,11 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Config file for firefox ui tests run via TaskCluster.
+
+
+config = {
+ "vcs_share_base": "/builds/hg-shared",
+ "tooltool_cache": "/builds/worker/tooltool-cache",
+}
diff --git a/testing/mozharness/configs/firefox_ui_tests/taskcluster_mac.py b/testing/mozharness/configs/firefox_ui_tests/taskcluster_mac.py
new file mode 100644
index 0000000000..163e133ed5
--- /dev/null
+++ b/testing/mozharness/configs/firefox_ui_tests/taskcluster_mac.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Configuration overrides for taskcluster.py, for osx
+config = {
+ "tooltool_cache": "/builds/tooltool_cache",
+}
diff --git a/testing/mozharness/configs/firefox_ui_tests/taskcluster_windows.py b/testing/mozharness/configs/firefox_ui_tests/taskcluster_windows.py
new file mode 100644
index 0000000000..2cb56413c6
--- /dev/null
+++ b/testing/mozharness/configs/firefox_ui_tests/taskcluster_windows.py
@@ -0,0 +1,17 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Config file for firefox ui tests run via TaskCluster.
+
+import os
+import sys
+
+config = {
+ "virtualenv_path": "venv",
+ "exes": {
+ "python": sys.executable,
+ "hg": os.path.join(os.environ["PROGRAMFILES"], "Mercurial", "hg"),
+ },
+ "download_symbols": "ondemand",
+}
diff --git a/testing/mozharness/configs/l10n_bumper/jamun.py b/testing/mozharness/configs/l10n_bumper/jamun.py
new file mode 100644
index 0000000000..355f82073a
--- /dev/null
+++ b/testing/mozharness/configs/l10n_bumper/jamun.py
@@ -0,0 +1,83 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import sys
+
+MULTI_REPO = "projects/jamun"
+EXES = {}
+if sys.platform.startswith("linux"):
+ EXES = {
+ # Get around the https warnings
+ "hg": [
+ "/usr/local/bin/hg",
+ "--config",
+ "web.cacerts=/etc/pki/tls/certs/ca-bundle.crt",
+ ],
+ "hgtool.py": ["/usr/local/bin/hgtool.py"],
+ }
+
+
+config = {
+ "log_name": "l10n_bumper",
+ "log_type": "multi",
+ "exes": EXES,
+ "gecko_pull_url": "https://hg.mozilla.org/{}".format(MULTI_REPO),
+ "gecko_push_url": "ssh://hg.mozilla.org/{}".format(MULTI_REPO),
+ "hg_user": "L10n Bumper Bot <release+l10nbumper@mozilla.com>",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "ssh_user": "ffxbld",
+ "vcs_share_base": "/builds/hg-shared",
+ "version_path": "browser/config/version.txt",
+ "status_path": ".l10n_bumper_status",
+ "bump_configs": [
+ {
+ "path": "mobile/locales/l10n-changesets.json",
+ "format": "json",
+ "name": "Fennec l10n changesets",
+ "revision_url": "https://l10n.mozilla.org/shipping/l10n-changesets?av=fennec%(MAJOR_VERSION)s",
+ "platform_configs": [
+ {
+ "platforms": ["android-arm", "android"],
+ "path": "mobile/android/locales/all-locales",
+ },
+ {
+ "platforms": ["android-multilocale"],
+ "path": "mobile/android/locales/maemo-locales",
+ },
+ ],
+ },
+ {
+ "path": "browser/locales/l10n-changesets.json",
+ "format": "json",
+ "name": "Firefox l10n changesets",
+ "revision_url": "https://l10n.mozilla.org/shipping/l10n-changesets?av=fx%(MAJOR_VERSION)s",
+ "ignore_config": {
+ "ja": ["macosx64"],
+ "ja-JP-mac": ["linux", "linux64", "win32", "win64"],
+ },
+ "platform_configs": [
+ {
+ "platforms": ["linux64", "linux", "macosx64", "win32", "win64"],
+ "path": "browser/locales/shipped-locales",
+ "format": "shipped-locales",
+ }
+ ],
+ },
+ {
+ "path": "browser/locales/central-changesets.json",
+ "format": "json",
+ "name": "Firefox l10n changesets",
+ "ignore_config": {
+ "ja": ["macosx64"],
+ "ja-JP-mac": ["linux", "linux64", "win32", "win64"],
+ },
+ "platform_configs": [
+ {
+ "platforms": ["linux64", "linux", "macosx64", "win32", "win64"],
+ "path": "browser/locales/all-locales",
+ }
+ ],
+ },
+ ],
+}
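The revision_url values above contain %(MAJOR_VERSION)s placeholders; a short
sketch of the expansion, with an invented version number:

    # Sketch only: old-style %-substitution with a dict.
    revision_url = (
        "https://l10n.mozilla.org/shipping/l10n-changesets"
        "?av=fx%(MAJOR_VERSION)s"
    )
    print(revision_url % {"MAJOR_VERSION": "110"})
    # https://l10n.mozilla.org/shipping/l10n-changesets?av=fx110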
diff --git a/testing/mozharness/configs/l10n_bumper/mozilla-beta.py b/testing/mozharness/configs/l10n_bumper/mozilla-beta.py
new file mode 100644
index 0000000000..4ba7add5de
--- /dev/null
+++ b/testing/mozharness/configs/l10n_bumper/mozilla-beta.py
@@ -0,0 +1,87 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import sys
+
+MULTI_REPO = "releases/mozilla-beta"
+EXES = {}
+if sys.platform.startswith("linux"):
+ EXES = {
+ # Get around the https warnings
+ "hg": [
+ "/usr/local/bin/hg",
+ "--config",
+ "web.cacerts=/etc/pki/tls/certs/ca-bundle.crt",
+ ],
+ "hgtool.py": ["/usr/local/bin/hgtool.py"],
+ }
+
+config = {
+ "log_name": "l10n_bumper",
+ "log_type": "multi",
+ "exes": EXES,
+ "gecko_pull_url": "https://hg.mozilla.org/{}".format(MULTI_REPO),
+ "gecko_push_url": "ssh://hg.mozilla.org/{}".format(MULTI_REPO),
+ "hg_user": "L10n Bumper Bot <release+l10nbumper@mozilla.com>",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "ssh_user": "ffxbld",
+ "vcs_share_base": "/builds/hg-shared",
+ "version_path": "browser/config/version.txt",
+ "status_path": ".l10n_bumper_status",
+ "bump_configs": [
+ {
+ "path": "mobile/locales/l10n-changesets.json",
+ "format": "json",
+ "name": "Fennec l10n changesets",
+ "revision_url": "https://l10n.mozilla.org/shipping/l10n-changesets?av=fennec%(MAJOR_VERSION)s",
+ "platform_configs": [
+ {
+ "platforms": ["android-multilocale"],
+ "path": "mobile/android/locales/maemo-locales",
+ }
+ ],
+ },
+ {
+ "path": "browser/locales/l10n-changesets.json",
+ "format": "json",
+ "name": "Firefox l10n changesets",
+ "revision_url": "https://l10n.mozilla.org/shipping/l10n-changesets?av=fx%(MAJOR_VERSION)s",
+ "ignore_config": {
+ "ja": ["macosx64", "macosx64-devedition"],
+ "ja-JP-mac": [
+ "linux",
+ "linux-devedition",
+ "linux64",
+ "linux64-devedition",
+ "win32",
+ "win32-devedition",
+ "win64",
+ "win64-devedition",
+ "win64-aarch64",
+ "win64-aarch64-devedition",
+ ],
+ },
+ "platform_configs": [
+ {
+ "platforms": [
+ "linux",
+ "linux-devedition",
+ "linux64",
+ "linux64-devedition",
+ "macosx64",
+ "macosx64-devedition",
+ "win32",
+ "win32-devedition",
+ "win64",
+ "win64-devedition",
+ "win64-aarch64",
+ "win64-aarch64-devedition",
+ ],
+ "path": "browser/locales/shipped-locales",
+ "format": "shipped-locales",
+ }
+ ],
+ },
+ ],
+}
diff --git a/testing/mozharness/configs/l10n_bumper/mozilla-central.py b/testing/mozharness/configs/l10n_bumper/mozilla-central.py
new file mode 100644
index 0000000000..b2bcc1b98f
--- /dev/null
+++ b/testing/mozharness/configs/l10n_bumper/mozilla-central.py
@@ -0,0 +1,88 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import sys
+
+MULTI_REPO = "mozilla-central"
+EXES = {}
+if sys.platform.startswith("linux"):
+ EXES = {
+ # Get around the https warnings
+ "hg": [
+ "/usr/local/bin/hg",
+ "--config",
+ "web.cacerts=/etc/pki/tls/certs/ca-bundle.crt",
+ ],
+ "hgtool.py": ["/usr/local/bin/hgtool.py"],
+ }
+
+config = {
+ "log_name": "l10n_bumper",
+ "log_type": "multi",
+ "exes": EXES,
+ "gecko_pull_url": "https://hg.mozilla.org/{}".format(MULTI_REPO),
+ "gecko_push_url": "ssh://hg.mozilla.org/{}".format(MULTI_REPO),
+ "hg_user": "L10n Bumper Bot <release+l10nbumper@mozilla.com>",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "ssh_user": "ffxbld",
+ "vcs_share_base": "/builds/hg-shared",
+ "version_path": "browser/config/version.txt",
+ "status_path": ".l10n_bumper_status",
+ "bump_configs": [
+ {
+ "path": "mobile/locales/l10n-changesets.json",
+ "format": "json",
+ "name": "Fennec l10n changesets",
+ "platform_configs": [
+ {
+ "platforms": ["android-arm", "android"],
+ "path": "mobile/android/locales/all-locales",
+ },
+ {
+ "platforms": ["android-multilocale"],
+ "path": "mobile/android/locales/maemo-locales",
+ },
+ ],
+ },
+ {
+ "path": "browser/locales/l10n-changesets.json",
+ "format": "json",
+ "name": "Firefox l10n changesets",
+ "ignore_config": {
+ "ja": ["macosx64", "macosx64-devedition"],
+ "ja-JP-mac": [
+ "linux",
+ "linux-devedition",
+ "linux64",
+ "linux64-devedition",
+ "win32",
+ "win32-devedition",
+ "win64",
+ "win64-devedition",
+ "win64-aarch64",
+ "win64-aarch64-devedition",
+ ],
+ },
+ "platform_configs": [
+ {
+ "platforms": [
+ "linux",
+ "linux-devedition",
+ "linux64",
+ "linux64-devedition",
+ "macosx64",
+ "macosx64-devedition",
+ "win32",
+ "win32-devedition",
+ "win64",
+ "win64-devedition",
+ "win64-aarch64",
+ "win64-aarch64-devedition",
+ ],
+ "path": "browser/locales/all-locales",
+ }
+ ],
+ },
+ ],
+}
diff --git a/testing/mozharness/configs/l10n_bumper/mozilla-esr68.py b/testing/mozharness/configs/l10n_bumper/mozilla-esr68.py
new file mode 100644
index 0000000000..01a7c85e2d
--- /dev/null
+++ b/testing/mozharness/configs/l10n_bumper/mozilla-esr68.py
@@ -0,0 +1,46 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import sys
+
+MULTI_REPO = "releases/mozilla-esr68"
+EXES = {}
+if sys.platform.startswith("linux"):
+ EXES = {
+ # Get around the https warnings
+ "hg": [
+ "/usr/local/bin/hg",
+ "--config",
+ "web.cacerts=/etc/pki/tls/certs/ca-bundle.crt",
+ ],
+ "hgtool.py": ["/usr/local/bin/hgtool.py"],
+ }
+
+config = {
+ "log_name": "l10n_bumper",
+ "log_type": "multi",
+ "exes": EXES,
+ "gecko_pull_url": "https://hg.mozilla.org/{}".format(MULTI_REPO),
+ "gecko_push_url": "ssh://hg.mozilla.org/{}".format(MULTI_REPO),
+ "hg_user": "L10n Bumper Bot <release+l10nbumper@mozilla.com>",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "ssh_user": "ffxbld",
+ "vcs_share_base": "/builds/hg-shared",
+ "version_path": "mobile/android/config/version-files/release/version.txt",
+ "status_path": ".l10n_bumper_status",
+ "bump_configs": [
+ {
+ "path": "mobile/locales/l10n-changesets.json",
+ "format": "json",
+ "name": "Fennec l10n changesets",
+ "revision_url": "https://l10n.mozilla.org/shipping/l10n-changesets?av=fennec%(COMBINED_MAJOR_VERSION)s",
+ "platform_configs": [
+ {
+ "platforms": ["android-multilocale"],
+ "path": "mobile/android/locales/maemo-locales",
+ }
+ ],
+ }
+ ],
+}
diff --git a/testing/mozharness/configs/marionette/mac_taskcluster_config.py b/testing/mozharness/configs/marionette/mac_taskcluster_config.py
new file mode 100644
index 0000000000..2ae326512e
--- /dev/null
+++ b/testing/mozharness/configs/marionette/mac_taskcluster_config.py
@@ -0,0 +1,39 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Configuration overrides for prod_config.py, for osx
+
+# OS Specifics
+DISABLE_SCREEN_SAVER = False
+ADJUST_MOUSE_AND_SCREEN = False
+
+#####
+config = {
+ "tooltool_cache": "/builds/tooltool_cache",
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [
+ # NOTE 'enabled' is only here while we have unconsolidated configs
+ {
+ "name": "disable_screen_saver",
+ "cmd": ["xset", "s", "off", "s", "reset"],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": False,
+ "enabled": DISABLE_SCREEN_SAVER,
+ },
+ {
+ "name": "run mouse & screen adjustment script",
+ "cmd": [
+ # once configs are consolidated, this python path will only
+ # be used on windows.
+ "python",
+ "../scripts/external_tools/mouse_and_screen_resolution.py",
+ "--configuration-file",
+ "../scripts/external_tools/machine-configuration.json",
+ ],
+ "architectures": ["32bit"],
+ "halt_on_failure": True,
+ "enabled": ADJUST_MOUSE_AND_SCREEN,
+ },
+ ],
+}
diff --git a/testing/mozharness/configs/marionette/prod_config.py b/testing/mozharness/configs/marionette/prod_config.py
new file mode 100644
index 0000000000..200106061b
--- /dev/null
+++ b/testing/mozharness/configs/marionette/prod_config.py
@@ -0,0 +1,68 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This is a template config file for marionette production.
+HG_SHARE_BASE_DIR = "/builds/hg-shared"
+
+# OS Specifics
+DISABLE_SCREEN_SAVER = True
+ADJUST_MOUSE_AND_SCREEN = False
+
+#####
+config = {
+ # marionette options
+ "marionette_address": "localhost:2828",
+ "test_manifest": "unit-tests.ini",
+ "vcs_share_base": HG_SHARE_BASE_DIR,
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ "download_symbols": "ondemand",
+ "tooltool_cache": "/builds/worker/tooltool-cache",
+ "suite_definitions": {
+ "marionette_desktop": {
+ "options": [
+ "-vv",
+ "--log-raw=%(raw_log_file)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-html=%(html_report_file)s",
+ "--binary=%(binary)s",
+ "--address=%(address)s",
+ "--symbols-path=%(symbols_path)s",
+ ],
+ "run_filename": "",
+ "testsdir": "marionette",
+ }
+ },
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [
+ # NOTE 'enabled' is only here while we have unconsolidated configs
+ {
+ "name": "disable_screen_saver",
+ "cmd": ["xset", "s", "off", "s", "reset"],
+ "halt_on_failure": False,
+ "architectures": ["32bit", "64bit"],
+ "enabled": DISABLE_SCREEN_SAVER,
+ },
+ {
+ "name": "run mouse & screen adjustment script",
+ "cmd": [
+ # once configs are consolidated, this python path will only
+ # be used on windows.
+ "python",
+ "../scripts/external_tools/mouse_and_screen_resolution.py",
+ "--configuration-file",
+ "../scripts/external_tools/machine-configuration.json",
+ ],
+ "architectures": ["32bit"],
+ "halt_on_failure": True,
+ "enabled": ADJUST_MOUSE_AND_SCREEN,
+ },
+ ],
+ "structured_output": True,
+}
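The suite_definitions options use the same %(name)s placeholders; a hedged
sketch of how the harness is assumed to complete them at run time (the values
below are placeholders, not from this config):

    # Sketch only: fill each option string with run-time values before
    # invoking the marionette runner.
    options = [
        "--log-raw=%(raw_log_file)s",
        "--binary=%(binary)s",
        "--address=%(address)s",
    ]
    values = {
        "raw_log_file": "raw.log",                   # invented
        "binary": "/builds/worker/firefox/firefox",  # invented
        "address": "localhost:2828",
    }
    cmd = [opt % values for opt in options]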
diff --git a/testing/mozharness/configs/marionette/test_config.py b/testing/mozharness/configs/marionette/test_config.py
new file mode 100644
index 0000000000..a41fd5a68f
--- /dev/null
+++ b/testing/mozharness/configs/marionette/test_config.py
@@ -0,0 +1,29 @@
+# This is a template config file for marionette test.
+
+config = {
+ # marionette options
+ "marionette_address": "localhost:2828",
+ "test_manifest": "unit-tests.ini",
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ "suite_definitions": {
+ "marionette_desktop": {
+ "options": [
+ "-vv",
+ "--log-raw=%(raw_log_file)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-html=%(html_report_file)s",
+ "--binary=%(binary)s",
+ "--address=%(address)s",
+ "--symbols-path=%(symbols_path)s",
+ ],
+ "run_filename": "",
+ "testsdir": "marionette",
+ },
+ },
+}
diff --git a/testing/mozharness/configs/marionette/windows_config.py b/testing/mozharness/configs/marionette/windows_config.py
new file mode 100644
index 0000000000..9101d67a5b
--- /dev/null
+++ b/testing/mozharness/configs/marionette/windows_config.py
@@ -0,0 +1,38 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This is a template config file for marionette production on Windows.
+config = {
+ # marionette options
+ "marionette_address": "localhost:2828",
+ "test_manifest": "unit-tests.ini",
+ "virtualenv_path": "venv",
+ "exes": {
+ "python": "c:/mozilla-build/python27/python",
+ "hg": "c:/mozilla-build/hg/hg",
+ },
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ "download_symbols": "ondemand",
+ "suite_definitions": {
+ "marionette_desktop": {
+ "options": [
+ "-vv",
+ "--log-raw=%(raw_log_file)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-html=%(html_report_file)s",
+ "--binary=%(binary)s",
+ "--address=%(address)s",
+ "--symbols-path=%(symbols_path)s",
+ ],
+ "run_filename": "",
+ "testsdir": "marionette",
+ },
+ },
+}
diff --git a/testing/mozharness/configs/marionette/windows_taskcluster_config.py b/testing/mozharness/configs/marionette/windows_taskcluster_config.py
new file mode 100644
index 0000000000..582371e130
--- /dev/null
+++ b/testing/mozharness/configs/marionette/windows_taskcluster_config.py
@@ -0,0 +1,148 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This is a template config file for marionette production on Windows.
+import os
+import platform
+import sys
+
+# OS Specifics
+DISABLE_SCREEN_SAVER = False
+ADJUST_MOUSE_AND_SCREEN = True
+DESKTOP_VISUALFX_THEME = {
+ "Let Windows choose": 0,
+ "Best appearance": 1,
+ "Best performance": 2,
+ "Custom": 3,
+}.get("Best appearance")
+TASKBAR_AUTOHIDE_REG_PATH = {
+ "Windows 7": "HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects2",
+ "Windows 10": "HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects3",
+}.get("{} {}".format(platform.system(), platform.release()))
+
+#####
+config = {
+ # marionette options
+ "marionette_address": "localhost:2828",
+ "test_manifest": "unit-tests.ini",
+ "virtualenv_path": "venv",
+ "exes": {
+ "python": sys.executable,
+ "hg": os.path.join(os.environ["PROGRAMFILES"], "Mercurial", "hg"),
+ },
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ "download_symbols": "ondemand",
+ "suite_definitions": {
+ "marionette_desktop": {
+ "options": [
+ "-vv",
+ "--log-raw=%(raw_log_file)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--log-html=%(html_report_file)s",
+ "--binary=%(binary)s",
+ "--address=%(address)s",
+ "--symbols-path=%(symbols_path)s",
+ ],
+ "run_filename": "",
+ "testsdir": "marionette",
+ },
+ },
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [
+ {
+ "name": "disable_screen_saver",
+ "cmd": ["xset", "s", "off", "s", "reset"],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": False,
+ "enabled": DISABLE_SCREEN_SAVER,
+ },
+ {
+ "name": "run mouse & screen adjustment script",
+ "cmd": [
+ sys.executable,
+ os.path.join(
+ os.getcwd(),
+ "mozharness",
+ "external_tools",
+ "mouse_and_screen_resolution.py",
+ ),
+ "--configuration-file",
+ os.path.join(
+ os.getcwd(),
+ "mozharness",
+ "external_tools",
+ "machine-configuration.json",
+ ),
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": ADJUST_MOUSE_AND_SCREEN,
+ },
+ {
+ "name": "disable windows security and maintenance notifications",
+ "cmd": [
+ "powershell",
+ "-command",
+ r"\"&{$p='HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Notifications\Settings\Windows.SystemToast.SecurityAndMaintenance';if(!(Test-Path -Path $p)){&New-Item -Path $p -Force}&Set-ItemProperty -Path $p -Name Enabled -Value 0}\"", # noqa
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": (platform.release() == 10),
+ },
+ {
+ "name": "set windows VisualFX",
+ "cmd": [
+ "powershell",
+ "-command",
+ "\"&{{&Set-ItemProperty -Path 'HKCU:Software\Microsoft\Windows\CurrentVersion\Explorer\VisualEffects' -Name VisualFXSetting -Value {}}}\"".format(
+ DESKTOP_VISUALFX_THEME
+ ),
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ },
+ {
+ "name": "create scrollbars always show key",
+ "cmd": [
+ "powershell",
+ "-command",
+ "New-ItemProperty -Path 'HKCU:\Control Panel\Accessibility' -Name 'DynamicScrollbars' -Value 0",
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": False,
+ "enabled": True,
+ },
+ {
+ "name": "hide windows taskbar",
+ "cmd": [
+ "powershell",
+ "-command",
+ "\"&{{$p='{}';$v=(Get-ItemProperty -Path $p).Settings;$v[8]=3;&Set-ItemProperty -Path $p -Name Settings -Value $v}}\"".format(
+ TASKBAR_AUTOHIDE_REG_PATH
+ ),
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ },
+ {
+ "name": "restart windows explorer",
+ "cmd": [
+ "powershell",
+ "-command",
+ '"&{&Stop-Process -ProcessName explorer}"',
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ },
+ ],
+}
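A sketch of the assumed gating of preflight_run_cmd_suites entries: a command
runs only when its 'enabled' flag is set and the current architecture is
listed (the arch value here is illustrative).

    # Sketch only: assumed semantics of the 'enabled'/'architectures' keys.
    def should_run(suite, current_arch="64bit"):
        return suite["enabled"] and current_arch in suite["architectures"]

    suite = {
        "name": "disable_screen_saver",
        "architectures": ["32bit", "64bit"],
        "enabled": False,  # DISABLE_SCREEN_SAVER above
    }
    assert should_run(suite) is False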
diff --git a/testing/mozharness/configs/multi_locale/android-mozharness-build.json b/testing/mozharness/configs/multi_locale/android-mozharness-build.json
new file mode 100644
index 0000000000..5fc7fa3794
--- /dev/null
+++ b/testing/mozharness/configs/multi_locale/android-mozharness-build.json
@@ -0,0 +1,11 @@
+{
+ "log_name": "multilocale",
+ "locales_dir": "mobile/android/locales",
+ "ignore_locales": ["en-US", "multi"],
+ "vcs_share_base": "/builds/hg-shared",
+ "hg_l10n_base": "https://hg.mozilla.org/l10n-central",
+ "hg_l10n_tag": "default",
+ "work_dir": "build",
+ "locales_file": "mobile/locales/l10n-changesets.json",
+ "locales_platform": "android-multilocale"
+}
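Unlike the Python configs above, this file is plain JSON; a minimal sketch of
consuming it (the relative path is assumed for illustration):

    import json

    # Sketch only: load the JSON flavor of a mozharness config.
    with open("android-mozharness-build.json") as f:
        cfg = json.load(f)
    assert "multi" in cfg["ignore_locales"]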
diff --git a/testing/mozharness/configs/openh264/linux32.py b/testing/mozharness/configs/openh264/linux32.py
new file mode 100644
index 0000000000..1955f1fa8c
--- /dev/null
+++ b/testing/mozharness/configs/openh264/linux32.py
@@ -0,0 +1,41 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+import mozharness
+
+external_tools_path = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
+ "external_tools",
+)
+
+config = {
+ "exes": {
+ "gittool.py": [os.path.join(external_tools_path, "gittool.py")],
+ "python3": "python3",
+ },
+ "dump_syms_binary": "{}/dump_syms/dump_syms".format(os.environ["MOZ_FETCHES_DIR"]),
+ "arch": "x86",
+ "operating_system": "linux",
+ "partial_env": {
+ "CXXFLAGS": (
+ "--sysroot {MOZ_FETCHES_DIR}/sysroot-i686-linux-gnu".format(
+ MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]
+ )
+ ),
+ "LDFLAGS": (
+ "--sysroot {MOZ_FETCHES_DIR}/sysroot-i686-linux-gnu".format(
+ MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]
+ )
+ ),
+ "PATH": (
+ "{MOZ_FETCHES_DIR}/clang/bin:"
+ "{MOZ_FETCHES_DIR}/binutils/bin:"
+ "{MOZ_FETCHES_DIR}/nasm:%(PATH)s".format(
+ MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]
+ )
+ ),
+ },
+}
diff --git a/testing/mozharness/configs/openh264/linux64.py b/testing/mozharness/configs/openh264/linux64.py
new file mode 100644
index 0000000000..bc5aecf015
--- /dev/null
+++ b/testing/mozharness/configs/openh264/linux64.py
@@ -0,0 +1,41 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+import mozharness
+
+external_tools_path = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
+ "external_tools",
+)
+
+config = {
+ "exes": {
+ "gittool.py": [os.path.join(external_tools_path, "gittool.py")],
+ "python3": "python3",
+ },
+ "dump_syms_binary": "{}/dump_syms/dump_syms".format(os.environ["MOZ_FETCHES_DIR"]),
+ "arch": "x64",
+ "operating_system": "linux",
+ "partial_env": {
+ "CXXFLAGS": (
+ "--sysroot {MOZ_FETCHES_DIR}/sysroot-x86_64-linux-gnu".format(
+ MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]
+ )
+ ),
+ "LDFLAGS": (
+ "--sysroot {MOZ_FETCHES_DIR}/sysroot-x86_64-linux-gnu".format(
+ MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]
+ )
+ ),
+ "PATH": (
+ "{MOZ_FETCHES_DIR}/clang/bin:"
+ "{MOZ_FETCHES_DIR}/binutils/bin:"
+ "{MOZ_FETCHES_DIR}/nasm:%(PATH)s".format(
+ MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]
+ )
+ ),
+ },
+}
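partial_env mixes two substitution steps: {MOZ_FETCHES_DIR} is expanded
immediately via str.format when this module is imported, while %(PATH)s is
left for later expansion against the live environment. A sketch with invented
directory names:

    import os

    # Sketch only: %(PATH)s is assumed to be filled from the current
    # environment when the build command is launched.
    partial_path = "/fetches/clang/bin:/fetches/nasm:%(PATH)s"
    final_path = partial_path % {"PATH": os.environ.get("PATH", "")}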
diff --git a/testing/mozharness/configs/openh264/macosx64-aarch64.py b/testing/mozharness/configs/openh264/macosx64-aarch64.py
new file mode 100644
index 0000000000..c6262d360b
--- /dev/null
+++ b/testing/mozharness/configs/openh264/macosx64-aarch64.py
@@ -0,0 +1,41 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+import mozharness
+
+external_tools_path = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
+ "external_tools",
+)
+
+config = {
+ "exes": {
+ "gittool.py": [os.path.join(external_tools_path, "gittool.py")],
+ "python3": "python3",
+ },
+ "dump_syms_binary": "{}/dump_syms/dump_syms".format(os.environ["MOZ_FETCHES_DIR"]),
+ "arch": "aarch64",
+ "operating_system": "darwin",
+ "partial_env": {
+ "CFLAGS": (
+ "-target aarch64-apple-darwin -mcpu=apple-a12 "
+ "-isysroot {MOZ_FETCHES_DIR}/MacOSX11.0.sdk".format(
+ MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]
+ )
+ ),
+ "LDFLAGS": (
+ "-target aarch64-apple-darwin -mcpu=apple-a12 "
+ "-isysroot {MOZ_FETCHES_DIR}/MacOSX11.0.sdk".format(
+ MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]
+ )
+ ),
+ "PATH": (
+ "{MOZ_FETCHES_DIR}/clang/bin/:{MOZ_FETCHES_DIR}/cctools/bin/:%(PATH)s".format(
+ MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]
+ )
+ ),
+ },
+}
diff --git a/testing/mozharness/configs/openh264/macosx64.py b/testing/mozharness/configs/openh264/macosx64.py
new file mode 100644
index 0000000000..3416173a86
--- /dev/null
+++ b/testing/mozharness/configs/openh264/macosx64.py
@@ -0,0 +1,45 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+import mozharness
+
+external_tools_path = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
+ "external_tools",
+)
+
+config = {
+ "exes": {
+ "gittool.py": [os.path.join(external_tools_path, "gittool.py")],
+ "python3": "python3",
+ },
+ "dump_syms_binary": "{}/dump_syms/dump_syms".format(os.environ["MOZ_FETCHES_DIR"]),
+ "arch": "x64",
+ "operating_system": "darwin",
+ "partial_env": {
+ "CXXFLAGS": (
+ "-target x86_64-apple-darwin "
+ "-isysroot {MOZ_FETCHES_DIR}/MacOSX10.11.sdk "
+ "-mmacosx-version-min=10.11".format(
+ MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]
+ )
+ ),
+ "LDFLAGS": (
+ "-target x86_64-apple-darwin "
+ "-isysroot {MOZ_FETCHES_DIR}/MacOSX10.11.sdk "
+ "-mmacosx-version-min=10.11".format(
+ MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]
+ )
+ ),
+ "PATH": (
+ "{MOZ_FETCHES_DIR}/clang/bin:"
+ "{MOZ_FETCHES_DIR}/cctools/bin:"
+ "{MOZ_FETCHES_DIR}/nasm:%(PATH)s".format(
+ MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]
+ )
+ ),
+ },
+}
diff --git a/testing/mozharness/configs/openh264/tooltool-manifests/win.manifest b/testing/mozharness/configs/openh264/tooltool-manifests/win.manifest
new file mode 100644
index 0000000000..5ec9a8377a
--- /dev/null
+++ b/testing/mozharness/configs/openh264/tooltool-manifests/win.manifest
@@ -0,0 +1,10 @@
+[
+ {
+ "version": "Visual Studio 2017 15.8.4 / SDK 10.0.17134.0",
+ "digest": "ecf1e03f6f98f86775059a43f9e7dc7e326f6643d7c08962d9f614e4f5a65b1ca63fa1cfeb0f1a3c2474bf0d4318dda960b378beb2a44ecf8a91111207f4ece5",
+ "size": 349626009,
+ "algorithm": "sha512",
+ "filename": "vs2017_15.8.4.zip",
+ "unpack": true
+ }
+]
diff --git a/testing/mozharness/configs/openh264/tooltool-manifests/win64-aarch64.manifest b/testing/mozharness/configs/openh264/tooltool-manifests/win64-aarch64.manifest
new file mode 100644
index 0000000000..4989d3a4ee
--- /dev/null
+++ b/testing/mozharness/configs/openh264/tooltool-manifests/win64-aarch64.manifest
@@ -0,0 +1,11 @@
+[
+ {
+ "version": "Visual Studio 2017 15.9.6 / SDK 10.0.17134.0",
+ "size": 490015895,
+ "visibility": "internal",
+ "digest": "91d08703a8ce39f6f53ccecc7c7b6f57e1b571ddb5d1eb4dd9260e52580566c35a4bed39ad366fd60ca60ebf5c06f0f00561bba5cd631826511f2872a3d2dcd5",
+ "algorithm": "sha512",
+ "filename": "vs2017_15.9.6.zip",
+ "unpack": true
+ }
+]
diff --git a/testing/mozharness/configs/openh264/win32.py b/testing/mozharness/configs/openh264/win32.py
new file mode 100644
index 0000000000..6b07072200
--- /dev/null
+++ b/testing/mozharness/configs/openh264/win32.py
@@ -0,0 +1,52 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import sys
+
+import mozharness
+
+external_tools_path = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
+ "external_tools",
+)
+
+VSPATH = "%(abs_work_dir)s\\vs2017_15.8.4"
+config = {
+ "tooltool_manifest_file": "win.manifest",
+ "exes": {
+ "gittool.py": [sys.executable, os.path.join(external_tools_path, "gittool.py")],
+ "python3": "c:\\mozilla-build\\python\\python3.exe",
+ },
+ "dump_syms_binary": "{}/dump_syms/dump_syms.exe".format(
+ os.environ["MOZ_FETCHES_DIR"]
+ ),
+ "arch": "x86",
+ "partial_env": {
+ "PATH": (
+ "{MOZ_FETCHES_DIR}\\clang\\bin\\;"
+ "{MOZ_FETCHES_DIR}\\nasm;"
+ "{_VSPATH}\\VC\\bin\\Hostx64\\x64;%(PATH)s"
+ # 32-bit redist here for our dump_syms.exe
+ "{_VSPATH}/VC/redist/x86/Microsoft.VC141.CRT;"
+ "{_VSPATH}/SDK/Redist/ucrt/DLLs/x86;"
+ "{_VSPATH}/DIA SDK/bin"
+ ).format(_VSPATH=VSPATH, MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]),
+ "INCLUDES": (
+ "-I{_VSPATH}\\VC\\include "
+ "-I{_VSPATH}\\VC\\atlmfc\\include "
+ "-I{_VSPATH}\\SDK\\Include\\10.0.17134.0\\ucrt "
+ "-I{_VSPATH}\\SDK\\Include\\10.0.17134.0\\shared "
+ "-I{_VSPATH}\\SDK\\Include\\10.0.17134.0\\um "
+ "-I{_VSPATH}\\SDK\\Include\\10.0.17134.0\\winrt "
+ ).format(_VSPATH=VSPATH),
+ "LIB": (
+ "{_VSPATH}/VC/lib/x86;"
+ "{_VSPATH}/VC/atlmfc/lib/x86;"
+ "{_VSPATH}/SDK/lib/10.0.17134.0/ucrt/x86;"
+ "{_VSPATH}/SDK/lib/10.0.17134.0/um/x86;"
+ ).format(_VSPATH=VSPATH),
+ "CFLAGS": ("-m32"),
+ },
+}
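VSPATH itself carries a mozharness-style %(abs_work_dir)s placeholder, which
is assumed to be expanded by the script against its absolute work dir before
the env is applied; a sketch with an invented path:

    # Sketch only: assumed late expansion of %(abs_work_dir)s.
    vspath = "%(abs_work_dir)s\\vs2017_15.8.4"
    print(vspath % {"abs_work_dir": "C:\\tasks\\build"})
    # C:\tasks\build\vs2017_15.8.4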
diff --git a/testing/mozharness/configs/openh264/win64-aarch64.py b/testing/mozharness/configs/openh264/win64-aarch64.py
new file mode 100644
index 0000000000..5546c4fabc
--- /dev/null
+++ b/testing/mozharness/configs/openh264/win64-aarch64.py
@@ -0,0 +1,53 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import sys
+
+import mozharness
+
+external_tools_path = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
+ "external_tools",
+)
+
+VSPATH = "%(abs_work_dir)s\\vs2017_15.9.6"
+config = {
+ "tooltool_manifest_file": "win64-aarch64.manifest",
+ "exes": {
+ "gittool.py": [sys.executable, os.path.join(external_tools_path, "gittool.py")],
+ "python3": "c:\\mozilla-build\\python\\python3.exe",
+ },
+ "dump_syms_binary": "{}/dump_syms/dump_syms.exe".format(
+ os.environ["MOZ_FETCHES_DIR"]
+ ),
+ "arch": "aarch64",
+ "partial_env": {
+ "PATH": (
+ "%(abs_work_dir)s\\openh264;"
+ "{MOZ_FETCHES_DIR}\\clang\\bin\\;"
+ "{_VSPATH}\\VC\\bin\\Hostx64\\arm64;"
+ "{_VSPATH}\\VC\\bin\\Hostx64\\x64;"
+ # 32-bit redist here for our dump_syms.exe
+ "{_VSPATH}/VC/redist/x86/Microsoft.VC141.CRT;"
+ "{_VSPATH}/SDK/Redist/ucrt/DLLs/x86;"
+ "{_VSPATH}/DIA SDK/bin;%(PATH)s;"
+ ).format(_VSPATH=VSPATH, MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]),
+ "INCLUDES": (
+ "-I{_VSPATH}\\VC\\include "
+ "-I{_VSPATH}\\VC\\atlmfc\\include "
+ "-I{_VSPATH}\\SDK\\Include\\10.0.17134.0\\ucrt "
+ "-I{_VSPATH}\\SDK\\Include\\10.0.17134.0\\shared "
+ "-I{_VSPATH}\\SDK\\Include\\10.0.17134.0\\um "
+ "-I{_VSPATH}\\SDK\\Include\\10.0.17134.0\\winrt "
+ ).format(_VSPATH=VSPATH),
+ "LIB": (
+ "{_VSPATH}/VC/lib/arm64;"
+ "{_VSPATH}/VC/atlmfc/lib/arm64;"
+ "{_VSPATH}/SDK/lib/10.0.17134.0/ucrt/arm64;"
+ "{_VSPATH}/SDK/lib/10.0.17134.0/um/arm64;"
+ ).format(_VSPATH=VSPATH),
+ "CFLAGS": ("--target=aarch64-windows-msvc"),
+ },
+}
diff --git a/testing/mozharness/configs/openh264/win64.py b/testing/mozharness/configs/openh264/win64.py
new file mode 100644
index 0000000000..c561dbc67a
--- /dev/null
+++ b/testing/mozharness/configs/openh264/win64.py
@@ -0,0 +1,51 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import sys
+
+import mozharness
+
+external_tools_path = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
+ "external_tools",
+)
+
+VSPATH = "%(abs_work_dir)s\\vs2017_15.8.4"
+config = {
+ "tooltool_manifest_file": "win.manifest",
+ "exes": {
+ "gittool.py": [sys.executable, os.path.join(external_tools_path, "gittool.py")],
+ "python3": "c:\\mozilla-build\\python\\python3.exe",
+ },
+ "dump_syms_binary": "{}/dump_syms/dump_syms.exe".format(
+ os.environ["MOZ_FETCHES_DIR"]
+ ),
+ "arch": "x64",
+ "partial_env": {
+ "PATH": (
+ "{MOZ_FETCHES_DIR}\\clang\\bin\\;"
+ "{MOZ_FETCHES_DIR}\\nasm;"
+ "{_VSPATH}\\VC\\bin\\Hostx64\\x64;%(PATH)s;"
+ # 32-bit redist here for our dump_syms.exe
+ "{_VSPATH}/VC/redist/x86/Microsoft.VC141.CRT;"
+ "{_VSPATH}/SDK/Redist/ucrt/DLLs/x86;"
+ "{_VSPATH}/DIA SDK/bin"
+ ).format(_VSPATH=VSPATH, MOZ_FETCHES_DIR=os.environ["MOZ_FETCHES_DIR"]),
+ "INCLUDES": (
+ "-I{_VSPATH}\\VC\\include "
+ "-I{_VSPATH}\\VC\\atlmfc\\include "
+ "-I{_VSPATH}\\SDK\\Include\\10.0.17134.0\\ucrt "
+ "-I{_VSPATH}\\SDK\\Include\\10.0.17134.0\\shared "
+ "-I{_VSPATH}\\SDK\\Include\\10.0.17134.0\\um "
+ "-I{_VSPATH}\\SDK\\Include\\10.0.17134.0\\winrt "
+ ).format(_VSPATH=VSPATH),
+ "LIB": (
+ "{_VSPATH}/VC/lib/x64;"
+ "{_VSPATH}/VC/atlmfc/lib/x64;"
+ "{_VSPATH}/SDK/lib/10.0.17134.0/ucrt/x64;"
+ "{_VSPATH}/SDK/lib/10.0.17134.0/um/x64;"
+ ).format(_VSPATH=VSPATH),
+ },
+}
diff --git a/testing/mozharness/configs/partner_repacks/release_mozilla-release_desktop.py b/testing/mozharness/configs/partner_repacks/release_mozilla-release_desktop.py
new file mode 100644
index 0000000000..3391dd396c
--- /dev/null
+++ b/testing/mozharness/configs/partner_repacks/release_mozilla-release_desktop.py
@@ -0,0 +1,19 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "appName": "Firefox",
+ "log_name": "partner_repack",
+ "repack_manifests_url": "git@github.com:mozilla-partners/repack-manifests.git",
+ "repo_file": "https://raw.githubusercontent.com/mozilla/git-repo/master/repo",
+ "secret_files": [
+ {
+ "filename": "/builds/partner-github-ssh",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/partner-github-ssh",
+ "min_scm_level": 1,
+ "mode": 0o600,
+ },
+ ],
+ "ssh_key": "/builds/partner-github-ssh",
+}
diff --git a/testing/mozharness/configs/partner_repacks/release_mozilla-release_desktop_EME-free.py b/testing/mozharness/configs/partner_repacks/release_mozilla-release_desktop_EME-free.py
new file mode 100644
index 0000000000..4d2d2df418
--- /dev/null
+++ b/testing/mozharness/configs/partner_repacks/release_mozilla-release_desktop_EME-free.py
@@ -0,0 +1,19 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "appName": "Firefox",
+ "log_name": "partner_repack",
+ "repack_manifests_url": "git@github.com:mozilla-partners/mozilla-EME-free-manifest.git",
+ "repo_file": "https://raw.githubusercontent.com/mozilla/git-repo/master/repo",
+ "secret_files": [
+ {
+ "filename": "/builds/partner-github-ssh",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/partner-github-ssh",
+ "min_scm_level": 1,
+ "mode": 0o600,
+ },
+ ],
+ "ssh_key": "/builds/partner-github-ssh",
+}
diff --git a/testing/mozharness/configs/partner_repacks/staging_release_mozilla-release_desktop.py b/testing/mozharness/configs/partner_repacks/staging_release_mozilla-release_desktop.py
new file mode 100644
index 0000000000..3391dd396c
--- /dev/null
+++ b/testing/mozharness/configs/partner_repacks/staging_release_mozilla-release_desktop.py
@@ -0,0 +1,19 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "appName": "Firefox",
+ "log_name": "partner_repack",
+ "repack_manifests_url": "git@github.com:mozilla-partners/repack-manifests.git",
+ "repo_file": "https://raw.githubusercontent.com/mozilla/git-repo/master/repo",
+ "secret_files": [
+ {
+ "filename": "/builds/partner-github-ssh",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/partner-github-ssh",
+ "min_scm_level": 1,
+ "mode": 0o600,
+ },
+ ],
+ "ssh_key": "/builds/partner-github-ssh",
+}
diff --git a/testing/mozharness/configs/raptor/android_hw_config.py b/testing/mozharness/configs/raptor/android_hw_config.py
new file mode 100644
index 0000000000..81118b497b
--- /dev/null
+++ b/testing/mozharness/configs/raptor/android_hw_config.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "log_name": "raptor",
+ "title": os.uname()[1].lower().split(".")[0],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install-chrome-android",
+ "install-chromium-distribution",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": "/builds/tooltool_cache",
+ "download_tooltool": True,
+ "hostutils_manifest_path": "testing/config/tooltool-manifests/linux64/hostutils.manifest",
+}
+
+# raptor picks these up in mitmproxy.py, which doesn't use the mozharness config
+os.environ["TOOLTOOLCACHE"] = config["tooltool_cache"]
+os.environ["HOSTUTILS_MANIFEST_PATH"] = config["hostutils_manifest_path"]
diff --git a/testing/mozharness/configs/raptor/linux64_config_taskcluster.py b/testing/mozharness/configs/raptor/linux64_config_taskcluster.py
new file mode 100644
index 0000000000..e3b2870c92
--- /dev/null
+++ b/testing/mozharness/configs/raptor/linux64_config_taskcluster.py
@@ -0,0 +1,32 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import sys
+
+PYTHON = sys.executable
+VENV_PATH = "%s/build/venv" % os.getcwd()
+
+exes = {
+ "python": PYTHON,
+}
+ABS_WORK_DIR = os.path.join(os.getcwd(), "build")
+INSTALLER_PATH = os.path.join(ABS_WORK_DIR, "installer.tar.bz2")
+
+config = {
+ "log_name": "raptor",
+ "installer_path": INSTALLER_PATH,
+ "virtualenv_path": VENV_PATH,
+ "exes": exes,
+ "title": os.uname()[1].lower().split(".")[0],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": "/builds/worker/tooltool-cache",
+}
diff --git a/testing/mozharness/configs/raptor/linux_config.py b/testing/mozharness/configs/raptor/linux_config.py
new file mode 100644
index 0000000000..7fffe20bb6
--- /dev/null
+++ b/testing/mozharness/configs/raptor/linux_config.py
@@ -0,0 +1,24 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+VENV_PATH = "%s/build/venv" % os.getcwd()
+
+config = {
+ "log_name": "raptor",
+ "installer_path": "installer.exe",
+ "virtualenv_path": VENV_PATH,
+ "title": os.uname()[1].lower().split(".")[0],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install-chromium-distribution",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": "/builds/tooltool_cache",
+}
diff --git a/testing/mozharness/configs/raptor/mac_config.py b/testing/mozharness/configs/raptor/mac_config.py
new file mode 100644
index 0000000000..b3d8a0177f
--- /dev/null
+++ b/testing/mozharness/configs/raptor/mac_config.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+VENV_PATH = "%s/build/venv" % os.getcwd()
+
+config = {
+ "log_name": "raptor",
+ "installer_path": "installer.exe",
+ "virtualenv_path": VENV_PATH,
+ "title": os.uname()[1].lower().split(".")[0],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install-chromium-distribution",
+ "install",
+ "run-tests",
+ ],
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [],
+ "postflight_run_cmd_suites": [],
+ "tooltool_cache": "/builds/tooltool_cache",
+}
diff --git a/testing/mozharness/configs/raptor/windows_config.py b/testing/mozharness/configs/raptor/windows_config.py
new file mode 100644
index 0000000000..86d8e2269a
--- /dev/null
+++ b/testing/mozharness/configs/raptor/windows_config.py
@@ -0,0 +1,84 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import socket
+import sys
+
+PYTHON = sys.executable
+PYTHON_DLL = "c:/mozilla-build/python27/python27.dll"
+VENV_PATH = os.path.join(os.getcwd(), "build/venv")
+
+PYWIN32 = "pypiwin32==219"
+if sys.version_info > (3, 0):
+ PYWIN32 = "pywin32==300"
+
+config = {
+ "log_name": "raptor",
+ "installer_path": "installer.exe",
+ "virtualenv_path": VENV_PATH,
+ "virtualenv_modules": [PYWIN32, "raptor", "mozinstall"],
+ "exes": {
+ "python": PYTHON,
+ "easy_install": [
+ "%s/scripts/python" % VENV_PATH,
+ "%s/scripts/easy_install-2.7-script.py" % VENV_PATH,
+ ],
+ "mozinstall": [
+ "%s/scripts/python" % VENV_PATH,
+ "%s/scripts/mozinstall-script.py" % VENV_PATH,
+ ],
+ "hg": os.path.join(os.environ["PROGRAMFILES"], "Mercurial", "hg"),
+ "tooltool.py": [
+ PYTHON,
+ os.path.join(os.environ["MOZILLABUILD"], "tooltool.py"),
+ ],
+ },
+ "title": socket.gethostname().split(".")[0],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install-chromium-distribution",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": os.path.join("c:\\", "build", "tooltool_cache"),
+ "python3_manifest": {
+ "win32": "python3.manifest",
+ "win64": "python3_x64.manifest",
+ },
+ "env": {
+ # python3 requires the C runtime, which ships with the firefox installation; see bug 1361732
+ "PATH": "%(PATH)s;c:\\slave\\test\\build\\application\\firefox;"
+ },
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [
+ {
+ "name": "run mouse & screen adjustment script",
+ "cmd": [
+ sys.executable,
+ os.path.join(
+ os.getcwd(),
+ "mozharness",
+ "external_tools",
+ "mouse_and_screen_resolution.py",
+ ),
+ "--configuration-file",
+ os.path.join(
+ os.getcwd(),
+ "mozharness",
+ "external_tools",
+ "machine-configuration.json",
+ ),
+ "--platform",
+ "win10-hw",
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ }
+ ],
+}
diff --git a/testing/mozharness/configs/raptor/windows_vm_config.py b/testing/mozharness/configs/raptor/windows_vm_config.py
new file mode 100644
index 0000000000..6654da4388
--- /dev/null
+++ b/testing/mozharness/configs/raptor/windows_vm_config.py
@@ -0,0 +1,53 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import socket
+import sys
+
+PYTHON = sys.executable
+PYTHON_DLL = "c:/mozilla-build/python27/python27.dll"
+VENV_PATH = os.path.join(os.getcwd(), "build/venv")
+
+PYWIN32 = "pypiwin32==219"
+if sys.version_info > (3, 0):
+ PYWIN32 = "pywin32==300"
+
+config = {
+ "log_name": "raptor",
+ "installer_path": "installer.exe",
+ "virtualenv_path": VENV_PATH,
+ "virtualenv_modules": [PYWIN32, "raptor", "mozinstall"],
+ "exes": {
+ "python": PYTHON,
+ "easy_install": [
+ "%s/scripts/python" % VENV_PATH,
+ "%s/scripts/easy_install-2.7-script.py" % VENV_PATH,
+ ],
+ "mozinstall": [
+ "%s/scripts/python" % VENV_PATH,
+ "%s/scripts/mozinstall-script.py" % VENV_PATH,
+ ],
+ "hg": os.path.join(os.environ["PROGRAMFILES"], "Mercurial", "hg"),
+ },
+ "title": socket.gethostname().split(".")[0],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install-chromium-distribution",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": os.path.join("c:\\", "build", "tooltool_cache"),
+ "python3_manifest": {
+ "win32": "python3.manifest",
+ "win64": "python3_x64.manifest",
+ },
+ "env": {
+ # python3 requires the C runtime, which ships with the firefox installation; see bug 1361732
+ "PATH": "%(PATH)s;c:\\slave\\test\\build\\application\\firefox;"
+ },
+}
diff --git a/testing/mozharness/configs/releases/bouncer_firefox_beta.py b/testing/mozharness/configs/releases/bouncer_firefox_beta.py
new file mode 100644
index 0000000000..fa3ece116c
--- /dev/null
+++ b/testing/mozharness/configs/releases/bouncer_firefox_beta.py
@@ -0,0 +1,152 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# lint_ignore=E501
+config = {
+ "products": {
+ # for installers, stubs, msi (i.e. not updates) ...
+ # products containing "latest" are for www.mozilla.org via cron-bouncer-check
+ # products using versions are for release automation via release-bouncer-check-firefox
+ "installer": {
+ "product-name": "Firefox-%(version)s",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-latest": {
+ "product-name": "Firefox-beta-latest",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-ssl": {
+ "product-name": "Firefox-%(version)s-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-latest-ssl": {
+ "product-name": "Firefox-beta-latest-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "msi": {
+ "product-name": "Firefox-%(version)s-msi-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msi-latest": {
+ "product-name": "Firefox-beta-msi-latest-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msix": {
+ "product-name": "Firefox-%(version)s-msix-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msix-latest": {
+ "product-name": "Firefox-beta-msix-latest-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "pkg": {
+ "product-name": "Firefox-%(version)s-pkg-SSL",
+ "platforms": ["osx"],
+ },
+ "pkg-latest": {
+ "product-name": "Firefox-beta-pkg-latest-SSL",
+ "platforms": ["osx"],
+ },
+ "stub-installer": {
+ "product-name": "Firefox-%(version)s-stub",
+ "platforms": [
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "stub-installer-latest": {
+ "product-name": "Firefox-beta-stub",
+ "platforms": [
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "langpack": {
+ "product-name": "Firefox-%(version)s-langpack-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ ],
+ },
+ "langpack-latest": {
+ "product-name": "Firefox-beta-langpack-latest-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ ],
+ },
+ "complete-mar": {
+ "product-name": "Firefox-%(version)s-Complete",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ },
+ "partials": {
+ "releases-dir": {
+ "product-name": "Firefox-%(version)s-Partial-%(prev_version)s",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ },
+}
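Versioned product-name values rely on the same %-dict substitution; the
bouncer checks are assumed to expand them roughly like this (version strings
invented):

    # Sketch only: expansion of a versioned bouncer product name.
    name = "Firefox-%(version)s-Partial-%(prev_version)s"
    print(name % {"version": "110.0b9", "prev_version": "110.0b8"})
    # Firefox-110.0b9-Partial-110.0b8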
diff --git a/testing/mozharness/configs/releases/bouncer_firefox_devedition.py b/testing/mozharness/configs/releases/bouncer_firefox_devedition.py
new file mode 100644
index 0000000000..3b736dc74e
--- /dev/null
+++ b/testing/mozharness/configs/releases/bouncer_firefox_devedition.py
@@ -0,0 +1,124 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# lint_ignore=E501
+config = {
+ "products": {
+ # for installers, stubs, msi (i.e. not updates) ...
+ # products containing "latest" are for www.mozilla.org via cron-bouncer-check
+ # products using versions are for release automation via release-bouncer-check-firefox
+ "installer": {
+ "product-name": "Devedition-%(version)s",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-latest": {
+ "product-name": "Firefox-devedition-latest",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-ssl": {
+ "product-name": "Devedition-%(version)s-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-latest-ssl": {
+ "product-name": "Firefox-devedition-latest-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "msi": {
+ "product-name": "Devedition-%(version)s-msi-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msi-latest": {
+ "product-name": "Firefox-devedition-msi-latest-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msix": {
+ "product-name": "Devedition-%(version)s-msix-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msix-latest": {
+ "product-name": "Firefox-devedition-msix-latest-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "stub-installer": {
+ "product-name": "Devedition-%(version)s-stub",
+ "platforms": [
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "stub-installer-latest": {
+ "product-name": "Firefox-devedition-stub",
+ "platforms": [
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "complete-mar": {
+ "product-name": "Devedition-%(version)s-Complete",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ },
+ "partials": {
+ "releases-dir": {
+ "product-name": "Devedition-%(version)s-Partial-%(prev_version)s",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ },
+}
diff --git a/testing/mozharness/configs/releases/bouncer_firefox_esr.py b/testing/mozharness/configs/releases/bouncer_firefox_esr.py
new file mode 100644
index 0000000000..353fe35285
--- /dev/null
+++ b/testing/mozharness/configs/releases/bouncer_firefox_esr.py
@@ -0,0 +1,156 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# lint_ignore=E501
+config = {
+ "products": {
+ # for installers, stubs, msi (i.e. not updates) ...
+ # products containing "latest" are for www.mozilla.org via cron-bouncer-check
+ # products using versions are for release automation via release-bouncer-check-firefox
+ "installer": {
+ "product-name": "Firefox-%(version)s",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-latest": {
+ "product-name": "Firefox-esr-latest",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-next-latest": {
+ "product-name": "Firefox-esr-next-latest",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-ssl": {
+ "product-name": "Firefox-%(version)s-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-latest-ssl": {
+ "product-name": "Firefox-esr-latest-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-next-latest-ssl": {
+ "product-name": "Firefox-esr-next-latest-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "msi": {
+ "product-name": "Firefox-%(version)s-msi-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msi-latest": {
+ "product-name": "Firefox-esr-msi-latest-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msi-next-latest": {
+ "product-name": "Firefox-esr-next-msi-latest-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msix": {
+ "product-name": "Firefox-%(version)s-msix-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msix-latest": {
+ "product-name": "Firefox-esr-msix-latest-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msix-next-latest": {
+ "product-name": "Firefox-esr-next-msix-latest-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "pkg": {
+ "product-name": "Firefox-%(version)s-pkg-SSL",
+ "platforms": ["osx"],
+ },
+ "pkg-latest": {
+ "product-name": "Firefox-esr-pkg-latest-SSL",
+ "platforms": ["osx"],
+ },
+ "pkg-next-latest": {
+ "product-name": "Firefox-esr-next-pkg-latest-SSL",
+ "platforms": ["osx"],
+ },
+ "complete-mar": {
+ "product-name": "Firefox-%(version)s-Complete",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ },
+ "partials": {
+ "releases-dir": {
+ "product-name": "Firefox-%(version)s-Partial-%(prev_version)s",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ },
+}
diff --git a/testing/mozharness/configs/releases/bouncer_firefox_nightly.py b/testing/mozharness/configs/releases/bouncer_firefox_nightly.py
new file mode 100644
index 0000000000..09d52fe4a3
--- /dev/null
+++ b/testing/mozharness/configs/releases/bouncer_firefox_nightly.py
@@ -0,0 +1,80 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# lint_ignore=E501
+config = {
+ "products": {
+ "installer-latest": {
+ "product-name": "Firefox-nightly-latest",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-latest-ssl": {
+ "product-name": "Firefox-nightly-latest-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-latest-l10n-ssl": {
+ "product-name": "Firefox-nightly-latest-l10n-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "msi-latest": {
+ "product-name": "Firefox-nightly-msi-latest-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msi-latest-l10n": {
+ "product-name": "Firefox-nightly-msi-latest-l10n-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "stub-installer": {
+ "product-name": "Firefox-nightly-stub",
+ "platforms": [
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "stub-installer-l10n": {
+ "product-name": "Firefox-nightly-stub-l10n",
+ "platforms": [
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "pkg-latest": {
+ "product-name": "Firefox-nightly-pkg-latest-ssl",
+ "platforms": ["osx"],
+ },
+ "pkg-latest-l10n": {
+ "product-name": "Firefox-nightly-pkg-latest-l10n-ssl",
+ "platforms": ["osx"],
+ },
+ },
+}
diff --git a/testing/mozharness/configs/releases/bouncer_firefox_release.py b/testing/mozharness/configs/releases/bouncer_firefox_release.py
new file mode 100644
index 0000000000..0259e4ad57
--- /dev/null
+++ b/testing/mozharness/configs/releases/bouncer_firefox_release.py
@@ -0,0 +1,152 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# lint_ignore=E501
+config = {
+ "products": {
+        # For installers, stubs, and MSI packages (i.e. not updates):
+        # products containing "latest" are for www.mozilla.org via cron-bouncer-check;
+        # products using versions are for release automation via release-bouncer-check-firefox.
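+        # e.g. "Firefox-%(version)s" expands to "Firefox-99.0" when checking
+        # version 99.0 (illustrative version number).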
+ "installer": {
+ "product-name": "Firefox-%(version)s",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-latest": {
+ "product-name": "Firefox-latest",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-ssl": {
+ "product-name": "Firefox-%(version)s-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "installer-latest-ssl": {
+ "product-name": "Firefox-latest-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "msi": {
+ "product-name": "Firefox-%(version)s-msi-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msi-latest": {
+ "product-name": "Firefox-msi-latest-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msix": {
+ "product-name": "Firefox-%(version)s-msix-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "msix-latest": {
+ "product-name": "Firefox-msix-latest-SSL",
+ "platforms": [
+ "win",
+ "win64",
+ ],
+ },
+ "langpack": {
+ "product-name": "Firefox-%(version)s-langpack-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ ],
+ },
+ "langpack-latest": {
+ "product-name": "Firefox-langpack-latest-SSL",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ ],
+ },
+ "stub-installer": {
+ "product-name": "Firefox-%(version)s-stub",
+ "platforms": [
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "stub-installer-latest": {
+ "product-name": "Firefox-stub",
+ "platforms": [
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ "pkg": {
+ "product-name": "Firefox-%(version)s-pkg-SSL",
+ "platforms": ["osx"],
+ },
+ "pkg-latest": {
+ "product-name": "Firefox-pkg-latest-SSL",
+ "platforms": ["osx"],
+ },
+ "complete-mar": {
+ "product-name": "Firefox-%(version)s-Complete",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ },
+ "partials": {
+ "releases-dir": {
+ "product-name": "Firefox-%(version)s-Partial-%(prev_version)s",
+ "platforms": [
+ "linux",
+ "linux64",
+ "osx",
+ "win",
+ "win64",
+ "win64-aarch64",
+ ],
+ },
+ },
+}
diff --git a/testing/mozharness/configs/releases/dev_postrelease_fennec_beta.py b/testing/mozharness/configs/releases/dev_postrelease_fennec_beta.py
new file mode 100644
index 0000000000..1b0038db75
--- /dev/null
+++ b/testing/mozharness/configs/releases/dev_postrelease_fennec_beta.py
@@ -0,0 +1,24 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ # maple is used for staging mozilla-beta
+ "log_name": "bump_maple",
+ "version_files": [{"file": "browser/config/version_display.txt"}],
+ "repo": {
+ # maple is used for staging mozilla-beta
+ "repo": "https://hg.mozilla.org/projects/maple",
+ "branch": "default",
+ "dest": "maple",
+ "vcs": "hg",
+ "clone_upstream_url": "https://hg.mozilla.org/mozilla-unified",
+ },
+ # maple is used for staging mozilla-beta
+ "push_dest": "ssh://hg.mozilla.org/projects/maple",
+ "ignore_no_changes": True,
+ "ssh_user": "ffxbld",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "ship_it_root": "https://ship-it-dev.allizom.org",
+ "ship_it_username": "ship_it-stage-ffxbld",
+}
diff --git a/testing/mozharness/configs/releases/dev_postrelease_fennec_release.py b/testing/mozharness/configs/releases/dev_postrelease_fennec_release.py
new file mode 100644
index 0000000000..5205ad41cc
--- /dev/null
+++ b/testing/mozharness/configs/releases/dev_postrelease_fennec_release.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "log_name": "bump_release_dev",
+ "version_files": [
+ {"file": "browser/config/version.txt"},
+ {"file": "browser/config/version_display.txt"},
+ {"file": "config/milestone.txt"},
+ ],
+ "repo": {
+ # jamun is used for staging mozilla-release
+ "repo": "https://hg.mozilla.org/projects/jamun",
+ "branch": "default",
+ "dest": "jamun",
+ "vcs": "hg",
+ "clone_upstream_url": "https://hg.mozilla.org/mozilla-unified",
+ },
+ "push_dest": "ssh://hg.mozilla.org/projects/jamun",
+ "ignore_no_changes": True,
+ "ssh_user": "ffxbld",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "ship_it_root": "https://ship-it-dev.allizom.org",
+ "ship_it_username": "ship_it-stage-ffxbld",
+}
diff --git a/testing/mozharness/configs/releases/dev_postrelease_firefox_beta.py b/testing/mozharness/configs/releases/dev_postrelease_firefox_beta.py
new file mode 100644
index 0000000000..ce7ff54bd5
--- /dev/null
+++ b/testing/mozharness/configs/releases/dev_postrelease_firefox_beta.py
@@ -0,0 +1,24 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+    # jamun is used for staging mozilla-beta
+ "log_name": "bump_date",
+ "version_files": [{"file": "browser/config/version_display.txt"}],
+ "repo": {
+        # jamun is used for staging mozilla-beta
+ "repo": "https://hg.mozilla.org/projects/jamun",
+ "branch": "default",
+ "dest": "jamun",
+ "vcs": "hg",
+ "clone_upstream_url": "https://hg.mozilla.org/mozilla-unified",
+ },
+    # jamun is used for staging mozilla-beta
+ "push_dest": "ssh://hg.mozilla.org/projects/jamun",
+ "ignore_no_changes": True,
+ "ssh_user": "ffxbld",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "ship_it_root": "https://ship-it-dev.allizom.org",
+ "ship_it_username": "ship_it-stage-ffxbld",
+}
diff --git a/testing/mozharness/configs/releases/dev_postrelease_firefox_release.py b/testing/mozharness/configs/releases/dev_postrelease_firefox_release.py
new file mode 100644
index 0000000000..5205ad41cc
--- /dev/null
+++ b/testing/mozharness/configs/releases/dev_postrelease_firefox_release.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "log_name": "bump_release_dev",
+ "version_files": [
+ {"file": "browser/config/version.txt"},
+ {"file": "browser/config/version_display.txt"},
+ {"file": "config/milestone.txt"},
+ ],
+ "repo": {
+ # jamun is used for staging mozilla-release
+ "repo": "https://hg.mozilla.org/projects/jamun",
+ "branch": "default",
+ "dest": "jamun",
+ "vcs": "hg",
+ "clone_upstream_url": "https://hg.mozilla.org/mozilla-unified",
+ },
+ "push_dest": "ssh://hg.mozilla.org/projects/jamun",
+ "ignore_no_changes": True,
+ "ssh_user": "ffxbld",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "ship_it_root": "https://ship-it-dev.allizom.org",
+ "ship_it_username": "ship_it-stage-ffxbld",
+}
diff --git a/testing/mozharness/configs/releases/dev_postrelease_firefox_release_birch.py b/testing/mozharness/configs/releases/dev_postrelease_firefox_release_birch.py
new file mode 100644
index 0000000000..47593625f2
--- /dev/null
+++ b/testing/mozharness/configs/releases/dev_postrelease_firefox_release_birch.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "log_name": "bump_release_dev",
+ "version_files": [
+ {"file": "browser/config/version.txt"},
+ {"file": "browser/config/version_display.txt"},
+ {"file": "config/milestone.txt"},
+ ],
+ "repo": {
+        # birch is used for staging mozilla-release
+ "repo": "https://hg.mozilla.org/projects/birch",
+ "branch": "default",
+ "dest": "birch",
+ "vcs": "hg",
+ "clone_upstream_url": "https://hg.mozilla.org/mozilla-unified",
+ },
+ "push_dest": "ssh://hg.mozilla.org/projects/birch",
+ "ignore_no_changes": True,
+ "ssh_user": "ffxbld",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "ship_it_root": "https://ship-it-dev.allizom.org",
+ "ship_it_username": "ship_it-stage-ffxbld",
+}
diff --git a/testing/mozharness/configs/releases/dev_updates_firefox_beta.py b/testing/mozharness/configs/releases/dev_updates_firefox_beta.py
new file mode 100644
index 0000000000..9c60d1e9ce
--- /dev/null
+++ b/testing/mozharness/configs/releases/dev_updates_firefox_beta.py
@@ -0,0 +1,42 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "log_name": "bump_beta_dev",
+ # TODO: use real repo
+ "repo": {
+ "repo": "https://hg.mozilla.org/users/stage-ffxbld/tools",
+ "branch": "default",
+ "dest": "tools",
+ "vcs": "hg",
+ },
+ "vcs_share_base": "/builds/hg-shared",
+ # TODO: use real repo
+ "push_dest": "ssh://hg.mozilla.org/users/stage-ffxbld/tools",
+ # jamun repo used for staging beta
+ "shipped-locales-url": "https://hg.mozilla.org/projects/jamun/raw-file/{revision}/browser/locales/shipped-locales",
+ "ignore_no_changes": True,
+ "ssh_user": "ffxbld",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "archive_domain": "ftp.stage.mozaws.net",
+ "archive_prefix": "https://ftp.stage.mozaws.net/pub",
+ "previous_archive_prefix": "https://archive.mozilla.org/pub",
+ "download_domain": "download.mozilla.org",
+ "balrog_url": "http://54.90.211.22:9090",
+ "balrog_username": "balrog-stage-ffxbld",
+ "update_channels": {
+ "beta": {
+ "version_regex": r"^(\d+\.\d+(b\d+)?)$",
+ "requires_mirrors": True,
+            # TODO: when we use a real repo, rename this file (s/mozJamun/mozBeta/)
+ "patcher_config": "mozJamun-branch-patcher2.cfg",
+ "update_verify_channel": "beta-localtest",
+ "mar_channel_ids": [],
+ "channel_names": ["beta", "beta-localtest", "beta-cdntest"],
+ "rules_to_update": ["firefox-beta-cdntest", "firefox-beta-localtest"],
+ "publish_rules": [32],
+ }
+ },
+ "balrog_use_dummy_suffix": False,
+}
diff --git a/testing/mozharness/configs/releases/dev_updates_firefox_devedition.py b/testing/mozharness/configs/releases/dev_updates_firefox_devedition.py
new file mode 100644
index 0000000000..2b2406be39
--- /dev/null
+++ b/testing/mozharness/configs/releases/dev_updates_firefox_devedition.py
@@ -0,0 +1,45 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "log_name": "updates_devedition",
+ # TODO: use real repo
+ "repo": {
+ "repo": "https://hg.mozilla.org/users/asasaki_mozilla.com/tools",
+ "branch": "default",
+ "dest": "tools",
+ "vcs": "hg",
+ },
+ "vcs_share_base": "/builds/hg-shared",
+ # TODO: use real repo
+ "push_dest": "ssh://hg.mozilla.org/users/asasaki_mozilla.com/tools",
+ # maple repo used for staging beta
+ "shipped-locales-url": "https://hg.mozilla.org/projects/maple/raw-file/{revision}/browser/locales/shipped-locales",
+ "ignore_no_changes": True,
+ "ssh_user": "ffxbld",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "archive_domain": "ftp.stage.mozaws.net",
+ "archive_prefix": "https://ftp.stage.mozaws.net/pub",
+ "previous_archive_prefix": "https://archive.mozilla.org/pub",
+ "download_domain": "download.mozilla.org",
+ "balrog_url": "https://stage.balrog.nonprod.cloudops.mozgcp.net/",
+ "balrog_username": "balrog-stage-ffxbld",
+ "update_channels": {
+ "aurora": {
+ "version_regex": r"^(\d+\.\d+(b\d+)?)$",
+ "requires_mirrors": True,
+            # TODO: when we use a real repo, rename this file accordingly
+ "patcher_config": "mozDevedition-branch-patcher2.cfg",
+ "patcher_config_product_override": "firefox",
+ "update_verify_channel": "aurora-localtest",
+ "mar_channel_ids": [],
+ "channel_names": ["aurora", "aurora-localtest", "aurora-cdntest"],
+ "rules_to_update": ["devedition-cdntest", "devedition-localtest"],
+ "publish_rules": [10],
+ }
+ },
+ "balrog_use_dummy_suffix": False,
+ "stage_product": "devedition",
+ "bouncer_product": "devedition",
+}
diff --git a/testing/mozharness/configs/releases/dev_updates_firefox_release.py b/testing/mozharness/configs/releases/dev_updates_firefox_release.py
new file mode 100644
index 0000000000..1413ae1031
--- /dev/null
+++ b/testing/mozharness/configs/releases/dev_updates_firefox_release.py
@@ -0,0 +1,55 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "log_name": "updates_release_dev",
+ # TODO: use real repo
+ "repo": {
+ "repo": "https://hg.mozilla.org/users/stage-ffxbld/tools",
+ "branch": "default",
+ "dest": "tools",
+ "vcs": "hg",
+ },
+ "vcs_share_base": "/builds/hg-shared",
+ # TODO: use real repo
+ "push_dest": "ssh://hg.mozilla.org/users/stage-ffxbld/tools",
+ # jamun repo used for staging release
+ "shipped-locales-url": "https://hg.mozilla.org/projects/jamun/raw-file/{revision}/browser/locales/shipped-locales",
+ "ignore_no_changes": True,
+ "ssh_user": "ffxbld",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "archive_domain": "ftp.stage.mozaws.net",
+ "archive_prefix": "https://ftp.stage.mozaws.net/pub",
+ "previous_archive_prefix": "https://archive.mozilla.org/pub",
+ "download_domain": "download.mozilla.org",
+ "balrog_url": "http://54.90.211.22:9090",
+ "balrog_username": "balrog-stage-ffxbld",
+ "update_channels": {
+ "beta": {
+ "version_regex": r"^(\d+\.\d+(b\d+)?)$",
+ "requires_mirrors": False,
+ "patcher_config": "mozDate-branch-patcher2.cfg",
+ "update_verify_channel": "beta-localtest",
+ "mar_channel_ids": [
+ "firefox-mozilla-beta",
+ "firefox-mozilla-release",
+ ],
+ "channel_names": ["beta", "beta-localtest", "beta-cdntest"],
+ "rules_to_update": ["firefox-beta-cdntest", "firefox-beta-localtest"],
+ "publish_rules": [32],
+ "schedule_asap": True,
+ },
+ "release": {
+ "version_regex": r"^\d+\.\d+(\.\d+)?$",
+ "requires_mirrors": True,
+ "patcher_config": "mozJamun-branch-patcher2.cfg",
+ "update_verify_channel": "release-localtest",
+ "mar_channel_ids": [],
+ "channel_names": ["release", "release-localtest", "release-cdntest"],
+ "rules_to_update": ["firefox-release-cdntest", "firefox-release-localtest"],
+ "publish_rules": [145],
+ },
+ },
+ "balrog_use_dummy_suffix": False,
+}
diff --git a/testing/mozharness/configs/releases/dev_updates_firefox_release_birch.py b/testing/mozharness/configs/releases/dev_updates_firefox_release_birch.py
new file mode 100644
index 0000000000..8ea3ebd76a
--- /dev/null
+++ b/testing/mozharness/configs/releases/dev_updates_firefox_release_birch.py
@@ -0,0 +1,55 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "log_name": "updates_release_dev",
+ # TODO: use real repo
+ "repo": {
+ "repo": "https://hg.mozilla.org/users/bhearsum_mozilla.com/tools",
+ "branch": "default",
+ "dest": "tools",
+ "vcs": "hg",
+ },
+ "vcs_share_base": "/builds/hg-shared",
+ # TODO: use real repo
+ "push_dest": "ssh://hg.mozilla.org/users/bhearsum_mozilla.com/tools",
+ # birch repo used for staging release
+ "shipped-locales-url": "https://hg.mozilla.org/projects/birch/raw-file/{revision}/browser/locales/shipped-locales",
+ "ignore_no_changes": True,
+ "ssh_user": "ffxbld",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "archive_domain": "ftp.stage.mozaws.net",
+ "archive_prefix": "https://ftp.stage.mozaws.net/pub",
+ "previous_archive_prefix": "https://archive.mozilla.org/pub",
+ "download_domain": "download.mozilla.org",
+ "balrog_url": "https://stage.balrog.nonprod.cloudops.mozgcp.net/",
+ "balrog_username": "balrog-stage-ffxbld",
+ "update_channels": {
+ "beta": {
+ "version_regex": r"^(\d+\.\d+(b\d+)?)$",
+ "requires_mirrors": False,
+ "patcher_config": "mozBeta-branch-patcher2.cfg",
+ "update_verify_channel": "beta-localtest",
+ "mar_channel_ids": [
+ "firefox-mozilla-beta",
+ "firefox-mozilla-release",
+ ],
+ "channel_names": ["beta", "beta-localtest", "beta-cdntest"],
+ "rules_to_update": ["firefox-beta-cdntest", "firefox-beta-localtest"],
+ "publish_rules": [32],
+ "schedule_asap": True,
+ },
+ "release": {
+ "version_regex": r"^\d+\.\d+(\.\d+)?$",
+ "requires_mirrors": True,
+ "patcher_config": "mozRelease-branch-patcher2.cfg",
+ "update_verify_channel": "release-localtest",
+ "mar_channel_ids": [],
+ "channel_names": ["release", "release-localtest", "release-cdntest"],
+ "rules_to_update": ["firefox-release-cdntest", "firefox-release-localtest"],
+ "publish_rules": [145],
+ },
+ },
+ "balrog_use_dummy_suffix": False,
+}
diff --git a/testing/mozharness/configs/releases/updates_firefox_beta.py b/testing/mozharness/configs/releases/updates_firefox_beta.py
new file mode 100644
index 0000000000..8e4bfa9e7d
--- /dev/null
+++ b/testing/mozharness/configs/releases/updates_firefox_beta.py
@@ -0,0 +1,38 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "log_name": "updates_beta",
+ "repo": {
+ "repo": "https://hg.mozilla.org/build/tools",
+ "branch": "default",
+ "dest": "tools",
+ "vcs": "hg",
+ },
+ "vcs_share_base": "/builds/hg-shared",
+ "push_dest": "ssh://hg.mozilla.org/build/tools",
+ "shipped-locales-url": "https://hg.mozilla.org/releases/mozilla-beta/raw-file/{revision}/browser/locales/shipped-locales",
+ "ignore_no_changes": True,
+ "ssh_user": "ffxbld",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "archive_domain": "archive.mozilla.org",
+ "archive_prefix": "https://archive.mozilla.org/pub",
+ "previous_archive_prefix": "https://archive.mozilla.org/pub",
+ "download_domain": "download.mozilla.org",
+ "balrog_url": "https://aus5.mozilla.org",
+ "balrog_username": "balrog-ffxbld",
+ "update_channels": {
+ "beta": {
+ "version_regex": r"^(\d+\.\d+(b\d+)?)$",
+ "requires_mirrors": True,
+ "patcher_config": "mozBeta-branch-patcher2.cfg",
+ "update_verify_channel": "beta-localtest",
+ "mar_channel_ids": [],
+ "channel_names": ["beta", "beta-localtest", "beta-cdntest"],
+ "rules_to_update": ["firefox-beta-cdntest", "firefox-beta-localtest"],
+ "publish_rules": [32],
+ },
+ },
+ "balrog_use_dummy_suffix": False,
+}
diff --git a/testing/mozharness/configs/releases/updates_firefox_devedition.py b/testing/mozharness/configs/releases/updates_firefox_devedition.py
new file mode 100644
index 0000000000..f09456b624
--- /dev/null
+++ b/testing/mozharness/configs/releases/updates_firefox_devedition.py
@@ -0,0 +1,42 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "log_name": "updates_devedition",
+ "repo": {
+ "repo": "https://hg.mozilla.org/build/tools",
+ "branch": "default",
+ "dest": "tools",
+ "vcs": "hg",
+ },
+ "vcs_share_base": "/builds/hg-shared",
+ "push_dest": "ssh://hg.mozilla.org/build/tools",
+ "shipped-locales-url": "https://hg.mozilla.org/releases/mozilla-beta/raw-file/{revision}/browser/locales/shipped-locales",
+ "ignore_no_changes": True,
+ "ssh_user": "ffxbld",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "archive_domain": "archive.mozilla.org",
+ "archive_prefix": "https://archive.mozilla.org/pub",
+ "previous_archive_prefix": "https://archive.mozilla.org/pub",
+ "download_domain": "download.mozilla.org",
+ "balrog_url": "https://aus5.mozilla.org",
+ "balrog_username": "balrog-ffxbld",
+ "update_channels": {
+ "aurora": {
+ "version_regex": r"^.*$",
+ "requires_mirrors": True,
+ "patcher_config": "mozDevedition-branch-patcher2.cfg",
+            # Allows overriding the patcher config product name
+ "patcher_config_product_override": "firefox",
+ "update_verify_channel": "aurora-localtest",
+ "mar_channel_ids": [],
+ "channel_names": ["aurora", "aurora-localtest", "aurora-cdntest"],
+ "rules_to_update": ["devedition-cdntest", "devedition-localtest"],
+ "publish_rules": [10],
+ },
+ },
+ "balrog_use_dummy_suffix": False,
+ "stage_product": "devedition",
+ "bouncer_product": "devedition",
+}
diff --git a/testing/mozharness/configs/releases/updates_firefox_release.py b/testing/mozharness/configs/releases/updates_firefox_release.py
new file mode 100644
index 0000000000..969b4efadc
--- /dev/null
+++ b/testing/mozharness/configs/releases/updates_firefox_release.py
@@ -0,0 +1,52 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "log_name": "updates_release",
+ "repo": {
+ "repo": "https://hg.mozilla.org/build/tools",
+ "branch": "default",
+ "dest": "tools",
+ "vcs": "hg",
+ },
+ "vcs_share_base": "/builds/hg-shared",
+ "push_dest": "ssh://hg.mozilla.org/build/tools",
+ "shipped-locales-url": "https://hg.mozilla.org/releases/mozilla-release/raw-file/{revision}/browser/locales/shipped-locales",
+ "ignore_no_changes": True,
+ "ssh_user": "ffxbld",
+ "ssh_key": "~/.ssh/ffxbld_rsa",
+ "archive_domain": "archive.mozilla.org",
+ "archive_prefix": "https://archive.mozilla.org/pub",
+ "previous_archive_prefix": "https://archive.mozilla.org/pub",
+ "download_domain": "download.mozilla.org",
+ "balrog_url": "https://aus5.mozilla.org",
+ "balrog_username": "balrog-ffxbld",
+ "update_channels": {
+ "beta": {
+ "version_regex": r"^(\d+\.\d+(b\d+)?)$",
+ "requires_mirrors": False,
+ "patcher_config": "mozBeta-branch-patcher2.cfg",
+ "update_verify_channel": "beta-localtest",
+ "mar_channel_ids": [
+ "firefox-mozilla-beta",
+ "firefox-mozilla-release",
+ ],
+ "channel_names": ["beta", "beta-localtest", "beta-cdntest"],
+ "rules_to_update": ["firefox-beta-cdntest", "firefox-beta-localtest"],
+ "publish_rules": [32],
+ "schedule_asap": True,
+ },
+ "release": {
+ "version_regex": r"^\d+\.\d+(\.\d+)?$",
+ "requires_mirrors": True,
+ "patcher_config": "mozRelease-branch-patcher2.cfg",
+ "update_verify_channel": "release-localtest",
+ "mar_channel_ids": [],
+ "channel_names": ["release", "release-localtest", "release-cdntest"],
+ "rules_to_update": ["firefox-release-cdntest", "firefox-release-localtest"],
+ "publish_rules": [145],
+ },
+ },
+ "balrog_use_dummy_suffix": False,
+}
diff --git a/testing/mozharness/configs/remove_executables.py b/testing/mozharness/configs/remove_executables.py
new file mode 100644
index 0000000000..1377c5825c
--- /dev/null
+++ b/testing/mozharness/configs/remove_executables.py
@@ -0,0 +1,7 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "exes": {},
+}
diff --git a/testing/mozharness/configs/repackage/base.py b/testing/mozharness/configs/repackage/base.py
new file mode 100644
index 0000000000..517ddbb2d7
--- /dev/null
+++ b/testing/mozharness/configs/repackage/base.py
@@ -0,0 +1,13 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "package-name": "firefox",
+ "installer-tag": "browser/installer/windows/app.tag",
+ "stub-installer-tag": "browser/installer/windows/stub.tag",
+ "wsx-stub": "browser/installer/windows/msi/installer.wxs",
+ "fetch-dir": os.environ.get("MOZ_FETCHES_DIR"),
+}
diff --git a/testing/mozharness/configs/repackage/linux32_signed.py b/testing/mozharness/configs/repackage/linux32_signed.py
new file mode 100644
index 0000000000..adbed9537b
--- /dev/null
+++ b/testing/mozharness/configs/repackage/linux32_signed.py
@@ -0,0 +1,14 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+platform = "linux32"
+
+config = {
+ "locale": os.environ.get("LOCALE"),
+ # ToolTool
+ "tooltool_cache": os.environ.get("TOOLTOOL_CACHE"),
+ "run_configure": False,
+}
diff --git a/testing/mozharness/configs/repackage/linux64_signed.py b/testing/mozharness/configs/repackage/linux64_signed.py
new file mode 100644
index 0000000000..d1431456c0
--- /dev/null
+++ b/testing/mozharness/configs/repackage/linux64_signed.py
@@ -0,0 +1,14 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+platform = "linux64"
+
+config = {
+ "locale": os.environ.get("LOCALE"),
+ # ToolTool
+ "tooltool_cache": os.environ.get("TOOLTOOL_CACHE"),
+ "run_configure": False,
+}
diff --git a/testing/mozharness/configs/repackage/osx_partner.py b/testing/mozharness/configs/repackage/osx_partner.py
new file mode 100644
index 0000000000..e8e93c5e8f
--- /dev/null
+++ b/testing/mozharness/configs/repackage/osx_partner.py
@@ -0,0 +1,12 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "src_mozconfig": "browser/config/mozconfigs/macosx64/repack",
+ "repack_id": os.environ.get("REPACK_ID"),
+ # ToolTool
+ "tooltool_cache": os.environ.get("TOOLTOOL_CACHE"),
+}
diff --git a/testing/mozharness/configs/repackage/osx_signed.py b/testing/mozharness/configs/repackage/osx_signed.py
new file mode 100644
index 0000000000..3969ebc70a
--- /dev/null
+++ b/testing/mozharness/configs/repackage/osx_signed.py
@@ -0,0 +1,13 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "src_mozconfig": "browser/config/mozconfigs/macosx64/repack",
+ "locale": os.environ.get("LOCALE"),
+ # ToolTool
+ "tooltool_cache": os.environ.get("TOOLTOOL_CACHE"),
+ "run_configure": False,
+}
diff --git a/testing/mozharness/configs/repackage/win32_partner.py b/testing/mozharness/configs/repackage/win32_partner.py
new file mode 100644
index 0000000000..375f949c3a
--- /dev/null
+++ b/testing/mozharness/configs/repackage/win32_partner.py
@@ -0,0 +1,15 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+platform = "win32"
+
+config = {
+ "repack_id": os.environ.get("REPACK_ID"),
+ "run_configure": False,
+ "env": {
+ "PATH": "%(abs_input_dir)s/upx/bin:%(PATH)s",
+ },
+}
diff --git a/testing/mozharness/configs/repackage/win32_sfx_stub.py b/testing/mozharness/configs/repackage/win32_sfx_stub.py
new file mode 100644
index 0000000000..7b098b7281
--- /dev/null
+++ b/testing/mozharness/configs/repackage/win32_sfx_stub.py
@@ -0,0 +1,7 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "sfx-stub": "other-licenses/7zstub/firefox/7zSD.Win32.sfx",
+}
diff --git a/testing/mozharness/configs/repackage/win32_signed.py b/testing/mozharness/configs/repackage/win32_signed.py
new file mode 100644
index 0000000000..1095a8ff01
--- /dev/null
+++ b/testing/mozharness/configs/repackage/win32_signed.py
@@ -0,0 +1,15 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+platform = "win32"
+
+config = {
+ "locale": os.environ.get("LOCALE"),
+ "run_configure": False,
+ "env": {
+ "PATH": "%(abs_input_dir)s/upx/bin:%(PATH)s",
+ },
+}
diff --git a/testing/mozharness/configs/repackage/win64-aarch64_sfx_stub.py b/testing/mozharness/configs/repackage/win64-aarch64_sfx_stub.py
new file mode 100644
index 0000000000..0d8634d3a9
--- /dev/null
+++ b/testing/mozharness/configs/repackage/win64-aarch64_sfx_stub.py
@@ -0,0 +1,7 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "sfx-stub": "other-licenses/7zstub/firefox/7zSD.ARM64.sfx",
+}
diff --git a/testing/mozharness/configs/repackage/win64_partner.py b/testing/mozharness/configs/repackage/win64_partner.py
new file mode 100644
index 0000000000..d5aa55e728
--- /dev/null
+++ b/testing/mozharness/configs/repackage/win64_partner.py
@@ -0,0 +1,15 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+platform = "win64"
+
+config = {
+ "repack_id": os.environ.get("REPACK_ID"),
+ "run_configure": False,
+ "env": {
+ "PATH": "%(abs_input_dir)s/upx/bin:%(PATH)s",
+ },
+}
diff --git a/testing/mozharness/configs/repackage/win64_signed.py b/testing/mozharness/configs/repackage/win64_signed.py
new file mode 100644
index 0000000000..3b0a23ef04
--- /dev/null
+++ b/testing/mozharness/configs/repackage/win64_signed.py
@@ -0,0 +1,15 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+platform = "win64"
+
+config = {
+ "locale": os.environ.get("LOCALE"),
+ "run_configure": False,
+ "env": {
+ "PATH": "%(abs_input_dir)s/upx/bin:%(PATH)s",
+ },
+}
diff --git a/testing/mozharness/configs/servo/mac.py b/testing/mozharness/configs/servo/mac.py
new file mode 100644
index 0000000000..6058abdc20
--- /dev/null
+++ b/testing/mozharness/configs/servo/mac.py
@@ -0,0 +1,7 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "concurrency": 6,
+}
diff --git a/testing/mozharness/configs/single_locale/devedition.py b/testing/mozharness/configs/single_locale/devedition.py
new file mode 100644
index 0000000000..aef519b9d1
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/devedition.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "app_name": "browser",
+ "mozconfig_variant": "l10n-mozconfig-devedition",
+ "locales_dir": "browser/locales",
+}
diff --git a/testing/mozharness/configs/single_locale/firefox.py b/testing/mozharness/configs/single_locale/firefox.py
new file mode 100644
index 0000000000..c679ecebba
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/firefox.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "app_name": "browser",
+ "mozconfig_variant": "l10n-mozconfig",
+ "locales_dir": "browser/locales",
+}
diff --git a/testing/mozharness/configs/single_locale/linux32.py b/testing/mozharness/configs/single_locale/linux32.py
new file mode 100644
index 0000000000..1a1d073862
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/linux32.py
@@ -0,0 +1,11 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "mozconfig_platform": "linux32",
+ "log_name": "single_locale",
+ "vcs_share_base": "/builds/hg-shared",
+ # l10n
+ "ignore_locales": ["en-US", "ja-JP-mac"],
+}
diff --git a/testing/mozharness/configs/single_locale/linux64.py b/testing/mozharness/configs/single_locale/linux64.py
new file mode 100644
index 0000000000..d54a67b857
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/linux64.py
@@ -0,0 +1,10 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "mozconfig_platform": "linux64",
+ "vcs_share_base": "/builds/hg-shared",
+ # l10n
+ "ignore_locales": ["en-US", "ja-JP-mac"],
+}
diff --git a/testing/mozharness/configs/single_locale/macosx64.py b/testing/mozharness/configs/single_locale/macosx64.py
new file mode 100644
index 0000000000..07c1f07221
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/macosx64.py
@@ -0,0 +1,12 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "mozconfig_platform": "macosx64",
+ "upload_env_extra": {
+ "MOZ_PKG_PLATFORM": "mac",
+ },
+ # l10n
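+    # macOS builds ship the ja-JP-mac locale in place of ja, so plain ja is skipped.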
+ "ignore_locales": ["en-US", "ja"],
+}
diff --git a/testing/mozharness/configs/single_locale/tc_android-arm.py b/testing/mozharness/configs/single_locale/tc_android-arm.py
new file mode 100644
index 0000000000..85a8ff45f5
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/tc_android-arm.py
@@ -0,0 +1,35 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "locales_dir": "mobile/android/locales",
+ "ignore_locales": ["en-US"],
+ "repack_env": {
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ },
+ "vcs_share_base": "/builds/hg-shared",
+ "mozconfig": "src/mobile/android/config/mozconfigs/android-arm/l10n-nightly",
+ "tooltool_config": {
+ "manifest": "mobile/android/config/tooltool-manifests/android/releng.manifest",
+ "output_dir": "%(abs_work_dir)s/src",
+ },
+ "secret_files": [
+ {
+ "filename": "/builds/gls-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/gls-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/sb-gapi.data",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/sb-gapi.data",
+ "min_scm_level": 1,
+ },
+ {
+ "filename": "/builds/mozilla-fennec-geoloc-api.key",
+ "secret_name": "project/releng/gecko/build/level-%(scm-level)s/mozilla-fennec-geoloc-api.key",
+ "min_scm_level": 2,
+ "default": "try-build-has-no-secrets",
+ },
+ ],
+}
diff --git a/testing/mozharness/configs/single_locale/tc_common.py b/testing/mozharness/configs/single_locale/tc_common.py
new file mode 100644
index 0000000000..029283b163
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/tc_common.py
@@ -0,0 +1,11 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "simple_name_move": True,
+ "vcs_share_base": "/builds/hg-shared",
+ "upload_env": {
+ "UPLOAD_PATH": "/builds/worker/artifacts/",
+ },
+}
diff --git a/testing/mozharness/configs/single_locale/tc_linux32.py b/testing/mozharness/configs/single_locale/tc_linux32.py
new file mode 100644
index 0000000000..4ae7528473
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/tc_linux32.py
@@ -0,0 +1,16 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "bootstrap_env": {
+ "NO_MERCURIAL_SETUP_CHECK": "1",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "EN_US_BINARY_URL": os.environ["EN_US_BINARY_URL"],
+ "DIST": "%(abs_obj_dir)s",
+ "L10NBASEDIR": "../../l10n",
+ "TOOLTOOL_CACHE": os.environ.get("TOOLTOOL_CACHE"),
+ },
+}
diff --git a/testing/mozharness/configs/single_locale/tc_linux_common.py b/testing/mozharness/configs/single_locale/tc_linux_common.py
new file mode 100644
index 0000000000..4ae7528473
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/tc_linux_common.py
@@ -0,0 +1,16 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "bootstrap_env": {
+ "NO_MERCURIAL_SETUP_CHECK": "1",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "EN_US_BINARY_URL": os.environ["EN_US_BINARY_URL"],
+ "DIST": "%(abs_obj_dir)s",
+ "L10NBASEDIR": "../../l10n",
+ "TOOLTOOL_CACHE": os.environ.get("TOOLTOOL_CACHE"),
+ },
+}
diff --git a/testing/mozharness/configs/single_locale/tc_macosx64.py b/testing/mozharness/configs/single_locale/tc_macosx64.py
new file mode 100644
index 0000000000..4ae7528473
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/tc_macosx64.py
@@ -0,0 +1,16 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "bootstrap_env": {
+ "NO_MERCURIAL_SETUP_CHECK": "1",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "EN_US_BINARY_URL": os.environ["EN_US_BINARY_URL"],
+ "DIST": "%(abs_obj_dir)s",
+ "L10NBASEDIR": "../../l10n",
+ "TOOLTOOL_CACHE": os.environ.get("TOOLTOOL_CACHE"),
+ },
+}
diff --git a/testing/mozharness/configs/single_locale/tc_win32.py b/testing/mozharness/configs/single_locale/tc_win32.py
new file mode 100644
index 0000000000..34bcde89bc
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/tc_win32.py
@@ -0,0 +1,17 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "bootstrap_env": {
+ "NO_MERCURIAL_SETUP_CHECK": "1",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "EN_US_BINARY_URL": os.environ["EN_US_BINARY_URL"],
+ "DIST": "%(abs_obj_dir)s",
+ "L10NBASEDIR": "../../l10n",
+ "TOOLTOOL_CACHE": os.environ.get("TOOLTOOL_CACHE"),
+ "EN_US_PACKAGE_NAME": "target.zip",
+ },
+}
diff --git a/testing/mozharness/configs/single_locale/tc_win64.py b/testing/mozharness/configs/single_locale/tc_win64.py
new file mode 100644
index 0000000000..34bcde89bc
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/tc_win64.py
@@ -0,0 +1,17 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+config = {
+ "bootstrap_env": {
+ "NO_MERCURIAL_SETUP_CHECK": "1",
+ "MOZ_OBJDIR": "%(abs_obj_dir)s",
+ "EN_US_BINARY_URL": os.environ["EN_US_BINARY_URL"],
+ "DIST": "%(abs_obj_dir)s",
+ "L10NBASEDIR": "../../l10n",
+ "TOOLTOOL_CACHE": os.environ.get("TOOLTOOL_CACHE"),
+ "EN_US_PACKAGE_NAME": "target.zip",
+ },
+}
diff --git a/testing/mozharness/configs/single_locale/win32.py b/testing/mozharness/configs/single_locale/win32.py
new file mode 100644
index 0000000000..e00275792c
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/win32.py
@@ -0,0 +1,10 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "mozconfig_platform": "win32",
+ "log_name": "single_locale",
+ # l10n
+ "ignore_locales": ["en-US", "ja-JP-mac"],
+}
diff --git a/testing/mozharness/configs/single_locale/win64-aarch64.py b/testing/mozharness/configs/single_locale/win64-aarch64.py
new file mode 100644
index 0000000000..5846f01b89
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/win64-aarch64.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "mozconfig_platform": "win64-aarch64",
+ # l10n
+ "ignore_locales": ["en-US", "ja-JP-mac"],
+}
diff --git a/testing/mozharness/configs/single_locale/win64.py b/testing/mozharness/configs/single_locale/win64.py
new file mode 100644
index 0000000000..a3061add60
--- /dev/null
+++ b/testing/mozharness/configs/single_locale/win64.py
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "mozconfig_platform": "win64",
+ # l10n
+ "ignore_locales": ["en-US", "ja-JP-mac"],
+}
diff --git a/testing/mozharness/configs/talos/linux64_config_taskcluster.py b/testing/mozharness/configs/talos/linux64_config_taskcluster.py
new file mode 100644
index 0000000000..4fe2861d8f
--- /dev/null
+++ b/testing/mozharness/configs/talos/linux64_config_taskcluster.py
@@ -0,0 +1,32 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import sys
+
+PYTHON = sys.executable
+VENV_PATH = "%s/build/venv" % os.getcwd()
+
+exes = {
+ "python": PYTHON,
+}
+ABS_WORK_DIR = os.path.join(os.getcwd(), "build")
+INSTALLER_PATH = os.path.join(ABS_WORK_DIR, "installer.tar.bz2")
+
+config = {
+ "log_name": "talos",
+ "installer_path": INSTALLER_PATH,
+ "virtualenv_path": VENV_PATH,
+ "exes": exes,
+ "title": os.uname()[1].lower().split(".")[0],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": "/builds/worker/tooltool-cache",
+}
diff --git a/testing/mozharness/configs/talos/linux_config.py b/testing/mozharness/configs/talos/linux_config.py
new file mode 100644
index 0000000000..c26fa4f8e5
--- /dev/null
+++ b/testing/mozharness/configs/talos/linux_config.py
@@ -0,0 +1,23 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+VENV_PATH = "%s/build/venv" % os.getcwd()
+
+config = {
+ "log_name": "talos",
+ "installer_path": "installer.exe",
+ "virtualenv_path": VENV_PATH,
+ "title": os.uname()[1].lower().split(".")[0],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": "/builds/tooltool_cache",
+}
diff --git a/testing/mozharness/configs/talos/mac_config.py b/testing/mozharness/configs/talos/mac_config.py
new file mode 100644
index 0000000000..a8a5f9b87d
--- /dev/null
+++ b/testing/mozharness/configs/talos/mac_config.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+VENV_PATH = "%s/build/venv" % os.getcwd()
+
+config = {
+ "log_name": "talos",
+ "installer_path": "installer.exe",
+ "virtualenv_path": VENV_PATH,
+ "title": os.uname()[1].lower().split(".")[0],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [],
+ "postflight_run_cmd_suites": [],
+ "tooltool_cache": "/builds/tooltool_cache",
+}
diff --git a/testing/mozharness/configs/talos/windows_config.py b/testing/mozharness/configs/talos/windows_config.py
new file mode 100644
index 0000000000..962146386a
--- /dev/null
+++ b/testing/mozharness/configs/talos/windows_config.py
@@ -0,0 +1,62 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import socket
+import sys
+
+PYTHON = sys.executable
+PYTHON_DLL = "c:/mozilla-build/python27/python27.dll"
+VENV_PATH = os.path.join(os.getcwd(), "build/venv")
+
+config = {
+ "log_name": "talos",
+ "installer_path": "installer.exe",
+ "virtualenv_path": VENV_PATH,
+ "exes": {
+ "python": PYTHON,
+ "hg": os.path.join(os.environ["PROGRAMFILES"], "Mercurial", "hg"),
+ "tooltool.py": [
+ PYTHON,
+ os.path.join(os.environ["MOZILLABUILD"], "tooltool.py"),
+ ],
+ },
+ "title": socket.gethostname().split(".")[0],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": os.path.join("c:\\", "build", "tooltool_cache"),
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [
+ {
+ "name": "run mouse & screen adjustment script",
+ "cmd": [
+ sys.executable,
+ os.path.join(
+ os.getcwd(),
+ "mozharness",
+ "external_tools",
+ "mouse_and_screen_resolution.py",
+ ),
+ "--configuration-file",
+ os.path.join(
+ os.getcwd(),
+ "mozharness",
+ "external_tools",
+ "machine-configuration.json",
+ ),
+ "--platform",
+ "win10-hw",
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ }
+ ],
+}
diff --git a/testing/mozharness/configs/talos/windows_taskcluster_config.py b/testing/mozharness/configs/talos/windows_taskcluster_config.py
new file mode 100644
index 0000000000..97e67b4513
--- /dev/null
+++ b/testing/mozharness/configs/talos/windows_taskcluster_config.py
@@ -0,0 +1,29 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import socket
+import sys
+
+PYTHON = sys.executable
+PYTHON_DLL = "c:/mozilla-build/python/python27.dll"
+VENV_PATH = os.path.join(os.getcwd(), "venv")
+
+config = {
+ "log_name": "talos",
+ "installer_path": "installer.exe",
+ "virtualenv_path": VENV_PATH,
+ "exes": {
+ "python": PYTHON,
+ "hg": os.path.join(os.environ["PROGRAMFILES"], "Mercurial", "hg"),
+ },
+ "title": socket.gethostname().split(".")[0],
+ "default_actions": [
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": os.path.join("Y:\\", "tooltool-cache"),
+}
diff --git a/testing/mozharness/configs/talos/windows_vm_config.py b/testing/mozharness/configs/talos/windows_vm_config.py
new file mode 100644
index 0000000000..a5a7fa4f89
--- /dev/null
+++ b/testing/mozharness/configs/talos/windows_vm_config.py
@@ -0,0 +1,31 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import socket
+import sys
+
+PYTHON = sys.executable
+PYTHON_DLL = "c:/mozilla-build/python27/python27.dll"
+VENV_PATH = os.path.join(os.getcwd(), "build/venv")
+
+config = {
+ "log_name": "talos",
+ "installer_path": "installer.exe",
+ "virtualenv_path": VENV_PATH,
+ "exes": {
+ "python": PYTHON,
+ "hg": os.path.join(os.environ["PROGRAMFILES"], "Mercurial", "hg"),
+ },
+ "title": socket.gethostname().split(".")[0],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ "tooltool_cache": os.path.join("c:\\", "build", "tooltool_cache"),
+}
diff --git a/testing/mozharness/configs/taskcluster_nightly.py b/testing/mozharness/configs/taskcluster_nightly.py
new file mode 100644
index 0000000000..596bf2f94e
--- /dev/null
+++ b/testing/mozharness/configs/taskcluster_nightly.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+config = {
+ "nightly_build": True,
+ "taskcluster_nightly": True,
+}
diff --git a/testing/mozharness/configs/test/example_config1.json b/testing/mozharness/configs/test/example_config1.json
new file mode 100644
index 0000000000..ca73466ba5
--- /dev/null
+++ b/testing/mozharness/configs/test/example_config1.json
@@ -0,0 +1,5 @@
+{
+ "beverage": "fizzy drink",
+ "long_sleep_time": 1800,
+ "random_config_key1": "spectacular"
+}
diff --git a/testing/mozharness/configs/test/example_config2.py b/testing/mozharness/configs/test/example_config2.py
new file mode 100644
index 0000000000..958543b60e
--- /dev/null
+++ b/testing/mozharness/configs/test/example_config2.py
@@ -0,0 +1,5 @@
+config = {
+ "beverage": "cider",
+ "long_sleep_time": 300,
+ "random_config_key2": "wunderbar",
+}
diff --git a/testing/mozharness/configs/test/test.illegal_suffix b/testing/mozharness/configs/test/test.illegal_suffix
new file mode 100644
index 0000000000..7d9a4d96d6
--- /dev/null
+++ b/testing/mozharness/configs/test/test.illegal_suffix
@@ -0,0 +1,20 @@
+{
+ "log_name": "test",
+ "log_dir": "test_logs",
+ "log_to_console": false,
+ "key1": "value1",
+ "key2": "value2",
+ "section1": {
+
+ "subsection1": {
+ "key1": "value1",
+ "key2": "value2"
+ },
+
+ "subsection2": {
+ "key1": "value1",
+ "key2": "value2"
+ }
+
+ }
+}
diff --git a/testing/mozharness/configs/test/test.json b/testing/mozharness/configs/test/test.json
new file mode 100644
index 0000000000..7d9a4d96d6
--- /dev/null
+++ b/testing/mozharness/configs/test/test.json
@@ -0,0 +1,20 @@
+{
+ "log_name": "test",
+ "log_dir": "test_logs",
+ "log_to_console": false,
+ "key1": "value1",
+ "key2": "value2",
+ "section1": {
+
+ "subsection1": {
+ "key1": "value1",
+ "key2": "value2"
+ },
+
+ "subsection2": {
+ "key1": "value1",
+ "key2": "value2"
+ }
+
+ }
+}
diff --git a/testing/mozharness/configs/test/test.py b/testing/mozharness/configs/test/test.py
new file mode 100644
index 0000000000..d06c2f130a
--- /dev/null
+++ b/testing/mozharness/configs/test/test.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+config = {
+ "log_name": "test",
+ "log_dir": "test_logs",
+ "log_to_console": False,
+ "key1": "value1",
+ "key2": "value2",
+ "section1": {
+ "subsection1": {"key1": "value1", "key2": "value2"},
+ "subsection2": {"key1": "value1", "key2": "value2"},
+ },
+ "opt_override": "some stuff",
+}
diff --git a/testing/mozharness/configs/test/test_malformed.json b/testing/mozharness/configs/test/test_malformed.json
new file mode 100644
index 0000000000..260be45b88
--- /dev/null
+++ b/testing/mozharness/configs/test/test_malformed.json
@@ -0,0 +1,20 @@
+{
+ "log_name": "test",
+ "log_dir": "test_logs",
+ "log_to_console": false,
+ "key1": "value1",
+ "key2": "value2",
+ "section1": {
+
+ "subsection1": {
+ "key1": "value1",
+ "key2": "value2"
+ },
+
+ "subsection2": {
+ "key1": "value1",
+ "key2": "value2"
+ },
+
+ }
+}
diff --git a/testing/mozharness/configs/test/test_malformed.py b/testing/mozharness/configs/test/test_malformed.py
new file mode 100644
index 0000000000..e7ccefd15f
--- /dev/null
+++ b/testing/mozharness/configs/test/test_malformed.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+config = {
+ "log_name": "test",
+ "log_dir": "test_logs",
+ "log_to_console": False,
+ "key1": "value1",
+ "key2": "value2",
+ "section1": {
+
+ "subsection1": {
+ "key1": "value1",
+ "key2": "value2"
+ },
+
+a;sldkfjas;dfkljasdf;kjasdf;ljkadsflkjsdfkweoi
+ "subsection2": {
+ "key1": "value1",
+ "key2": "value2"
+ },
+
+ },
+}
diff --git a/testing/mozharness/configs/test/test_optional.py b/testing/mozharness/configs/test/test_optional.py
new file mode 100644
index 0000000000..4eb13b3dfb
--- /dev/null
+++ b/testing/mozharness/configs/test/test_optional.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+config = {
+ "opt_override": "new stuff",
+}
diff --git a/testing/mozharness/configs/test/test_override.py b/testing/mozharness/configs/test/test_override.py
new file mode 100644
index 0000000000..356207d547
--- /dev/null
+++ b/testing/mozharness/configs/test/test_override.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+config = {
+ "override_string": "TODO",
+ "override_list": ["to", "do"],
+ "override_dict": {"to": "do"},
+ "keep_string": "don't change me",
+}
diff --git a/testing/mozharness/configs/test/test_override2.py b/testing/mozharness/configs/test/test_override2.py
new file mode 100644
index 0000000000..561a170358
--- /dev/null
+++ b/testing/mozharness/configs/test/test_override2.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+config = {
+ "override_string": "yay",
+ "override_list": ["yay", "worked"],
+ "override_dict": {"yay": "worked"},
+}
diff --git a/testing/mozharness/configs/unittests/linux_unittest.py b/testing/mozharness/configs/unittests/linux_unittest.py
new file mode 100644
index 0000000000..c7a77c760b
--- /dev/null
+++ b/testing/mozharness/configs/unittests/linux_unittest.py
@@ -0,0 +1,292 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import platform
+
+# OS Specifics
+ABS_WORK_DIR = os.path.join(os.getcwd(), "build")
+BINARY_PATH = os.path.join(ABS_WORK_DIR, "application", "firefox", "firefox-bin")
+INSTALLER_PATH = os.path.join(ABS_WORK_DIR, "installer.tar.bz2")
+XPCSHELL_NAME = "xpcshell"
+HTTP3SERVER_NAME = "http3server"
+EXE_SUFFIX = ""
+DISABLE_SCREEN_SAVER = True
+ADJUST_MOUSE_AND_SCREEN = False
+
+# Note: keep these Valgrind .sup file names consistent with those
+# in testing/mochitest/mochitest_options.py.
+VALGRIND_SUPP_DIR = os.path.join(os.getcwd(), "build/tests/mochitest")
+NODEJS_PATH = None
+if "MOZ_FETCHES_DIR" in os.environ:
+ NODEJS_PATH = os.path.join(os.environ["MOZ_FETCHES_DIR"], "node/bin/node")
+
+VALGRIND_SUPP_CROSS_ARCH = os.path.join(VALGRIND_SUPP_DIR, "cross-architecture.sup")
+VALGRIND_SUPP_ARCH = None
+
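+# Pick the suppression file matching the build architecture; the
+# cross-architecture file is combined with it in mochitest-valgrind-plain below.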
+if platform.architecture()[0] == "64bit":
+ VALGRIND_SUPP_ARCH = os.path.join(VALGRIND_SUPP_DIR, "x86_64-pc-linux-gnu.sup")
+else:
+ VALGRIND_SUPP_ARCH = os.path.join(VALGRIND_SUPP_DIR, "i386-pc-linux-gnu.sup")
+
+#####
+config = {
+ ###
+ "virtualenv_modules": ["six==1.13.0", "vcversioner==2.16.0.0"],
+ "installer_path": INSTALLER_PATH,
+ "binary_path": BINARY_PATH,
+ "xpcshell_name": XPCSHELL_NAME,
+ "http3server_name": HTTP3SERVER_NAME,
+ "exe_suffix": EXE_SUFFIX,
+ "run_file_names": {
+ "mochitest": "runtests.py",
+ "reftest": "runreftest.py",
+ "xpcshell": "runxpcshelltests.py",
+ "cppunittest": "runcppunittests.py",
+ "gtest": "rungtests.py",
+ "jittest": "jit_test.py",
+ },
+ "minimum_tests_zip_dirs": [
+ "bin/*",
+ "certs/*",
+ "config/*",
+ "mach",
+ "marionette/*",
+ "modules/*",
+ "mozbase/*",
+ "tools/*",
+ "mozpack/*",
+ "mozbuild/*",
+ ],
+ "suite_definitions": {
+ "cppunittest": {
+ "options": [
+ "--symbols-path=%(symbols_path)s",
+ "--utility-path=tests/bin",
+ "--xre-path=%(abs_app_dir)s",
+ ],
+ "run_filename": "runcppunittests.py",
+ "testsdir": "cppunittest",
+ },
+ "jittest": {
+ "options": [
+ "tests/bin/js",
+ "--no-slow",
+ "--no-progress",
+ "--format=automation",
+ "--jitflags=all",
+ "--timeout=970", # Keep in sync with run_timeout below.
+ ],
+ "run_filename": "jit_test.py",
+ "testsdir": "jit-test/jit-test",
+ "run_timeout": 1000, # Keep in sync with --timeout above.
+ },
+ "mochitest": {
+ "options": [
+ "--appname=%(binary_path)s",
+ "--utility-path=tests/bin",
+ "--extra-profile-file=tests/bin/plugins",
+ "--symbols-path=%(symbols_path)s",
+ "--certificate-path=tests/certs",
+ "--setpref=webgl.force-enabled=true",
+ "--quiet",
+ "--log-raw=%(raw_log_file)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--use-test-media-devices",
+ "--screenshot-on-fail",
+ "--cleanup-crashes",
+ "--marionette-startup-timeout=180",
+ "--sandbox-read-whitelist=%(abs_work_dir)s",
+ ],
+ "run_filename": "runtests.py",
+ "testsdir": "mochitest",
+ },
+ "reftest": {
+ "options": [
+ "--appname=%(binary_path)s",
+ "--utility-path=tests/bin",
+ "--extra-profile-file=tests/bin/plugins",
+ "--symbols-path=%(symbols_path)s",
+ "--log-raw=%(raw_log_file)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--cleanup-crashes",
+ "--marionette-startup-timeout=180",
+ "--sandbox-read-whitelist=%(abs_work_dir)s",
+ ],
+ "run_filename": "runreftest.py",
+ "testsdir": "reftest",
+ },
+ "xpcshell": {
+ "options": [
+ "--self-test",
+ "--symbols-path=%(symbols_path)s",
+ "--log-raw=%(raw_log_file)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--utility-path=tests/bin",
+ ],
+ "run_filename": "runxpcshelltests.py",
+ "testsdir": "xpcshell",
+ },
+ "gtest": {
+ "options": [
+ "--xre-path=%(abs_res_dir)s",
+ "--cwd=%(gtest_dir)s",
+ "--symbols-path=%(symbols_path)s",
+ "--utility-path=tests/bin",
+ "%(binary_path)s",
+ ],
+ "run_filename": "rungtests.py",
+ },
+ },
+ # local mochitest suites
+ "all_mochitest_suites": {
+ "mochitest-valgrind-plain": [
+ "--valgrind=/usr/bin/valgrind",
+ "--valgrind-supp-files="
+ + VALGRIND_SUPP_ARCH
+ + ","
+ + VALGRIND_SUPP_CROSS_ARCH,
+ "--timeout=900",
+ "--max-timeouts=50",
+ ],
+ "mochitest-plain": ["--chunk-by-dir=4"],
+ "mochitest-plain-gpu": ["--subsuite=gpu"],
+ "mochitest-plain-coverage": ["--chunk-by-dir=4", "--timeout=1200"],
+ "mochitest-media": ["--subsuite=media"],
+ "mochitest-chrome": ["--flavor=chrome", "--chunk-by-dir=4", "--disable-e10s"],
+ "mochitest-chrome-gpu": ["--flavor=chrome", "--subsuite=gpu", "--disable-e10s"],
+ "mochitest-browser-chrome": ["--flavor=browser", "--chunk-by-runtime"],
+ "mochitest-browser-chrome-coverage": [
+ "--flavor=browser",
+ "--chunk-by-runtime",
+ "--timeout=1200",
+ ],
+ "mochitest-browser-chrome-screenshots": [
+ "--flavor=browser",
+ "--subsuite=screenshots",
+ ],
+ "mochitest-webgl1-core": ["--subsuite=webgl1-core"],
+ "mochitest-webgl1-ext": ["--subsuite=webgl1-ext"],
+ "mochitest-webgl2-core": ["--subsuite=webgl2-core"],
+ "mochitest-webgl2-ext": ["--subsuite=webgl2-ext"],
+ "mochitest-webgl2-deqp": ["--subsuite=webgl2-deqp"],
+ "mochitest-webgpu": ["--subsuite=webgpu"],
+ "mochitest-devtools-chrome": [
+ "--flavor=browser",
+ "--subsuite=devtools",
+ "--chunk-by-runtime",
+ ],
+ "mochitest-devtools-chrome-coverage": [
+ "--flavor=browser",
+ "--subsuite=devtools",
+ "--chunk-by-runtime",
+ "--timeout=1200",
+ ],
+ "mochitest-browser-a11y": ["--flavor=browser", "--subsuite=a11y"],
+ "mochitest-browser-media": ["--flavor=browser", "--subsuite=media-bc"],
+ "mochitest-a11y": ["--flavor=a11y", "--disable-e10s"],
+ "mochitest-remote": ["--flavor=browser", "--subsuite=remote"],
+ },
+ # local reftest suites
+ "all_reftest_suites": {
+ "crashtest": {
+ "options": ["--suite=crashtest", "--topsrcdir=tests/reftest/tests"],
+ "tests": ["tests/reftest/tests/testing/crashtest/crashtests.list"],
+ },
+ "jsreftest": {
+ "options": [
+ "--extra-profile-file=tests/jsreftest/tests/js/src/tests/user.js",
+ "--suite=jstestbrowser",
+ "--topsrcdir=tests/jsreftest/tests",
+ ],
+ "tests": ["tests/jsreftest/tests/js/src/tests/jstests.list"],
+ },
+ "reftest": {
+ "options": [
+ "--suite=reftest",
+ "--setpref=layers.acceleration.force-enabled=true",
+ "--topsrcdir=tests/reftest/tests",
+ ],
+ "tests": ["tests/reftest/tests/layout/reftests/reftest.list"],
+ },
+ "reftest-no-accel": {
+ "options": [
+ "--suite=reftest",
+ "--setpref=layers.acceleration.disabled=true",
+ "--topsrcdir=tests/reftest/tests",
+ ],
+ "tests": ["tests/reftest/tests/layout/reftests/reftest.list"],
+ },
+ "reftest-snapshot": {
+ "options": [
+ "--suite=reftest",
+ "--setpref=reftest.use-draw-snapshot=true",
+ "--topsrcdir=tests/reftest/tests",
+ ],
+ "tests": ["tests/reftest/tests/layout/reftests/reftest.list"],
+ },
+ },
+ "all_xpcshell_suites": {
+ "xpcshell": {
+ "options": [
+ "--xpcshell=%(abs_app_dir)s/" + XPCSHELL_NAME,
+ "--http3server=%(abs_app_dir)s/" + HTTP3SERVER_NAME,
+ "--manifest=tests/xpcshell/tests/xpcshell.ini",
+ ],
+ "tests": [],
+ },
+ "xpcshell-coverage": {
+ "options": [
+ "--xpcshell=%(abs_app_dir)s/" + XPCSHELL_NAME,
+ "--http3server=%(abs_app_dir)s/" + HTTP3SERVER_NAME,
+ "--manifest=tests/xpcshell/tests/xpcshell.ini",
+ "--sequential",
+ ],
+ "tests": [],
+ },
+ },
+ "all_cppunittest_suites": {"cppunittest": {"tests": ["tests/cppunittest"]}},
+ "all_gtest_suites": {"gtest": []},
+ "all_jittest_suites": {
+ "jittest": [],
+ "jittest1": ["--total-chunks=2", "--this-chunk=1"],
+ "jittest2": ["--total-chunks=2", "--this-chunk=2"],
+ },
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [
+ # NOTE 'enabled' is only here while we have unconsolidated configs
+ {
+ "name": "disable_screen_saver",
+ "cmd": ["xset", "s", "off", "s", "reset"],
+ "halt_on_failure": False,
+ "architectures": ["32bit", "64bit"],
+ "enabled": DISABLE_SCREEN_SAVER,
+ },
+ {
+ "name": "run mouse & screen adjustment script",
+ "cmd": [
+ # When configs are consolidated, this Python path will only be
+ # needed on Windows.
+ "python",
+ "../scripts/external_tools/mouse_and_screen_resolution.py",
+ "--configuration-file",
+ "../scripts/external_tools/machine-configuration.json",
+ ],
+ "architectures": ["32bit"],
+ "halt_on_failure": True,
+ "enabled": ADJUST_MOUSE_AND_SCREEN,
+ },
+ ],
+ "vcs_output_timeout": 1000,
+ "minidump_save_path": "%(abs_work_dir)s/../minidumps",
+ "unstructured_flavors": {
+ "xpcshell": [],
+ "gtest": [],
+ "cppunittest": [],
+ "jittest": [],
+ },
+ "tooltool_cache": "/builds/worker/tooltool-cache",
+ "nodejs_path": NODEJS_PATH,
+ # "log_format": "%(levelname)8s - %(message)s",
+}
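The %(...)s tokens in the suite options above are old-style Python format placeholders; the harness is expected to fill them with runtime paths before launching a suite. A hedged sketch of that substitution, using a subset of the xpcshell options — the concrete paths are illustrative, not what automation actually passes:

# Subset of the xpcshell "options" from the config above.
options = [
    "--symbols-path=%(symbols_path)s",
    "--log-raw=%(raw_log_file)s",
    "--utility-path=tests/bin",  # no placeholder; passes through unchanged
]
runtime = {
    "symbols_path": "/builds/worker/symbols",       # illustrative value
    "raw_log_file": "/builds/worker/logs/raw.log",  # illustrative value
}
cmd = [opt % runtime for opt in options]
# ['--symbols-path=/builds/worker/symbols',
#  '--log-raw=/builds/worker/logs/raw.log',
#  '--utility-path=tests/bin']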
diff --git a/testing/mozharness/configs/unittests/mac_unittest.py b/testing/mozharness/configs/unittests/mac_unittest.py
new file mode 100644
index 0000000000..466773ec33
--- /dev/null
+++ b/testing/mozharness/configs/unittests/mac_unittest.py
@@ -0,0 +1,231 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+# OS Specifics
+INSTALLER_PATH = os.path.join(os.getcwd(), "installer.dmg")
+NODEJS_PATH = None
+if "MOZ_FETCHES_DIR" in os.environ:
+ NODEJS_PATH = os.path.join(os.environ["MOZ_FETCHES_DIR"], "node/bin/node")
+
+XPCSHELL_NAME = "xpcshell"
+HTTP3SERVER_NAME = "http3server"
+EXE_SUFFIX = ""
+DISABLE_SCREEN_SAVER = False
+ADJUST_MOUSE_AND_SCREEN = False
+#####
+config = {
+ "virtualenv_modules": ["six==1.13.0", "vcversioner==2.16.0.0"],
+ ###
+ "installer_path": INSTALLER_PATH,
+ "xpcshell_name": XPCSHELL_NAME,
+ "http3server_name": HTTP3SERVER_NAME,
+ "exe_suffix": EXE_SUFFIX,
+ "run_file_names": {
+ "mochitest": "runtests.py",
+ "reftest": "runreftest.py",
+ "xpcshell": "runxpcshelltests.py",
+ "cppunittest": "runcppunittests.py",
+ "gtest": "rungtests.py",
+ "jittest": "jit_test.py",
+ },
+ "minimum_tests_zip_dirs": [
+ "bin/*",
+ "certs/*",
+ "config/*",
+ "mach",
+ "marionette/*",
+ "modules/*",
+ "mozbase/*",
+ "tools/*",
+ ],
+ "suite_definitions": {
+ "cppunittest": {
+ "options": [
+ "--symbols-path=%(symbols_path)s",
+ "--utility-path=tests/bin",
+ "--xre-path=%(abs_res_dir)s",
+ ],
+ "run_filename": "runcppunittests.py",
+ "testsdir": "cppunittest",
+ },
+ "jittest": {
+ "options": [
+ "tests/bin/js",
+ "--no-slow",
+ "--no-progress",
+ "--format=automation",
+ "--jitflags=all",
+ "--timeout=970", # Keep in sync with run_timeout below.
+ ],
+ "run_filename": "jit_test.py",
+ "testsdir": "jit-test/jit-test",
+ "run_timeout": 1000, # Keep in sync with --timeout above.
+ },
+ "mochitest": {
+ "options": [
+ "--appname=%(binary_path)s",
+ "--utility-path=tests/bin",
+ "--extra-profile-file=tests/bin/plugins",
+ "--symbols-path=%(symbols_path)s",
+ "--certificate-path=tests/certs",
+ "--quiet",
+ "--log-raw=%(raw_log_file)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--screenshot-on-fail",
+ "--cleanup-crashes",
+ "--marionette-startup-timeout=180",
+ "--sandbox-read-whitelist=%(abs_work_dir)s",
+ ],
+ "run_filename": "runtests.py",
+ "testsdir": "mochitest",
+ },
+ "reftest": {
+ "options": [
+ "--appname=%(binary_path)s",
+ "--utility-path=tests/bin",
+ "--extra-profile-file=tests/bin/plugins",
+ "--symbols-path=%(symbols_path)s",
+ "--log-raw=%(raw_log_file)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--cleanup-crashes",
+ "--marionette-startup-timeout=180",
+ "--sandbox-read-whitelist=%(abs_work_dir)s",
+ ],
+ "run_filename": "runreftest.py",
+ "testsdir": "reftest",
+ },
+ "xpcshell": {
+ "options": [
+ "--self-test",
+ "--symbols-path=%(symbols_path)s",
+ "--log-raw=%(raw_log_file)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--utility-path=tests/bin",
+ ],
+ "run_filename": "runxpcshelltests.py",
+ "testsdir": "xpcshell",
+ },
+ "gtest": {
+ "options": [
+ "--xre-path=%(abs_res_dir)s",
+ "--cwd=%(gtest_dir)s",
+ "--symbols-path=%(symbols_path)s",
+ "--utility-path=tests/bin",
+ "%(binary_path)s",
+ ],
+ "run_filename": "rungtests.py",
+ },
+ },
+ # local mochitest suites
+ "all_mochitest_suites": {
+ "mochitest-plain": ["--chunk-by-dir=4"],
+ "mochitest-plain-gpu": ["--subsuite=gpu"],
+ "mochitest-media": ["--subsuite=media"],
+ "mochitest-chrome": ["--flavor=chrome", "--chunk-by-dir=4", "--disable-e10s"],
+ "mochitest-chrome-gpu": ["--flavor=chrome", "--subsuite=gpu", "--disable-e10s"],
+ "mochitest-browser-chrome": ["--flavor=browser", "--chunk-by-runtime"],
+ "mochitest-browser-chrome-screenshots": [
+ "--flavor=browser",
+ "--subsuite=screenshots",
+ ],
+ "mochitest-webgl1-core": ["--subsuite=webgl1-core"],
+ "mochitest-webgl1-ext": ["--subsuite=webgl1-ext"],
+ "mochitest-webgl2-core": ["--subsuite=webgl2-core"],
+ "mochitest-webgl2-ext": ["--subsuite=webgl2-ext"],
+ "mochitest-webgl2-deqp": ["--subsuite=webgl2-deqp"],
+ "mochitest-webgpu": ["--subsuite=webgpu"],
+ "mochitest-devtools-chrome": [
+ "--flavor=browser",
+ "--subsuite=devtools",
+ "--chunk-by-runtime",
+ ],
+ "mochitest-browser-a11y": ["--flavor=browser", "--subsuite=a11y"],
+ "mochitest-browser-media": ["--flavor=browser", "--subsuite=media-bc"],
+ "mochitest-a11y": ["--flavor=a11y", "--disable-e10s"],
+ "mochitest-remote": ["--flavor=browser", "--subsuite=remote"],
+ },
+ # local reftest suites
+ "all_reftest_suites": {
+ "crashtest": {
+ "options": ["--suite=crashtest", "--topsrcdir=tests/reftest/tests"],
+ "tests": ["tests/reftest/tests/testing/crashtest/crashtests.list"],
+ },
+ "jsreftest": {
+ "options": [
+ "--extra-profile-file=tests/jsreftest/tests/js/src/tests/user.js",
+ "--suite=jstestbrowser",
+ "--topsrcdir=tests/jsreftest/tests",
+ ],
+ "tests": ["tests/jsreftest/tests/js/src/tests/jstests.list"],
+ },
+ "reftest": {
+ "options": ["--suite=reftest", "--topsrcdir=tests/reftest/tests"],
+ "tests": ["tests/reftest/tests/layout/reftests/reftest.list"],
+ },
+ },
+ "all_xpcshell_suites": {
+ "xpcshell": {
+ "options": [
+ "--xpcshell=%(abs_app_dir)s/" + XPCSHELL_NAME,
+ "--http3server=%(abs_app_dir)s/" + HTTP3SERVER_NAME,
+ "--manifest=tests/xpcshell/tests/xpcshell.ini",
+ ],
+ "tests": [],
+ },
+ },
+ "all_cppunittest_suites": {"cppunittest": ["tests/cppunittest"]},
+ "all_gtest_suites": {"gtest": []},
+ "all_jittest_suites": {"jittest": [], "jittest-chunked": []},
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [
+ # NOTE 'enabled' is only here while we have unconsolidated configs
+ {
+ "name": "disable_screen_saver",
+ "cmd": ["xset", "s", "off", "s", "reset"],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": False,
+ "enabled": DISABLE_SCREEN_SAVER,
+ },
+ {
+ "name": "disable_dock",
+ "cmd": ["defaults", "write", "com.apple.dock", "autohide", "-bool", "true"],
+ "architectures": ["64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ },
+ {
+ "name": "kill_dock",
+ "cmd": ["killall", "Dock"],
+ "architectures": ["64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ },
+ {
+ "name": "run mouse & screen adjustment script",
+ "cmd": [
+ # When configs are consolidated, this Python path will only be
+ # needed on Windows.
+ "python",
+ "../scripts/external_tools/mouse_and_screen_resolution.py",
+ "--configuration-file",
+ "../scripts/external_tools/machine-configuration.json",
+ ],
+ "architectures": ["32bit"],
+ "halt_on_failure": True,
+ "enabled": ADJUST_MOUSE_AND_SCREEN,
+ },
+ ],
+ "vcs_output_timeout": 1000,
+ "minidump_save_path": "%(abs_work_dir)s/../minidumps",
+ "unstructured_flavors": {
+ "xpcshell": [],
+ "gtest": [],
+ "cppunittest": [],
+ "jittest": [],
+ },
+ "tooltool_cache": "/builds/tooltool_cache",
+ "nodejs_path": NODEJS_PATH,
+}
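Each preflight entry above carries "architectures" and "enabled" gates. The filtering those two fields imply looks roughly like the sketch below; it mirrors the config shape only, not the actual mozharness scheduler:

import platform

def runnable(suite, arch=None):
    # A command runs only when it is enabled and the current
    # interpreter architecture is listed for it.
    arch = arch or platform.architecture()[0]
    return suite["enabled"] and arch in suite["architectures"]

disable_screen_saver = {
    "name": "disable_screen_saver",
    "cmd": ["xset", "s", "off", "s", "reset"],
    "architectures": ["32bit", "64bit"],
    "halt_on_failure": False,
    "enabled": False,  # DISABLE_SCREEN_SAVER is False in this mac config
}
assert not runnable(disable_screen_saver)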
diff --git a/testing/mozharness/configs/unittests/win_unittest.py b/testing/mozharness/configs/unittests/win_unittest.py
new file mode 100644
index 0000000000..dbbad8fc99
--- /dev/null
+++ b/testing/mozharness/configs/unittests/win_unittest.py
@@ -0,0 +1,341 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import platform
+import sys
+
+# OS Specifics
+ABS_WORK_DIR = os.path.join(os.getcwd(), "build")
+BINARY_PATH = os.path.join(ABS_WORK_DIR, "firefox", "firefox.exe")
+INSTALLER_PATH = os.path.join(ABS_WORK_DIR, "installer.zip")
+NODEJS_PATH = None
+if "MOZ_FETCHES_DIR" in os.environ:
+ NODEJS_PATH = os.path.join(os.environ["MOZ_FETCHES_DIR"], "node/node.exe")
+
+REQUIRE_GPU = False
+if "REQUIRE_GPU" in os.environ:
+ REQUIRE_GPU = os.environ["REQUIRE_GPU"] == "1"
+
+PYWIN32 = "pypiwin32==219"
+if sys.version_info > (3, 0):
+ PYWIN32 = "pywin32==300"
+
+XPCSHELL_NAME = "xpcshell.exe"
+EXE_SUFFIX = ".exe"
+DISABLE_SCREEN_SAVER = False
+ADJUST_MOUSE_AND_SCREEN = True
+DESKTOP_VISUALFX_THEME = {
+ "Let Windows choose": 0,
+ "Best appearance": 1,
+ "Best performance": 2,
+ "Custom": 3,
+}.get("Best appearance")
+TASKBAR_AUTOHIDE_REG_PATH = {
+ "Windows 7": "HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects2",
+ "Windows 10": "HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects3",
+}.get("{} {}".format(platform.system(), platform.release()))
+#####
+config = {
+ "exes": {
+ "python": sys.executable,
+ "hg": os.path.join(os.environ.get("PROGRAMFILES", ""), "Mercurial", "hg"),
+ },
+ ###
+ "installer_path": INSTALLER_PATH,
+ "binary_path": BINARY_PATH,
+ "xpcshell_name": XPCSHELL_NAME,
+ "virtualenv_modules": [PYWIN32, "six==1.13.0", "vcversioner==2.16.0.0"],
+ "virtualenv_path": "venv",
+ "exe_suffix": EXE_SUFFIX,
+ "run_file_names": {
+ "mochitest": "runtests.py",
+ "reftest": "runreftest.py",
+ "xpcshell": "runxpcshelltests.py",
+ "cppunittest": "runcppunittests.py",
+ "gtest": "rungtests.py",
+ "jittest": "jit_test.py",
+ },
+ "minimum_tests_zip_dirs": [
+ "bin/*",
+ "certs/*",
+ "config/*",
+ "mach",
+ "marionette/*",
+ "modules/*",
+ "mozbase/*",
+ "tools/*",
+ "mozpack/*",
+ "mozbuild/*",
+ ],
+ "suite_definitions": {
+ "cppunittest": {
+ "options": [
+ "--symbols-path=%(symbols_path)s",
+ "--utility-path=tests/bin",
+ "--xre-path=%(abs_app_dir)s",
+ ],
+ "run_filename": "runcppunittests.py",
+ "testsdir": "cppunittest",
+ },
+ "jittest": {
+ "options": [
+ "tests/bin/js",
+ "--no-slow",
+ "--no-progress",
+ "--format=automation",
+ "--jitflags=all",
+ "--timeout=970", # Keep in sync with run_timeout below.
+ ],
+ "run_filename": "jit_test.py",
+ "testsdir": "jit-test/jit-test",
+ "run_timeout": 1000, # Keep in sync with --timeout above.
+ },
+ "mochitest": {
+ "options": [
+ "--appname=%(binary_path)s",
+ "--utility-path=tests/bin",
+ "--extra-profile-file=tests/bin/plugins",
+ "--symbols-path=%(symbols_path)s",
+ "--certificate-path=tests/certs",
+ "--quiet",
+ "--log-raw=%(raw_log_file)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--screenshot-on-fail",
+ "--cleanup-crashes",
+ "--marionette-startup-timeout=180",
+ ],
+ "run_filename": "runtests.py",
+ "testsdir": "mochitest",
+ },
+ "reftest": {
+ "options": [
+ "--appname=%(binary_path)s",
+ "--utility-path=tests/bin",
+ "--extra-profile-file=tests/bin/plugins",
+ "--symbols-path=%(symbols_path)s",
+ "--log-raw=%(raw_log_file)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--cleanup-crashes",
+ "--marionette-startup-timeout=180",
+ "--sandbox-read-whitelist=%(abs_work_dir)s",
+ ],
+ "run_filename": "runreftest.py",
+ "testsdir": "reftest",
+ },
+ "xpcshell": {
+ "options": [
+ "--self-test",
+ "--symbols-path=%(symbols_path)s",
+ "--log-raw=%(raw_log_file)s",
+ "--log-errorsummary=%(error_summary_file)s",
+ "--utility-path=tests/bin",
+ "--manifest=tests/xpcshell/tests/xpcshell.ini",
+ ],
+ "run_filename": "runxpcshelltests.py",
+ "testsdir": "xpcshell",
+ },
+ "gtest": {
+ "options": [
+ "--xre-path=%(abs_res_dir)s",
+ "--cwd=%(gtest_dir)s",
+ "--symbols-path=%(symbols_path)s",
+ "--utility-path=tests/bin",
+ "%(binary_path)s",
+ ],
+ "run_filename": "rungtests.py",
+ },
+ },
+ # local mochitest suites
+ "all_mochitest_suites": {
+ "mochitest-plain": ["--chunk-by-dir=4"],
+ "mochitest-plain-gpu": ["--subsuite=gpu"],
+ "mochitest-media": ["--subsuite=media"],
+ "mochitest-chrome": ["--flavor=chrome", "--chunk-by-dir=4", "--disable-e10s"],
+ "mochitest-chrome-gpu": ["--flavor=chrome", "--subsuite=gpu", "--disable-e10s"],
+ "mochitest-browser-chrome": ["--flavor=browser", "--chunk-by-runtime"],
+ "mochitest-browser-chrome-screenshots": [
+ "--flavor=browser",
+ "--subsuite=screenshots",
+ ],
+ "mochitest-webgl1-core": ["--subsuite=webgl1-core"],
+ "mochitest-webgl1-ext": ["--subsuite=webgl1-ext"],
+ "mochitest-webgl2-core": ["--subsuite=webgl2-core"],
+ "mochitest-webgl2-ext": ["--subsuite=webgl2-ext"],
+ "mochitest-webgl2-deqp": ["--subsuite=webgl2-deqp"],
+ "mochitest-webgpu": ["--subsuite=webgpu"],
+ "mochitest-devtools-chrome": [
+ "--flavor=browser",
+ "--subsuite=devtools",
+ "--chunk-by-runtime",
+ ],
+ "mochitest-browser-a11y": ["--flavor=browser", "--subsuite=a11y"],
+ "mochitest-browser-media": ["--flavor=browser", "--subsuite=media-bc"],
+ "mochitest-a11y": ["--flavor=a11y", "--disable-e10s"],
+ "mochitest-remote": ["--flavor=browser", "--subsuite=remote"],
+ },
+ # local reftest suites
+ "all_reftest_suites": {
+ "crashtest": {
+ "options": ["--suite=crashtest", "--topsrcdir=tests/reftest/tests"],
+ "tests": ["tests/reftest/tests/testing/crashtest/crashtests.list"],
+ },
+ "jsreftest": {
+ "options": [
+ "--extra-profile-file=tests/jsreftest/tests/js/src/tests/user.js",
+ "--suite=jstestbrowser",
+ "--topsrcdir=tests/jsreftest/tests",
+ ],
+ "tests": ["tests/jsreftest/tests/js/src/tests/jstests.list"],
+ },
+ "reftest": {
+ "options": ["--suite=reftest", "--topsrcdir=tests/reftest/tests"],
+ "tests": ["tests/reftest/tests/layout/reftests/reftest.list"],
+ },
+ "reftest-no-accel": {
+ "options": [
+ "--suite=reftest",
+ "--setpref=layers.acceleration.disabled=true",
+ "--topsrcdir=tests/reftest/tests",
+ ],
+ "tests": ["tests/reftest/tests/layout/reftests/reftest.list"],
+ },
+ },
+ "all_xpcshell_suites": {
+ "xpcshell": {
+ "options": [
+ "--xpcshell=%(abs_app_dir)s/" + XPCSHELL_NAME,
+ ],
+ "tests": [],
+ },
+ "xpcshell-msix": {
+ "options": [
+ "--app-binary=%(binary_path)s",
+ "--app-path=%(install_dir)s",
+ "--xre-path=%(install_dir)s",
+ ],
+ "tests": [],
+ },
+ },
+ "all_cppunittest_suites": {"cppunittest": ["tests/cppunittest"]},
+ "all_gtest_suites": {"gtest": []},
+ "all_jittest_suites": {
+ "jittest": [],
+ "jittest-chunked": [],
+ },
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [
+ {
+ "name": "disable_screen_saver",
+ "cmd": ["xset", "s", "off", "s", "reset"],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": False,
+ "enabled": DISABLE_SCREEN_SAVER,
+ },
+ {
+ "name": "run mouse & screen adjustment script",
+ "cmd": [
+ sys.executable,
+ os.path.join(
+ os.getcwd(),
+ "mozharness",
+ "external_tools",
+ "mouse_and_screen_resolution.py",
+ ),
+ "--configuration-file",
+ os.path.join(
+ os.getcwd(),
+ "mozharness",
+ "external_tools",
+ "machine-configuration.json",
+ ),
+ "--platform=win10-vm"
+ if REQUIRE_GPU and (platform.release() == "10")
+ else "--platform=win7",
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": ADJUST_MOUSE_AND_SCREEN,
+ },
+ {
+ "name": "disable windows security and maintenance notifications",
+ "cmd": [
+ "powershell",
+ "-command",
+ "\"&{$p='HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\\Notifications\Settings\Windows.SystemToast.SecurityAndMaintenance';if(!(Test-Path -Path $p)){&New-Item -Path $p -Force}&Set-ItemProperty -Path $p -Name Enabled -Value 0}\"", # noqa
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": (platform.release() == "10"),
+ },
+ {
+ "name": "set windows VisualFX",
+ "cmd": [
+ "powershell",
+ "-command",
+ "\"&{{&Set-ItemProperty -Path 'HKCU:Software\Microsoft\Windows\CurrentVersion\Explorer\VisualEffects' -Name VisualFXSetting -Value {}}}\"".format(
+ DESKTOP_VISUALFX_THEME
+ ),
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ },
+ {
+ "name": "create scrollbars always show key",
+ "cmd": [
+ "powershell",
+ "-command",
+ "New-ItemProperty -Path 'HKCU:\Control Panel\Accessibility' -Name 'DynamicScrollbars' -Value 0",
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": False,
+ "enabled": True,
+ },
+ {
+ "name": "hide windows taskbar",
+ "cmd": [
+ "powershell",
+ "-command",
+ "\"&{{$p='{}';$v=(Get-ItemProperty -Path $p).Settings;$v[8]=3;&Set-ItemProperty -Path $p -Name Settings -Value $v}}\"".format(
+ TASKBAR_AUTOHIDE_REG_PATH
+ ),
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ },
+ {
+ "name": "restart windows explorer",
+ "cmd": [
+ "powershell",
+ "-command",
+ '"&{&Stop-Process -ProcessName explorer}"',
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ },
+ {
+ "name": "prepare chrome profile",
+ "cmd": [
+ "powershell",
+ "-command",
+ "if (test-path ${env:ProgramFiles(x86)}\Google\Chrome\Application\chrome.exe) {start chrome; Start-Sleep -s 30; taskkill /F /IM chrome.exe /T}",
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ },
+ ],
+ "vcs_output_timeout": 1000,
+ "minidump_save_path": "%(abs_work_dir)s/../minidumps",
+ "unstructured_flavors": {
+ "xpcshell": [],
+ "gtest": [],
+ "cppunittest": [],
+ "jittest": [],
+ },
+ "nodejs_path": NODEJS_PATH,
+}
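The "hide windows taskbar" step above pokes byte 8 of the binary Settings blob under the StuckRects key. The same operation rewritten with the standard-library winreg module, for readability; the blob layout is an undocumented Windows detail, so the byte index and values are assumptions carried over from the PowerShell one-liner rather than a documented API:

import winreg

STUCKRECTS = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects3"

def set_taskbar_autohide():
    # Read the Settings blob, flip the byte the config's $v[8]=3 targets,
    # and write the blob back with its original registry type.
    with winreg.OpenKey(
        winreg.HKEY_CURRENT_USER,
        STUCKRECTS,
        0,
        winreg.KEY_READ | winreg.KEY_SET_VALUE,
    ) as key:
        settings, kind = winreg.QueryValueEx(key, "Settings")
        blob = bytearray(settings)
        blob[8] = 3  # 3 = autohide on; 2 = autohide off (assumed)
        winreg.SetValueEx(key, "Settings", 0, kind, bytes(blob))

Explorer only picks the new value up after it restarts, which is why the config follows this step with "restart windows explorer".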
diff --git a/testing/mozharness/configs/web_platform_tests/prod_config.py b/testing/mozharness/configs/web_platform_tests/prod_config.py
new file mode 100644
index 0000000000..ee3f192b68
--- /dev/null
+++ b/testing/mozharness/configs/web_platform_tests/prod_config.py
@@ -0,0 +1,50 @@
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import os
+
+# OS Specifics
+DISABLE_SCREEN_SAVER = True
+ADJUST_MOUSE_AND_SCREEN = False
+#####
+
+config = {
+ "options": [
+ "--prefs-root=%(test_path)s/prefs",
+ "--config=%(test_path)s/wptrunner.ini",
+ "--ca-cert-path=%(test_path)s/tests/tools/certs/cacert.pem",
+ "--host-key-path=%(test_path)s/tests/tools/certs/web-platform.test.key",
+ "--host-cert-path=%(test_path)s/tests/tools/certs/web-platform.test.pem",
+ "--certutil-binary=%(test_install_path)s/bin/certutil",
+ ],
+ "geckodriver": os.path.join("%(abs_fetches_dir)s", "geckodriver"),
+ "per_test_category": "web-platform",
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [
+ # NOTE 'enabled' is only here while we have unconsolidated configs
+ {
+ "name": "disable_screen_saver",
+ "cmd": ["xset", "s", "off", "s", "reset"],
+ "halt_on_failure": False,
+ "architectures": ["32bit", "64bit"],
+ "enabled": DISABLE_SCREEN_SAVER,
+ },
+ {
+ "name": "run mouse & screen adjustment script",
+ "cmd": [
+ # When configs are consolidated, this Python path will only be
+ # needed on Windows.
+ "python",
+ "../scripts/external_tools/mouse_and_screen_resolution.py",
+ "--configuration-file",
+ "../scripts/external_tools/machine-configuration.json",
+ ],
+ "architectures": ["32bit"],
+ "halt_on_failure": True,
+ "enabled": ADJUST_MOUSE_AND_SCREEN,
+ },
+ ],
+}
diff --git a/testing/mozharness/configs/web_platform_tests/prod_config_android.py b/testing/mozharness/configs/web_platform_tests/prod_config_android.py
new file mode 100644
index 0000000000..f1644d26fd
--- /dev/null
+++ b/testing/mozharness/configs/web_platform_tests/prod_config_android.py
@@ -0,0 +1,25 @@
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+import os
+
+config = {
+ "options": [
+ "--prefs-root=%(test_path)s/prefs",
+ "--config=%(test_path)s/wptrunner.ini",
+ "--ca-cert-path=%(test_path)s/tests/tools/certs/cacert.pem",
+ "--host-key-path=%(test_path)s/tests/tools/certs/web-platform.test.key",
+ "--host-cert-path=%(test_path)s/tests/tools/certs/web-platform.test.pem",
+ "--certutil-binary=%(xre_path)s/certutil",
+ "--product=firefox_android",
+ ],
+ "binary_path": "/tmp",
+ "geckodriver": "%(abs_fetches_dir)s/geckodriver",
+ "hostutils_manifest_path": "testing/config/tooltool-manifests/linux64/hostutils.manifest",
+ "log_tbpl_level": "info",
+ "log_raw_level": "info",
+ "per_test_category": "web-platform",
+ "tooltool_cache": os.environ.get("TOOLTOOL_CACHE"),
+}
diff --git a/testing/mozharness/configs/web_platform_tests/prod_config_mac.py b/testing/mozharness/configs/web_platform_tests/prod_config_mac.py
new file mode 100644
index 0000000000..17a0133003
--- /dev/null
+++ b/testing/mozharness/configs/web_platform_tests/prod_config_mac.py
@@ -0,0 +1,50 @@
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import os
+
+# OS Specifics
+DISABLE_SCREEN_SAVER = False
+ADJUST_MOUSE_AND_SCREEN = False
+#####
+
+config = {
+ "options": [
+ "--prefs-root=%(test_path)s/prefs",
+ "--config=%(test_path)s/wptrunner.ini",
+ "--ca-cert-path=%(test_path)s/tests/tools/certs/cacert.pem",
+ "--host-key-path=%(test_path)s/tests/tools/certs/web-platform.test.key",
+ "--host-cert-path=%(test_path)s/tests/tools/certs/web-platform.test.pem",
+ "--certutil-binary=%(test_install_path)s/bin/certutil",
+ ],
+ "geckodriver": os.path.join("%(abs_fetches_dir)s", "geckodriver"),
+ "per_test_category": "web-platform",
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [
+ # NOTE 'enabled' is only here while we have unconsolidated configs
+ {
+ "name": "disable_screen_saver",
+ "cmd": ["xset", "s", "off", "s", "reset"],
+ "halt_on_failure": False,
+ "architectures": ["32bit", "64bit"],
+ "enabled": DISABLE_SCREEN_SAVER,
+ },
+ {
+ "name": "run mouse & screen adjustment script",
+ "cmd": [
+ # When configs are consolidated, this Python path will only be
+ # needed on Windows.
+ "python",
+ "../scripts/external_tools/mouse_and_screen_resolution.py",
+ "--configuration-file",
+ "../scripts/external_tools/machine-configuration.json",
+ ],
+ "architectures": ["32bit"],
+ "halt_on_failure": True,
+ "enabled": ADJUST_MOUSE_AND_SCREEN,
+ },
+ ],
+}
diff --git a/testing/mozharness/configs/web_platform_tests/prod_config_windows.py b/testing/mozharness/configs/web_platform_tests/prod_config_windows.py
new file mode 100644
index 0000000000..e62ea4ad94
--- /dev/null
+++ b/testing/mozharness/configs/web_platform_tests/prod_config_windows.py
@@ -0,0 +1,58 @@
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+# This is a template config file for web-platform-tests test.
+
+import os
+import sys
+
+# OS Specifics
+DISABLE_SCREEN_SAVER = False
+ADJUST_MOUSE_AND_SCREEN = True
+#####
+
+
+config = {
+ "options": [
+ "--prefs-root=%(test_path)s/prefs",
+ "--config=%(test_path)s/wptrunner.ini",
+ "--ca-cert-path=%(test_path)s/tests/tools/certs/cacert.pem",
+ "--host-key-path=%(test_path)s/tests/tools/certs/web-platform.test.key",
+ "--host-cert-path=%(test_path)s/tests/tools/certs/web-platform.test.pem",
+ "--certutil-binary=%(test_install_path)s/bin/certutil",
+ ],
+ "exes": {
+ "python": sys.executable,
+ "hg": "c:/mozilla-build/hg/hg",
+ },
+ "geckodriver": os.path.join("%(abs_fetches_dir)s", "geckodriver.exe"),
+ "per_test_category": "web-platform",
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [
+ # NOTE 'enabled' is only here while we have unconsolidated configs
+ {
+ "name": "disable_screen_saver",
+ "cmd": ["xset", "s", "off", "s", "reset"],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": False,
+ "enabled": DISABLE_SCREEN_SAVER,
+ },
+ {
+ "name": "run mouse & screen adjustment script",
+ "cmd": [
+ # When configs are consolidated, this Python path will only be
+ # needed on Windows.
+ sys.executable,
+ "../scripts/external_tools/mouse_and_screen_resolution.py",
+ "--configuration-file",
+ "../scripts/external_tools/machine-configuration.json",
+ ],
+ "architectures": ["32bit"],
+ "halt_on_failure": True,
+ "enabled": ADJUST_MOUSE_AND_SCREEN,
+ },
+ ],
+}
diff --git a/testing/mozharness/configs/web_platform_tests/prod_config_windows_taskcluster.py b/testing/mozharness/configs/web_platform_tests/prod_config_windows_taskcluster.py
new file mode 100644
index 0000000000..8b611370a9
--- /dev/null
+++ b/testing/mozharness/configs/web_platform_tests/prod_config_windows_taskcluster.py
@@ -0,0 +1,134 @@
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+# This is a template config file for web-platform-tests test.
+
+import os
+import platform
+import sys
+
+# OS Specifics
+DISABLE_SCREEN_SAVER = False
+ADJUST_MOUSE_AND_SCREEN = True
+DESKTOP_VISUALFX_THEME = {
+ "Let Windows choose": 0,
+ "Best appearance": 1,
+ "Best performance": 2,
+ "Custom": 3,
+}.get("Best appearance")
+TASKBAR_AUTOHIDE_REG_PATH = {
+ "Windows 7": "HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects2",
+ "Windows 10": "HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects3",
+}.get("{} {}".format(platform.system(), platform.release()))
+#####
+
+config = {
+ "options": [
+ "--prefs-root=%(test_path)s/prefs",
+ "--config=%(test_path)s/wptrunner.ini",
+ "--ca-cert-path=%(test_path)s/tests/tools/certs/cacert.pem",
+ "--host-key-path=%(test_path)s/tests/tools/certs/web-platform.test.key",
+ "--host-cert-path=%(test_path)s/tests/tools/certs/web-platform.test.pem",
+ "--certutil-binary=%(test_install_path)s/bin/certutil",
+ ],
+ "exes": {
+ "python": sys.executable,
+ "hg": os.path.join(os.environ["PROGRAMFILES"], "Mercurial", "hg"),
+ },
+ "run_cmd_checks_enabled": True,
+ "preflight_run_cmd_suites": [
+ {
+ "name": "disable_screen_saver",
+ "cmd": ["xset", "s", "off", "s", "reset"],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": False,
+ "enabled": DISABLE_SCREEN_SAVER,
+ },
+ {
+ "name": "run mouse & screen adjustment script",
+ "cmd": [
+ sys.executable,
+ os.path.join(
+ os.getcwd(),
+ "mozharness",
+ "external_tools",
+ "mouse_and_screen_resolution.py",
+ ),
+ "--configuration-file",
+ os.path.join(
+ os.getcwd(),
+ "mozharness",
+ "external_tools",
+ "machine-configuration.json",
+ ),
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": ADJUST_MOUSE_AND_SCREEN,
+ },
+ {
+ "name": "disable windows security and maintenance notifications",
+ "cmd": [
+ "powershell",
+ "-command",
+ r"\"&{$p='HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Notifications\Settings\Windows.SystemToast.SecurityAndMaintenance';if(!(Test-Path -Path $p)){&New-Item -Path $p -Force}&Set-ItemProperty -Path $p -Name Enabled -Value 0}\"", # noqa
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": (platform.release() == 10),
+ },
+ {
+ "name": "set windows VisualFX",
+ "cmd": [
+ "powershell",
+ "-command",
+ "\"&{{&Set-ItemProperty -Path 'HKCU:Software\Microsoft\Windows\CurrentVersion\Explorer\VisualEffects' -Name VisualFXSetting -Value {}}}\"".format(
+ DESKTOP_VISUALFX_THEME
+ ),
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ },
+ {
+ "name": "create scrollbars always show key",
+ "cmd": [
+ "powershell",
+ "-command",
+ "New-ItemProperty -Path 'HKCU:\Control Panel\Accessibility' -Name 'DynamicScrollbars' -Value 0",
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": False,
+ "enabled": True,
+ },
+ {
+ "name": "hide windows taskbar",
+ "cmd": [
+ "powershell",
+ "-command",
+ "\"&{{$p='{}';$v=(Get-ItemProperty -Path $p).Settings;$v[8]=3;&Set-ItemProperty -Path $p -Name Settings -Value $v}}\"".format(
+ TASKBAR_AUTOHIDE_REG_PATH
+ ),
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ },
+ {
+ "name": "restart windows explorer",
+ "cmd": [
+ "powershell",
+ "-command",
+ '"&{&Stop-Process -ProcessName explorer}"',
+ ],
+ "architectures": ["32bit", "64bit"],
+ "halt_on_failure": True,
+ "enabled": True,
+ },
+ ],
+ "geckodriver": os.path.join("%(abs_fetches_dir)s", "geckodriver.exe"),
+ "per_test_category": "web-platform",
+}
diff --git a/testing/mozharness/configs/web_platform_tests/test_config.py b/testing/mozharness/configs/web_platform_tests/test_config.py
new file mode 100644
index 0000000000..a9787185a7
--- /dev/null
+++ b/testing/mozharness/configs/web_platform_tests/test_config.py
@@ -0,0 +1,24 @@
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+config = {
+ "options": [
+ "--prefs-root=%(test_path)s/prefs",
+ "--config=%(test_path)s/wptrunner.ini",
+ "--ca-cert-path=%(test_path)s/tests/tools/certs/cacert.pem",
+ "--host-key-path=%(test_path)s/tests/tools/certs/web-platform.test.key",
+ "--host-cert-path=%(test_path)s/tests/tools/certs/web-platform.test.pem",
+ "--certutil-binary=%(test_install_path)s/bin/certutil",
+ ],
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "pull",
+ "install",
+ "run-tests",
+ ],
+}
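The default_actions list selects which of a script's known actions run when no explicit action flags are passed on the command line; actions execute in the order the script declares them. A hedged sketch of that gating — "upload" below is a hypothetical extra action, added only to show something being skipped:

# Not the mozharness dispatcher; just the selection rule it implies.
all_actions = [
    "clobber",
    "download-and-extract",
    "create-virtualenv",
    "pull",
    "install",
    "run-tests",
    "upload",  # hypothetical action, not in default_actions
]
default_actions = [
    "clobber",
    "download-and-extract",
    "create-virtualenv",
    "pull",
    "install",
    "run-tests",
]
for action in all_actions:
    if action in default_actions:
        print("running:", action)  # "upload" is skipped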
diff --git a/testing/mozharness/configs/web_platform_tests/test_config_windows.py b/testing/mozharness/configs/web_platform_tests/test_config_windows.py
new file mode 100644
index 0000000000..9c8afb3e0c
--- /dev/null
+++ b/testing/mozharness/configs/web_platform_tests/test_config_windows.py
@@ -0,0 +1,30 @@
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import sys
+
+config = {
+ "options": [
+ "--prefs-root=%(test_path)s/prefs",
+ "--config=%(test_path)s/wptrunner.ini",
+ "--ca-cert-path=%(test_path)s/tests/tools/certs/cacert.pem",
+ "--host-key-path=%(test_path)s/tests/tools/certs/web-platform.test.key",
+ "--host-cert-path=%(test_path)s/tests/tools/certs/web-platform.test.pem",
+ "--certutil-binary=%(test_install_path)s/bin/certutil",
+ ],
+ "exes": {
+ "python": sys.executable,
+ "hg": "c:/mozilla-build/hg/hg",
+ },
+ "default_actions": [
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "pull",
+ "install",
+ "run-tests",
+ ],
+}
diff --git a/testing/mozharness/docs/Makefile b/testing/mozharness/docs/Makefile
new file mode 100644
index 0000000000..980ffbd3b7
--- /dev/null
+++ b/testing/mozharness/docs/Makefile
@@ -0,0 +1,177 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " xml to make Docutils-native XML files"
+ @echo " pseudoxml to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/MozHarness.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/MozHarness.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/MozHarness"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/MozHarness"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/testing/mozharness/docs/android_emulator_build.rst b/testing/mozharness/docs/android_emulator_build.rst
new file mode 100644
index 0000000000..4087c64d41
--- /dev/null
+++ b/testing/mozharness/docs/android_emulator_build.rst
@@ -0,0 +1,7 @@
+android_emulator_build module
+=============================
+
+.. automodule:: android_emulator_build
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/android_emulator_unittest.rst b/testing/mozharness/docs/android_emulator_unittest.rst
new file mode 100644
index 0000000000..7a8c42c501
--- /dev/null
+++ b/testing/mozharness/docs/android_emulator_unittest.rst
@@ -0,0 +1,7 @@
+android_emulator_unittest module
+================================
+
+.. automodule:: android_emulator_unittest
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/bouncer_submitter.rst b/testing/mozharness/docs/bouncer_submitter.rst
new file mode 100644
index 0000000000..5b71caca7a
--- /dev/null
+++ b/testing/mozharness/docs/bouncer_submitter.rst
@@ -0,0 +1,8 @@
+bouncer_submitter module
+========================
+
+.. automodule:: bouncer_submitter
+ :members:
+ :undoc-members:
+ :private-members:
+ :special-members:
diff --git a/testing/mozharness/docs/conf.py b/testing/mozharness/docs/conf.py
new file mode 100644
index 0000000000..1fb8e80882
--- /dev/null
+++ b/testing/mozharness/docs/conf.py
@@ -0,0 +1,268 @@
+# -*- coding: utf-8 -*-
+#
+# Moz Harness documentation build configuration file, created by
+# sphinx-quickstart on Mon Apr 14 17:35:24 2014.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+# sys.path.insert(0, os.path.abspath('.'))
+sys.path.insert(0, os.path.abspath("../scripts"))
+sys.path.insert(0, os.path.abspath("../mozharness"))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ "sphinx.ext.autodoc",
+ "sphinx.ext.intersphinx",
+ "sphinx.ext.viewcode",
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# The suffix of source filenames.
+source_suffix = ".rst"
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = "index"
+
+# General information about the project.
+project = "Mozharness"
+copyright = "2019, mozilla"
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = "0.1"
+# The full version, including alpha/beta/rc tags.
+release = "0.1"
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+# language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ["_build"]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = "default"
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+# html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+# html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+# html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+# html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_domain_indices = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+html_show_copyright = False
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = "MozHarnessdoc"
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #'papersize': 'letterpaper',
+ # The font size ('10pt', '11pt' or '12pt').
+ #'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ #'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ ("index", "MozHarness.tex", "Mozharness Documentation", "mozilla", "manual"),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [("index", "mozharness", "Mozharness Documentation", ["mozilla"], 1)]
+
+# If true, show URL addresses after external links.
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (
+ "index",
+ "Mozharness",
+ "Mozharness Documentation",
+ "mozilla",
+ "Mozharness",
+ "One line description of project.",
+ "Miscellaneous",
+ ),
+]
+
+# Documents to append as an appendix to all manuals.
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+# texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {"http://docs.python.org/": None}
diff --git a/testing/mozharness/docs/configtest.rst b/testing/mozharness/docs/configtest.rst
new file mode 100644
index 0000000000..10e4a56c96
--- /dev/null
+++ b/testing/mozharness/docs/configtest.rst
@@ -0,0 +1,7 @@
+configtest module
+=================
+
+.. automodule:: configtest
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/desktop_l10n.rst b/testing/mozharness/docs/desktop_l10n.rst
new file mode 100644
index 0000000000..b94dadedcf
--- /dev/null
+++ b/testing/mozharness/docs/desktop_l10n.rst
@@ -0,0 +1,7 @@
+desktop_l10n module
+===================
+
+.. automodule:: desktop_l10n
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/desktop_unittest.rst b/testing/mozharness/docs/desktop_unittest.rst
new file mode 100644
index 0000000000..f70e8d8d9e
--- /dev/null
+++ b/testing/mozharness/docs/desktop_unittest.rst
@@ -0,0 +1,7 @@
+desktop_unittest module
+=======================
+
+.. automodule:: desktop_unittest
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/fx_desktop_build.rst b/testing/mozharness/docs/fx_desktop_build.rst
new file mode 100644
index 0000000000..b5d6ac21c2
--- /dev/null
+++ b/testing/mozharness/docs/fx_desktop_build.rst
@@ -0,0 +1,7 @@
+fx_desktop_build module
+=======================
+
+.. automodule:: fx_desktop_build
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/index.rst b/testing/mozharness/docs/index.rst
new file mode 100644
index 0000000000..04b3a2e382
--- /dev/null
+++ b/testing/mozharness/docs/index.rst
@@ -0,0 +1,23 @@
+.. Moz Harness documentation master file, created by
+ sphinx-quickstart on Mon Apr 14 17:35:24 2014.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to Moz Harness's documentation!
+=======================================
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ modules.rst
+ scripts.rst
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/testing/mozharness/docs/marionette.rst b/testing/mozharness/docs/marionette.rst
new file mode 100644
index 0000000000..28763406be
--- /dev/null
+++ b/testing/mozharness/docs/marionette.rst
@@ -0,0 +1,7 @@
+marionette module
+=================
+
+.. automodule:: marionette
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/mobile_partner_repack.rst b/testing/mozharness/docs/mobile_partner_repack.rst
new file mode 100644
index 0000000000..f8be0bef86
--- /dev/null
+++ b/testing/mozharness/docs/mobile_partner_repack.rst
@@ -0,0 +1,7 @@
+mobile_partner_repack module
+============================
+
+.. automodule:: mobile_partner_repack
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/modules.rst b/testing/mozharness/docs/modules.rst
new file mode 100644
index 0000000000..73652563b6
--- /dev/null
+++ b/testing/mozharness/docs/modules.rst
@@ -0,0 +1,13 @@
+mozharness
+==========
+
+.. toctree::
+ :maxdepth: 4
+
+ mozharness
+ mozharness.base.rst
+ mozharness.base.vcs.rst
+ mozharness.mozilla.building.rst
+ mozharness.mozilla.l10n.rst
+ mozharness.mozilla.rst
+ mozharness.mozilla.testing.rst
diff --git a/testing/mozharness/docs/mozharness.base.rst b/testing/mozharness/docs/mozharness.base.rst
new file mode 100644
index 0000000000..dc46a45ba8
--- /dev/null
+++ b/testing/mozharness/docs/mozharness.base.rst
@@ -0,0 +1,85 @@
+mozharness.base package
+=======================
+
+Subpackages
+-----------
+
+.. toctree::
+
+ mozharness.base.vcs
+
+Submodules
+----------
+
+mozharness.base.config module
+-----------------------------
+
+.. automodule:: mozharness.base.config
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.base.errors module
+-----------------------------
+
+.. automodule:: mozharness.base.errors
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.base.log module
+--------------------------
+
+.. automodule:: mozharness.base.log
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.base.mar module
+--------------------------
+
+.. automodule:: mozharness.base.mar
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.base.parallel module
+-------------------------------
+
+.. automodule:: mozharness.base.parallel
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.base.python module
+-----------------------------
+
+.. automodule:: mozharness.base.python
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.base.script module
+-----------------------------
+
+.. automodule:: mozharness.base.script
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.base.transfer module
+-------------------------------
+
+.. automodule:: mozharness.base.transfer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: mozharness.base
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/mozharness.base.vcs.rst b/testing/mozharness/docs/mozharness.base.vcs.rst
new file mode 100644
index 0000000000..3418463593
--- /dev/null
+++ b/testing/mozharness/docs/mozharness.base.vcs.rst
@@ -0,0 +1,37 @@
+mozharness.base.vcs package
+===========================
+
+Submodules
+----------
+
+mozharness.base.vcs.gittool module
+----------------------------------
+
+.. automodule:: mozharness.base.vcs.gittool
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.base.vcs.mercurial module
+------------------------------------
+
+.. automodule:: mozharness.base.vcs.mercurial
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.base.vcs.vcsbase module
+----------------------------------
+
+.. automodule:: mozharness.base.vcs.vcsbase
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: mozharness.base.vcs
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/mozharness.mozilla.building.rst b/testing/mozharness/docs/mozharness.mozilla.building.rst
new file mode 100644
index 0000000000..b8b6106c24
--- /dev/null
+++ b/testing/mozharness/docs/mozharness.mozilla.building.rst
@@ -0,0 +1,22 @@
+mozharness.mozilla.building package
+===================================
+
+Submodules
+----------
+
+mozharness.mozilla.building.buildbase module
+--------------------------------------------
+
+.. automodule:: mozharness.mozilla.building.buildbase
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: mozharness.mozilla.building
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/mozharness.mozilla.l10n.rst b/testing/mozharness/docs/mozharness.mozilla.l10n.rst
new file mode 100644
index 0000000000..6951ec1a76
--- /dev/null
+++ b/testing/mozharness/docs/mozharness.mozilla.l10n.rst
@@ -0,0 +1,30 @@
+mozharness.mozilla.l10n package
+===============================
+
+Submodules
+----------
+
+mozharness.mozilla.l10n.locales module
+--------------------------------------
+
+.. automodule:: mozharness.mozilla.l10n.locales
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.mozilla.l10n.multi_locale_build module
+-------------------------------------------------
+
+.. automodule:: mozharness.mozilla.l10n.multi_locale_build
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: mozharness.mozilla.l10n
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/mozharness.mozilla.rst b/testing/mozharness/docs/mozharness.mozilla.rst
new file mode 100644
index 0000000000..016761320e
--- /dev/null
+++ b/testing/mozharness/docs/mozharness.mozilla.rst
@@ -0,0 +1,63 @@
+mozharness.mozilla package
+==========================
+
+Subpackages
+-----------
+
+.. toctree::
+
+ mozharness.mozilla.building
+ mozharness.mozilla.l10n
+ mozharness.mozilla.testing
+
+Submodules
+----------
+
+mozharness.mozilla.blob_upload module
+-------------------------------------
+
+.. automodule:: mozharness.mozilla.blob_upload
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.mozilla.automation module
+------------------------------------
+
+.. automodule:: mozharness.mozilla.automation
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.mozilla.mozbase module
+---------------------------------
+
+.. automodule:: mozharness.mozilla.mozbase
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.mozilla.purge module
+-------------------------------
+
+.. automodule:: mozharness.mozilla.purge
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.mozilla.tooltool module
+----------------------------------
+
+.. automodule:: mozharness.mozilla.tooltool
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: mozharness.mozilla
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/mozharness.mozilla.testing.rst b/testing/mozharness/docs/mozharness.mozilla.testing.rst
new file mode 100644
index 0000000000..388ee9925b
--- /dev/null
+++ b/testing/mozharness/docs/mozharness.mozilla.testing.rst
@@ -0,0 +1,46 @@
+mozharness.mozilla.testing package
+==================================
+
+Submodules
+----------
+
+mozharness.mozilla.testing.errors module
+----------------------------------------
+
+.. automodule:: mozharness.mozilla.testing.errors
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.mozilla.testing.talos module
+---------------------------------------
+
+.. automodule:: mozharness.mozilla.testing.talos
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.mozilla.testing.testbase module
+------------------------------------------
+
+.. automodule:: mozharness.mozilla.testing.testbase
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+mozharness.mozilla.testing.unittest module
+------------------------------------------
+
+.. automodule:: mozharness.mozilla.testing.unittest
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: mozharness.mozilla.testing
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/mozharness.rst b/testing/mozharness/docs/mozharness.rst
new file mode 100644
index 0000000000..f14e6b91e4
--- /dev/null
+++ b/testing/mozharness/docs/mozharness.rst
@@ -0,0 +1,18 @@
+mozharness package
+==================
+
+Subpackages
+-----------
+
+.. toctree::
+
+ mozharness.base
+ mozharness.mozilla
+
+Module contents
+---------------
+
+.. automodule:: mozharness
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/multil10n.rst b/testing/mozharness/docs/multil10n.rst
new file mode 100644
index 0000000000..b14e62b78e
--- /dev/null
+++ b/testing/mozharness/docs/multil10n.rst
@@ -0,0 +1,7 @@
+multil10n module
+================
+
+.. automodule:: multil10n
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/scripts.rst b/testing/mozharness/docs/scripts.rst
new file mode 100644
index 0000000000..972dae5601
--- /dev/null
+++ b/testing/mozharness/docs/scripts.rst
@@ -0,0 +1,16 @@
+scripts
+=======
+
+.. toctree::
+ android_emulator_build.rst
+ android_emulator_unittest.rst
+ bouncer_submitter.rst
+ configtest.rst
+ desktop_l10n.rst
+ desktop_unittest.rst
+ fx_desktop_build.rst
+ marionette.rst
+ mobile_partner_repack.rst
+ multil10n.rst
+ talos_script.rst
+ web_platform_tests.rst
diff --git a/testing/mozharness/docs/talos_script.rst b/testing/mozharness/docs/talos_script.rst
new file mode 100644
index 0000000000..509aac400e
--- /dev/null
+++ b/testing/mozharness/docs/talos_script.rst
@@ -0,0 +1,7 @@
+talos_script module
+===================
+
+.. automodule:: talos_script
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/docs/web_platform_tests.rst b/testing/mozharness/docs/web_platform_tests.rst
new file mode 100644
index 0000000000..6a2887aa8e
--- /dev/null
+++ b/testing/mozharness/docs/web_platform_tests.rst
@@ -0,0 +1,7 @@
+web_platform_tests module
+=========================
+
+.. automodule:: web_platform_tests
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/testing/mozharness/examples/action_config_script.py b/testing/mozharness/examples/action_config_script.py
new file mode 100755
index 0000000000..c86adc75d8
--- /dev/null
+++ b/testing/mozharness/examples/action_config_script.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""action_config_script.py
+
+Demonstrate actions and config.
+"""
+
+import os
+import sys
+import time
+
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.script import BaseScript
+
+
+# ActionsConfigExample {{{1
+class ActionsConfigExample(BaseScript):
+ config_options = [
+ [
+ [
+ "--beverage",
+ ],
+ {
+ "action": "store",
+ "dest": "beverage",
+ "type": "string",
+ "help": "Specify your beverage of choice",
+ },
+ ],
+ [
+ [
+ "--ship-style",
+ ],
+ {
+ "action": "store",
+ "dest": "ship_style",
+ "type": "choice",
+ "choices": ["1", "2", "3"],
+ "help": "Specify the type of ship",
+ },
+ ],
+ [
+ [
+ "--long-sleep-time",
+ ],
+ {
+ "action": "store",
+ "dest": "long_sleep_time",
+ "type": "int",
+ "help": "Specify how long to sleep",
+ },
+ ],
+ ]
+
+ def __init__(self, require_config_file=False):
+ super(ActionsConfigExample, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "clobber",
+ "nap",
+ "ship-it",
+ ],
+ default_actions=[
+ "clobber",
+ "nap",
+ "ship-it",
+ ],
+ require_config_file=require_config_file,
+ config={
+ "beverage": "kool-aid",
+ "long_sleep_time": 3600,
+ "ship_style": "1",
+ },
+ )
+
+ def _sleep(self, sleep_length, interval=5):
+ self.info("Sleeping %d seconds..." % sleep_length)
+ counter = 0
+ while counter + interval <= sleep_length:
+            print(".", end="", flush=True)
+ try:
+ time.sleep(interval)
+            except KeyboardInterrupt:
+                print()
+ self.error("Impatient, are we?")
+ sys.exit(1)
+ counter += interval
+ print()
+ self.info("Ok, done.")
+
+ def _ship1(self):
+ self.info(
+ """
+ _~
+ _~ )_)_~
+ )_))_))_)
+ _!__!__!_
+ \______t/
+~~~~~~~~~~~~~
+"""
+ )
+
+ def _ship2(self):
+ self.info(
+ """
+ _4 _4
+ _)_))_)
+ _)_)_)_)
+ _)_))_))_)_
+ \_=__=__=_/
+~~~~~~~~~~~~~
+"""
+ )
+
+ def _ship3(self):
+ self.info(
+ """
+ ,;;:;,
+ ;;;;;
+ ,:;;:; ,'=.
+ ;:;:;' .=" ,'_\\
+ ':;:;,/ ,__:=@
+ ';;:; =./)_
+ `"=\\_ )_"`
+ ``'"`
+"""
+ )
+
+ def nap(self):
+ for var_name in self.config.keys():
+ if var_name.startswith("random_config_key"):
+ self.info("This is going to be %s!" % self.config[var_name])
+ sleep_time = self.config["long_sleep_time"]
+ if sleep_time > 60:
+ self.info(
+ "Ok, grab a %s. This is going to take a while."
+ % self.config["beverage"]
+ )
+ else:
+ self.info(
+ "This will be quick, but grab a %s anyway." % self.config["beverage"]
+ )
+ self._sleep(self.config["long_sleep_time"])
+
+ def ship_it(self):
+ name = "_ship%s" % self.config["ship_style"]
+ if hasattr(self, name):
+ getattr(self, name)()
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ actions_config_example = ActionsConfigExample()
+ actions_config_example.run_and_exit()
diff --git a/testing/mozharness/examples/silent_script.py b/testing/mozharness/examples/silent_script.py
new file mode 100755
index 0000000000..6b00ac1c93
--- /dev/null
+++ b/testing/mozharness/examples/silent_script.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+""" This script is an example of why I care so much about Mozharness' 2nd core
+concept, logging. http://escapewindow.dreamwidth.org/230853.html
+"""
+
+import os
+import shutil
+
+# print "downloading foo.tar.bz2..."
+os.system("curl -s -o foo.tar.bz2 http://people.mozilla.org/~asasaki/foo.tar.bz2")
+# os.system("curl -v -o foo.tar.bz2 http://people.mozilla.org/~asasaki/foo.tar.bz2")
+
+# os.rename("foo.tar.bz2", "foo3.tar.bz2")
+os.system("tar xjf foo.tar.bz2")
+
+# os.chdir("x")
+os.remove("x/ship2")
+os.remove("foo.tar.bz2")
+os.system("tar cjf foo.tar.bz2 x")
+shutil.rmtree("x")
+# os.system("scp -q foo.tar.bz2 people.mozilla.org:public_html/foo2.tar.bz2")
+os.remove("foo.tar.bz2")
diff --git a/testing/mozharness/examples/venv.py b/testing/mozharness/examples/venv.py
new file mode 100755
index 0000000000..82ee3d0109
--- /dev/null
+++ b/testing/mozharness/examples/venv.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""venv.py
+
+Test virtualenv creation. This installs talos in the local venv; that's it.
+"""
+
+import os
+import sys
+
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.errors import PythonErrorList
+from mozharness.base.python import virtualenv_config_options, VirtualenvMixin
+from mozharness.base.script import BaseScript
+
+# VirtualenvExample {{{1
+class VirtualenvExample(VirtualenvMixin, BaseScript):
+ config_options = [
+ [
+ ["--talos-url"],
+ {
+ "action": "store",
+ "dest": "talos_url",
+ "default": "https://hg.mozilla.org/build/talos/archive/tip.tar.gz",
+ "help": "Specify the talos pip url",
+ },
+ ]
+ ] + virtualenv_config_options
+
+ def __init__(self, require_config_file=False):
+ super(VirtualenvExample, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "create-virtualenv",
+ ],
+ default_actions=[
+ "create-virtualenv",
+ ],
+ require_config_file=require_config_file,
+ config={"virtualenv_modules": ["talos"]},
+ )
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ venv_example = VirtualenvExample()
+ venv_example.run_and_exit()
diff --git a/testing/mozharness/examples/verbose_script.py b/testing/mozharness/examples/verbose_script.py
new file mode 100755
index 0000000000..f30ed8f5d5
--- /dev/null
+++ b/testing/mozharness/examples/verbose_script.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""verbose_script.py
+
+Contrast to silent_script.py.
+"""
+
+import os
+import sys
+
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+# from mozharness.base.errors import TarErrorList, SSHErrorList
+from mozharness.base.script import BaseScript
+
+
+# VerboseExample {{{1
+class VerboseExample(BaseScript):
+ def __init__(self, require_config_file=False):
+ super(VerboseExample, self).__init__(
+ all_actions=[
+ "verbosity",
+ ],
+ require_config_file=require_config_file,
+ config={"tarball_name": "bar.tar.bz2"},
+ )
+
+ def verbosity(self):
+ tarball_name = self.config["tarball_name"]
+ self.download_file(
+ "http://people.mozilla.org/~asasaki/foo.tar.bz2", file_name=tarball_name
+ )
+ # the error_list adds more error checking.
+ # the halt_on_failure will kill the script at this point if
+ # unsuccessful. Be aware if you need to do any cleanup before you
+ # actually fatal(), though. If so, you may want to either use an
+ # |if self.run_command(...):| construct, or define a self._post_fatal()
+ # for a generic end-of-fatal-run method.
+ self.run_command(
+ ["tar", "xjvf", tarball_name],
+ # error_list=TarErrorList,
+ # halt_on_failure=True,
+ # fatal_exit_code=3,
+ )
+ self.rmtree("x/ship2")
+ self.rmtree(tarball_name)
+ self.run_command(
+ ["tar", "cjvf", tarball_name, "x"],
+ # error_list=TarErrorList,
+ # halt_on_failure=True,
+ # fatal_exit_code=3,
+ )
+ self.rmtree("x")
+ if self.run_command(
+ ["scp", tarball_name, "people.mozilla.org:public_html/foo2.tar.bz2"],
+ # error_list=SSHErrorList,
+ ):
+ self.error(
+ "There's been a problem with the scp. We're going to proceed anyway."
+ )
+ self.rmtree(tarball_name)
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ verbose_example = VerboseExample()
+ verbose_example.run_and_exit()
diff --git a/testing/mozharness/external_tools/__init__.py b/testing/mozharness/external_tools/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozharness/external_tools/__init__.py
diff --git a/testing/mozharness/external_tools/gittool.py b/testing/mozharness/external_tools/gittool.py
new file mode 100755
index 0000000000..e7cf524ea9
--- /dev/null
+++ b/testing/mozharness/external_tools/gittool.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+### Compressed module sources ###
+module_sources = [
+ (
+ "util",
+ "eJxlkMEKgzAQRO/5isWTQhFaSg8Ff6LnQknM2ixoItmov1+T2FLb3DY7mZkXGkbnAxjJpiclKI+K\nrOSWSAihsQM28sjBk32WXF0FrKe4YZi8hWAwrZMDuC5fJC1wkaQ+K7eIOqpXm1rTEzmU1ZahLuc/\ncwYlGS9nQNs6jfoACwUDQVIf/RdDAXmULYK0Gpo1aXAz6l3sG6VWJ/nIdjHdx45jWTR3W3xVSKTT\n8NuEE9a+DMzomZz9QOencdyDJ7LvH6zEC9SEeBQ=\n",
+ ),
+ (
+ "util.file",
+ "eJzNVk2P2zYQvftXTF0sVl64am20lwA+FNsCKVCkRZJbEHhpkbKYlUiBpNb2v+8MP0RZ3uTQU3SwJXLmcWbem5GWy+Vb0fbCQD2oykmtLDgNDVO8FVBL/NG4y/zOcrlcyK7XxkGrj0epjulR23Rnm8HJNj01zDatPKRHJ7qeMBe10R1YeS47/SJsWWlVy2PPjMVAou17dnr0y//65QWeCLt0bnkU7m+8FabY7xXrxH6/WiwWXNRQ6Q6BREHnbNY+he3qzQLwwvjjLibZCDRVTihnQdfgTtrb2jX0zFrBQSoQEs0MMOvd8cJaqFCVUCF0xe2qEtbKQypYz1xjS/hD4zbDLLseM44AiskXAdYZTCKGKq1Wa7AauAalHQwWa66gZeZItFBMVHjyljVIK5V1TFVjhgdmRQCMadLl97BeFHAyvDf3a/hoBrH6Ctj2G2DbKdj2BswfsR8LugsLpRGMF9liO7fYTi2McINRN1C7mWvkmUsjKqfNBVXiGOZRjCtYzaGu5TnTDu8DtsOAKXFi/4hMqAzj1UA4frM3+kVyVMFJtrxihnukALsmTiXyQ53y1Fqotf754cDMEySiGukwQ6Yuxae6FIrbEyqpiFGhPfJK+tK2bKV1GEMOfvV5pIfUgEiZCFR/KYzRplg+6qHl3qKWisPDnSXAO7uEOyhSnBn0qsKIOTZLf6HqFtZUaG7d2i91moudJ3es4CMuA1pRzt6uxy4S5oV0jAOik9gBNDywLcDJDqUv6xELhea1ksoThkR5c/qYeXLMqU9caGPmMrNATbuJRcjVNmxjh66oc1JJFUj4h7e//7Tx88pPg9l08H39VD+tqciNOBOFHXMj3Uh2HHUlHZMk349NQw1zuA/Lp4bQqB45vUOrq2dqij50xG+bLTzA5pftrznBqAhvmj29N/o8jytNOfScOVF4y2vmSwyeyyP2eDHWhdViP6hWqmff3DROc4nCBqQNkEeljSLWvRBt6iZfIY4jT907EGdUdSqOM5c30xxQ9DQhS2lJ97MTx5GDLWK0dl7DNoxxG1vmxNoc6RoF2XN9UlO9zpF8s3mI2/1MVIzri5aqCGfXq1fNryrW39ogkukoOULJ26K14vr8STV8yezXyhFR5yx5G3GuRO/gnw9/EiH4soLJKT/CX0SYgOU7jeOrauI73eTZsJySI2i+KE1Td3sdQlDQN5IxTFox1dSsvVFzWVZk0F58n49TBQ3w5UfSYv5LQRuGY1liF5pOcImKDtOlwbFtqAD0AUJ40TkJruYoiq73ct2N3xxl92zpnibtLlUd78ms8MGtXptN+vCl8C3sk/BNvCYqah4aG8+6P+HijXOeQTGWYEHa8OQVcTlWJhau1Yzvw+fQXAthFTOafRkVk6lJq2GAfEren1fwww7yYyYtzoR3WonpjAgoMR78zkrhhD98+Qn/nYhV6MM/2rGhTeTmOHAi7qNxEf9XnsHJfsAoZpirmyCjC4ZzYzuNPYZyE/weVfS9JECh/K8cDlq330sSFItgty6vJfIfMB3quQ==",
+ ),
+ (
+ "util.commands",
+ "eJzdWW1v2zgS/u5fwXPQs9x1laDFvSBA9pDdJnfBtkkucS9XtIUgS+OYG4n0kVQc76+/GZKSKPkl2T3slzOQF4nkcF6eeWZID4fD80pkhkuh2VwqpiohuLhnmSzLVOR6OBwOeLmUyjBdzZZKZqB1/UY2/xleQv3/skgNiirr50Le36PIRgx/GuArdlIPxPdgPuC/oKIkEWkJSTIeDIxaHw8YfvyyFRfv3s55ARsv0yW3764/311cvnuLgqeqggE8ZbA07MLOPFNKquPetPO00DAYDHKYky5JVuYR/kzY69cPq1Td67FbccCyVc64ZnoJGU8LxgUzixS3B5YWq3St2SoVhnG0XXFhAEXAIwjG5/hupJmQxguCp2XBM26KNcsWUoOw791uqJH7J87kch2NnaFzNsLdRySD9nUznF7t0i92zjeUIDW5E5/8erQr5mIuo6GP6DG7nZ7eTIc7h1/pIXsVBDsuuDZv0S8FF0D+GbulhJYHWE/YY1pUQLphZGNuQOFPqaOOC3fuZfebOEHaqMgKwx1cVEpQ95CAeIzwx4sSsKI3zlb8hyspQo/58bha5qkBu9C+V2AqJephvwHCfWfYEfo3lWA4xKKUkReYnDOcUJUgjB7HjN2kXEPoqx/TooD82j1Z0GEErTSzgDqpEAXcIGa4WWBYxZtfQEkUdjHvTHL6IuiqjITNq6JYT0JLjmJKTu/ZLVZ4yN0Bc65A6YjhdI4RqlOPzSBLKzQBkZxLMTIOyGgEmUracLGwAc1rAKPnH1OlWVnlhIh7FG4nysosK9NgFueN9uGVxgmvbYQ7I075hgPsA84mmonpl4/1Dlg5XY6H7STvsiBS2QKyhyRDU7e5zZPH3shuQfaoUeHs5ubqZjQhQQkNnRAnBfoQcFwacYGSAzuhSJca8q617A0z+yw+u3zPolfx27mu14+/Csyr+iGA+38qDmYn6HN4FIg0yq4liAhTzL+ZsNFq1MmkPc7UJscYnDRL8RmUap9bZ7d6KSilgVYxDeoR1IQhPBVVhZNLKQBf68VPsG4fTkb4azRpnBN8eqah1GSpYM6f0LovuOpbjdZ6izYI7dwYngyIPPoyelOMWm2+NfTsFHpmKcel6Mtlahb4dokxI0GRWzv2woKV6XJJK50Pmq245kKbVGTgXDRLNSBpYhaOO/uTffjnWxitgOlqL3zHesjvcGKyxMSXOc+SpazjivOoO0ioyCmk6pN3R0cTRuPtq6P47Z+2hsN/ahmEllmaPfhQ/kryJW5JAwgyokwLQ0bciomxLIDaGiutNoUy7cW0vUnZVtZvoe2WsgfbXMDmvgdD4Zh7M5RmFXJ7kfQ5uliuiG4bF3gNHXLc5thqwBNklRVl0gfQyBUCGyuaKDai12IGQaVMQlTTUAaGFd1Rx+L/usw8W0HIEygkAMy1Zcctjmg9uaVcFalG5rUgPQlmDhwoFtjc2ta1NUPRtrRnbHNwPGhG0Hgc5La3ZJRBx52UI5NymFX30dCj2kMQQWUbLpWNOwucvJMTdnS8kbu7KlJgQX/JbylRfRl1znQGAFv2TQ3V8wkdqWxCdBL4UMjVjp4CvUGDb8KIfb8nf+hzgAprMOESI1n6KHmOB4e0LO2xymd8N1ghLHDfnsG1851GO5yOGvdZZdNNASrIQaTQTiqixjx6pcd4GNhD5lF//SRQcLwZ005HF376cqKdlLQp1DdrZ/YPqb51BzIeLBKs8dZYi5sca4eXgYsnjN8LSTU1juPhvkLmt97W3tmhrUglLbytqEdTXGrSxoRG3qbANMSdiuFeJYZ9WGKW5eNn1kzqPbdA4JmltxQYcpohiDTBwQfKbTo7Rn2NnvPi858ADf+zrJpwAiawHKALgGXUaWbqlgiP1IkrCY7zuciKKofE97X2ImFCpc0kGOD6+de2NG37EJQ612F0N3TlD3sMMHVv3XQOfmbuLilqKdh94oNaEU1y00zWuDGyVHtAJdpU2EZ4odhcQlrGzlF3Lqru6mMtK6YXsipyKudU223ZbxoZpL4sXaItgG1agUbi31JWgmy20pxeLPoex+ewwuWy0uSB2dqAHk8aNXNI80JmD81pl/fdEfYxVu1Oqb6dvr/6NHWnuk5KNpOpfA6e63B+n0Ot98IJG7ZZt+ugu7cL8SeucPDi+qw5ebk/z+dO1+Z6V2QsbgJNGq3toNs6RqTk3RJqRx2gM5kD+8NGd/GSut2T4ot4sA11QHXqbXZBz14NdCa64Y17g6DrfMnNAEuxx2lVOWAfsRfHJQpc8nj/uQxm0iVqU77am4C4cXSgQOu33+UCYf/9AZ7RHyGhY2xEv1pmm1L2U9+Pc5ZFmgFxmr0j1AssM0WsSuQSb/9KqgeN/GPojFAJhKY/WYlcrnTMplj0cAJyyA+QojeNddDV7em5leiOWzm4w1T0g0Sfr9DguZIlm1W8yGfSIJ2njxDX99jj4BBT90A3ZA0xmu2IrT2DOtcTrhOvT7T1DvOTppWJM6ueyqhnaNHTG3Uu62Gr2ZFg3F4RYHXQoYsdjqyX0Tl5gUe7mZIPeKrDNu5Bh/lQC+GahvpC6IMzXCT3a7Rf0EuEHLB/4bFuvrYhy7mCzEi1JpwQWxyu6Mh36DoRaENbKUXgoRuSgd8pW5QytxtN2JH8y9GRjxStoesYYmXprqpxm76qdAi33yy4S2Nr1M+SCy+QRsaBlzEh71zEJravrg+tCwQUszrThULJtbZHdiqbNOy8EcixB3c81B22lgOnMoy0LJmv80QHohc+p+oJGwkz6kauxUlqrxeixjR7v3SXXP003mw6D9gFlWfx4Iq0PTWuUN8ccw8P4hNMGoNTAiuxI76cbpFTphnKAUpNrNKkurXlYlSye2mbA8kKIAncfl1hqWCLnDfUIvrw9pDpQh3YdST/3MR7A5kU7WZuz/SQrdo5AU/2G/QD9t66AEu0bWBIk15+dYMQpscuLXabZWHcm+ZTqqcuDZQNsJGMD0KCso1jn4no+kfgPEuKpfxlQQ7X2vH4FnbTa22gpJruv76L3Zv2eyg/A6MaDX1+IG0OP/JMSS3nZhgY7uum/SKuszy2nbymiy08/3z++93F5TPrduZDONebtY1vmxJVJ/Ub+zXenGcsrEk1q9IahLC4NwsdHsLYx9N/J9en03/EjQLX0h6ErIezQs5moEDFy3XcuTVLNfKY6brc3SrRTi0hISMWbWF17EYo8NNGX/Hzt69fR+w7u3JP2WiWbTg2XOS/Bz0O86oL6y1MGvi9vlAe5VCww3N2+E82xEI6wgNgtyBs5llfgsU2O7zdLiNsnzo1hKg4nflvhJvvh+NbMOf499QYxZHYQN9FfedNgunnFx/OktPp9Obih0/Ts+Ty6ubj6Yd651s8aYCjeDyKOLxgM2XvZVNL7sR/JVCTpbeFxHmxv/8m4lt93kMBBsiCzWVt0ZvP0RGUi/VX4PE5gotW6Y1l+AdfvY5fj8KaiFOTFH2E2LLCvhx9a2/GXK107//6rQOROhfjEauLr32Me6Uqw5aci6pl/xdV4XCrVsU/7o7X+4ubsx+nVzef
u7v3qeBF/P9iDAU0/iIgbW6wI8o9Ndv5tleF9zX8t0Djvwh1IB4=",
+ ),
+ (
+ "util.retry",
+ "eJytVk2P2zYQvetXDFwsLDuC4C2wORhxsUHQFgWKnHqXaYmyiUqkQ1LxGkX/e2dIivpy0h6qw1oa\nDh9nHt/MjmivSluwouVJrVULdSdLq1RjQPilm2ZX49dKJS1/s4049YvB0jLJzlwnwdqo81nIc4K/\ncOi/8jO3v+Mr12lRSNbyotgkSVLxGjS3+p6y0golM2DW8vZqzeElA9NwfqXgDu93GbTsrRgsL7AF\ntCYQH4dT8LeSPJQ0h/Tn/j3bZFA2nMnuevisJMdj9Bkd0Pznzb3+9fdm77BWq9Un1jRw9AGtgdHB\nou1aUDVaQ3hrR5qBTlrRgLBgurLkvDJDRJgb6xqLyYNV8JLDMUa/BmHAXjjIrj1xTciGI5uVIdcb\nEzainLi9cS4jL9kM9/0OmKygUt2pIRNn5cVT0W/J0C3CTbOZULrOAY5zEl2kDGx3bThuiTiRWsqD\nYfoX1TUVRgsl684Xm8NvNQwwoDBbTa4S/yjDI1AjjOUVCPnobKY5aCYMOjgJ9peSEXl3uAm8qNOA\nFVxF2/JKMMubuwvjGK7e5XLV6quo0ItYK/Gm2QkzwwsksBHrbm0KBqy2mASmELMnxD7hz4pU1bVc\nWhOBQohwZYZCwwsTnpu76nSvSV92BKf5l05o1NUSCUPEwzTKBCOSlIEjHnFckbp1ScH1WxtuTETO\nI86R9L526R+9+D3P/SU7NYnSkkBiFBQ4pQBY8YOY0HjsKVxj4bgFSpR6Q7CHwt6M16SyMXWlB9dg\n876inlY8fBj6wX6QjzrnFT9153Q19X6qwBHgJDc2r+AJ0lHbgOkxo66z8YFI7GLP7u12EUiQhA+H\nWI5DJKjd/QSWQhOyVunKCXsP1FeoRJ8MysJeXA/a41ffhPz7agISn1U4EX4IKfQN01id0u6Nf/VQ\n+CFD+LE4uO00qsNtS7fklcF2G/yjqy+/RTNdphZYj7lREQwVv4dVRl8FMXD4Q3d8Gg3ebrjt/SLf\nsJAuduBNPGL+m4T/Kr4S36QyidwSbWM1Ttih1jE/b5DNT7D7D+f9wlAfVVCQu+kq9vUTrxV1M/LE\nJYzl8T3TMyhw4UPW3K2n3/EaAj+M3rfw48JzluWkFJYZz7En7hNvGg2E7AZjLSTKf1YiEt5RbQ1z\ngHB9YOvV10vUfwWheoD1eg0f8T9hqTSz2EKQ2zBHbHLszqylTtYZHEu8/+sA7tmiA2ulRhrL8zyZ\n+8Zh5Hm3G48jz7sB5cR0utlPYEKESfQpImRRowIVxkmNebTt1Q1a3jqeIMZbyeWKA9S8dveP6tyz\nQXhh2PGbwrjjfxBjxPS39Ti7gmR21DLE5PFqyB3v+3U2OsY5EEsjBP3vIlhwFlEKYb/D0v/M0CN2\n7oLjNNTHkvwDPQB6iA==\n",
+ ),
+ (
+ "util.git",
+ "eJzNW+uT27YR/66/ApF7IymWeEk/Xuam4/jReJrGntiZdMZ2JEoEJcQUIRPgyddM/vfuAyDAh+S75tFqxpZIAovdxe5vH8SNx+NndbmxSpdG5LoSqrSySuFGuRVHZXditx2PxyO1P+jKCm38L1OvD5XeSNPcqauiUGt/VcnRKK/0XtRWFclG7/dpmRnhn9blcrPP5jBsr2/k8pDa3ZzufqiVtPgsmp2rQvqZJs3lsi4LVb4f+bUKvd0CvyP4Ftf+KtlK+y38lNV0uSzTvVwuZ6PRaFOkxognMk/rwr7apZX8OjXyaiTgc4BHo+4joNi9NUVCmczFcp++l8t0bXRRWzmt5EHPmJTKBV4lxqaVNajI6RjFuLq8HLsh+Hkg/gkUhHuCKjTCk2sGoXKAC6T3ppBlTOhdMwifwiD/7MKMxQVsV4KTEyCJ31P8b0ZTZAEcjpGIKLWFXScCV11yXQIk4YgH2LriWU4ZoO8lXpKyY12slTVAi+0D6FVGJijpoVA2orjTxoZhH2oNGsWpSSltoTdzMR7PRpE++gOJ1cLYSh2mY1BPmOK49eL8rFU5xfXmglXCEuxSAxLcKAPeMM0kPvaXThRwhe+JlBGvq1ryNvMIIT8qA4KCKnEqOg3OsNVtpNXYwKdvJltlJ3MxAYvFr8VCl7AvpaSL8stJWP7dXGyO2TUSnkV7REIhI7ynHzfyEHtm8jgtCpm95KunVaWrq+7sZ2lhZEv+vBE9x508JzkN6AieOIXzs01airUUqVhXabnZCUAYm24FPkvuoyKz08cFECXVZJOGuz9HMaoES2WtrAEDrulZUMxzeKzSQv1bgveCIewP9pY8wyirq1uRWtaMeJ7TD68xQEShTDmxPGUulAXMLQrUGANjxtqEeStceAXDSay5sDtZolrhLjurn7ipZGphotcubBR6uDd9ZTJVkSwRDIEBJqrM9XRMyyL2A4DMRVutEfROwxNZePjED4YQ1BeuV4CQA4vRsAhbyBlZufmsNeoUW51hIGFburyzGu9qE2imnfltCYakhQn0/IH4u9+iHNT8Xuja4vJwE0AHdt9qp3+Oo3uZKbwApuQGTAGHgIYcLZYZjYA2E/67ZctIxI/sOTskvIM9P+KAQw2ABzoA7e4hHKlDwVtvHD24D+CKll0gIwbDVcbGgWIDdzuwxI0ubQpDMSRrmiFLG3PoqH0r7cR4aYAo8tcWRFXG9h0YtgKjIow0sZWwC754Rc4mUiMilYMGZSKrqoSQcy3++kV7Mx6I78B+02ZtUBLSSJIE4FLonBhjMQxekoexW6UdQg2J1v0qVejpjhP0qMAAgBCYagNDCAQM1TiMfn2YsJ+8G/CGU7PDnFHbzsWY3QEmzlzkauEh3veYZJZAliJu7GEDOO0UAiq8AZDKBEwjq7gPQrRBsbfpiKsgaYSwL/UBUpU3Y1gMPBYc+GZBkR8vFgu4u4BVxhF8zwWgBfjTdUzk+cunc1GX6kZWJi2WpTxifDTXKNosth+ckRxTgOmZ+OxadEyoxX1jqLU91Jhx0FxePAFzz6azhHOIsAJqGviNUKvU1Z7AZADGmPKsFZBA9TB72hC65m0BX4huDdAaJ6jBWS+4McnfJ7xh5ua4OGFK7GKQdgGu4G80oeCPCPoc4J7BryYkGcYuxYjCVmiNLPI5zajkti7Sqj2Uhl2SzM46ISeXle2b+2jAwQDbcuVyqbXWBf8C8fhhJRNyW3bVUzZLdclvtMrIHNnFzJLU8kljU3kYi9k87tI4bF42HDedkQwgEE7AMbEqPQHOQqZZK0XOnE0YsAmKIJShzzmaOMNIC4htJcQF02UntiTK7EHvev0z2IlhAMw1fgcCE16cqC/dyHNEmY2IqovMVKrS9gXaMOo4mWGsyaMEJTlWCgqG8YV5W2KN1Frag+umkGkZ1zAPxOujBmPKISbu4ZkRYxqD5rPXWV1ITEyOOwU5LsVriLRHKY4pxFajRZ5WSQPnrcSWqJCd5tH/2cQho1P+oA16ZtMsW2J6Y/1e8QXVYc1FJAt41qMsw3jAzygbz8Qqmrbyjg6FFwCXjxiYYqzwx4qtHHaHGgUweBWts/K+25OWB5EJZJzFD3MaST+LDNKNgMrvzpK+kpbhCyYR7Hip78AjLLmAaffjc9vw2edxGFg9ZwysN7J0U+YYHb7DJEdxkuO2K9MSsxuH/SrUmh00DIIsbu6Keefs7Z6YhyLhGMpLO6hHc6dRfrGnBgdkjnazm1bonObh9O2rhzPxdppLuPl29hf0VhgjzSY9oHKDWue0UCsh2A9mAPtkW+n6MP2yb1X3s6jznjPoDJjlZXJdbxF7rtBOyHd4KKJHaIXsbx1ToJWz9uRjRjThOua6G/ZH/QmAVpj9oZ0NFYTIqmmxCjUYygj/T4+ArRdm5ng/rbx5WC8qJ+/l0AOxrc0kAEqbxwvzSb4CM3fHUO/mrnc1F61uyTXqMfRt3GV9yCAgLXEkOcw8KMH3FK+7TUbQmcLMzTgaFCiYRKf9gC1EAz4NOQNVeCscRK2CHezPyvOyQjhZOT5XYrOTGyjTqITFVitRY3BP2EyeaI5jWITSErDrWNrxKrv0AMgxB+/eFDWpHtI4o9bFLbCq12tZYUuWchDqCjWNDCcWMQg2MOd62mKfBErh2kjCOjdKrCXgiGwYcyI3dtzoD6kNd3VbaubcQpY3qtIltoin478/f7189c2j758uv3706unyyfPvwW5Q57PhVfre0nQ9G3dtdUHbXHTqCMpwTNj0Zu6QxccEkIU2g/QgYpD0Sd3dKHduRg62f6jcx70MfSVyozCrmdSq/4gNbJI1A12nrF2o4QcEr60C3URDWdt+hKtnntIXGG57GWT2mFYlcAn86rrIsNJ3fYq453FhvoIaxaRraprgE/iOZZnjSksUfIDN2GJI1b2HvX2gCqffM+dqpVPoP0bvQ75oUqfZ5gryExW+G9HN4vtNtpamABBd3iDQcaHuAm+mzmxoCjQV3VeRC/fagGzxoZsWyXfOGrtNndA0dbkNbBkaLEAC4CXdIzp9G+uzE4oVV7P1zbXf5HsgXsAi1VHBHoO4lKiATppqyX/o2SDWu++A1O67DfiM1r6BSO97CKt1XTGjqKU4lSZ1qO3OikORbuRZH4+dePN+SW/KPlU+kWKgaFRlJj8mOG88O2NZDeEB8xrqzobx7T2K+qkDQ85sodMcxI6jhKQWtHeAUANuzhZMunaqS5vIO+cAh/IcpaOwS29c8Gpe2qQFpqO31Jo3et50Yis5QSWLUtI6jgBlomClt3aHbxjZ6/WSb19H/aX4vRDsVnunHmBlmGl0ReIoMC3MQW5UriRk41A1YsMXs2cJSTcvGer6sGp4zYHv9LrvcFqvr3j1mLN0Y+u0gJiNTOSCzRmWBg6d6FggseARCZ8JkCEf3R54R84/zSWa76fftg0pi/pEYZNNs0Vuc5Q9ubrfGX//ahCzuiDxjEigs4JkhjGicd3W2F5EZAKvXzx5cSW+AWwD36SFOU8l7DBQubwnpZlLm27N5b/g0yMSvWzrWVP8Iepxw4bD
XAeeZr25w29A7k6xB4mdFyz3alAGzb3Se0lmDzZQWnGElG372cC4xxysIoTEQFLJBXf2uxMCcNGLhLAntLcQKBft8DcQTfwnjoPnhoU3DP4zaCz32+doP9zrM7ztypjrsYYYosrx/Xf7bnT/kD2PMtFmB05moPfchPtuez/N6C52YkR7s/vaHtz62NFY6/+Vj51Imv9H8oMLQzbH2InGw73e0NBgQSdsURPfAfE4HSVRQ7XKDz7uc49yow+3XkVXlPz7KHHlWwG5r+ZdlImCDEGNQjyOHBDPA0FsgQicmihKx2GjWaOdpn46whFEtYO108PlhZlQp6sXCwd0YNhBkEEfv4FfYyGpwXehnsh4SOqwpbkrXnsShiEuZYjrHfcAj+LQTk1PZMdNM6TPgsqjvkZbUG7BDx7sADqNkQxNj6f6fv6pHJ6bK+1WC1eRnLj3mzeh8fIYqZrQHcctpZTdYM+k6cSQXcNtCmSQfWI6R7DgX41RvpkI8RrSp6OuqC4kc/ZnOuh4yJ1bKFzM0PG5fieFG8UwAhehMa2eD1Jn8btnTVDKqLw2DXM1+EiYHGmLKDglEgnIpwfFc0dWmuaO29rzFe6JEhR7myzwEGY8shZ16aqFSFFuThQtsX3Nd+nARZdmm+7jWKGENjyh7WyDwN+8G2Yb5XkdK20bYfR7yJm7r4f9x0WIf8jbtU6r7DkeUKnqgx1gqZc23S26SP94OoYwvy7kvm1pTp0n9YMfPJ6iyjos34+fQe0v3YmYeAtFnkJhmX0F3/xwnUIZaHXoBztL6b47RvW70xnv4jbMYBjyaf0P4MV8GORysXDyoTf/LYDRPkuoVZpNJ4tFqRdUl+raToLF+rDVmiQ/Wpz0Bt9l0+G56NRbpzWZXZ1Yjp9OgsQN1YCF78JBk+aIiSM+KLqzMG50RsHNY+1gMBhsi0c99pCuNuhK8R2gtAUZrrfN71hcJ5vb2q7JnRAMlVIhqHqAxZ6Sr+yHIYiZzprXnrhy1CbQNJWVWFd0YsotD+t9y/nHLi23MsBipswG3Cw6iedaGN2iH+LNUWGNSWun1o39kZC1GfXTF2LKVQ0tC5drWejjjCpfXW24+OaSkvoijkwGxpjDv3JDmQyint7vActVZvwL6z119xHguWcClPZ8tK10R6iQjk3haSaMBd0kEKv4vIb8UCtAGtQI0J40RHD6YsGzJnMPIMqfLqMeToUaX98KXQCXAt8s0jF0TW9Xhvoo/a5X14m9b/lTVvxCvaHxUEx++mISnZoORt89SNrGHYcFPUdlh/D50+TCXO5TyL4qzuAa8z6DZu3ZnPtNO2+fOvn+XYVmwl7muwj9Kd/uVyldB+97dKd+HEq4eAD2PzoZFjVfwDa9cxHQn8i38GwCocOdEyXuDvUSJTrYGKdLLkOK3zv18oE7pAunwj9qnBU7GPzP1nt/bjRvSsb/w3D+QDzW5Y2snG1gM7hs/paCD6/RSd0JlnJcMoYj2s746W8ehv6oYyhhIGU0PkenQeK0Idh0nDU8oU4mGx4+C/1EZol6gNxphNJuI5t4gblzeutb0UwMylO9Udx41RAr6BCUa5DSW3CqBlJc8VBXW4legpHNnUVyMrWzE2RgMgvg28E7lbe8uVOyBWLjh9TSdIcbLn+JJ/16+Yuj++tVe1QEH9GgcQLOtU/xfXuvsgxw04LMFuLEF9FByr4l9vnfQR1t/mh2ZycCzu+g6M/Pcfx5xOmfosffxs3MOfqJ8BWfvnKRi/62p3fkinOeJiUIx1hXOH7lE7zilo5PRMckmkTu5GnncPLKnXbGi2+ePnrij/Px37n9LserXOA4c570P8E3N0c=",
+ ),
+]
+
+### Load the compressed module sources ###
+import sys, types, base64, zlib
+
+for name, source in module_sources:
+ source = zlib.decompress(base64.b64decode(source))
+    mod = types.ModuleType(name)
+ exec(source, mod.__dict__)
+ sys.modules[name] = mod
+
+### Original script follows ###
+#!/usr/bin/python
+"""%prog [-p|--props-file] [-r|--rev revision] [-b|--branch branch]
+ [-s|--shared-dir shared_dir] repo [dest]
+
+Tool to do safe operations with git.
+
+revision/branch on commandline will override those in props-file"""
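+# Example (illustrative; the repository URL, revision, and share dir are hypothetical):
+#   gittool.py -r 0123456789ab -s /builds/git-shared \
+#       https://example.com/project.git project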
+
+# Import snippet to find tools lib
+import os
+import site
+import logging
+
+site.addsitedir(
+ os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../lib/python")
+)
+
+try:
+ import simplejson as json
+
+ assert json
+except ImportError:
+ import json
+
+from util.git import git
+
+
+if __name__ == "__main__":
+ from optparse import OptionParser
+
+ parser = OptionParser(__doc__)
+ parser.set_defaults(
+ revision=os.environ.get("GIT_REV"),
+ branch=os.environ.get("GIT_BRANCH", None),
+ loglevel=logging.INFO,
+ shared_dir=os.environ.get("GIT_SHARE_BASE_DIR"),
+ mirrors=None,
+ clean=False,
+ )
+ parser.add_option(
+ "-r", "--rev", dest="revision", help="which revision to update to"
+ )
+ parser.add_option("-b", "--branch", dest="branch", help="which branch to update to")
+ parser.add_option(
+ "-p",
+ "--props-file",
+ dest="propsfile",
+ help="build json file containing revision information",
+ )
+ parser.add_option(
+ "-s", "--shared-dir", dest="shared_dir", help="clone to a shared directory"
+ )
+ parser.add_option(
+ "--mirror",
+ dest="mirrors",
+ action="append",
+ help="add a mirror to try cloning/pulling from before repo",
+ )
+ parser.add_option(
+ "--clean",
+ dest="clean",
+ action="store_true",
+ default=False,
+ help="run 'git clean' after updating the local repository",
+ )
+ parser.add_option(
+ "-v", "--verbose", dest="loglevel", action="store_const", const=logging.DEBUG
+ )
+
+ options, args = parser.parse_args()
+
+ logging.basicConfig(level=options.loglevel, format="%(asctime)s %(message)s")
+
+ if len(args) not in (1, 2):
+ parser.error("Invalid number of arguments")
+
+ repo = args[0]
+ if len(args) == 2:
+ dest = args[1]
+ else:
+ dest = os.path.basename(repo)
+
+ # Parse propsfile
+ if options.propsfile:
+ js = json.load(open(options.propsfile))
+ if options.revision is None:
+ options.revision = js["sourcestamp"]["revision"]
+ if options.branch is None:
+ options.branch = js["sourcestamp"]["branch"]
+
+ got_revision = git(
+ repo,
+ dest,
+ options.branch,
+ options.revision,
+ shareBase=options.shared_dir,
+ mirrors=options.mirrors,
+ clean_dest=options.clean,
+ )
+
+ print("Got revision %s" % got_revision)
diff --git a/testing/mozharness/external_tools/machine-configuration.json b/testing/mozharness/external_tools/machine-configuration.json
new file mode 100644
index 0000000000..74a0baf5a2
--- /dev/null
+++ b/testing/mozharness/external_tools/machine-configuration.json
@@ -0,0 +1,32 @@
+{
+ "win7": {
+ "screen_resolution": {
+ "x": 1280,
+ "y": 1024
+ },
+ "mouse_position": {
+ "x": 1010,
+ "y": 10
+ }
+ },
+ "win10-hw": {
+ "screen_resolution": {
+ "x": 1920,
+ "y": 1080
+ },
+ "mouse_position": {
+ "x": 1010,
+ "y": 10
+ }
+ },
+ "win10-vm": {
+ "screen_resolution": {
+ "x": 2560,
+ "y": 1440
+ },
+ "mouse_position": {
+ "x": 1010,
+ "y": 10
+ }
+ }
+}
diff --git a/testing/mozharness/external_tools/mouse_and_screen_resolution.py b/testing/mozharness/external_tools/mouse_and_screen_resolution.py
new file mode 100755
index 0000000000..ad1f8d9e4b
--- /dev/null
+++ b/testing/mozharness/external_tools/mouse_and_screen_resolution.py
@@ -0,0 +1,196 @@
+#! /usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#
+# Script name: mouse_and_screen_resolution.py
+# Purpose: Sets mouse position and screen resolution for Windows 7 32-bit slaves
+# Author(s): Zambrano Gasparnian, Armen <armenzg@mozilla.com>
+# Target: Python 2.7 or newer
+#
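+# Example (hypothetical) invocation:
+#   python mouse_and_screen_resolution.py --configuration-file machine-configuration.json --platform win10-vm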
+
+import os
+import platform
+import socket
+import sys
+import time
+from ctypes import Structure, byref, c_ulong, windll
+from optparse import OptionParser
+
+try:
+ from urllib2 import urlopen, URLError, HTTPError
+except ImportError:
+ from urllib.request import urlopen
+ from urllib.error import URLError, HTTPError
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+default_screen_resolution = {"x": 1024, "y": 768}
+default_mouse_position = {"x": 1010, "y": 10}
+
+
+def wfetch(url, retries=5):
+ while True:
+ try:
+ return urlopen(url, timeout=30).read()
+ except HTTPError as e:
+ print("Failed to fetch '%s': %s" % (url, str(e)))
+ except URLError as e:
+ print("Failed to fetch '%s': %s" % (url, str(e)))
+ except socket.timeout as e:
+ print("Time out accessing %s: %s" % (url, str(e)))
+ except socket.error as e:
+ print("Socket error when accessing %s: %s" % (url, str(e)))
+ if retries < 0:
+ raise Exception("Could not fetch url '%s'" % url)
+ retries -= 1
+ print("Retrying")
+ time.sleep(60)
+
+
+def main():
+
+    # NOTE: this script was written for Windows 7, but works well with Windows 10
+ parser = OptionParser()
+ parser.add_option(
+ "--configuration-url",
+ dest="configuration_url",
+ type="string",
+ help="Specifies the url of the configuration file.",
+ )
+ parser.add_option(
+ "--configuration-file",
+ dest="configuration_file",
+ type="string",
+ help="Specifies the path to the configuration file.",
+ )
+ parser.add_option(
+ "--platform",
+ dest="platform",
+ type="string",
+ default="win7",
+ help="Specifies the platform to coose inside the configuratoin-file.",
+ )
+ (options, args) = parser.parse_args()
+
+    if options.configuration_url is None and options.configuration_file is None:
+ print("You must specify --configuration-url or --configuration-file.")
+ return 1
+
+ if options.configuration_file:
+ with open(options.configuration_file) as f:
+ conf_dict = json.load(f)
+ new_screen_resolution = conf_dict[options.platform]["screen_resolution"]
+ new_mouse_position = conf_dict[options.platform]["mouse_position"]
+ else:
+ try:
+ conf_dict = json.loads(wfetch(options.configuration_url))
+ new_screen_resolution = conf_dict[options.platform]["screen_resolution"]
+ new_mouse_position = conf_dict[options.platform]["mouse_position"]
+ except HTTPError as e:
+ print(
+ "This branch does not seem to have the configuration file %s" % str(e)
+ )
+ print("Let's fail over to 1024x768.")
+ new_screen_resolution = default_screen_resolution
+ new_mouse_position = default_mouse_position
+ except URLError as e:
+ print("INFRA-ERROR: We couldn't reach hg.mozilla.org: %s" % str(e))
+ return 1
+ except Exception as e:
+ print("ERROR: We were not expecting any more exceptions: %s" % str(e))
+ return 1
+
+ current_screen_resolution = queryScreenResolution()
+ print("Screen resolution (current): (%(x)s, %(y)s)" % (current_screen_resolution))
+
+ if current_screen_resolution == new_screen_resolution:
+ print("No need to change the screen resolution.")
+ else:
+ print("Changing the screen resolution...")
+ try:
+ changeScreenResolution(
+ new_screen_resolution["x"], new_screen_resolution["y"]
+ )
+ except Exception as e:
+ print(
+ "INFRA-ERROR: We have attempted to change the screen resolution but ",
+ "something went wrong: %s" % str(e),
+ )
+ return 1
+ time.sleep(3) # just in case
+ current_screen_resolution = queryScreenResolution()
+ print("Screen resolution (new): (%(x)s, %(y)s)" % current_screen_resolution)
+
+ print("Mouse position (current): (%(x)s, %(y)s)" % (queryMousePosition()))
+ setCursorPos(new_mouse_position["x"], new_mouse_position["y"])
+ current_mouse_position = queryMousePosition()
+ print("Mouse position (new): (%(x)s, %(y)s)" % (current_mouse_position))
+
+ if (
+ current_screen_resolution != new_screen_resolution
+ or current_mouse_position != new_mouse_position
+ ):
+ print(
+ "INFRA-ERROR: The new screen resolution or mouse positions are not what we expected"
+ )
+ return 1
+ else:
+ return 0
+
+
+class POINT(Structure):
+ _fields_ = [("x", c_ulong), ("y", c_ulong)]
+
+
+def queryMousePosition():
+ pt = POINT()
+ windll.user32.GetCursorPos(byref(pt))
+ return {"x": pt.x, "y": pt.y}
+
+
+def setCursorPos(x, y):
+ windll.user32.SetCursorPos(x, y)
+
+
+def queryScreenResolution():
+ return {
+ "x": windll.user32.GetSystemMetrics(0),
+ "y": windll.user32.GetSystemMetrics(1),
+ }
+
+
+def changeScreenResolution(xres=None, yres=None, BitsPerPixel=None):
+ import struct
+
+ DM_BITSPERPEL = 0x00040000
+ DM_PELSWIDTH = 0x00080000
+ DM_PELSHEIGHT = 0x00100000
+ CDS_FULLSCREEN = 0x00000004
+ SIZEOF_DEVMODE = 148
+
+ DevModeData = struct.calcsize("32BHH") * b"\x00"
+ DevModeData += struct.pack("H", SIZEOF_DEVMODE)
+ DevModeData += struct.calcsize("H") * b"\x00"
+ dwFields = (
+ (xres and DM_PELSWIDTH or 0)
+ | (yres and DM_PELSHEIGHT or 0)
+ | (BitsPerPixel and DM_BITSPERPEL or 0)
+ )
+ DevModeData += struct.pack("L", dwFields)
+ DevModeData += struct.calcsize("l9h32BHL") * b"\x00"
+ DevModeData += struct.pack("LLL", BitsPerPixel or 0, xres or 0, yres or 0)
+ DevModeData += struct.calcsize("8L") * b"\x00"
+
+ return windll.user32.ChangeDisplaySettingsA(DevModeData, 0)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/testing/mozharness/external_tools/packagesymbols.py b/testing/mozharness/external_tools/packagesymbols.py
new file mode 100644
index 0000000000..0cbe23fcb8
--- /dev/null
+++ b/testing/mozharness/external_tools/packagesymbols.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import argparse
+import os
+import subprocess
+import sys
+import zipfile
+
+
+class ProcError(Exception):
+ def __init__(self, returncode, stderr):
+ self.returncode = returncode
+ self.stderr = stderr
+
+
+def check_output(command):
+ proc = subprocess.Popen(
+ command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
+ )
+ stdout, stderr = proc.communicate()
+ if proc.returncode != 0:
+ raise ProcError(proc.returncode, stderr)
+ return stdout
+
+
+def process_file(dump_syms, path):
+ try:
+ stdout = check_output([dump_syms, path])
+ except ProcError as e:
+ print('Error: running "%s %s": %s' % (dump_syms, path, e.stderr))
+ return None, None, None
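+    # The first line of dump_syms output is a Breakpad MODULE record, e.g.
+    # "MODULE Linux x86_64 <debug-id> libxul.so" (illustrative values).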
+ bits = stdout.splitlines()[0].split(" ", 4)
+ if len(bits) != 5:
+ return None, None, None
+ _, platform, cpu_arch, debug_id, debug_file = bits
+ if debug_file.lower().endswith(".pdb"):
+ sym_file = debug_file[:-4] + ".sym"
+ else:
+ sym_file = debug_file + ".sym"
+ filename = os.path.join(debug_file, debug_id, sym_file)
+ debug_filename = os.path.join(debug_file, debug_id, debug_file)
+ return filename, stdout, debug_filename
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("dump_syms", help="Path to dump_syms binary")
+ parser.add_argument("files", nargs="+", help="Path to files to dump symbols from")
+ parser.add_argument(
+ "--symbol-zip",
+ default="symbols.zip",
+ help="Name of zip file to put dumped symbols in",
+ )
+ parser.add_argument(
+ "--no-binaries",
+ action="store_true",
+ default=False,
+ help="Don't store binaries in zip file",
+ )
+ args = parser.parse_args()
+ count = 0
+ with zipfile.ZipFile(args.symbol_zip, "w", zipfile.ZIP_DEFLATED) as zf:
+ for f in args.files:
+ filename, contents, debug_filename = process_file(args.dump_syms, f)
+ if not (filename and contents):
+ print("Error dumping symbols")
+ sys.exit(1)
+ zf.writestr(filename, contents)
+ count += 1
+ if not args.no_binaries:
+ zf.write(f, debug_filename)
+ count += 1
+ print("Added %d files to %s" % (count, args.symbol_zip))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/testing/mozharness/external_tools/performance-artifact-schema.json b/testing/mozharness/external_tools/performance-artifact-schema.json
new file mode 100644
index 0000000000..a5df29b26c
--- /dev/null
+++ b/testing/mozharness/external_tools/performance-artifact-schema.json
@@ -0,0 +1,233 @@
+{
+ "definitions": {
+ "application_schema": {
+ "properties": {
+ "name": {
+ "title": "Application under performance test",
+ "enum": [
+ "firefox",
+ "chrome",
+ "chrome-m",
+ "chromium",
+ "fennec",
+ "geckoview",
+ "refbrow",
+ "fenix",
+ "safari"
+ ],
+ "maxLength": 10,
+ "type": "string"
+ },
+ "version": {
+ "title": "Application's version",
+ "maxLength": 40,
+ "type": "string"
+ }
+ },
+ "required": [
+ "name"
+ ],
+ "type": "object"
+ },
+ "framework_schema": {
+ "properties": {
+ "name": {
+ "title": "Framework name",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "subtest_schema": {
+ "properties": {
+ "name": {
+ "title": "Subtest name",
+ "type": "string"
+ },
+ "publicName": {
+ "title": "Public subtest name",
+ "description": "Allows renaming test's name, without breaking existing performance data series",
+ "maxLength": 30,
+ "type": "string"
+ },
+ "value": {
+ "description": "Summary value for subtest",
+ "title": "Subtest value",
+ "type": "number",
+ "minimum": -1000000000000.0,
+ "maximum": 1000000000000.0
+ },
+ "unit": {
+ "title": "Measurement unit",
+ "type": "string",
+ "minLength": 1,
+ "maxLength": 20
+ },
+ "lowerIsBetter": {
+ "description": "Whether lower values are better for subtest",
+ "title": "Lower is better",
+ "type": "boolean"
+ },
+ "shouldAlert": {
+ "description": "Whether we should alert",
+ "title": "Should alert",
+ "type": "boolean"
+ },
+ "alertThreshold": {
+ "description": "% change threshold before alerting",
+ "title": "Alert threshold",
+ "type": "number",
+ "minimum": 0.0,
+ "maximum": 1000.0
+ },
+ "minBackWindow": {
+ "description": "Minimum back window to use for alerting",
+ "title": "Minimum back window",
+ "type": "number",
+ "minimum": 1,
+ "maximum": 255
+ },
+ "maxBackWindow": {
+ "description": "Maximum back window to use for alerting",
+ "title": "Maximum back window",
+ "type": "number",
+ "minimum": 1,
+ "maximum": 255
+ },
+ "foreWindow": {
+ "description": "Fore window to use for alerting",
+ "title": "Fore window",
+ "type": "number",
+ "minimum": 1,
+ "maximum": 255
+ }
+ },
+ "required": [
+ "name",
+ "value"
+ ],
+ "type": "object"
+ },
+ "suite_schema": {
+ "properties": {
+ "name": {
+ "title": "Suite name",
+ "type": "string"
+ },
+ "publicName": {
+ "title": "Public suite name",
+ "description": "Allows renaming suite's name, without breaking existing performance data series",
+ "maxLength": 30,
+ "type": "string"
+ },
+ "tags": {
+ "type": "array",
+ "title": "Free form tags, which ease the grouping & searching of performance tests",
+ "description": "Similar to extraOptions, except it does not break existing performance data series",
+ "items": {
+ "type": "string",
+ "pattern": "^[a-zA-Z0-9-]{1,24}$"
+ },
+ "uniqueItems": true,
+ "maxItems": 14
+ },
+ "extraOptions": {
+ "type": "array",
+ "title": "Extra options used in running suite",
+ "items": {
+ "type": "string",
+ "maxLength": 100
+ },
+ "uniqueItems": true,
+ "maxItems": 8
+ },
+ "subtests": {
+ "items": {
+ "$ref": "#/definitions/subtest_schema"
+ },
+ "title": "Subtests",
+ "type": "array"
+ },
+ "value": {
+ "title": "Suite value",
+ "type": "number",
+ "minimum": -1000000000000.0,
+ "maximum": 1000000000000.0
+ },
+ "unit": {
+ "title": "Measurement unit",
+ "type": "string",
+ "minLength": 1,
+ "maxLength": 20
+ },
+ "lowerIsBetter": {
+ "description": "Whether lower values are better for suite",
+ "title": "Lower is better",
+ "type": "boolean"
+ },
+ "shouldAlert": {
+ "description": "Whether we should alert on this suite (overrides default behaviour)",
+ "title": "Should alert",
+ "type": "boolean"
+ },
+ "alertThreshold": {
+ "description": "% change threshold before alerting",
+ "title": "Alert threshold",
+ "type": "number",
+ "minimum": 0.0,
+ "maximum": 1000.0
+ },
+ "minBackWindow": {
+ "description": "Minimum back window to use for alerting",
+ "title": "Minimum back window",
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 255
+ },
+ "maxBackWindow": {
+ "description": "Maximum back window to use for alerting",
+ "title": "Maximum back window",
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 255
+ },
+ "foreWindow": {
+ "description": "Fore window to use for alerting",
+ "title": "Fore window",
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 255
+ }
+ },
+ "required": [
+ "name",
+ "subtests"
+ ],
+ "type": "object"
+ }
+ },
+ "description": "Structure for submitting performance data as part of a job",
+ "id": "https://treeherder.mozilla.org/schemas/v1/performance-artifact.json#",
+ "properties": {
+ "application": {
+ "$ref": "#/definitions/application_schema"
+ },
+ "framework": {
+ "$ref": "#/definitions/framework_schema"
+ },
+ "suites": {
+ "description": "List of suite-level data submitted as part of this structure",
+ "items": {
+ "$ref": "#/definitions/suite_schema"
+ },
+ "title": "Performance suites",
+ "type": "array"
+ }
+ },
+ "required": [
+ "framework",
+ "suites"
+ ],
+ "title": "Perfherder Schema",
+ "type": "object"
+}
diff --git a/testing/mozharness/external_tools/robustcheckout.py b/testing/mozharness/external_tools/robustcheckout.py
new file mode 100644
index 0000000000..661bcfcab1
--- /dev/null
+++ b/testing/mozharness/external_tools/robustcheckout.py
@@ -0,0 +1,835 @@
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""Robustly perform a checkout.
+
+This extension provides the ``hg robustcheckout`` command for
+ensuring a working directory is updated to the specified revision
+from a source repo using best practices to ensure optimal clone
+times and storage efficiency.
+"""
+
+import contextlib
+import json
+import os
+import random
+import re
+import socket
+import ssl
+import time
+
+from mercurial.i18n import _
+from mercurial.node import hex, nullid
+from mercurial import (
+ commands,
+ configitems,
+ error,
+ exchange,
+ extensions,
+ hg,
+ match as matchmod,
+ pycompat,
+ registrar,
+ scmutil,
+ urllibcompat,
+ util,
+ vfs,
+)
+
+# Causes worker to purge caches on process exit and for task to retry.
+EXIT_PURGE_CACHE = 72
+
+testedwith = b"4.5 4.6 4.7 4.8 4.9 5.0 5.1 5.2 5.3 5.4 5.5 5.6 5.7 5.8"
+minimumhgversion = b"4.5"
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem(b"robustcheckout", b"retryjittermin", default=configitems.dynamicdefault)
+configitem(b"robustcheckout", b"retryjittermax", default=configitems.dynamicdefault)
+
+
+def getsparse():
+ from mercurial import sparse
+
+ return sparse
+
+
+def peerlookup(remote, v):
+ # TRACKING hg46 4.6 added commandexecutor API.
+ if util.safehasattr(remote, "commandexecutor"):
+ with remote.commandexecutor() as e:
+ return e.callcommand(b"lookup", {b"key": v}).result()
+ else:
+ return remote.lookup(v)
+
+
+@command(
+ b"robustcheckout",
+ [
+ (b"", b"upstream", b"", b"URL of upstream repo to clone from"),
+ (b"r", b"revision", b"", b"Revision to check out"),
+ (b"b", b"branch", b"", b"Branch to check out"),
+ (b"", b"purge", False, b"Whether to purge the working directory"),
+ (b"", b"sharebase", b"", b"Directory where shared repos should be placed"),
+ (
+ b"",
+ b"networkattempts",
+ 3,
+ b"Maximum number of attempts for network " b"operations",
+ ),
+ (b"", b"sparseprofile", b"", b"Sparse checkout profile to use (path in repo)"),
+ (
+ b"U",
+ b"noupdate",
+ False,
+ b"the clone will include an empty working directory\n"
+ b"(only a repository)",
+ ),
+ ],
+ b"[OPTION]... URL DEST",
+ norepo=True,
+)
+def robustcheckout(
+ ui,
+ url,
+ dest,
+ upstream=None,
+ revision=None,
+ branch=None,
+ purge=False,
+ sharebase=None,
+ networkattempts=None,
+ sparseprofile=None,
+ noupdate=False,
+):
+ """Ensure a working copy has the specified revision checked out.
+
+ Repository data is automatically pooled into the common directory
+ specified by ``--sharebase``, which is a required argument. It is required
+ because pooling storage prevents excessive cloning, which makes operations
+ complete faster.
+
+ One of ``--revision`` or ``--branch`` must be specified. ``--revision``
+ is preferred, as it is deterministic and there is no ambiguity as to which
+ revision will actually be checked out.
+
+ If ``--upstream`` is used, the repo at that URL is used to perform the
+ initial clone instead of cloning from the repo where the desired revision
+ is located.
+
+    ``--purge`` controls whether to remove untracked and ignored files from
+ the working directory. If used, the end state of the working directory
+ should only contain files explicitly under version control for the requested
+ revision.
+
+ ``--sparseprofile`` can be used to specify a sparse checkout profile to use.
+ The sparse checkout profile corresponds to a file in the revision to be
+ checked out. If a previous sparse profile or config is present, it will be
+ replaced by this sparse profile. We choose not to "widen" the sparse config
+ so operations are as deterministic as possible. If an existing checkout
+ is present and it isn't using a sparse checkout, we error. This is to
+ prevent accidentally enabling sparse on a repository that may have
+ clients that aren't sparse aware. Sparse checkout support requires Mercurial
+ 4.3 or newer and the ``sparse`` extension must be enabled.
+ """
+ if not revision and not branch:
+ raise error.Abort(b"must specify one of --revision or --branch")
+
+ if revision and branch:
+ raise error.Abort(b"cannot specify both --revision and --branch")
+
+ # Require revision to look like a SHA-1.
+ if revision:
+ if (
+ len(revision) < 12
+ or len(revision) > 40
+ or not re.match(b"^[a-f0-9]+$", revision)
+ ):
+ raise error.Abort(
+ b"--revision must be a SHA-1 fragment 12-40 " b"characters long"
+ )
+
+ sharebase = sharebase or ui.config(b"share", b"pool")
+ if not sharebase:
+ raise error.Abort(
+ b"share base directory not defined; refusing to operate",
+ hint=b"define share.pool config option or pass --sharebase",
+ )
+
+ # Sparse profile support was added in Mercurial 4.3, where it was highly
+ # experimental. Because of the fragility of it, we only support sparse
+ # profiles on 4.3. When 4.4 is released, we'll need to opt in to sparse
+ # support. We /could/ silently fall back to non-sparse when not supported.
+ # However, given that sparse has performance implications, we want to fail
+ # fast if we can't satisfy the desired checkout request.
+ if sparseprofile:
+ try:
+ extensions.find(b"sparse")
+ except KeyError:
+ raise error.Abort(
+ b"sparse extension must be enabled to use " b"--sparseprofile"
+ )
+
+ ui.warn(b"(using Mercurial %s)\n" % util.version())
+
+ # worker.backgroundclose only makes things faster if running anti-virus,
+ # which our automation doesn't. Disable it.
+ ui.setconfig(b"worker", b"backgroundclose", False)
+
+ # By default the progress bar starts after 3s and updates every 0.1s. We
+ # change this so it shows and updates every 1.0s.
+ # We also tell progress to assume a TTY is present so updates are printed
+ # even if there is no known TTY.
+ # We make the config change here instead of in a config file because
+ # otherwise we're at the whim of whatever configs are used in automation.
+ ui.setconfig(b"progress", b"delay", 1.0)
+ ui.setconfig(b"progress", b"refresh", 1.0)
+ ui.setconfig(b"progress", b"assume-tty", True)
+
+ sharebase = os.path.realpath(sharebase)
+
+ optimes = []
+ behaviors = set()
+ start = time.time()
+
+ try:
+ return _docheckout(
+ ui,
+ url,
+ dest,
+ upstream,
+ revision,
+ branch,
+ purge,
+ sharebase,
+ optimes,
+ behaviors,
+ networkattempts,
+ sparse_profile=sparseprofile,
+ noupdate=noupdate,
+ )
+ finally:
+ overall = time.time() - start
+
+ # We store the overall time multiple ways in order to help differentiate
+ # the various "flavors" of operations.
+
+ # ``overall`` is always the total operation time.
+ optimes.append(("overall", overall))
+
+ def record_op(name):
+ # If special behaviors due to "corrupt" storage occur, we vary the
+ # name to convey that.
+ if "remove-store" in behaviors:
+ name += "_rmstore"
+ if "remove-wdir" in behaviors:
+ name += "_rmwdir"
+
+ optimes.append((name, overall))
+
+ # We break out overall operations primarily by their network interaction
+ # We have variants within for working directory operations.
+ if "clone" in behaviors and "create-store" in behaviors:
+ record_op("overall_clone")
+
+ if "sparse-update" in behaviors:
+ record_op("overall_clone_sparsecheckout")
+ else:
+ record_op("overall_clone_fullcheckout")
+
+ elif "pull" in behaviors or "clone" in behaviors:
+ record_op("overall_pull")
+
+ if "sparse-update" in behaviors:
+ record_op("overall_pull_sparsecheckout")
+ else:
+ record_op("overall_pull_fullcheckout")
+
+ if "empty-wdir" in behaviors:
+ record_op("overall_pull_emptywdir")
+ else:
+ record_op("overall_pull_populatedwdir")
+
+ else:
+ record_op("overall_nopull")
+
+ if "sparse-update" in behaviors:
+ record_op("overall_nopull_sparsecheckout")
+ else:
+ record_op("overall_nopull_fullcheckout")
+
+ if "empty-wdir" in behaviors:
+ record_op("overall_nopull_emptywdir")
+ else:
+ record_op("overall_nopull_populatedwdir")
+
+ server_url = urllibcompat.urlreq.urlparse(url).netloc
+
+ if "TASKCLUSTER_INSTANCE_TYPE" in os.environ:
+ perfherder = {
+ "framework": {
+ "name": "vcs",
+ },
+ "suites": [],
+ }
+ for op, duration in optimes:
+ perfherder["suites"].append(
+ {
+ "name": op,
+ "value": duration,
+ "lowerIsBetter": True,
+ "shouldAlert": False,
+ "serverUrl": server_url.decode("utf-8"),
+ "hgVersion": util.version().decode("utf-8"),
+ "extraOptions": [os.environ["TASKCLUSTER_INSTANCE_TYPE"]],
+ "subtests": [],
+ }
+ )
+ ui.write(
+ b"PERFHERDER_DATA: %s\n"
+ % pycompat.bytestr(json.dumps(perfherder, sort_keys=True))
+ )
+
+
+def _docheckout(
+ ui,
+ url,
+ dest,
+ upstream,
+ revision,
+ branch,
+ purge,
+ sharebase,
+ optimes,
+ behaviors,
+ networkattemptlimit,
+ networkattempts=None,
+ sparse_profile=None,
+ noupdate=False,
+):
+ if not networkattempts:
+ networkattempts = [1]
+
+ def callself():
+ return _docheckout(
+ ui,
+ url,
+ dest,
+ upstream,
+ revision,
+ branch,
+ purge,
+ sharebase,
+ optimes,
+ behaviors,
+ networkattemptlimit,
+ networkattempts=networkattempts,
+ sparse_profile=sparse_profile,
+ noupdate=noupdate,
+ )
+
+ @contextlib.contextmanager
+ def timeit(op, behavior):
+ behaviors.add(behavior)
+ errored = False
+ try:
+ start = time.time()
+ yield
+ except Exception:
+ errored = True
+ raise
+ finally:
+ elapsed = time.time() - start
+
+ if errored:
+ op += "_errored"
+
+ optimes.append((op, elapsed))
+
+ ui.write(b"ensuring %s@%s is available at %s\n" % (url, revision or branch, dest))
+
+ # We assume that we're the only process on the machine touching the
+ # repository paths that we were told to use. This means our recovery
+ # scenario when things aren't "right" is to just nuke things and start
+ # from scratch. This is easier to implement than verifying the state
+ # of the data and attempting recovery. And in some scenarios (such as
+ # potential repo corruption), it is probably faster, since verifying
+ # repos can take a while.
+
+ destvfs = vfs.vfs(dest, audit=False, realpath=True)
+
+ def deletesharedstore(path=None):
+ storepath = path or destvfs.read(b".hg/sharedpath").strip()
+ if storepath.endswith(b".hg"):
+ storepath = os.path.dirname(storepath)
+
+ storevfs = vfs.vfs(storepath, audit=False)
+ storevfs.rmtree(forcibly=True)
+
+ if destvfs.exists() and not destvfs.exists(b".hg"):
+ raise error.Abort(b"destination exists but no .hg directory")
+
+ # Refuse to enable sparse checkouts on existing checkouts. The reasoning
+ # here is that another consumer of this repo may not be sparse aware. If we
+ # enabled sparse, we would lock them out.
+ if destvfs.exists() and sparse_profile and not destvfs.exists(b".hg/sparse"):
+ raise error.Abort(
+ b"cannot enable sparse profile on existing " b"non-sparse checkout",
+ hint=b"use a separate working directory to use sparse",
+ )
+
+ # And the other direction for symmetry.
+ if not sparse_profile and destvfs.exists(b".hg/sparse"):
+ raise error.Abort(
+ b"cannot use non-sparse checkout on existing sparse " b"checkout",
+ hint=b"use a separate working directory to use sparse",
+ )
+
+ # Require checkouts to be tied to shared storage, for efficiency.
+ if destvfs.exists(b".hg") and not destvfs.exists(b".hg/sharedpath"):
+ ui.warn(b"(destination is not shared; deleting)\n")
+ with timeit("remove_unshared_dest", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ # Verify the shared path exists and is using modern pooled storage.
+ if destvfs.exists(b".hg/sharedpath"):
+ storepath = destvfs.read(b".hg/sharedpath").strip()
+
+ ui.write(b"(existing repository shared store: %s)\n" % storepath)
+
+ if not os.path.exists(storepath):
+ ui.warn(b"(shared store does not exist; deleting destination)\n")
+ with timeit("removed_missing_shared_store", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+ elif not re.search(b"[a-f0-9]{40}/\.hg$", storepath.replace(b"\\", b"/")):
+ ui.warn(
+ b"(shared store does not belong to pooled storage; "
+ b"deleting destination to improve efficiency)\n"
+ )
+ with timeit("remove_unpooled_store", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ if destvfs.isfileorlink(b".hg/wlock"):
+ ui.warn(
+ b"(dest has an active working directory lock; assuming it is "
+ b"left over from a previous process and that the destination "
+ b"is corrupt; deleting it just to be sure)\n"
+ )
+ with timeit("remove_locked_wdir", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ def handlerepoerror(e):
+ if pycompat.bytestr(e) == _(b"abandoned transaction found"):
+ ui.warn(b"(abandoned transaction found; trying to recover)\n")
+ repo = hg.repository(ui, dest)
+ if not repo.recover():
+ ui.warn(b"(could not recover repo state; " b"deleting shared store)\n")
+ with timeit("remove_unrecovered_shared_store", "remove-store"):
+ deletesharedstore()
+
+ ui.warn(b"(attempting checkout from beginning)\n")
+ return callself()
+
+ raise
+
+ # At this point we either have an existing working directory using
+ # shared, pooled storage or we have nothing.
+
+ def handlenetworkfailure():
+ if networkattempts[0] >= networkattemptlimit:
+ raise error.Abort(
+ b"reached maximum number of network attempts; " b"giving up\n"
+ )
+
+ ui.warn(
+ b"(retrying after network failure on attempt %d of %d)\n"
+ % (networkattempts[0], networkattemptlimit)
+ )
+
+ # Do a backoff on retries to mitigate the thundering herd
+ # problem. This is an exponential backoff with a multiplier
+ # plus random jitter thrown in for good measure.
+ # With the default settings, backoffs will be:
+ # 1) 2.5 - 6.5
+ # 2) 5.5 - 9.5
+ # 3) 11.5 - 15.5
+ backoff = (2 ** networkattempts[0] - 1) * 1.5
+ jittermin = ui.configint(b"robustcheckout", b"retryjittermin", 1000)
+ jittermax = ui.configint(b"robustcheckout", b"retryjittermax", 5000)
+ backoff += float(random.randint(jittermin, jittermax)) / 1000.0
+ ui.warn(b"(waiting %.2fs before retry)\n" % backoff)
+ time.sleep(backoff)
+
+ networkattempts[0] += 1
+
+ def handlepullerror(e):
+ """Handle an exception raised during a pull.
+
+ Returns True if caller should call ``callself()`` to retry.
+ """
+ if isinstance(e, error.Abort):
+ if e.args[0] == _(b"repository is unrelated"):
+ ui.warn(b"(repository is unrelated; deleting)\n")
+ destvfs.rmtree(forcibly=True)
+ return True
+ elif e.args[0].startswith(_(b"stream ended unexpectedly")):
+ ui.warn(b"%s\n" % e.args[0])
+ # Will raise if failure limit reached.
+ handlenetworkfailure()
+ return True
+ # TODO test this branch
+ elif isinstance(e, error.ResponseError):
+ if e.args[0].startswith(_(b"unexpected response from remote server:")):
+ ui.warn(b"(unexpected response from remote server; retrying)\n")
+ destvfs.rmtree(forcibly=True)
+ # Will raise if failure limit reached.
+ handlenetworkfailure()
+ return True
+ elif isinstance(e, ssl.SSLError):
+ # Assume all SSL errors are due to the network, as Mercurial
+ # should convert non-transport errors like cert validation failures
+ # to error.Abort.
+ ui.warn(b"ssl error: %s\n" % pycompat.bytestr(str(e)))
+ handlenetworkfailure()
+ return True
+ elif isinstance(e, urllibcompat.urlerr.urlerror):
+ if isinstance(e.reason, socket.error):
+ ui.warn(b"socket error: %s\n" % pycompat.bytestr(str(e.reason)))
+ handlenetworkfailure()
+ return True
+ else:
+ ui.warn(
+ b"unhandled URLError; reason type: %s; value: %s\n"
+ % (
+ pycompat.bytestr(e.reason.__class__.__name__),
+ pycompat.bytestr(str(e.reason)),
+ )
+ )
+ else:
+ ui.warn(
+ b"unhandled exception during network operation; type: %s; "
+ b"value: %s\n"
+ % (pycompat.bytestr(e.__class__.__name__), pycompat.bytestr(str(e)))
+ )
+
+ return False
+
+ # Perform sanity checking of store. We may or may not know the path to the
+ # local store. It depends if we have an existing destvfs pointing to a
+ # share. To ensure we always find a local store, perform the same logic
+ # that Mercurial's pooled storage does to resolve the local store path.
+ cloneurl = upstream or url
+
+ try:
+ clonepeer = hg.peer(ui, {}, cloneurl)
+ rootnode = peerlookup(clonepeer, b"0")
+ except error.RepoLookupError:
+ raise error.Abort(b"unable to resolve root revision from clone " b"source")
+ except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
+ if handlepullerror(e):
+ return callself()
+ raise
+
+ if rootnode == nullid:
+ raise error.Abort(b"source repo appears to be empty")
+
+ storepath = os.path.join(sharebase, hex(rootnode))
+ storevfs = vfs.vfs(storepath, audit=False)
+
+ if storevfs.isfileorlink(b".hg/store/lock"):
+ ui.warn(
+ b"(shared store has an active lock; assuming it is left "
+ b"over from a previous process and that the store is "
+ b"corrupt; deleting store and destination just to be "
+ b"sure)\n"
+ )
+ if destvfs.exists():
+ with timeit("remove_dest_active_lock", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ with timeit("remove_shared_store_active_lock", "remove-store"):
+ storevfs.rmtree(forcibly=True)
+
+ if storevfs.exists() and not storevfs.exists(b".hg/requires"):
+ ui.warn(
+ b"(shared store missing requires file; this is a really "
+ b"odd failure; deleting store and destination)\n"
+ )
+ if destvfs.exists():
+ with timeit("remove_dest_no_requires", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ with timeit("remove_shared_store_no_requires", "remove-store"):
+ storevfs.rmtree(forcibly=True)
+
+ if storevfs.exists(b".hg/requires"):
+ requires = set(storevfs.read(b".hg/requires").splitlines())
+ # FUTURE when we require generaldelta, this is where we can check
+ # for that.
+ required = {b"dotencode", b"fncache"}
+
+ missing = required - requires
+ if missing:
+ ui.warn(
+ b"(shared store missing requirements: %s; deleting "
+ b"store and destination to ensure optimal behavior)\n"
+ % b", ".join(sorted(missing))
+ )
+ if destvfs.exists():
+ with timeit("remove_dest_missing_requires", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ with timeit("remove_shared_store_missing_requires", "remove-store"):
+ storevfs.rmtree(forcibly=True)
+
+ created = False
+
+ if not destvfs.exists():
+ # Ensure parent directories of destination exist.
+ # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
+ if util.safehasattr(util, "ensuredirs"):
+ makedirs = util.ensuredirs
+ else:
+ makedirs = util.makedirs
+
+ makedirs(os.path.dirname(destvfs.base), notindexed=True)
+ makedirs(sharebase, notindexed=True)
+
+ if upstream:
+ ui.write(b"(cloning from upstream repo %s)\n" % upstream)
+
+ if not storevfs.exists():
+ behaviors.add(b"create-store")
+
+ try:
+ with timeit("clone", "clone"):
+ shareopts = {b"pool": sharebase, b"mode": b"identity"}
+ res = hg.clone(
+ ui,
+ {},
+ clonepeer,
+ dest=dest,
+ update=False,
+ shareopts=shareopts,
+ stream=True,
+ )
+ except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
+ if handlepullerror(e):
+ return callself()
+ raise
+ except error.RepoError as e:
+ return handlerepoerror(e)
+ except error.RevlogError as e:
+ ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
+ with timeit("remove_shared_store_revlogerror", "remote-store"):
+ deletesharedstore()
+ return callself()
+
+ # TODO retry here.
+ if res is None:
+ raise error.Abort(b"clone failed")
+
+ # Verify it is using shared pool storage.
+ if not destvfs.exists(b".hg/sharedpath"):
+ raise error.Abort(b"clone did not create a shared repo")
+
+ created = True
+
+ # The destination .hg directory should exist. Now make sure we have the
+ # wanted revision.
+
+ repo = hg.repository(ui, dest)
+
+ # We only pull if we are using symbolic names or the requested revision
+ # doesn't exist.
+ havewantedrev = False
+
+ if revision:
+ try:
+ ctx = scmutil.revsingle(repo, revision)
+ except error.RepoLookupError:
+ ctx = None
+
+ if ctx:
+ if not ctx.hex().startswith(revision):
+ raise error.Abort(
+ b"--revision argument is ambiguous",
+ hint=b"must be the first 12+ characters of a " b"SHA-1 fragment",
+ )
+
+ checkoutrevision = ctx.hex()
+ havewantedrev = True
+
+ if not havewantedrev:
+ ui.write(b"(pulling to obtain %s)\n" % (revision or branch,))
+
+ remote = None
+ try:
+ remote = hg.peer(repo, {}, url)
+ pullrevs = [peerlookup(remote, revision or branch)]
+ checkoutrevision = hex(pullrevs[0])
+ if branch:
+ ui.warn(
+ b"(remote resolved %s to %s; "
+ b"result is not deterministic)\n" % (branch, checkoutrevision)
+ )
+
+ if checkoutrevision in repo:
+ ui.warn(b"(revision already present locally; not pulling)\n")
+ else:
+ with timeit("pull", "pull"):
+ pullop = exchange.pull(repo, remote, heads=pullrevs)
+ if not pullop.rheads:
+ raise error.Abort(b"unable to pull requested revision")
+ except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
+ if handlepullerror(e):
+ return callself()
+ raise
+ except error.RepoError as e:
+ return handlerepoerror(e)
+ except error.RevlogError as e:
+ ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
+ deletesharedstore()
+ return callself()
+ finally:
+ if remote:
+ remote.close()
+
+ # Now we should have the wanted revision in the store. Perform
+ # working directory manipulation.
+
+ # Avoid any working directory manipulations if `-U`/`--noupdate` was passed
+ if noupdate:
+ ui.write(b"(skipping update since `-U` was passed)\n")
+ return None
+
+ # Purge if requested. We purge before update because this way we're
+ # guaranteed to not have conflicts on `hg update`.
+ if purge and not created:
+ ui.write(b"(purging working directory)\n")
+ # bind the command under a different name so the `purge` flag above
+ # is not shadowed
+ purgecmd = getattr(commands, "purge", None)
+ if not purgecmd:
+ purgecmd = extensions.find(b"purge").purge
+
+ # Mercurial 4.3 doesn't purge files outside the sparse checkout.
+ # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
+ # purging by monkeypatching the sparse matcher.
+ try:
+ old_sparse_fn = getattr(repo.dirstate, "_sparsematchfn", None)
+ if old_sparse_fn is not None:
+ # TRACKING hg50
+ # Arguments passed to `matchmod.always` were unused and have been removed
+ if util.versiontuple(n=2) >= (5, 0):
+ repo.dirstate._sparsematchfn = lambda: matchmod.always()
+ else:
+ repo.dirstate._sparsematchfn = lambda: matchmod.always(
+ repo.root, ""
+ )
+
+ with timeit("purge", "purge"):
+ if purgecmd(
+ ui,
+ repo,
+ all=True,
+ abort_on_err=True,
+ # The function expects all arguments to be
+ # defined.
+ **{"print": None, "print0": None, "dirs": None, "files": None}
+ ):
+ raise error.Abort(b"error purging")
+ finally:
+ if old_sparse_fn is not None:
+ repo.dirstate._sparsematchfn = old_sparse_fn
+
+ # Update the working directory.
+
+ if repo[b"."].node() == nullid:
+ behaviors.add("empty-wdir")
+ else:
+ behaviors.add("populated-wdir")
+
+ if sparse_profile:
+ sparsemod = getsparse()
+
+ # By default, Mercurial will ignore unknown sparse profiles. This could
+ # lead to a full checkout. Be more strict.
+ try:
+ repo.filectx(sparse_profile, changeid=checkoutrevision).data()
+ except error.ManifestLookupError:
+ raise error.Abort(
+ b"sparse profile %s does not exist at revision "
+ b"%s" % (sparse_profile, checkoutrevision)
+ )
+
+ # TRACKING hg48 - parseconfig takes `action` param
+ if util.versiontuple(n=2) >= (4, 8):
+ old_config = sparsemod.parseconfig(
+ repo.ui, repo.vfs.tryread(b"sparse"), b"sparse"
+ )
+ else:
+ old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread(b"sparse"))
+
+ old_includes, old_excludes, old_profiles = old_config
+
+ if old_profiles == {sparse_profile} and not old_includes and not old_excludes:
+ ui.write(
+ b"(sparse profile %s already set; no need to update "
+ b"sparse config)\n" % sparse_profile
+ )
+ else:
+ if old_includes or old_excludes or old_profiles:
+ ui.write(
+ b"(replacing existing sparse config with profile "
+ b"%s)\n" % sparse_profile
+ )
+ else:
+ ui.write(b"(setting sparse config to profile %s)\n" % sparse_profile)
+
+ # If doing an incremental update, this will perform two updates:
+ # one to change the sparse profile and another to update to the new
+ # revision. This is not desired. But there's not a good API in
+ # Mercurial to do this as one operation.
+ with repo.wlock(), timeit("sparse_update_config", "sparse-update-config"):
+ # pylint --py3k: W1636
+ fcounts = list(
+ map(
+ len,
+ sparsemod._updateconfigandrefreshwdir(
+ repo, [], [], [sparse_profile], force=True
+ ),
+ )
+ )
+
+ repo.ui.status(
+ b"%d files added, %d files dropped, "
+ b"%d files conflicting\n" % tuple(fcounts)
+ )
+
+ ui.write(b"(sparse refresh complete)\n")
+
+ op = "update_sparse" if sparse_profile else "update"
+ behavior = "update-sparse" if sparse_profile else "update"
+
+ with timeit(op, behavior):
+ if commands.update(ui, repo, rev=checkoutrevision, clean=True):
+ raise error.Abort(b"error updating")
+
+ ui.write(b"updated to %s\n" % checkoutrevision)
+
+ return None
+
+
+def extsetup(ui):
+ # Ensure required extensions are loaded.
+ for ext in (b"purge", b"share"):
+ try:
+ extensions.find(ext)
+ except KeyError:
+ extensions.load(ui, ext, None)
diff --git a/testing/mozharness/external_tools/tooltool.py b/testing/mozharness/external_tools/tooltool.py
new file mode 100755
index 0000000000..7ae48dce3c
--- /dev/null
+++ b/testing/mozharness/external_tools/tooltool.py
@@ -0,0 +1,1679 @@
+#!/usr/bin/env python
+
+# tooltool is a lookaside cache implemented in Python
+# Copyright (C) 2011 John H. Ford <john@johnford.info>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation version 2
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+# A manifest file specifies files in a directory that are stored
+# elsewhere. It should only list files in the same directory in
+# which the manifest file resides, and it should be called
+# 'manifest.tt'
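+#
+# For illustration only (all values made up), a minimal manifest.tt is a
+# JSON list of file records:
+#
+# [
+#   {
+#     "filename": "clang.tar.xz",
+#     "size": 1234,
+#     "algorithm": "sha512",
+#     "digest": "deadbeef...",
+#     "unpack": true
+#   }
+# ]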
+
+import base64
+import calendar
+import hashlib
+import hmac
+import json
+import logging
+import math
+import optparse
+import os
+import pprint
+import re
+import shutil
+import sys
+import tarfile
+import tempfile
+import threading
+import time
+import zipfile
+from contextlib import contextmanager, closing
+from functools import wraps
+
+from io import open
+from io import BytesIO
+import random  # the module, not the function; retrier() below uses random.uniform
+from subprocess import PIPE
+from subprocess import Popen
+
+__version__ = "1"
+
+# Allowed request header characters:
+# !#$%&'()*+,-./:;<=>?@[]^_`{|}~ and space, a-z, A-Z, 0-9, \, "
+REQUEST_HEADER_ATTRIBUTE_CHARS = re.compile(
+ r"^[ a-zA-Z0-9_\!#\$%&'\(\)\*\+,\-\./\:;<\=>\?@\[\]\^`\{\|\}~]*$"
+)
+DEFAULT_MANIFEST_NAME = "manifest.tt"
+TOOLTOOL_PACKAGE_SUFFIX = ".TOOLTOOL-PACKAGE"
+HAWK_VER = 1
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+ six_binary_type = bytes
+ unicode = (
+ str # Silence `pyflakes` from reporting `undefined name 'unicode'` in Python 3.
+ )
+ import urllib.request as urllib2
+ from http.client import HTTPSConnection, HTTPConnection
+ from urllib.parse import urlparse, urljoin
+ from urllib.request import Request
+ from urllib.error import HTTPError, URLError
+else:
+ six_binary_type = str
+ import urllib2
+ from httplib import HTTPSConnection, HTTPConnection
+ from urllib2 import Request, HTTPError, URLError
+ from urlparse import urlparse, urljoin
+
+
+log = logging.getLogger(__name__)
+
+
+# Vendored code from `redo` module
+def retrier(attempts=5, sleeptime=10, max_sleeptime=300, sleepscale=1.5, jitter=1):
+ """
+ This function originates from redo 2.0.3 https://github.com/mozilla-releng/redo
+ A generator function that sleeps between retries, handles exponential
+ backoff and jitter. The action you are retrying is meant to run after
+ retrier yields.
+ """
+ jitter = jitter or 0 # py35 barfs on the next line if jitter is None
+ if jitter > sleeptime:
+ # To prevent negative sleep times
+ raise Exception(
+ "jitter ({}) must be less than sleep time ({})".format(jitter, sleeptime)
+ )
+
+ sleeptime_real = sleeptime
+ for _ in range(attempts):
+ log.debug("attempt %i/%i", _ + 1, attempts)
+
+ yield sleeptime_real
+
+ if jitter:
+ sleeptime_real = sleeptime + random.uniform(-jitter, jitter)
+ # our jitter should scale along with the sleeptime
+ jitter = jitter * sleepscale
+ else:
+ sleeptime_real = sleeptime
+
+ sleeptime *= sleepscale
+
+ if sleeptime_real > max_sleeptime:
+ sleeptime_real = max_sleeptime
+
+ # Don't need to sleep the last time
+ if _ < attempts - 1:
+ log.debug(
+ "sleeping for %.2fs (attempt %i/%i)", sleeptime_real, _ + 1, attempts
+ )
+ time.sleep(sleeptime_real)
+
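+# Illustrative usage sketch (do_network_thing is a hypothetical callable):
+# retrier yields the planned pause and itself sleeps between attempts, so
+# callers simply loop:
+#
+#     for _ in retrier(attempts=3, sleeptime=1, jitter=0):
+#         try:
+#             do_network_thing()
+#             break
+#         except IOError:
+#             continue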
+
+def retry(
+ action,
+ attempts=5,
+ sleeptime=60,
+ max_sleeptime=5 * 60,
+ sleepscale=1.5,
+ jitter=1,
+ retry_exceptions=(Exception,),
+ cleanup=None,
+ args=(),
+ kwargs={},
+ log_args=True,
+):
+ """
+ This function originates from redo 2.0.3 https://github.com/mozilla-releng/redo
+ Calls an action function until it succeeds, or we give up.
+ """
+ assert callable(action)
+ assert not cleanup or callable(cleanup)
+
+ action_name = getattr(action, "__name__", action)
+ if log_args and (args or kwargs):
+ log_attempt_args = (
+ "retry: calling %s with args: %s," " kwargs: %s, attempt #%d",
+ action_name,
+ args,
+ kwargs,
+ )
+ else:
+ log_attempt_args = ("retry: calling %s, attempt #%d", action_name)
+
+ if max_sleeptime < sleeptime:
+ log.debug("max_sleeptime %d less than sleeptime %d", max_sleeptime, sleeptime)
+
+ n = 1
+ for _ in retrier(
+ attempts=attempts,
+ sleeptime=sleeptime,
+ max_sleeptime=max_sleeptime,
+ sleepscale=sleepscale,
+ jitter=jitter,
+ ):
+ try:
+ logfn = log.info if n != 1 else log.debug
+ logfn_args = log_attempt_args + (n,)
+ logfn(*logfn_args)
+ return action(*args, **kwargs)
+ except retry_exceptions:
+ log.debug("retry: Caught exception: ", exc_info=True)
+ if cleanup:
+ cleanup()
+ if n == attempts:
+ log.info("retry: Giving up on %s", action_name)
+ raise
+ continue
+ finally:
+ n += 1
+
+
+def retriable(*retry_args, **retry_kwargs):
+ """
+ This function originates from redo 2.0.3 https://github.com/mozilla-releng/redo
+ A decorator factory for retry(). Wrap your function in @retriable(...) to
+ give it retry powers!
+ """
+
+ def _retriable_factory(func):
+ @wraps(func)
+ def _retriable_wrapper(*args, **kwargs):
+ return retry(func, args=args, kwargs=kwargs, *retry_args, **retry_kwargs)
+
+ return _retriable_wrapper
+
+ return _retriable_factory
+
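+# Illustrative sketch: wrapping a flaky callable (hypothetical name) so
+# failed calls are retried with overridden defaults:
+#
+#     @retriable(attempts=3, sleeptime=1)
+#     def fetch_flaky_resource():
+#         ...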
+
+# end of vendored code from redo module
+
+
+def request_has_data(req):
+ if PY3:
+ return req.data is not None
+ return req.has_data()
+
+
+def get_hexdigest(val):
+ return hashlib.sha512(val).hexdigest()
+
+
+class FileRecordJSONEncoderException(Exception):
+ pass
+
+
+class InvalidManifest(Exception):
+ pass
+
+
+class ExceptionWithFilename(Exception):
+ def __init__(self, filename):
+ Exception.__init__(self)
+ self.filename = filename
+
+
+class BadFilenameException(ExceptionWithFilename):
+ pass
+
+
+class DigestMismatchException(ExceptionWithFilename):
+ pass
+
+
+class MissingFileException(ExceptionWithFilename):
+ pass
+
+
+class InvalidCredentials(Exception):
+ pass
+
+
+class BadHeaderValue(Exception):
+ pass
+
+
+def parse_url(url):
+ url_parts = urlparse(url)
+ url_dict = {
+ "scheme": url_parts.scheme,
+ "hostname": url_parts.hostname,
+ "port": url_parts.port,
+ "path": url_parts.path,
+ "resource": url_parts.path,
+ "query": url_parts.query,
+ }
+ if len(url_dict["query"]) > 0:
+ url_dict["resource"] = "%s?%s" % (
+ url_dict["resource"], # pragma: no cover
+ url_dict["query"],
+ )
+
+ if url_parts.port is None:
+ if url_parts.scheme == "http":
+ url_dict["port"] = 80
+ elif url_parts.scheme == "https": # pragma: no cover
+ url_dict["port"] = 443
+ return url_dict
+
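+# Illustrative example: parse_url("https://example.com/up?x=1") returns
+# {"scheme": "https", "hostname": "example.com", "port": 443,
+#  "path": "/up", "resource": "/up?x=1", "query": "x=1"}.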
+
+def utc_now(offset_in_seconds=0.0):
+ return int(math.floor(calendar.timegm(time.gmtime()) + float(offset_in_seconds)))
+
+
+def random_string(length):
+ return base64.urlsafe_b64encode(os.urandom(length))[:length]
+
+
+def prepare_header_val(val):
+ if isinstance(val, six_binary_type):
+ val = val.decode("utf-8")
+
+ if not REQUEST_HEADER_ATTRIBUTE_CHARS.match(val):
+ raise BadHeaderValue( # pragma: no cover
+ "header value value={val} contained an illegal character".format(
+ val=repr(val)
+ )
+ )
+
+ return val
+
+
+def parse_content_type(content_type): # pragma: no cover
+ if content_type:
+ return content_type.split(";")[0].strip().lower()
+ else:
+ return ""
+
+
+def calculate_payload_hash(algorithm, payload, content_type): # pragma: no cover
+ parts = [
+ part if isinstance(part, six_binary_type) else part.encode("utf8")
+ for part in [
+ "hawk." + str(HAWK_VER) + ".payload\n",
+ parse_content_type(content_type) + "\n",
+ payload or "",
+ "\n",
+ ]
+ ]
+
+ p_hash = hashlib.new(algorithm)
+ for p in parts:
+ p_hash.update(p)
+
+ log.debug(
+ "calculating payload hash from:\n{parts}".format(parts=pprint.pformat(parts))
+ )
+
+ return base64.b64encode(p_hash.digest())
+
+
+def validate_taskcluster_credentials(credentials):
+ if not hasattr(credentials, "__getitem__"):
+ raise InvalidCredentials(
+ "credentials must be a dict-like object"
+ ) # pragma: no cover
+ try:
+ credentials["clientId"]
+ credentials["accessToken"]
+ except KeyError: # pragma: no cover
+ etype, val, tb = sys.exc_info()
+ raise InvalidCredentials("{etype}: {val}".format(etype=etype, val=val))
+
+
+def normalize_header_attr(val):
+ if isinstance(val, six_binary_type):
+ return val.decode("utf-8")
+ return val # pragma: no cover
+
+
+def normalize_string(
+ mac_type,
+ timestamp,
+ nonce,
+ method,
+ name,
+ host,
+ port,
+ content_hash,
+):
+ return "\n".join(
+ [
+ normalize_header_attr(header)
+ # The blank lines are important. They follow what the Node Hawk lib does.
+ for header in [
+ "hawk." + str(HAWK_VER) + "." + mac_type,
+ timestamp,
+ nonce,
+ method or "",
+ name or "",
+ host,
+ port,
+ content_hash or "",
+ "", # for ext which is empty in this case
+ "", # Add trailing new line.
+ ]
+ ]
+ )
+
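+# Illustrative example (fabricated values): for a GET of /file on
+# example.com:443 with no payload, the normalized string is the
+# newline-joined sequence "hawk.1.header", timestamp, nonce, "GET",
+# "/file", "example.com", "443", "" (content hash), "" (ext), plus a
+# trailing newline.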
+
+def calculate_mac(
+ mac_type,
+ access_token,
+ algorithm,
+ timestamp,
+ nonce,
+ method,
+ name,
+ host,
+ port,
+ content_hash,
+):
+ normalized = normalize_string(
+ mac_type, timestamp, nonce, method, name, host, port, content_hash
+ )
+ log.debug(u"normalized resource for mac calc: {norm}".format(norm=normalized))
+ digestmod = getattr(hashlib, algorithm)
+
+ if not isinstance(normalized, six_binary_type):
+ normalized = normalized.encode("utf8")
+
+ if not isinstance(access_token, six_binary_type):
+ access_token = access_token.encode("ascii")
+
+ result = hmac.new(access_token, normalized, digestmod)
+ return base64.b64encode(result.digest())
+
+
+def make_taskcluster_header(credentials, req):
+ validate_taskcluster_credentials(credentials)
+
+ url = req.get_full_url()
+ method = req.get_method()
+ algorithm = "sha256"
+ timestamp = str(utc_now())
+ nonce = random_string(6)
+ url_parts = parse_url(url)
+
+ content_hash = None
+ if request_has_data(req):
+ if PY3:
+ data = req.data
+ else:
+ data = req.get_data()
+ content_hash = calculate_payload_hash( # pragma: no cover
+ algorithm,
+ data,
+ # maybe we should detect this from req.headers, but we expect JSON anyway
+ content_type="application/json",
+ )
+
+ mac = calculate_mac(
+ "header",
+ credentials["accessToken"],
+ algorithm,
+ timestamp,
+ nonce,
+ method,
+ url_parts["resource"],
+ url_parts["hostname"],
+ str(url_parts["port"]),
+ content_hash,
+ )
+
+ header = u'Hawk mac="{}"'.format(prepare_header_val(mac))
+
+ if content_hash: # pragma: no cover
+ header = u'{}, hash="{}"'.format(header, prepare_header_val(content_hash))
+
+ header = u'{header}, id="{id}", ts="{ts}", nonce="{nonce}"'.format(
+ header=header,
+ id=prepare_header_val(credentials["clientId"]),
+ ts=prepare_header_val(timestamp),
+ nonce=prepare_header_val(nonce),
+ )
+
+ log.debug("Hawk header for URL={} method={}: {}".format(url, method, header))
+
+ return header
+
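+# The resulting header has roughly this shape (all values fabricated):
+#
+#     Hawk mac="9mN...=", id="my-client-id", ts="1700000000", nonce="abc123"
+#
+# with an additional hash="..." field, placed after mac, when the request
+# carries a body.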
+
+class FileRecord(object):
+ def __init__(
+ self,
+ filename,
+ size,
+ digest,
+ algorithm,
+ unpack=False,
+ version=None,
+ visibility=None,
+ ):
+ object.__init__(self)
+ if "/" in filename or "\\" in filename:
+ log.error(
+ "The filename provided contains path information and is, therefore, invalid."
+ )
+ raise BadFilenameException(filename=filename)
+ self.filename = filename
+ self.size = size
+ self.digest = digest
+ self.algorithm = algorithm
+ self.unpack = unpack
+ self.version = version
+ self.visibility = visibility
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if (
+ self.filename == other.filename
+ and self.size == other.size
+ and self.digest == other.digest
+ and self.algorithm == other.algorithm
+ and self.version == other.version
+ and self.visibility == other.visibility
+ ):
+ return True
+ else:
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ # pylint --py3k: W1641
+ return hash(
+ (
+ self.filename,
+ self.size,
+ self.digest,
+ self.algorithm,
+ self.version,
+ self.visibility,
+ )
+ )
+
+ def __str__(self):
+ return repr(self)
+
+ def __repr__(self):
+ return (
+ "%s.%s(filename='%s', size=%s, digest='%s', algorithm='%s', visibility=%r)"
+ % (
+ __name__,
+ self.__class__.__name__,
+ self.filename,
+ self.size,
+ self.digest,
+ self.algorithm,
+ self.visibility,
+ )
+ )
+
+ def present(self):
+ # Doesn't check validity
+ return os.path.exists(self.filename)
+
+ def validate_size(self):
+ if self.present():
+ return self.size == os.path.getsize(self.filename)
+ else:
+ log.debug("trying to validate size on a missing file, %s", self.filename)
+ raise MissingFileException(filename=self.filename)
+
+ def validate_digest(self):
+ if self.present():
+ with open(self.filename, "rb") as f:
+ return self.digest == digest_file(f, self.algorithm)
+ else:
+ log.debug("trying to validate digest on a missing file, %s', self.filename")
+ raise MissingFileException(filename=self.filename)
+
+ def validate(self):
+ if self.size is None or self.validate_size():
+ if self.validate_digest():
+ return True
+ return False
+
+ def describe(self):
+ if self.present() and self.validate():
+ return "'%s' is present and valid" % self.filename
+ elif self.present():
+ return "'%s' is present and invalid" % self.filename
+ else:
+ return "'%s' is absent" % self.filename
+
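+# Illustrative sketch (hypothetical file name and digest): a record can be
+# checked against the working directory:
+#
+#     fr = FileRecord("tool.zip", 1024, "abc...", "sha512", unpack=True)
+#     fr.present()   # True if ./tool.zip exists
+#     fr.describe()  # e.g. "'tool.zip' is present and invalid"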
+
+def create_file_record(filename, algorithm):
+ stored_filename = os.path.split(filename)[1]
+ with open(filename, "rb") as fo:
+ fr = FileRecord(
+ stored_filename,
+ os.path.getsize(filename),
+ digest_file(fo, algorithm),
+ algorithm,
+ )
+ return fr
+
+
+class FileRecordJSONEncoder(json.JSONEncoder):
+ def encode_file_record(self, obj):
+ if not issubclass(type(obj), FileRecord):
+ err = (
+ "FileRecordJSONEncoder is only for FileRecord and lists of FileRecords, "
+ "not %s" % obj.__class__.__name__
+ )
+ log.warning(err)
+ raise FileRecordJSONEncoderException(err)
+ else:
+ rv = {
+ "filename": obj.filename,
+ "size": obj.size,
+ "algorithm": obj.algorithm,
+ "digest": obj.digest,
+ }
+ if obj.unpack:
+ rv["unpack"] = True
+ if obj.version:
+ rv["version"] = obj.version
+ if obj.visibility is not None:
+ rv["visibility"] = obj.visibility
+ return rv
+
+ def default(self, f):
+ if issubclass(type(f), list):
+ record_list = []
+ for i in f:
+ record_list.append(self.encode_file_record(i))
+ return record_list
+ else:
+ return self.encode_file_record(f)
+
+
+class FileRecordJSONDecoder(json.JSONDecoder):
+
+ """I help the json module materialize a FileRecord from
+ a JSON file. I understand FileRecords and lists of
+ FileRecords. I ignore things that I don't expect for now"""
+
+ # TODO: make this more explicit in what it's looking for
+ # and error out on unexpected things
+
+ def process_file_records(self, obj):
+ if isinstance(obj, list):
+ record_list = []
+ for i in obj:
+ record = self.process_file_records(i)
+ if issubclass(type(record), FileRecord):
+ record_list.append(record)
+ return record_list
+ required_fields = [
+ "filename",
+ "size",
+ "algorithm",
+ "digest",
+ ]
+ if isinstance(obj, dict):
+ missing = False
+ for req in required_fields:
+ if req not in obj:
+ missing = True
+ break
+
+ if not missing:
+ unpack = obj.get("unpack", False)
+ version = obj.get("version", None)
+ visibility = obj.get("visibility", None)
+ rv = FileRecord(
+ obj["filename"],
+ obj["size"],
+ obj["digest"],
+ obj["algorithm"],
+ unpack,
+ version,
+ visibility,
+ )
+ log.debug("materialized %s" % rv)
+ return rv
+ return obj
+
+ def decode(self, s):
+ decoded = json.JSONDecoder.decode(self, s)
+ rv = self.process_file_records(decoded)
+ return rv
+
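+# Illustrative usage (manifest_text is a hypothetical JSON string):
+#
+#     records = json.loads(manifest_text, cls=FileRecordJSONDecoder)
+#     # -> a FileRecord, or a list of FileRecords for a JSON array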
+
+class Manifest(object):
+
+ valid_formats = ("json",)
+
+ def __init__(self, file_records=None):
+ self.file_records = file_records or []
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if len(self.file_records) != len(other.file_records):
+ log.debug("Manifests differ in number of files")
+ return False
+ # sort the file records by filename before comparing
+ mine = sorted((fr.filename, fr) for fr in self.file_records)
+ theirs = sorted((fr.filename, fr) for fr in other.file_records)
+ return mine == theirs
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ # pylint --py3k: W1641
+ return hash(tuple(sorted((fr.filename, fr) for fr in self.file_records)))
+
+ def __deepcopy__(self, memo):
+ # This is required for a deep copy
+ return Manifest(self.file_records[:])
+
+ def __copy__(self):
+ return Manifest(self.file_records)
+
+ def copy(self):
+ return Manifest(self.file_records[:])
+
+ def present(self):
+ return all(i.present() for i in self.file_records)
+
+ def validate_sizes(self):
+ return all(i.validate_size() for i in self.file_records)
+
+ def validate_digests(self):
+ return all(i.validate_digest() for i in self.file_records)
+
+ def validate(self):
+ return all(i.validate() for i in self.file_records)
+
+ def load(self, data_file, fmt="json"):
+ assert fmt in self.valid_formats
+ if fmt == "json":
+ try:
+ self.file_records.extend(
+ json.load(data_file, cls=FileRecordJSONDecoder)
+ )
+ except ValueError:
+ raise InvalidManifest("trying to read invalid manifest file")
+
+ def loads(self, data_string, fmt="json"):
+ assert fmt in self.valid_formats
+ if fmt == "json":
+ try:
+ self.file_records.extend(
+ json.loads(data_string, cls=FileRecordJSONDecoder)
+ )
+ except ValueError:
+ raise InvalidManifest("trying to read invalid manifest file")
+
+ def dump(self, output_file, fmt="json"):
+ assert fmt in self.valid_formats
+ if fmt == "json":
+ return json.dump(
+ self.file_records,
+ output_file,
+ indent=2,
+ separators=(",", ": "),
+ cls=FileRecordJSONEncoder,
+ )
+
+ def dumps(self, fmt="json"):
+ assert fmt in self.valid_formats
+ if fmt == "json":
+ return json.dumps(
+ self.file_records,
+ indent=2,
+ separators=(",", ": "),
+ cls=FileRecordJSONEncoder,
+ )
+
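+# Illustrative round trip with this class (manifest.tt assumed to exist):
+#
+#     m = Manifest()
+#     with open("manifest.tt") as f:
+#         m.load(f)
+#     if m.validate():
+#         print(m.dumps())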
+
+def digest_file(f, a):
+ """I take a file like object 'f' and return a hex-string containing
+ of the result of the algorithm 'a' applied to 'f'."""
+ h = hashlib.new(a)
+ chunk_size = 1024 * 10
+ data = f.read(chunk_size)
+ while data:
+ h.update(data)
+ data = f.read(chunk_size)
+ name = repr(f.name) if hasattr(f, "name") else "a file"
+ log.debug("hashed %s with %s to be %s", name, a, h.hexdigest())
+ return h.hexdigest()
+
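+# Illustrative example: hashing an in-memory payload with the only
+# algorithm this script accepts:
+#
+#     digest_file(BytesIO(b"hello"), "sha512")  # -> 128-char hex string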
+
+def execute(cmd):
+ """Execute CMD, logging its stdout at the info level"""
+ # universal_newlines so stdout is text on Python 3 as well
+ process = Popen(cmd, shell=True, stdout=PIPE, universal_newlines=True)
+ while True:
+ line = process.stdout.readline()
+ if not line:
+ break
+ log.info(line.replace("\n", " "))
+ return process.wait() == 0
+
+
+def open_manifest(manifest_file):
+ """I know how to take a filename and load it into a Manifest object"""
+ if os.path.exists(manifest_file):
+ manifest = Manifest()
+ with open(manifest_file, "r" if PY3 else "rb") as f:
+ manifest.load(f)
+ log.debug("loaded manifest from file '%s'" % manifest_file)
+ return manifest
+ else:
+ log.debug("tried to load absent file '%s' as manifest" % manifest_file)
+ raise InvalidManifest("manifest file '%s' does not exist" % manifest_file)
+
+
+def list_manifest(manifest_file):
+ """I know how print all the files in a location"""
+ try:
+ manifest = open_manifest(manifest_file)
+ except InvalidManifest as e:
+ log.error(
+ "failed to load manifest file at '%s': %s"
+ % (
+ manifest_file,
+ str(e),
+ )
+ )
+ return False
+ for f in manifest.file_records:
+ print(
+ "{}\t{}\t{}".format(
+ "P" if f.present() else "-",
+ "V" if f.present() and f.validate() else "-",
+ f.filename,
+ )
+ )
+ return True
+
+
+def validate_manifest(manifest_file):
+ """I validate that all files in a manifest are present and valid but
+ don't fetch or delete them if they aren't"""
+ try:
+ manifest = open_manifest(manifest_file)
+ except InvalidManifest as e:
+ log.error(
+ "failed to load manifest file at '%s': %s"
+ % (
+ manifest_file,
+ str(e),
+ )
+ )
+ return False
+ invalid_files = []
+ absent_files = []
+ for f in manifest.file_records:
+ if not f.present():
+ absent_files.append(f)
+ else:
+ if not f.validate():
+ invalid_files.append(f)
+ return not (invalid_files or absent_files)
+
+
+def add_files(manifest_file, algorithm, filenames, version, visibility, unpack):
+ # Returns True if all files were added successfully, False if not.
+ # Library exceptions are not caught. If any file is already tracked
+ # in the manifest, the return value will be False because that file
+ # was not added.
+ all_files_added = True
+ # Create an old_manifest object to add to
+ if os.path.exists(manifest_file):
+ old_manifest = open_manifest(manifest_file)
+ else:
+ old_manifest = Manifest()
+ log.debug("creating a new manifest file")
+ new_manifest = Manifest() # use a different manifest for the output
+ for filename in filenames:
+ log.debug("adding %s" % filename)
+ path, name = os.path.split(filename)
+ new_fr = create_file_record(filename, algorithm)
+ new_fr.version = version
+ new_fr.visibility = visibility
+ new_fr.unpack = unpack
+ log.debug("appending a new file record to manifest file")
+ add = True
+ for fr in old_manifest.file_records:
+ log.debug(
+ "manifest file has '%s'"
+ % "', ".join([x.filename for x in old_manifest.file_records])
+ )
+ if new_fr == fr:
+ log.info("file already in old_manifest")
+ add = False
+ elif filename == fr.filename:
+ log.error(
+ "manifest already contains a different file named %s" % filename
+ )
+ add = False
+ if add:
+ new_manifest.file_records.append(new_fr)
+ log.debug("added '%s' to manifest" % filename)
+ else:
+ all_files_added = False
+ # copy any files in the old manifest that aren't in the new one
+ new_filenames = set(fr.filename for fr in new_manifest.file_records)
+ for old_fr in old_manifest.file_records:
+ if old_fr.filename not in new_filenames:
+ new_manifest.file_records.append(old_fr)
+ if PY3:
+ with open(manifest_file, mode="w") as output:
+ new_manifest.dump(output, fmt="json")
+ else:
+ with open(manifest_file, mode="wb") as output:
+ new_manifest.dump(output, fmt="json")
+ return all_files_added
+
+
+def touch(f):
+ """Used to modify mtime in cached files;
+ mtime is used by the purge command"""
+ try:
+ os.utime(f, None)
+ except OSError:
+ log.warn("impossible to update utime of file %s" % f)
+
+
+@contextmanager
+@retriable(sleeptime=2)
+def request(url, auth_file=None):
+ req = Request(url)
+ _authorize(req, auth_file)
+ with closing(urllib2.urlopen(req)) as f:
+ log.debug("opened %s for reading" % url)
+ yield f
+
+
+def fetch_file(base_urls, file_record, grabchunk=1024 * 4, auth_file=None, region=None):
+ # A file that is requested to be fetched and already exists locally
+ # will be overwritten by this function
+ fd, temp_path = tempfile.mkstemp(dir=os.getcwd())
+ os.close(fd)
+ fetched_path = None
+ for base_url in base_urls:
+ # Generate the URL for the file on the server side
+ url = urljoin(base_url, "%s/%s" % (file_record.algorithm, file_record.digest))
+ if region is not None:
+ url += "?region=" + region
+
+ log.info("Attempting to fetch from '%s'..." % base_url)
+
+ # Well, the file doesn't exist locally. Let's fetch it.
+ try:
+ with request(url, auth_file) as f, open(temp_path, mode="wb") as out:
+ size = 0
+ while True:
+ # TODO: print statistics as file transfers happen both for info and to stop
+ # buildbot timeouts
+ indata = f.read(grabchunk)
+ if not indata:
+ break
+ out.write(indata)
+ size += len(indata)
+ log.info(
+ "File %s fetched from %s as %s"
+ % (file_record.filename, base_url, temp_path)
+ )
+ fetched_path = temp_path
+ break
+ except (URLError, HTTPError, ValueError):
+ log.info(
+ "...failed to fetch '%s' from %s" % (file_record.filename, base_url),
+ exc_info=True,
+ )
+ except IOError: # pragma: no cover
+ log.info(
+ "failed to write to temporary file for '%s'" % file_record.filename,
+ exc_info=True,
+ )
+
+ # cleanup temp file in case of issues
+ if fetched_path:
+ return os.path.split(fetched_path)[1]
+ else:
+ try:
+ os.remove(temp_path)
+ except OSError: # pragma: no cover
+ pass
+ return None
+
+
+def clean_path(dirname):
+ """Remove a subtree if is exists. Helper for unpack_file()."""
+ if os.path.exists(dirname):
+ log.info("rm tree: %s" % dirname)
+ shutil.rmtree(dirname)
+
+
+CHECKSUM_SUFFIX = ".checksum"
+
+
+def unpack_file(filename):
+ """Untar `filename`, assuming it is uncompressed or compressed with bzip2,
+ xz, gzip, or unzip a zip file. The file is assumed to contain a single
+ directory with a name matching the base of the given filename.
+ Xz support is handled by shelling out to 'tar'."""
+ if os.path.isfile(filename) and tarfile.is_tarfile(filename):
+ tar_file, zip_ext = os.path.splitext(filename)
+ base_file, tar_ext = os.path.splitext(tar_file)
+ clean_path(base_file)
+ log.info('untarring "%s"' % filename)
+ tar = tarfile.open(filename)
+ tar.extractall()
+ tar.close()
+ elif os.path.isfile(filename) and filename.endswith(".tar.xz"):
+ base_file = filename.replace(".tar.xz", "")
+ clean_path(base_file)
+ log.info('untarring "%s"' % filename)
+ # Not using tar -Jxf because it fails on Windows for some reason.
+ process = Popen(["xz", "-d", "-c", filename], stdout=PIPE)
+ stdout, stderr = process.communicate()
+ if process.returncode != 0:
+ return False
+ fileobj = BytesIO()
+ fileobj.write(stdout)
+ fileobj.seek(0)
+ tar = tarfile.open(fileobj=fileobj, mode="r|")
+ tar.extractall()
+ tar.close()
+ elif os.path.isfile(filename) and zipfile.is_zipfile(filename):
+ base_file = filename.replace(".zip", "")
+ clean_path(base_file)
+ log.info('unzipping "%s"' % filename)
+ z = zipfile.ZipFile(filename)
+ z.extractall()
+ z.close()
+ else:
+ log.error("Unknown archive extension for filename '%s'" % filename)
+ return False
+ return True
+
+
+def fetch_files(
+ manifest_file,
+ base_urls,
+ filenames=(),  # immutable default avoids the mutable-default pitfall
+ cache_folder=None,
+ auth_file=None,
+ region=None,
+):
+ # Let's load the manifest file
+ try:
+ manifest = open_manifest(manifest_file)
+ except InvalidManifest as e:
+ log.error(
+ "failed to load manifest file at '%s': %s"
+ % (
+ manifest_file,
+ str(e),
+ )
+ )
+ return False
+
+ # we want to track files already in current working directory AND valid
+ # we will not need to fetch these
+ present_files = []
+
+ # We want to track files that fail to be fetched as well as
+ # files that are fetched
+ failed_files = []
+ fetched_files = []
+
+ # Files that we want to unpack.
+ unpack_files = []
+
+ # Let's go through the manifest and fetch the files that we want
+ for f in manifest.file_records:
+ # case 1: files are already present
+ if f.present():
+ if f.validate():
+ present_files.append(f.filename)
+ if f.unpack:
+ unpack_files.append(f.filename)
+ else:
+ # we have an invalid file here; better to clean it up!
+ # this invalid file needs to be replaced with a good one
+ # from the local cache or fetched from a tooltool server
+ log.info(
+ "File %s is present locally but it is invalid, so I will remove it "
+ "and try to fetch it" % f.filename
+ )
+ os.remove(os.path.join(os.getcwd(), f.filename))
+
+ # check if file is already in cache
+ if cache_folder and f.filename not in present_files:
+ try:
+ shutil.copy(
+ os.path.join(cache_folder, f.digest),
+ os.path.join(os.getcwd(), f.filename),
+ )
+ log.info(
+ "File %s retrieved from local cache %s" % (f.filename, cache_folder)
+ )
+ touch(os.path.join(cache_folder, f.digest))
+
+ filerecord_for_validation = FileRecord(
+ f.filename, f.size, f.digest, f.algorithm
+ )
+ if filerecord_for_validation.validate():
+ present_files.append(f.filename)
+ if f.unpack:
+ unpack_files.append(f.filename)
+ else:
+ # the file copied from the cache is invalid, better to
+ # clean up the cache version itself as well
+ log.warning(
+ "File %s retrieved from cache is invalid! I am deleting it from the "
+ "cache as well" % f.filename
+ )
+ os.remove(os.path.join(os.getcwd(), f.filename))
+ os.remove(os.path.join(cache_folder, f.digest))
+ except IOError:
+ log.info(
+ "File %s not present in local cache folder %s"
+ % (f.filename, cache_folder)
+ )
+
+ # now try to fetch every file that is not already present and valid,
+ # downloading to a temporary name to avoid race conditions
+ temp_file_name = None
+ # 'filenames' is the list of filenames to manage; when it is non-empty
+ # it acts as a filter. if a filename is in present_files, we already
+ # have it, either from the working dir or from the cache
+ if (
+ f.filename in filenames or len(filenames) == 0
+ ) and f.filename not in present_files:
+ log.debug("fetching %s" % f.filename)
+ temp_file_name = fetch_file(
+ base_urls, f, auth_file=auth_file, region=region
+ )
+ if temp_file_name:
+ fetched_files.append((f, temp_file_name))
+ else:
+ failed_files.append(f.filename)
+ else:
+ log.debug("skipping %s" % f.filename)
+
+ # let's ensure that fetched files match what the manifest specified
+ for localfile, temp_file_name in fetched_files:
+ # since the download went to a temp file, all validations must run
+ # against the temp file; this is why filerecord_for_validation is created
+
+ filerecord_for_validation = FileRecord(
+ temp_file_name, localfile.size, localfile.digest, localfile.algorithm
+ )
+
+ if filerecord_for_validation.validate():
+ # great!
+ # I can rename the temp file
+ log.info(
+ "File integrity verified, renaming %s to %s"
+ % (temp_file_name, localfile.filename)
+ )
+ os.rename(
+ os.path.join(os.getcwd(), temp_file_name),
+ os.path.join(os.getcwd(), localfile.filename),
+ )
+
+ if localfile.unpack:
+ unpack_files.append(localfile.filename)
+
+ # if I am using a cache and a new file has just been retrieved from a
+ # remote location, I need to update the cache as well
+ if cache_folder:
+ log.info("Updating local cache %s..." % cache_folder)
+ try:
+ if not os.path.exists(cache_folder):
+ log.info("Creating cache in %s..." % cache_folder)
+ os.makedirs(cache_folder, 0o0700)
+ shutil.copy(
+ os.path.join(os.getcwd(), localfile.filename),
+ os.path.join(cache_folder, localfile.digest),
+ )
+ log.info(
+ "Local cache %s updated with %s"
+ % (cache_folder, localfile.filename)
+ )
+ touch(os.path.join(cache_folder, localfile.digest))
+ except (OSError, IOError):
+ log.warning(
+ "Impossible to add file %s to cache folder %s"
+ % (localfile.filename, cache_folder),
+ exc_info=True,
+ )
+ else:
+ failed_files.append(localfile.filename)
+ log.error("'%s'" % filerecord_for_validation.describe())
+ os.remove(temp_file_name)
+
+ # Unpack files that need to be unpacked.
+ for filename in unpack_files:
+ if not unpack_file(filename):
+ failed_files.append(filename)
+
+ # If we failed to fetch or validate a file, we need to fail
+ if len(failed_files) > 0:
+ log.error("The following files failed: '%s'" % "', ".join(failed_files))
+ return False
+ return True
+
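+# Illustrative call (manifest name, URL and paths fabricated): fetch
+# everything a manifest lists, preferring a local cache:
+#
+#     ok = fetch_files(
+#         "manifest.tt",
+#         ["https://tooltool.example/tooltool/"],
+#         cache_folder="/tmp/tooltool-cache",
+#     )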
+
+def freespace(p):
+ "Returns the number of bytes free under directory `p`"
+ if sys.platform == "win32": # pragma: no cover
+ # os.statvfs doesn't work on Windows
+ import win32file
+
+ secsPerClus, bytesPerSec, nFreeClus, totClus = win32file.GetDiskFreeSpace(p)
+ return secsPerClus * bytesPerSec * nFreeClus
+ else:
+ r = os.statvfs(p)
+ return r.f_frsize * r.f_bavail
+
+
+def purge(folder, gigs):
+ """If gigs is non 0, it deletes files in `folder` until `gigs` GB are free,
+ starting from older files. If gigs is 0, a full purge will be performed.
+ No recursive deletion of files in subfolder is performed."""
+
+ full_purge = bool(gigs == 0)
+ gigs *= 1024 * 1024 * 1024
+
+ if not full_purge and freespace(folder) >= gigs:
+ log.info("No need to cleanup")
+ return
+
+ files = []
+ for f in os.listdir(folder):
+ p = os.path.join(folder, f)
+ # it deletes files in folder without going into subfolders,
+ # assuming the cache has a flat structure
+ if not os.path.isfile(p):
+ continue
+ mtime = os.path.getmtime(p)
+ files.append((mtime, p))
+
+ # iterate files sorted by mtime
+ for _, f in sorted(files):
+ log.info("removing %s to free up space" % f)
+ try:
+ os.remove(f)
+ except OSError:
+ log.info("Failed to remove %s" % f, exc_info=True)
+ if not full_purge and freespace(folder) >= gigs:
+ break
+
+
+def _log_api_error(e):
+ if hasattr(e, "hdrs") and e.hdrs.get("content-type") == "application/json":
+ json_resp = json.load(e.fp)
+ log.error(
+ "%s: %s" % (json_resp["error"]["name"], json_resp["error"]["description"])
+ )
+ else:
+ log.exception("Error making RelengAPI request:")
+
+
+def _authorize(req, auth_file):
+ if not auth_file:
+ return
+
+ is_taskcluster_auth = False
+ with open(auth_file) as f:
+ auth_file_content = f.read().strip()
+ try:
+ auth_file_content = json.loads(auth_file_content)
+ is_taskcluster_auth = True
+ except Exception:
+ pass
+
+ if is_taskcluster_auth:
+ taskcluster_header = make_taskcluster_header(auth_file_content, req)
+ log.debug("Using taskcluster credentials in %s" % auth_file)
+ req.add_unredirected_header("Authorization", taskcluster_header)
+ else:
+ log.debug("Using Bearer token in %s" % auth_file)
+ req.add_unredirected_header("Authorization", "Bearer %s" % auth_file_content)
+
+
+def _send_batch(base_url, auth_file, batch, region):
+ url = urljoin(base_url, "upload")
+ if region is not None:
+ url += "?region=" + region
+ data = json.dumps(batch)
+ if PY3:
+ data = data.encode("utf-8")
+ req = Request(url, data, {"Content-Type": "application/json"})
+ _authorize(req, auth_file)
+ try:
+ resp = urllib2.urlopen(req)
+ except (URLError, HTTPError) as e:
+ _log_api_error(e)
+ return None
+ return json.load(resp)["result"]
+
+
+def _s3_upload(filename, file):
+ # urllib2 does not support streaming, so we fall back to good old httplib
+ url = urlparse(file["put_url"])
+ cls = HTTPSConnection if url.scheme == "https" else HTTPConnection
+ default_port = 443 if url.scheme == "https" else 80
+ host, port = url.netloc.split(":") if ":" in url.netloc else (url.netloc, default_port)
+ port = int(port)
+ conn = cls(host, port)
+ try:
+ req_path = "%s?%s" % (url.path, url.query) if url.query else url.path
+ with open(filename, "rb") as f:
+ # stream the file; take its length from the filesystem rather than
+ # reading the whole payload into memory
+ content_length = os.path.getsize(filename)
+ conn.request(
+ "PUT",
+ req_path,
+ f,
+ {
+ "Content-Type": "application/octet-stream",
+ "Content-Length": str(content_length),
+ },
+ )
+ resp = conn.getresponse()
+ resp_body = resp.read()
+ conn.close()
+ if resp.status != 200:
+ raise RuntimeError(
+ "Non-200 return from AWS: %s %s\n%s"
+ % (resp.status, resp.reason, resp_body)
+ )
+ except Exception:
+ file["upload_exception"] = sys.exc_info()
+ file["upload_ok"] = False
+ else:
+ file["upload_ok"] = True
+
+
+def _notify_upload_complete(base_url, auth_file, file):
+ req = Request(urljoin(base_url, "upload/complete/%(algorithm)s/%(digest)s" % file))
+ _authorize(req, auth_file)
+ try:
+ urllib2.urlopen(req)
+ except HTTPError as e:
+ if e.code != 409:
+ _log_api_error(e)
+ return
+ # 409 indicates that the upload URL hasn't expired yet and we
+ # should retry after a delay
+ to_wait = int(e.headers.get("X-Retry-After", 60))
+ log.warning("Waiting %d seconds for upload URLs to expire" % to_wait)
+ time.sleep(to_wait)
+ _notify_upload_complete(base_url, auth_file, file)
+ except Exception:
+ log.exception("While notifying server of upload completion:")
+
+
+def upload(manifest, message, base_urls, auth_file, region):
+ try:
+ manifest = open_manifest(manifest)
+ except InvalidManifest:
+ log.exception("failed to load manifest file at '%s'" % manifest)
+ return False
+
+ # verify the manifest, since we'll need the files present to upload
+ if not manifest.validate():
+ log.error("manifest is invalid")
+ return False
+
+ if any(fr.visibility is None for fr in manifest.file_records):
+ log.error("All files in a manifest for upload must have a visibility set")
+ return False
+
+ # convert the manifest to an upload batch
+ batch = {
+ "message": message,
+ "files": {},
+ }
+ for fr in manifest.file_records:
+ batch["files"][fr.filename] = {
+ "size": fr.size,
+ "digest": fr.digest,
+ "algorithm": fr.algorithm,
+ "visibility": fr.visibility,
+ }
+
+ # make the upload request
+ resp = _send_batch(base_urls[0], auth_file, batch, region)
+ if not resp:
+ return None
+ files = resp["files"]
+
+ # Upload the files, each in a thread. This allows us to start all of the
+ # uploads before any of the URLs expire.
+ threads = {}
+ for filename, file in files.items():
+ if "put_url" in file:
+ log.info("%s: starting upload" % (filename,))
+ thd = threading.Thread(target=_s3_upload, args=(filename, file))
+ thd.daemon = True
+ thd.start()
+ threads[filename] = thd
+ else:
+ log.info("%s: already exists on server" % (filename,))
+
+ # re-join all of those threads as they exit
+ success = True
+ while threads:
+ for filename, thread in list(threads.items()):
+ if not thread.is_alive():
+ # _s3_upload has annotated file with result information
+ file = files[filename]
+ thread.join()
+ if file["upload_ok"]:
+ log.info("%s: uploaded" % filename)
+ else:
+ log.error(
+ "%s: failed" % filename, exc_info=file["upload_exception"]
+ )
+ success = False
+ del threads[filename]
+
+ # notify the server that the uploads are completed. If the notification
+ # fails, we don't consider that an error (the server will notice
+ # eventually)
+ for filename, file in files.items():
+ if "put_url" in file and file["upload_ok"]:
+ log.info("notifying server of upload completion for %s" % (filename,))
+ _notify_upload_complete(base_urls[0], auth_file, file)
+
+ return success
+
+
+def send_operation_on_file(data, base_urls, digest, auth_file):
+ url = base_urls[0]
+ url = urljoin(url, "file/sha512/" + digest)
+
+ data = json.dumps(data)
+ if PY3:
+ data = data.encode("utf-8")  # request bodies must be bytes on Python 3
+
+ req = Request(url, data, {"Content-Type": "application/json"})
+ req.get_method = lambda: "PATCH"
+
+ _authorize(req, auth_file)
+
+ try:
+ urllib2.urlopen(req)
+ except (URLError, HTTPError) as e:
+ _log_api_error(e)
+ return False
+ return True
+
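+# Illustrative request body (fabricated digest): the two helpers below
+# PATCH file/sha512/<digest> with a JSON list of operations such as
+# [{"op": "set_visibility", "visibility": "public"}].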
+
+def change_visibility(base_urls, digest, visibility, auth_file):
+ data = [
+ {
+ "op": "set_visibility",
+ "visibility": visibility,
+ }
+ ]
+ return send_operation_on_file(data, base_urls, digest, auth_file)
+
+
+def delete_instances(base_urls, digest, auth_file):
+ data = [
+ {
+ "op": "delete_instances",
+ }
+ ]
+ return send_operation_on_file(data, base_urls, digest, auth_file)
+
+
+def process_command(options, args):
+ """I know how to take a list of program arguments and
+ start doing the right thing with them"""
+ cmd = args[0]
+ cmd_args = args[1:]
+ log.debug("processing '%s' command with args '%s'" % (cmd, '", "'.join(cmd_args)))
+ log.debug("using options: %s" % options)
+
+ if cmd == "list":
+ return list_manifest(options["manifest"])
+ elif cmd == "validate":
+ return validate_manifest(options["manifest"])
+ elif cmd == "add":
+ return add_files(
+ options["manifest"],
+ options["algorithm"],
+ cmd_args,
+ options["version"],
+ options["visibility"],
+ options["unpack"],
+ )
+ elif cmd == "purge":
+ if options["cache_folder"]:
+ purge(folder=options["cache_folder"], gigs=options["size"])
+ return True
+ else:
+ log.critical("please specify the cache folder to be purged")
+ return False
+ elif cmd == "fetch":
+ return fetch_files(
+ options["manifest"],
+ options["base_url"],
+ cmd_args,
+ cache_folder=options["cache_folder"],
+ auth_file=options.get("auth_file"),
+ region=options.get("region"),
+ )
+ elif cmd == "upload":
+ if not options.get("message"):
+ log.critical("upload command requires a message")
+ return False
+ return upload(
+ options.get("manifest"),
+ options.get("message"),
+ options.get("base_url"),
+ options.get("auth_file"),
+ options.get("region"),
+ )
+ elif cmd == "change-visibility":
+ if not options.get("digest"):
+ log.critical("change-visibility command requires a digest option")
+ return False
+ if not options.get("visibility"):
+ log.critical("change-visibility command requires a visibility option")
+ return False
+ return change_visibility(
+ options.get("base_url"),
+ options.get("digest"),
+ options.get("visibility"),
+ options.get("auth_file"),
+ )
+ elif cmd == "delete":
+ if not options.get("digest"):
+ log.critical("delete command requires a digest option")
+ return False
+ return delete_instances(
+ options.get("base_url"),
+ options.get("digest"),
+ options.get("auth_file"),
+ )
+ else:
+ log.critical('command "%s" is not implemented' % cmd)
+ return False
+
+
+def main(argv, _skip_logging=False):
+ # Set up option parsing
+ parser = optparse.OptionParser()
+ parser.add_option(
+ "-q",
+ "--quiet",
+ default=logging.INFO,
+ dest="loglevel",
+ action="store_const",
+ const=logging.ERROR,
+ )
+ parser.add_option(
+ "-v", "--verbose", dest="loglevel", action="store_const", const=logging.DEBUG
+ )
+ parser.add_option(
+ "-m",
+ "--manifest",
+ default=DEFAULT_MANIFEST_NAME,
+ dest="manifest",
+ action="store",
+ help="specify the manifest file to be operated on",
+ )
+ parser.add_option(
+ "-d",
+ "--algorithm",
+ default="sha512",
+ dest="algorithm",
+ action="store",
+ help="hashing algorithm to use (only sha512 is allowed)",
+ )
+ parser.add_option(
+ "--digest",
+ default=None,
+ dest="digest",
+ action="store",
+ help="digest hash to change visibility for",
+ )
+ parser.add_option(
+ "--visibility",
+ default=None,
+ dest="visibility",
+ choices=["internal", "public"],
+        help='Visibility level of this file; "internal" is for '
+        "files that must not be distributed outside the company "
+        '(but are not secrets); "public" files are available to '
+        "anyone without restriction",
+ )
+ parser.add_option(
+ "--unpack",
+ default=False,
+ dest="unpack",
+ action="store_true",
+ help="Request unpacking this file after fetch."
+ " This is helpful with tarballs.",
+ )
+ parser.add_option(
+ "--version",
+ default=None,
+ dest="version",
+ action="store",
+ help="Version string for this file. This annotates the "
+ "manifest entry with a version string to help "
+ "identify the contents.",
+ )
+ parser.add_option(
+ "-o",
+ "--overwrite",
+ default=False,
+ dest="overwrite",
+ action="store_true",
+ help="UNUSED; present for backward compatibility",
+ )
+ parser.add_option(
+ "--url",
+ dest="base_url",
+ action="append",
+ help="RelengAPI URL ending with /tooltool/; default "
+ "is appropriate for Mozilla",
+ )
+ parser.add_option(
+ "-c", "--cache-folder", dest="cache_folder", help="Local cache folder"
+ )
+ parser.add_option(
+ "-s",
+ "--size",
+ help="free space required (in GB)",
+ dest="size",
+ type="float",
+ default=0.0,
+ )
+ parser.add_option(
+ "-r",
+ "--region",
+ help="Preferred AWS region for upload or fetch; " "example: --region=us-west-2",
+ )
+ parser.add_option(
+ "--message",
+ help='The "commit message" for an upload; format with a bug number '
+ "and brief comment",
+ dest="message",
+ )
+ parser.add_option(
+ "--authentication-file",
+ help="Use the RelengAPI token found in the given file to "
+ "authenticate to the RelengAPI server.",
+ dest="auth_file",
+ )
+
+ (options_obj, args) = parser.parse_args(argv[1:])
+
+ if not options_obj.base_url:
+ tooltool_host = os.environ.get("TOOLTOOL_HOST", "tooltool.mozilla-releng.net")
+ taskcluster_proxy_url = os.environ.get("TASKCLUSTER_PROXY_URL")
+ if taskcluster_proxy_url:
+ tooltool_url = "{}/{}".format(taskcluster_proxy_url, tooltool_host)
+ else:
+ tooltool_url = "https://{}".format(tooltool_host)
+
+ options_obj.base_url = [tooltool_url]
+
+ # ensure all URLs have a trailing slash
+ def add_slash(url):
+ return url if url.endswith("/") else (url + "/")
+
+ options_obj.base_url = [add_slash(u) for u in options_obj.base_url]
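+    # For illustration (assumed values, not set by this script): with
+    # TASKCLUSTER_PROXY_URL=http://taskcluster and the default TOOLTOOL_HOST,
+    # base_url ends up as ["http://taskcluster/tooltool.mozilla-releng.net/"].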
+
+ # expand ~ in --authentication-file
+ if options_obj.auth_file:
+ options_obj.auth_file = os.path.expanduser(options_obj.auth_file)
+
+ # Dictionaries are easier to work with
+ options = vars(options_obj)
+
+ log.setLevel(options["loglevel"])
+
+ # Set up logging, for now just to the console
+ if not _skip_logging: # pragma: no cover
+ ch = logging.StreamHandler()
+ cf = logging.Formatter("%(levelname)s - %(message)s")
+ ch.setFormatter(cf)
+ log.addHandler(ch)
+
+ if options["algorithm"] != "sha512":
+ parser.error("only --algorithm sha512 is supported")
+
+ if len(args) < 1:
+ parser.error("You must specify a command")
+
+ return 0 if process_command(options, args) else 1
+
+
+if __name__ == "__main__": # pragma: no cover
+ sys.exit(main(sys.argv))
diff --git a/testing/mozharness/mach_commands.py b/testing/mozharness/mach_commands.py
new file mode 100644
index 0000000000..dc82ff3d5d
--- /dev/null
+++ b/testing/mozharness/mach_commands.py
@@ -0,0 +1,226 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import os
+import re
+import subprocess
+import sys
+
+import mozinfo
+from six.moves.urllib.parse import urljoin
+from six.moves.urllib.request import pathname2url
+
+from mach.decorators import (
+ CommandArgument,
+ Command,
+)
+
+from mozbuild.base import MozbuildObject
+from mozbuild.base import MachCommandConditions as conditions
+from argparse import ArgumentParser
+
+
+def get_parser():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "suite_name",
+ nargs=1,
+ type=str,
+ action="store",
+ help="Suite to run in mozharness",
+ )
+ parser.add_argument(
+ "mozharness_args",
+ nargs=argparse.REMAINDER,
+ help="Extra arguments to pass to mozharness",
+ )
+ return parser
+
+
+class MozharnessRunner(MozbuildObject):
+ def __init__(self, *args, **kwargs):
+ MozbuildObject.__init__(self, *args, **kwargs)
+
+ self.test_packages_url = self._test_packages_url()
+ self.installer_url = self._installer_url()
+
+ desktop_unittest_config = [
+ "--config-file",
+ lambda: self.config_path(
+ "unittests", "%s_unittest.py" % mozinfo.info["os"]
+ ),
+ "--config-file",
+ lambda: self.config_path("developer_config.py"),
+ ]
+
+ self.config = {
+ "__defaults__": {
+ "config": [
+ "--download-symbols",
+ "ondemand",
+ "--installer-url",
+ self.installer_url,
+ "--test-packages-url",
+ self.test_packages_url,
+ ]
+ },
+ "mochitest-valgrind": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config
+ + ["--mochitest-suite", "valgrind-plain"],
+ },
+ "mochitest": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config + ["--mochitest-suite", "plain"],
+ },
+ "mochitest-chrome": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config + ["--mochitest-suite", "chrome"],
+ },
+ "mochitest-browser-chrome": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config
+ + ["--mochitest-suite", "browser-chrome"],
+ },
+ "mochitest-browser-a11y": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config
+ + ["--mochitest-suite", "mochitest-browser-a11y"],
+ },
+ "mochitest-browser-media": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config
+ + ["--mochitest-suite", "mochitest-browser-media"],
+ },
+ "mochitest-devtools-chrome": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config
+ + ["--mochitest-suite", "mochitest-devtools-chrome"],
+ },
+ "mochitest-remote": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config
+ + ["--mochitest-suite", "mochitest-remote"],
+ },
+ "crashtest": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config + ["--reftest-suite", "crashtest"],
+ },
+ "jsreftest": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config + ["--reftest-suite", "jsreftest"],
+ },
+ "reftest": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config + ["--reftest-suite", "reftest"],
+ },
+ "reftest-no-accel": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config
+ + ["--reftest-suite", "reftest-no-accel"],
+ },
+ "cppunittest": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config
+ + ["--cppunittest-suite", "cppunittest"],
+ },
+ "xpcshell": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config + ["--xpcshell-suite", "xpcshell"],
+ },
+ "xpcshell-addons": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config
+ + ["--xpcshell-suite", "xpcshell-addons"],
+ },
+ "jittest": {
+ "script": "desktop_unittest.py",
+ "config": desktop_unittest_config + ["--jittest-suite", "jittest"],
+ },
+ "marionette": {
+ "script": "marionette.py",
+ "config": [
+ "--config-file",
+ self.config_path("marionette", "test_config.py"),
+ ],
+ },
+ "web-platform-tests": {
+ "script": "web_platform_tests.py",
+ "config": [
+ "--config-file",
+ self.config_path("web_platform_tests", self.wpt_config),
+ ],
+ },
+ }
+
+ def path_to_url(self, path):
+ return urljoin("file:", pathname2url(path))
+
+ def _installer_url(self):
+ package_re = {
+            "linux": re.compile(r"^firefox-\d+\..+\.tar\.bz2$"),
+            "win": re.compile(r"^firefox-\d+\..+\.installer\.exe$"),
+            "mac": re.compile(r"^firefox-\d+\..+\.mac(?:64)?\.dmg$"),
+ }[mozinfo.info["os"]]
+ dist_path = os.path.join(self.topobjdir, "dist")
+ filenames = [item for item in os.listdir(dist_path) if package_re.match(item)]
+ assert len(filenames) == 1
+ return self.path_to_url(os.path.join(dist_path, filenames[0]))
+
+ def _test_packages_url(self):
+ dist_path = os.path.join(self.topobjdir, "dist")
+ filenames = [
+ item
+ for item in os.listdir(dist_path)
+ if item.endswith("test_packages.json")
+ ]
+ assert len(filenames) == 1
+ return self.path_to_url(os.path.join(dist_path, filenames[0]))
+
+ def config_path(self, *parts):
+ return self.path_to_url(
+ os.path.join(self.topsrcdir, "testing", "mozharness", "configs", *parts)
+ )
+
+ @property
+ def wpt_config(self):
+ return (
+ "test_config.py"
+ if mozinfo.info["os"] != "win"
+ else "test_config_windows.py"
+ )
+
+ def run_suite(self, suite, **kwargs):
+ default_config = self.config.get("__defaults__")
+ suite_config = self.config.get(suite)
+
+ if suite_config is None:
+ print("Unknown suite %s" % suite)
+ return 1
+
+ script = os.path.join(
+ self.topsrcdir, "testing", "mozharness", "scripts", suite_config["script"]
+ )
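+        # Some entries in the suite config are callables (the lambdas built in
+        # __init__), so mozinfo-dependent config paths are resolved lazily here,
+        # when the suite actually runs.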
+ options = [
+ item() if callable(item) else item
+ for item in default_config["config"] + suite_config["config"]
+ ]
+
+ cmd = [script] + options
+
+ rv = subprocess.call(cmd, cwd=os.path.dirname(script))
+ return rv
+
+
+@Command(
+ "mozharness",
+ category="testing",
+ description="Run tests using mozharness.",
+ conditions=[conditions.is_firefox_or_android],
+ parser=get_parser,
+)
+def mozharness(command_context, **kwargs):
+ runner = command_context._spawn(MozharnessRunner)
+ return runner.run_suite(kwargs.pop("suite_name")[0], **kwargs)
diff --git a/testing/mozharness/moz.build b/testing/mozharness/moz.build
new file mode 100644
index 0000000000..5bc4cdad9b
--- /dev/null
+++ b/testing/mozharness/moz.build
@@ -0,0 +1,8 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+with Files("**"):
+ BUG_COMPONENT = ("Release Engineering", "Applications: MozharnessCore")
diff --git a/testing/mozharness/mozharness/__init__.py b/testing/mozharness/mozharness/__init__.py
new file mode 100644
index 0000000000..ab191837df
--- /dev/null
+++ b/testing/mozharness/mozharness/__init__.py
@@ -0,0 +1,6 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+version = (0, 7)
+version_string = ".".join(["%d" % i for i in version])
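+# e.g. version_string == "0.7" for version == (0, 7)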
diff --git a/testing/mozharness/mozharness/base/__init__.py b/testing/mozharness/mozharness/base/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozharness/mozharness/base/__init__.py
diff --git a/testing/mozharness/mozharness/base/config.py b/testing/mozharness/mozharness/base/config.py
new file mode 100644
index 0000000000..d12b3aecad
--- /dev/null
+++ b/testing/mozharness/mozharness/base/config.py
@@ -0,0 +1,693 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Generic config parsing and dumping, the way I remember it from scripts
+gone by.
+
+The config should be built from script-level defaults, overlaid by
+config-file defaults, overlaid by command line options.
+
+ (For buildbot-analogues that would be factory-level defaults,
+ builder-level defaults, and build request/scheduler settings.)
+
+The config should then be locked (set to read-only, to prevent runtime
+alterations). Afterwards we should dump the config to a file that is
+uploaded with the build, and can be used to debug or replicate the build
+at a later time.
+
+TODO:
+
+* check_required_settings or something -- run at init, assert that
+ these settings are set.
+"""
+
+import os
+import socket
+import sys
+import time
+from copy import deepcopy
+from optparse import Option, OptionGroup, OptionParser
+
+from mozharness.base.log import CRITICAL, DEBUG, ERROR, FATAL, INFO, WARNING
+
+try:
+ from urllib2 import URLError, urlopen
+except ImportError:
+ from urllib.error import URLError
+ from urllib.request import urlopen
+
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+
+# optparse {{{1
+class ExtendedOptionParser(OptionParser):
+ """OptionParser, but with ExtendOption as the option_class."""
+
+ def __init__(self, **kwargs):
+ kwargs["option_class"] = ExtendOption
+ OptionParser.__init__(self, **kwargs)
+
+
+class ExtendOption(Option):
+ """from http://docs.python.org/library/optparse.html?highlight=optparse#adding-new-actions"""
+
+ ACTIONS = Option.ACTIONS + ("extend",)
+ STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
+ TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
+ ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
+
+ def take_action(self, action, dest, opt, value, values, parser):
+ if action == "extend":
+ lvalue = value.split(",")
+ values.ensure_value(dest, []).extend(lvalue)
+ else:
+ Option.take_action(self, action, dest, opt, value, values, parser)
+
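+# Illustrative use of the "extend" action (assumed example, not part of the
+# original module):
+#   parser = ExtendedOptionParser()
+#   parser.add_option("--cfg", action="extend", dest="cfgs", type="string", default=[])
+#   options, _ = parser.parse_args(["--cfg", "a.py,b.py", "--cfg", "c.py"])
+#   options.cfgs == ["a.py", "b.py", "c.py"]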
+
+def make_immutable(item):
+ if isinstance(item, list) or isinstance(item, tuple):
+ result = LockedTuple(item)
+ elif isinstance(item, dict):
+ result = ReadOnlyDict(item)
+ result.lock()
+ else:
+ result = item
+ return result
+
+
+class LockedTuple(tuple):
+ def __new__(cls, items):
+ return tuple.__new__(cls, (make_immutable(x) for x in items))
+
+ def __deepcopy__(self, memo):
+ return [deepcopy(elem, memo) for elem in self]
+
+
+# ReadOnlyDict {{{1
+class ReadOnlyDict(dict):
+ def __init__(self, dictionary):
+ self._lock = False
+ self.update(dictionary.copy())
+
+ def _check_lock(self):
+ assert not self._lock, "ReadOnlyDict is locked!"
+
+ def lock(self):
+ for (k, v) in list(self.items()):
+ self[k] = make_immutable(v)
+ self._lock = True
+
+ def __setitem__(self, *args):
+ self._check_lock()
+ return dict.__setitem__(self, *args)
+
+ def __delitem__(self, *args):
+ self._check_lock()
+ return dict.__delitem__(self, *args)
+
+ def clear(self, *args):
+ self._check_lock()
+ return dict.clear(self, *args)
+
+ def pop(self, *args):
+ self._check_lock()
+ return dict.pop(self, *args)
+
+ def popitem(self, *args):
+ self._check_lock()
+ return dict.popitem(self, *args)
+
+ def setdefault(self, *args):
+ self._check_lock()
+ return dict.setdefault(self, *args)
+
+ def update(self, *args):
+ self._check_lock()
+ dict.update(self, *args)
+
+ def __deepcopy__(self, memo):
+ cls = self.__class__
+ result = cls.__new__(cls)
+ memo[id(self)] = result
+ for k, v in list(self.__dict__.items()):
+ setattr(result, k, deepcopy(v, memo))
+ result._lock = False
+ for k, v in list(self.items()):
+ result[k] = deepcopy(v, memo)
+ return result
+
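+# Illustrative lock semantics (assumed example): locking freezes the dict and
+# everything nested inside it.
+#   d = ReadOnlyDict({"env": {"MOZ": "1"}, "paths": ["a", "b"]})
+#   d.lock()
+#   type(d["paths"])  # -> LockedTuple
+#   d["new"] = 1      # -> AssertionError: ReadOnlyDict is locked!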
+
+DEFAULT_CONFIG_PATH = os.path.join(
+ os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
+ "configs",
+)
+
+
+# parse_config_file {{{1
+def parse_config_file(
+ file_name, quiet=False, search_path=None, config_dict_name="config"
+):
+ """Read a config file and return a dictionary."""
+ file_path = None
+ if os.path.exists(file_name):
+ file_path = file_name
+ else:
+ if not search_path:
+ search_path = [".", DEFAULT_CONFIG_PATH]
+ for path in search_path:
+ if os.path.exists(os.path.join(path, file_name)):
+ file_path = os.path.join(path, file_name)
+ break
+ else:
+ raise IOError("Can't find %s in %s!" % (file_name, search_path))
+ if file_name.endswith(".py"):
+ global_dict = {}
+ local_dict = {}
+ exec(
+ compile(open(file_path, "rb").read(), file_path, "exec"),
+ global_dict,
+ local_dict,
+ )
+ config = local_dict[config_dict_name]
+ elif file_name.endswith(".json"):
+        fh = open(file_path)
+        config = dict(json.load(fh))
+        fh.close()
+ else:
+ raise RuntimeError(
+ "Unknown config file type %s! (config files must end in .json or .py)"
+ % file_name
+ )
+ # TODO return file_path
+ return config
+
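+# Illustrative config file shape (assumed): a .py config must define a
+# module-level dict named "config" (or whatever config_dict_name names), e.g.
+#   # my_config.py
+#   config = {"work_dir": "build", "log_level": "debug"}
+# parse_config_file("my_config.py") then returns that dict.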
+
+def download_config_file(url, file_name):
+ n = 0
+ attempts = 5
+ sleeptime = 60
+ max_sleeptime = 5 * 60
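+    # Retry with exponential backoff: sleep 60s, 120s, 240s, then capped at
+    # 300s, for at most `attempts` tries before giving up.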
+ while True:
+ if n >= attempts:
+ print(
+                "Failed to download from URL %s after %d attempts, quitting..."
+ % (url, attempts)
+ )
+ raise SystemError(-1)
+ try:
+ contents = urlopen(url, timeout=30).read()
+ break
+ except URLError as e:
+ print("Error downloading from url %s: %s" % (url, str(e)))
+ except socket.timeout as e:
+ print("Time out accessing %s: %s" % (url, str(e)))
+ except socket.error as e:
+ print("Socket error when accessing %s: %s" % (url, str(e)))
+ print("Sleeping %d seconds before retrying" % sleeptime)
+ time.sleep(sleeptime)
+ sleeptime = sleeptime * 2
+ if sleeptime > max_sleeptime:
+ sleeptime = max_sleeptime
+ n += 1
+
+ try:
+ f = open(file_name, "w")
+ f.write(contents)
+ f.close()
+ except IOError as e:
+ print("Error writing downloaded contents to file %s: %s" % (file_name, str(e)))
+ raise SystemError(-1)
+
+
+# BaseConfig {{{1
+class BaseConfig(object):
+ """Basic config setting/getting."""
+
+ def __init__(
+ self,
+ config=None,
+ initial_config_file=None,
+ config_options=None,
+ all_actions=None,
+ default_actions=None,
+ volatile_config=None,
+ option_args=None,
+ require_config_file=False,
+ append_env_variables_from_configs=False,
+ usage="usage: %prog [options]",
+ ):
+ self._config = {}
+ self.all_cfg_files_and_dicts = []
+ self.actions = []
+ self.config_lock = False
+ self.require_config_file = require_config_file
+        # Allow appending env variables from multiple config files
+ self.append_env_variables_from_configs = append_env_variables_from_configs
+
+ if all_actions:
+ self.all_actions = all_actions[:]
+ else:
+ self.all_actions = ["clobber", "build"]
+ if default_actions:
+ self.default_actions = default_actions[:]
+ else:
+ self.default_actions = self.all_actions[:]
+ if volatile_config is None:
+ self.volatile_config = {
+ "actions": None,
+ "add_actions": None,
+ "no_actions": None,
+ }
+ else:
+ self.volatile_config = deepcopy(volatile_config)
+
+ if config:
+ self.set_config(config)
+ if initial_config_file:
+ initial_config = parse_config_file(initial_config_file)
+ self.all_cfg_files_and_dicts.append((initial_config_file, initial_config))
+ self.set_config(initial_config)
+ # Since initial_config_file is only set when running unit tests,
+ # if no option_args have been specified, then the parser will
+ # parse sys.argv which in this case would be the command line
+ # options specified to run the tests, e.g. nosetests -v. Clearly,
+ # the options passed to nosetests (such as -v) should not be
+ # interpreted by mozharness as mozharness options, so we specify
+ # a dummy command line with no options, so that the parser does
+ # not add anything from the test invocation command line
+ # arguments to the mozharness options.
+ if option_args is None:
+ option_args = [
+ "dummy_mozharness_script_with_no_command_line_options.py"
+ ]
+ if config_options is None:
+ config_options = []
+ self._create_config_parser(config_options, usage)
+ # we allow manually passing of option args for things like nosetests
+ self.parse_args(args=option_args)
+
+ def get_read_only_config(self):
+ return ReadOnlyDict(self._config)
+
+ def _create_config_parser(self, config_options, usage):
+ self.config_parser = ExtendedOptionParser(usage=usage)
+ self.config_parser.add_option(
+ "--work-dir",
+ action="store",
+ dest="work_dir",
+ type="string",
+ default="build",
+ help="Specify the work_dir (subdir of base_work_dir)",
+ )
+ self.config_parser.add_option(
+ "--base-work-dir",
+ action="store",
+ dest="base_work_dir",
+ type="string",
+ default=os.getcwd(),
+ help="Specify the absolute path of the parent of the working directory",
+ )
+ self.config_parser.add_option(
+ "--extra-config-path",
+ action="extend",
+ dest="config_paths",
+ type="string",
+ help="Specify additional paths to search for config files.",
+ )
+ self.config_parser.add_option(
+ "-c",
+ "--config-file",
+ "--cfg",
+ action="extend",
+ dest="config_files",
+ default=[],
+ type="string",
+ help="Specify a config file; can be repeated",
+ )
+ self.config_parser.add_option(
+ "-C",
+ "--opt-config-file",
+ "--opt-cfg",
+ action="extend",
+ dest="opt_config_files",
+ type="string",
+ default=[],
+ help="Specify an optional config file, like --config-file but with no "
+ "error if the file is missing; can be repeated",
+ )
+ self.config_parser.add_option(
+ "--dump-config",
+ action="store_true",
+ dest="dump_config",
+ help="List and dump the config generated from this run to " "a JSON file.",
+ )
+ self.config_parser.add_option(
+ "--dump-config-hierarchy",
+ action="store_true",
+ dest="dump_config_hierarchy",
+            help="Like --dump-config, but also lists the config files used "
+            "to build the config, along with the keys/values each file "
+            "contributed that were not overwritten by a later config "
+            "(i.e. the entries highest in the hierarchy).",
+ )
+ self.config_parser.add_option(
+ "--append-env-variables-from-configs",
+ action="store_true",
+ dest="append_env_variables_from_configs",
+ help="Merge environment variables from config files.",
+ )
+
+ # Logging
+ log_option_group = OptionGroup(self.config_parser, "Logging")
+ log_option_group.add_option(
+ "--log-level",
+ action="store",
+ type="choice",
+ dest="log_level",
+ default=INFO,
+ choices=[DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL],
+ help="Set log level (debug|info|warning|error|critical|fatal)",
+ )
+ log_option_group.add_option(
+ "-q",
+ "--quiet",
+ action="store_false",
+ dest="log_to_console",
+ default=True,
+ help="Don't log to the console",
+ )
+ log_option_group.add_option(
+ "--append-to-log",
+ action="store_true",
+ dest="append_to_log",
+ default=False,
+ help="Append to the log",
+ )
+ log_option_group.add_option(
+ "--multi-log",
+ action="store_const",
+ const="multi",
+ dest="log_type",
+ help="Log using MultiFileLogger",
+ )
+ log_option_group.add_option(
+ "--simple-log",
+ action="store_const",
+ const="simple",
+ dest="log_type",
+ help="Log using SimpleFileLogger",
+ )
+ self.config_parser.add_option_group(log_option_group)
+
+ # Actions
+ action_option_group = OptionGroup(
+ self.config_parser,
+ "Actions",
+ "Use these options to list or enable/disable actions.",
+ )
+ action_option_group.add_option(
+ "--list-actions",
+ action="store_true",
+ dest="list_actions",
+ help="List all available actions, then exit",
+ )
+ action_option_group.add_option(
+ "--add-action",
+ action="extend",
+ dest="add_actions",
+ metavar="ACTIONS",
+            help="Add action (one of %s) to the list of actions" % self.all_actions,
+ )
+ action_option_group.add_option(
+ "--no-action",
+ action="extend",
+ dest="no_actions",
+ metavar="ACTIONS",
+ help="Don't perform action",
+ )
+ action_option_group.add_option(
+ "--requires-gpu",
+ action="store_true",
+ dest="requires_gpu",
+ default=False,
+            help="Indicates if the task requires a GPU.",
+ )
+ for action in self.all_actions:
+ action_option_group.add_option(
+ "--%s" % action,
+ action="append_const",
+ dest="actions",
+ const=action,
+ help="Add %s to the limited list of actions" % action,
+ )
+ action_option_group.add_option(
+ "--no-%s" % action,
+ action="append_const",
+ dest="no_actions",
+ const=action,
+ help="Remove %s from the list of actions to perform" % action,
+ )
+ self.config_parser.add_option_group(action_option_group)
+ # Child-specified options
+ # TODO error checking for overlapping options
+ if config_options:
+ for option in config_options:
+ self.config_parser.add_option(*option[0], **option[1])
+
+ # Initial-config-specified options
+ config_options = self._config.get("config_options", None)
+ if config_options:
+ for option in config_options:
+ self.config_parser.add_option(*option[0], **option[1])
+
+ def set_config(self, config, overwrite=False):
+ """This is probably doable some other way."""
+ if self._config and not overwrite:
+ self._config.update(config)
+ else:
+ self._config = config
+ return self._config
+
+ def get_actions(self):
+ return self.actions
+
+ def verify_actions(self, action_list, quiet=False):
+ for action in action_list:
+ if action not in self.all_actions:
+ if not quiet:
+ print("Invalid action %s not in %s!" % (action, self.all_actions))
+ raise SystemExit(-1)
+ return action_list
+
+ def verify_actions_order(self, action_list):
+ try:
+ indexes = [self.all_actions.index(elt) for elt in action_list]
+ sorted_indexes = sorted(indexes)
+ for i in range(len(indexes)):
+ if indexes[i] != sorted_indexes[i]:
+ print(
+                    "Action %s comes in a different order in %s\nthan in %s"
+ % (action_list[i], action_list, self.all_actions)
+ )
+ raise SystemExit(-1)
+ except ValueError as e:
+ print("Invalid action found: " + str(e))
+ raise SystemExit(-1)
+
+ def list_actions(self):
+ print("Actions available:")
+ for a in self.all_actions:
+ print(" " + ("*" if a in self.default_actions else " "), a)
+ raise SystemExit(0)
+
+ def get_cfgs_from_files(self, all_config_files, options):
+ """Returns the configuration derived from the list of configuration
+ files. The result is represented as a list of `(filename,
+ config_dict)` tuples; they will be combined with keys in later
+ dictionaries taking precedence over earlier.
+
+ `all_config_files` is all files specified with `--config-file` and
+ `--opt-config-file`; `options` is the argparse options object giving
+ access to any other command-line options.
+
+ This function is also responsible for downloading any configuration
+ files specified by URL. It uses ``parse_config_file`` in this module
+ to parse individual files.
+
+ This method can be overridden in a subclass to add extra logic to the
+ way that self.config is made up. See
+ `mozharness.mozilla.building.buildbase.BuildingConfig` for an example.
+ """
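+        # Illustration (assumed filenames): --config-file a.py --config-file b.py
+        # yields [("a.py", {...}), ("b.py", {...})]; when the dicts are merged
+        # later in parse_args, keys from b.py win over a.py.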
+ config_paths = options.config_paths or ["."]
+ all_cfg_files_and_dicts = []
+ for cf in all_config_files:
+ try:
+ if "://" in cf: # config file is an url
+ file_name = os.path.basename(cf)
+ file_path = os.path.join(os.getcwd(), file_name)
+ download_config_file(cf, file_path)
+ all_cfg_files_and_dicts.append(
+ (
+ file_path,
+ parse_config_file(
+ file_path,
+ search_path=["."],
+ ),
+ )
+ )
+ else:
+ all_cfg_files_and_dicts.append(
+ (
+ cf,
+ parse_config_file(
+ cf,
+ search_path=config_paths + [DEFAULT_CONFIG_PATH],
+ ),
+ )
+ )
+ except Exception:
+ if cf in options.opt_config_files:
+ print("WARNING: optional config file not found %s" % cf)
+ else:
+ raise
+
+ if "EXTRA_MOZHARNESS_CONFIG" in os.environ:
+ env_config = json.loads(os.environ["EXTRA_MOZHARNESS_CONFIG"])
+            all_cfg_files_and_dicts.append(("[EXTRA_MOZHARNESS_CONFIG]", env_config))
+
+ return all_cfg_files_and_dicts
+
+ def parse_args(self, args=None):
+ """Parse command line arguments in a generic way.
+ Return the parser object after adding the basic options, so
+ child objects can manipulate it.
+ """
+ self.command_line = " ".join(sys.argv)
+ if args is None:
+ args = sys.argv[1:]
+ (options, args) = self.config_parser.parse_args(args)
+
+ defaults = self.config_parser.defaults.copy()
+
+ if not options.config_files:
+ if self.require_config_file:
+ if options.list_actions:
+ self.list_actions()
+ print("Required config file not set! (use --config-file option)")
+ raise SystemExit(-1)
+
+ os.environ["REQUIRE_GPU"] = "0"
+ if options.requires_gpu:
+ os.environ["REQUIRE_GPU"] = "1"
+
+        # this is what get_cfgs_from_files returns. It will represent each
+        # config file name and its associated dict,
+        # e.g. ('builds/branch_specifics.py', {'foo': 'bar'})
+ # let's store this to self for things like --interpret-config-files
+ self.all_cfg_files_and_dicts.extend(
+ self.get_cfgs_from_files(
+ # append opt_config to allow them to overwrite previous configs
+ options.config_files + options.opt_config_files,
+ options=options,
+ )
+ )
+ config = {}
+ if (
+ self.append_env_variables_from_configs
+ or options.append_env_variables_from_configs
+ ):
+ # We only append values from various configs for the 'env' entry
+ # For everything else we follow the standard behaviour
+ for i, (c_file, c_dict) in enumerate(self.all_cfg_files_and_dicts):
+ for v in list(c_dict.keys()):
+ if v == "env" and v in config:
+ config[v].update(c_dict[v])
+ else:
+ config[v] = c_dict[v]
+ else:
+ for i, (c_file, c_dict) in enumerate(self.all_cfg_files_and_dicts):
+ config.update(c_dict)
+ # assign or update self._config depending on if it exists or not
+ # NOTE self._config will be passed to ReadOnlyConfig's init -- a
+ # dict subclass with immutable locking capabilities -- and serve
+ # as the keys/values that make up that instance. Ultimately,
+ # this becomes self.config during BaseScript's init
+ self.set_config(config)
+
+ for key in list(defaults.keys()):
+ value = getattr(options, key)
+ if value is None:
+ continue
+ # Don't override config_file defaults with config_parser defaults
+ if key in defaults and value == defaults[key] and key in self._config:
+ continue
+ self._config[key] = value
+
+ # The idea behind the volatile_config is we don't want to save this
+ # info over multiple runs. This defaults to the action-specific
+ # config options, but can be anything.
+ for key in list(self.volatile_config.keys()):
+ if self._config.get(key) is not None:
+ self.volatile_config[key] = self._config[key]
+ del self._config[key]
+
+ self.update_actions()
+ if options.list_actions:
+ self.list_actions()
+
+ # Keep? This is for saving the volatile config in the dump_config
+ self._config["volatile_config"] = self.volatile_config
+
+ self.options = options
+ self.args = args
+ return (self.options, self.args)
+
+ def update_actions(self):
+ """Update actions after reading in config.
+
+ Seems a little complex, but the logic goes:
+
+ First, if default_actions is specified in the config, set our
+ default actions even if the script specifies other default actions.
+
+ Without any other action-specific options, run with default actions.
+
+ If we specify --ACTION or --only-ACTION once or multiple times,
+ we want to override the default_actions list with the one(s) we list.
+
+ Otherwise, if we specify --add-action ACTION, we want to add an
+ action to the list.
+
+ Finally, if we specify --no-ACTION, remove that from the list of
+ actions to perform.
+ """
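+        # Example (assumed values): with all_actions = ["clobber", "build",
+        # "upload"] and default actions ["build", "upload"], --clobber makes
+        # actions == ["clobber"], --add-action clobber appends it to the
+        # defaults, and --no-upload then removes upload from the result.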
+ if self._config.get("default_actions"):
+ default_actions = self.verify_actions(self._config["default_actions"])
+ self.default_actions = default_actions
+ self.verify_actions_order(self.default_actions)
+ self.actions = self.default_actions[:]
+ if self.volatile_config["actions"]:
+ actions = self.verify_actions(self.volatile_config["actions"])
+ self.actions = actions
+ elif self.volatile_config["add_actions"]:
+ actions = self.verify_actions(self.volatile_config["add_actions"])
+ self.actions.extend(actions)
+ if self.volatile_config["no_actions"]:
+ actions = self.verify_actions(self.volatile_config["no_actions"])
+ for action in actions:
+ if action in self.actions:
+ self.actions.remove(action)
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ pass
diff --git a/testing/mozharness/mozharness/base/diskutils.py b/testing/mozharness/mozharness/base/diskutils.py
new file mode 100644
index 0000000000..d61661cc36
--- /dev/null
+++ b/testing/mozharness/mozharness/base/diskutils.py
@@ -0,0 +1,169 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""Disk utility module, no mixins here!
+
+ examples:
+ 1) get disk size
+ from mozharness.base.diskutils import DiskInfo, DiskutilsError
+ ...
+ try:
+ DiskSize().get_size(path='/', unit='Mb')
+ except DiskutilsError as e:
+ # manage the exception e.g: log.error(e)
+ pass
+ log.info("%s" % di)
+
+
+ 2) convert disk size:
+ from mozharness.base.diskutils import DiskutilsError, convert_to
+ ...
+ file_size = <function that gets file size in bytes>
+ # convert file_size to GB
+ try:
+ file_size = convert_to(file_size, from_unit='bytes', to_unit='GB')
+ except DiskutilsError as e:
+ # manage the exception e.g: log.error(e)
+ pass
+
+"""
+import ctypes
+import logging
+import os
+import sys
+
+from mozharness.base.log import INFO, numeric_log_level
+from six import string_types
+
+# use mozharness log
+log = logging.getLogger(__name__)
+
+
+class DiskutilsError(Exception):
+ """Exception thrown by Diskutils module"""
+
+ pass
+
+
+def convert_to(size, from_unit, to_unit):
+    """Helper function to convert filesystem sizes between bytes/kB/MB/GB/TB.
+    Valid values for from_unit and to_unit are:
+    * bytes
+    * kB
+    * MB
+    * GB
+    * TB
+    returns: size converted from from_unit to to_unit.
+ """
+ sizes = {
+ "bytes": 1,
+ "kB": 1024,
+ "MB": 1024 * 1024,
+ "GB": 1024 * 1024 * 1024,
+ "TB": 1024 * 1024 * 1024 * 1024,
+ }
+ try:
+ df = sizes[to_unit]
+ sf = sizes[from_unit]
+ # pylint --py3k W1619
+ return size * sf / df
+ except KeyError:
+        raise DiskutilsError("conversion error: Invalid source or destination unit")
+ except TypeError:
+ raise DiskutilsError("conversion error: size (%s) is not a number" % size)
+
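+# Example conversions (illustrative of the function above):
+#   convert_to(2048, "bytes", "kB") == 2.0
+#   convert_to(1, "GB", "MB") == 1024.0
+# True division means sub-unit results come back as floats, e.g.
+#   convert_to(512, "bytes", "kB") == 0.5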
+
+class DiskInfo(object):
+ """Stores basic information about the disk"""
+
+ def __init__(self):
+ self.unit = "bytes"
+ self.free = 0
+ self.used = 0
+ self.total = 0
+
+ def __str__(self):
+ string = ["Disk space info (in %s)" % self.unit]
+ string += ["total: %s" % self.total]
+ string += ["used: %s" % self.used]
+ string += ["free: %s" % self.free]
+ return " ".join(string)
+
+ def _to(self, unit):
+ from_unit = self.unit
+ to_unit = unit
+ self.free = convert_to(self.free, from_unit=from_unit, to_unit=to_unit)
+ self.used = convert_to(self.used, from_unit=from_unit, to_unit=to_unit)
+ self.total = convert_to(self.total, from_unit=from_unit, to_unit=to_unit)
+ self.unit = unit
+
+
+class DiskSize(object):
+ """DiskSize object"""
+
+ @staticmethod
+ def _posix_size(path):
+ """returns the disk size in bytes
+ disk size is relative to path
+ """
+ # we are on a POSIX system
+ st = os.statvfs(path)
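+        # statvfs reports sizes in f_frsize-byte fragments: f_bavail counts
+        # blocks available to unprivileged users, f_bfree all free blocks.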
+ disk_info = DiskInfo()
+ disk_info.free = st.f_bavail * st.f_frsize
+ disk_info.used = (st.f_blocks - st.f_bfree) * st.f_frsize
+ disk_info.total = st.f_blocks * st.f_frsize
+ return disk_info
+
+ @staticmethod
+ def _windows_size(path):
+ """returns size in bytes, works only on windows platforms"""
+ # we're on a non POSIX system (windows)
+ # DLL call
+ disk_info = DiskInfo()
+ dummy = ctypes.c_ulonglong() # needed by the dll call but not used
+ total = ctypes.c_ulonglong() # stores the total space value
+ free = ctypes.c_ulonglong() # stores the free space value
+ # depending on path format (unicode or not) and python version (2 or 3)
+ # we need to call GetDiskFreeSpaceExW or GetDiskFreeSpaceExA
+ called_function = ctypes.windll.kernel32.GetDiskFreeSpaceExA
+ if isinstance(path, string_types) or sys.version_info >= (3,):
+ called_function = ctypes.windll.kernel32.GetDiskFreeSpaceExW
+ # we're ready for the dll call. On error it returns 0
+ if (
+ called_function(
+ path, ctypes.byref(dummy), ctypes.byref(total), ctypes.byref(free)
+ )
+ != 0
+ ):
+ # success, we can use the values returned by the dll call
+ disk_info.free = free.value
+ disk_info.total = total.value
+ disk_info.used = total.value - free.value
+ return disk_info
+
+ @staticmethod
+ def get_size(path, unit, log_level=INFO):
+ """Disk info stats:
+ total => size of the disk
+ used => space used
+ free => free space
+        In case of error raises a DiskutilsError exception.
+ """
+ try:
+ # let's try to get the disk size using os module
+ disk_info = DiskSize()._posix_size(path)
+ except AttributeError:
+ try:
+ # os module failed. let's try to get the size using
+ # ctypes.windll...
+ disk_info = DiskSize()._windows_size(path)
+ except AttributeError:
+                # No luck! This is neither a POSIX nor a Windows platform;
+                # raise an exception
+ raise DiskutilsError("Unsupported platform")
+
+ disk_info._to(unit)
+ lvl = numeric_log_level(log_level)
+ log.log(lvl, msg="%s" % disk_info)
+ return disk_info
diff --git a/testing/mozharness/mozharness/base/errors.py b/testing/mozharness/mozharness/base/errors.py
new file mode 100755
index 0000000000..814dd2e045
--- /dev/null
+++ b/testing/mozharness/mozharness/base/errors.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Generic error lists.
+
+Error lists are used to parse output in mozharness.base.log.OutputParser.
+
+Each line of output is matched against each substring or regular expression
+in the error list. On a match, we determine the 'level' of that line,
+whether IGNORE, DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL.
+
+TODO: Context lines (requires work on the OutputParser side)
+
+TODO: We could also create classes that generate these, but with the
+appropriate level (please don't die on any errors; please die on any
+warning; etc.) or platform or language or whatever.
+"""
+
+import re
+
+from mozharness.base.log import CRITICAL, DEBUG, ERROR, WARNING
+
+
+# Exceptions
+class VCSException(Exception):
+ pass
+
+
+# ErrorLists {{{1
+BaseErrorList = [{"substr": r"""command not found""", "level": ERROR}]
+
+HgErrorList = BaseErrorList + [
+ {
+ "regex": re.compile(r"""^abort:"""),
+ "level": ERROR,
+ "explanation": "Automation Error: hg not responding",
+ },
+ {
+ "substr": r"""unknown exception encountered""",
+ "level": ERROR,
+ "explanation": "Automation Error: python exception in hg",
+ },
+ {
+ "substr": r"""failed to import extension""",
+ "level": WARNING,
+ "explanation": "Automation Error: hg extension missing",
+ },
+]
+
+GitErrorList = BaseErrorList + [
+ {"substr": r"""Permission denied (publickey).""", "level": ERROR},
+ {"substr": r"""fatal: The remote end hung up unexpectedly""", "level": ERROR},
+ {"substr": r"""does not appear to be a git repository""", "level": ERROR},
+ {"substr": r"""error: src refspec""", "level": ERROR},
+ {"substr": r"""invalid author/committer line -""", "level": ERROR},
+ {"substr": r"""remote: fatal: Error in object""", "level": ERROR},
+ {
+ "substr": r"""fatal: sha1 file '<stdout>' write error: Broken pipe""",
+ "level": ERROR,
+ },
+ {"substr": r"""error: failed to push some refs to """, "level": ERROR},
+ {"substr": r"""remote: error: denying non-fast-forward """, "level": ERROR},
+ {"substr": r"""! [remote rejected] """, "level": ERROR},
+ {"regex": re.compile(r"""remote:.*No such file or directory"""), "level": ERROR},
+]
+
+PythonErrorList = BaseErrorList + [
+ {"regex": re.compile(r"""Warning:.*Error: """), "level": WARNING},
+ {"regex": re.compile(r"""package.*> Error:"""), "level": ERROR},
+ {"substr": r"""Traceback (most recent call last)""", "level": ERROR},
+ {"substr": r"""SyntaxError: """, "level": ERROR},
+ {"substr": r"""TypeError: """, "level": ERROR},
+ {"substr": r"""NameError: """, "level": ERROR},
+ {"substr": r"""ZeroDivisionError: """, "level": ERROR},
+ {"regex": re.compile(r"""raise \w*Exception: """), "level": CRITICAL},
+ {"regex": re.compile(r"""raise \w*Error: """), "level": CRITICAL},
+]
+
+VirtualenvErrorList = [
+ {"substr": r"""not found or a compiler error:""", "level": WARNING},
+    {"regex": re.compile(r"""\d+: error: """), "level": ERROR},
+    {"regex": re.compile(r"""\d+: warning: """), "level": WARNING},
+ {
+ "regex": re.compile(r"""Downloading .* \(.*\): *([0-9]+%)? *[0-9\.]+[kmKM]b"""),
+ "level": DEBUG,
+ },
+] + PythonErrorList
+
+RustErrorList = [
+ {"regex": re.compile(r"""error\[E\d+\]:"""), "level": ERROR},
+ {"substr": r"""error: Could not compile""", "level": ERROR},
+ {"substr": r"""error: aborting due to previous error""", "level": ERROR},
+ {"substr": r"""thread 'main' panicked at""", "level": ERROR},
+]
+
+# We may need to have various MakefileErrorLists for differing amounts of
+# warning-ignoring-ness.
+MakefileErrorList = (
+ BaseErrorList
+ + PythonErrorList
+ + RustErrorList
+ + [
+ {"substr": r""": error: """, "level": ERROR},
+ {"substr": r"""No rule to make target """, "level": ERROR},
+ {"regex": re.compile(r"""akefile.*was not found\."""), "level": ERROR},
+ {"regex": re.compile(r"""Stop\.$"""), "level": ERROR},
+ {
+ "regex": re.compile(r"""make\[\d+\]: \*\*\* \[.*\] Error \d+"""),
+ "level": ERROR,
+ },
+ {"regex": re.compile(r""":\d+: warning:"""), "level": WARNING},
+ {"regex": re.compile(r"""make(?:\[\d+\])?: \*\*\*/"""), "level": ERROR},
+ {"substr": r"""Warning: """, "level": WARNING},
+ ]
+)
+
+TarErrorList = BaseErrorList + [
+ {"substr": r"""(stdin) is not a bzip2 file.""", "level": ERROR},
+ {"regex": re.compile(r"""Child returned status [1-9]"""), "level": ERROR},
+ {"substr": r"""Error exit delayed from previous errors""", "level": ERROR},
+ {"substr": r"""stdin: unexpected end of file""", "level": ERROR},
+ {"substr": r"""stdin: not in gzip format""", "level": ERROR},
+ {"substr": r"""Cannot exec: No such file or directory""", "level": ERROR},
+ {"substr": r""": Error is not recoverable: exiting now""", "level": ERROR},
+]
+
+ZipErrorList = BaseErrorList + [
+ {
+ "substr": r"""zip warning:""",
+ "level": WARNING,
+ },
+ {
+ "substr": r"""zip error:""",
+ "level": ERROR,
+ },
+ {
+ "substr": r"""Cannot open file: it does not appear to be a valid archive""",
+ "level": ERROR,
+ },
+]
+
+ZipalignErrorList = BaseErrorList + [
+ {
+ "regex": re.compile(r"""Unable to open .* as a zip archive"""),
+ "level": ERROR,
+ },
+ {
+ "regex": re.compile(r"""Output file .* exists"""),
+ "level": ERROR,
+ },
+ {
+ "substr": r"""Input and output can't be the same file""",
+ "level": ERROR,
+ },
+]
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ """TODO: unit tests."""
+ pass
diff --git a/testing/mozharness/mozharness/base/log.py b/testing/mozharness/mozharness/base/log.py
new file mode 100755
index 0000000000..3276696751
--- /dev/null
+++ b/testing/mozharness/mozharness/base/log.py
@@ -0,0 +1,783 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Generic logging classes and functionalities for single and multi file logging.
+Capturing console output and providing general logging functionalities.
+
+Attributes:
+ FATAL_LEVEL (int): constant logging level value set based on the logging.CRITICAL
+ value
+ DEBUG (str): mozharness `debug` log name
+ INFO (str): mozharness `info` log name
+ WARNING (str): mozharness `warning` log name
+    ERROR (str): mozharness `error` log name
+    CRITICAL (str): mozharness `critical` log name
+ FATAL (str): mozharness `fatal` log name
+ IGNORE (str): mozharness `ignore` log name
+ LOG_LEVELS (dict): mapping of the mozharness log level names to logging values
+ ROOT_LOGGER (logging.Logger): instance of a logging.Logger class
+
+TODO:
+- network logging support.
+- log rotation config
+"""
+
+import logging
+import os
+import sys
+import time
+import traceback
+from datetime import datetime
+
+from six import binary_type
+
+# Define our own FATAL_LEVEL
+FATAL_LEVEL = logging.CRITICAL + 10
+logging.addLevelName(FATAL_LEVEL, "FATAL")
+
+# mozharness log levels.
+DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL, IGNORE = (
+ "debug",
+ "info",
+ "warning",
+ "error",
+ "critical",
+ "fatal",
+ "ignore",
+)
+
+
+LOG_LEVELS = {
+ DEBUG: logging.DEBUG,
+ INFO: logging.INFO,
+ WARNING: logging.WARNING,
+ ERROR: logging.ERROR,
+ CRITICAL: logging.CRITICAL,
+ FATAL: FATAL_LEVEL,
+}
+
+# mozharness root logger
+ROOT_LOGGER = logging.getLogger()
+
+# Force logging to use UTC timestamps
+logging.Formatter.converter = time.gmtime
+
+
+# LogMixin {{{1
+class LogMixin(object):
+ """This is a mixin for any object to access similar logging functionality
+
+    The logging functionality described here is especially useful for those
+ objects with self.config and self.log_obj member variables
+ """
+
+ def _log_level_at_least(self, level):
+        """Check if the current logging level is greater than or equal to level
+
+ Args:
+ level (str): log level name to compare against mozharness log levels
+ names
+
+ Returns:
+            bool: True if the current logging level is greater than or equal
+                  to level, False otherwise
+ """
+ log_level = INFO
+ levels = [DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL]
+ if hasattr(self, "config"):
+ log_level = self.config.get("log_level", INFO)
+ return levels.index(level) >= levels.index(log_level)
+
+ def _print(self, message, stderr=False):
+ """prints a message to the sys.stdout or sys.stderr according to the
+ value of the stderr argument.
+
+ Args:
+ message (str): The message to be printed
+ stderr (bool, optional): if True, message will be printed to
+ sys.stderr. Defaults to False.
+
+ Returns:
+ None
+ """
+ if not hasattr(self, "config") or self.config.get("log_to_console", True):
+ if stderr:
+ print(message, file=sys.stderr)
+ else:
+ print(message)
+
+ def log(self, message, level=INFO, exit_code=-1):
+ """log the message passed to it according to level, exit if level == FATAL
+
+ Args:
+ message (str): message to be logged
+ level (str, optional): logging level of the message. Defaults to INFO
+            exit_code (int, optional): exit code to log before the script raises
+                SystemExit.
+
+ Returns:
+ None
+ """
+ if self.log_obj:
+ return self.log_obj.log_message(
+ message,
+ level=level,
+ exit_code=exit_code,
+ post_fatal_callback=self._post_fatal,
+ )
+ if level == INFO:
+ if self._log_level_at_least(level):
+ self._print(message)
+ elif level == DEBUG:
+ if self._log_level_at_least(level):
+ self._print("DEBUG: %s" % message)
+ elif level in (WARNING, ERROR, CRITICAL):
+ if self._log_level_at_least(level):
+ self._print("%s: %s" % (level.upper(), message), stderr=True)
+ elif level == FATAL:
+ if self._log_level_at_least(level):
+ self._print("FATAL: %s" % message, stderr=True)
+ raise SystemExit(exit_code)
+
+ def worst_level(self, target_level, existing_level, levels=None):
+ """Compare target_level with existing_level according to levels values
+ and return the worst among them.
+
+ Args:
+ target_level (str): minimum logging level to which the current object
+ should be set
+ existing_level (str): current logging level
+ levels (list(str), optional): list of logging levels names to compare
+ target_level and existing_level against.
+ Defaults to mozharness log level
+ list sorted from most to less critical.
+
+ Returns:
+            str: the logging level that is closest to the first levels value,
+                 i.e. levels[0]
+ """
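+        # For instance (illustrative): worst_level(WARNING, ERROR) returns
+        # ERROR, since ERROR precedes WARNING in the default ordering below.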
+ if not levels:
+ levels = [FATAL, CRITICAL, ERROR, WARNING, INFO, DEBUG, IGNORE]
+ if target_level not in levels:
+ self.fatal("'%s' not in %s'." % (target_level, levels))
+ for l in levels:
+ if l in (target_level, existing_level):
+ return l
+
+ # Copying Bear's dumpException():
+ # https://hg.mozilla.org/build/tools/annotate/1485f23c38e0/sut_tools/sut_lib.py#l23
+ def exception(self, message=None, level=ERROR):
+        """log an exception message based on the log level passed to it.
+
+ This function fetches the information of the current exception being handled and
+ adds it to the message argument.
+
+ Args:
+ message (str, optional): message to be printed at the beginning of the log.
+                Defaults to an empty string.
+ level (str, optional): log level to use for the logging. Defaults to ERROR
+
+ Returns:
+ None
+ """
+ tb_type, tb_value, tb_traceback = sys.exc_info()
+ if message is None:
+ message = ""
+ else:
+ message = "%s\n" % message
+ for s in traceback.format_exception(tb_type, tb_value, tb_traceback):
+ message += "%s\n" % s
+ # Log at the end, as a fatal will attempt to exit after the 1st line.
+ self.log(message, level=level)
+
+ def debug(self, message):
+ """calls the log method with DEBUG as logging level
+
+ Args:
+ message (str): message to log
+ """
+ self.log(message, level=DEBUG)
+
+ def info(self, message):
+ """calls the log method with INFO as logging level
+
+ Args:
+ message (str): message to log
+ """
+ self.log(message, level=INFO)
+
+ def warning(self, message):
+ """calls the log method with WARNING as logging level
+
+ Args:
+ message (str): message to log
+ """
+ self.log(message, level=WARNING)
+
+ def error(self, message):
+ """calls the log method with ERROR as logging level
+
+ Args:
+ message (str): message to log
+ """
+ self.log(message, level=ERROR)
+
+ def critical(self, message):
+ """calls the log method with CRITICAL as logging level
+
+ Args:
+ message (str): message to log
+ """
+ self.log(message, level=CRITICAL)
+
+ def fatal(self, message, exit_code=-1):
+ """calls the log method with FATAL as logging level
+
+ Args:
+ message (str): message to log
+ exit_code (int, optional): exit code to use for the SystemExit
+                exception to be raised. Defaults to -1.
+ """
+ self.log(message, level=FATAL, exit_code=exit_code)
+
+ def _post_fatal(self, message=None, exit_code=None):
+ """Sometimes you want to create a report or cleanup
+ or notify on fatal(); override this method to do so.
+
+ Please don't use this for anything significantly long-running.
+
+ Args:
+            message (str, optional): message to report. Defaults to None
+            exit_code (int, optional): exit code to use for the SystemExit
+                exception to be raised. Defaults to None
+ """
+ pass
+
+
+# OutputParser {{{1
+class OutputParser(LogMixin):
+ """Helper object to parse command output.
+
+ This will buffer output if needed, so we can go back and mark
+ [(linenum - 10) : linenum+10] as errors if need be, without having to
+ get all the output first.
+
+ linenum+10 will be easy; we can set self.num_post_context_lines to 10,
+ and self.num_post_context_lines-- as we mark each line to at least error
+ level X.
+
+ linenum-10 will be trickier. We'll not only need to save the line
+ itself, but also the level that we've set for that line previously,
+ whether by matching on that line, or by a previous line's context.
+ We should only log that line if all output has ended (self.finish() ?);
+ otherwise store a list of dictionaries in self.context_buffer that is
+ buffered up to self.num_pre_context_lines (set to the largest
+ pre-context-line setting in error_list.)
+ """
+
+ def __init__(
+ self, config=None, log_obj=None, error_list=None, log_output=True, **kwargs
+ ):
+ """Initialization method for the OutputParser class
+
+ Args:
+ config (dict, optional): dictionary containing values such as `log_level`
+ or `log_to_console`. Defaults to `None`.
+ log_obj (BaseLogger, optional): instance of the BaseLogger class. Defaults
+ to `None`.
+ error_list (list, optional): list of the error to look for. Defaults to
+ `None`.
+ log_output (boolean, optional): flag for deciding if the commands
+ output should be logged or not.
+ Defaults to `True`.
+ """
+ self.config = config
+ self.log_obj = log_obj
+ self.error_list = error_list or []
+ self.log_output = log_output
+ self.num_errors = 0
+ self.num_warnings = 0
+ # TODO context_lines.
+ # Not in use yet, but will be based off error_list.
+ self.context_buffer = []
+ self.num_pre_context_lines = 0
+ self.num_post_context_lines = 0
+ self.worst_log_level = INFO
+
+ def parse_single_line(self, line):
+ """parse a console output line and check if it matches one in `error_list`,
+ if so then log it according to `log_output`.
+
+ Args:
+ line (str): command line output to parse.
+
+ Returns:
+ If the line hits a match in the error_list, the new log level the line was
+ (or should be) logged at is returned. Otherwise, returns None.
+ """
+ for error_check in self.error_list:
+ # TODO buffer for context_lines.
+ match = False
+ if "substr" in error_check:
+ if error_check["substr"] in line:
+ match = True
+ elif "regex" in error_check:
+ if error_check["regex"].search(line):
+ match = True
+ else:
+ self.warning("error_list: 'substr' and 'regex' not in %s" % error_check)
+ if match:
+ log_level = error_check.get("level", INFO)
+ if self.log_output:
+ message = " %s" % line
+ if error_check.get("explanation"):
+ message += "\n %s" % error_check["explanation"]
+ if error_check.get("summary"):
+ self.add_summary(message, level=log_level)
+ else:
+ self.log(message, level=log_level)
+ if log_level in (ERROR, CRITICAL, FATAL):
+ self.num_errors += 1
+ if log_level == WARNING:
+ self.num_warnings += 1
+ self.worst_log_level = self.worst_level(log_level, self.worst_log_level)
+ return log_level
+
+ if self.log_output:
+ self.info(" %s" % line)
+
+ def add_lines(self, output):
+        """process a string or list of strings, decode them to utf-8, strip
+        any trailing whitespace, and parse them using `parse_single_line`.
+
+        Strings consisting only of whitespace are ignored.
+
+ Args:
+ output (str | list): string or list of string to parse
+ """
+ if not isinstance(output, list):
+ output = [output]
+
+ for line in output:
+ if not line or line.isspace():
+ continue
+
+ if isinstance(line, binary_type):
+ line = line.decode("utf-8", "replace")
+
+ line = line.rstrip()
+ self.parse_single_line(line)
+
+
+# BaseLogger {{{1
+class BaseLogger(object):
+ """Base class in charge of logging handling logic such as creating logging
+ files, dirs, attaching to the console output and managing its output.
+
+ Attributes:
+ LEVELS (dict): flat copy of the `LOG_LEVELS` attribute of the `log` module.
+
+ TODO: status? There may be a status object or status capability in
+ either logging or config that allows you to count the number of
+ error,critical,fatal messages for us to count up at the end (aiming
+ for 0).
+ """
+
+ LEVELS = LOG_LEVELS
+
+ def __init__(
+ self,
+ log_level=INFO,
+ log_format="%(message)s",
+ log_date_format="%H:%M:%S",
+ log_name="test",
+ log_to_console=True,
+ log_dir=".",
+ log_to_raw=False,
+ logger_name="",
+ append_to_log=False,
+ ):
+ """BaseLogger constructor
+
+ Args:
+ log_level (str, optional): mozharness log level name. Defaults to INFO.
+ log_format (str, optional): message format string to instantiate a
+ `logging.Formatter`. Defaults to '%(message)s'
+ log_date_format (str, optional): date format string to instantiate a
+ `logging.Formatter`. Defaults to '%H:%M:%S'
+ log_name (str, optional): name to use for the log files to be created.
+ Defaults to 'test'
+            log_to_console (bool, optional): set to True to attach a console
+                                             handler to the current logger.
+                                             Defaults to True.
+ log_dir (str, optional): directory location to store the log files.
+ Defaults to '.', i.e. current working directory.
+ log_to_raw (bool, optional): set to True in order to create a *raw.log
+ file. Defaults to False.
+ logger_name (str, optional): currently useless parameter. According
+ to the code comments, it could be useful
+ if we were to have multiple logging
+ objects that don't trample each other.
+ append_to_log (bool, optional): set to True if the logging content should
+ be appended to old logging files. Defaults to False
+ """
+
+ self.log_format = log_format
+ self.log_date_format = log_date_format
+ self.log_to_console = log_to_console
+ self.log_to_raw = log_to_raw
+ self.log_level = log_level
+ self.log_name = log_name
+ self.log_dir = log_dir
+ self.append_to_log = append_to_log
+
+ # Not sure what I'm going to use this for; useless unless we
+ # can have multiple logging objects that don't trample each other
+ self.logger_name = logger_name
+
+ self.all_handlers = []
+ self.log_files = {}
+
+ self.create_log_dir()
+
+ def create_log_dir(self):
+        """create the logging directory if it doesn't exist. If a file exists
+        with the same name as the future logging directory, it will be deleted.
+ """
+
+ if os.path.exists(self.log_dir):
+ if not os.path.isdir(self.log_dir):
+ os.remove(self.log_dir)
+ if not os.path.exists(self.log_dir):
+ os.makedirs(self.log_dir)
+ self.abs_log_dir = os.path.abspath(self.log_dir)
+
+ def init_message(self, name=None):
+        """log an init message stating the name passed to it, the current date
+        and time, and the current working directory.
+
+ Args:
+ name (str, optional): name to use for the init log message. Defaults to
+ the current instance class name.
+ """
+
+ if not name:
+ name = self.__class__.__name__
+ self.log_message(
+ "%s online at %s in %s"
+ % (name, datetime.utcnow().strftime("%Y%m%d %H:%M:%SZ"), os.getcwd())
+ )
+
+ def get_logger_level(self, level=None):
+ """translate the level name passed to it and return its numeric value
+ according to `LEVELS` values.
+
+ Args:
+ level (str, optional): level name to be translated. Defaults to the current
+ instance `log_level`.
+
+ Returns:
+ int: numeric value of the log level name passed to it, or 0 (NOTSET) if the
+ name doesn't exist
+ """
+
+ if not level:
+ level = self.log_level
+ return self.LEVELS.get(level, logging.NOTSET)
+
+ def get_log_formatter(self, log_format=None, date_format=None):
+ """create a `logging.Formatter` base on the log and date format.
+
+ Args:
+ log_format (str, optional): log format to use for the Formatter constructor.
+ Defaults to the current instance log format.
+ date_format (str, optional): date format to use for the Formatter constructor.
+ Defaults to the current instance date format.
+
+ Returns:
+ logging.Formatter: instance created based on the passed arguments
+ """
+
+ if not log_format:
+ log_format = self.log_format
+ if not date_format:
+ date_format = self.log_date_format
+ return logging.Formatter(log_format, date_format)
+
+ def new_logger(self):
+ """Create a new logger based on the ROOT_LOGGER instance. By default there are no handlers.
+ The new logger becomes a member variable of the current instance as `self.logger`.
+ """
+
+ self.logger = ROOT_LOGGER
+ self.logger.setLevel(self.get_logger_level())
+ self._clear_handlers()
+ if self.log_to_console:
+ self.add_console_handler()
+ if self.log_to_raw:
+ self.log_files["raw"] = "%s_raw.log" % self.log_name
+ self.add_file_handler(
+ os.path.join(self.abs_log_dir, self.log_files["raw"]),
+ log_format="%(message)s",
+ )
+
+ def _clear_handlers(self):
+ """remove all handlers stored in `self.all_handlers`.
+
+ To prevent dups -- logging will preserve Handlers across
+ objects :(
+ """
+ attrs = dir(self)
+ if "all_handlers" in attrs and "logger" in attrs:
+ for handler in self.all_handlers:
+ self.logger.removeHandler(handler)
+ self.all_handlers = []
+
+ def __del__(self):
+ """BaseLogger class destructor; shutdown, flush and remove all handlers"""
+ logging.shutdown()
+ self._clear_handlers()
+
+ def add_console_handler(self, log_level=None, log_format=None, date_format=None):
+ """create a `logging.StreamHandler` using `sys.stderr` for logging the console
+ output and add it to the `all_handlers` member variable
+
+ Args:
+ log_level (str, optional): accepted but ignored.
+ Defaults to None.
+ log_format (str, optional): format used for the Formatter attached to the
+ StreamHandler. Defaults to None.
+ date_format (str, optional): format used for the Formatter attached to the
+ StreamHandler. Defaults to None.
+ """
+
+ console_handler = logging.StreamHandler()
+ console_handler.setFormatter(
+ self.get_log_formatter(log_format=log_format, date_format=date_format)
+ )
+ self.logger.addHandler(console_handler)
+ self.all_handlers.append(console_handler)
+
+ def add_file_handler(
+ self, log_path, log_level=None, log_format=None, date_format=None
+ ):
+ """create a `logging.FileHandler` base on the path, log and date format
+ and add it to the `all_handlers` member variable.
+
+ Args:
+ log_path (str): filepath to use for the `FileHandler`.
+ log_level (str, optional): log level name for the handler, translated
+ via `get_logger_level`. Defaults to None,
+ i.e. the instance `log_level` is used.
+ log_format (str, optional): log format to use for the Formatter constructor.
+ Defaults to the current instance log format.
+ date_format (str, optional): date format to use for the Formatter constructor.
+ Defaults to the current instance date format.
+ """
+
+ if not self.append_to_log and os.path.exists(log_path):
+ os.remove(log_path)
+ file_handler = logging.FileHandler(log_path)
+ file_handler.setLevel(self.get_logger_level(log_level))
+ file_handler.setFormatter(
+ self.get_log_formatter(log_format=log_format, date_format=date_format)
+ )
+ self.logger.addHandler(file_handler)
+ self.all_handlers.append(file_handler)
+
+ def log_message(self, message, level=INFO, exit_code=-1, post_fatal_callback=None):
+ """Generic log method.
+ There should be more options here -- do or don't split by line,
+ use os.linesep instead of assuming \n, be able to pass in log level
+ by name or number.
+
+ Adding the IGNORE special level for runCommand.
+
+ Args:
+ message (str): message to log using the current `logger`
+ level (str, optional): log level of the message. Defaults to INFO.
+ exit_code (int, optional): exit code to use in case of a FATAL level is used.
+ Defaults to -1.
+ post_fatal_callback (function, optional): function to call back in case
+ of a fatal log level. Defaults to None.
+ """
+
+ if level == IGNORE:
+ return
+ for line in message.splitlines():
+ self.logger.log(self.get_logger_level(level), line)
+ if level == FATAL:
+ if callable(post_fatal_callback):
+ self.logger.log(FATAL_LEVEL, "Running post_fatal callback...")
+ post_fatal_callback(message=message, exit_code=exit_code)
+ self.logger.log(FATAL_LEVEL, "Exiting %d" % exit_code)
+ raise SystemExit(exit_code)
+
+
+# SimpleFileLogger {{{1
+class SimpleFileLogger(BaseLogger):
+ """Subclass of the BaseLogger.
+
+ Create one log file. Possibly also output to the terminal and a raw log
+ (no prepending of level or date).
+ """
+
+ def __init__(
+ self,
+ log_format="%(asctime)s %(levelname)8s - %(message)s",
+ logger_name="Simple",
+ log_dir="logs",
+ **kwargs
+ ):
+ """SimpleFileLogger constructor. Calls its superclass constructor,
+ creates a new logger instance and logs an init message.
+
+ Args:
+ log_format (str, optional): message format string to instantiate a
+ `logging.Formatter`. Defaults to
+ '%(asctime)s %(levelname)8s - %(message)s'
+ logger_name (str, optional): name for the logger instance.
+ Defaults to 'Simple'
+ log_dir (str, optional): directory location to store the log files.
+ Defaults to 'logs'
+ **kwargs: Arbitrary keyword arguments passed to the BaseLogger constructor
+ """
+
+ BaseLogger.__init__(
+ self,
+ logger_name=logger_name,
+ log_format=log_format,
+ log_dir=log_dir,
+ **kwargs
+ )
+ self.new_logger()
+ self.init_message()
+
+ def new_logger(self):
+ """calls the BaseLogger.new_logger method and adds a file handler to it."""
+
+ BaseLogger.new_logger(self)
+ self.log_path = os.path.join(self.abs_log_dir, "%s.log" % self.log_name)
+ self.log_files["default"] = self.log_path
+ self.add_file_handler(self.log_path)
+
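+# A minimal usage sketch (illustrative, not part of the module API): the
+# constructor wires up its handlers itself, so instantiation is enough.
+# ERROR here is this module's level constant:
+#
+#     logger = SimpleFileLogger(log_name="build", log_dir="logs")
+#     logger.log_message("compiling...")           # INFO by default
+#     logger.log_message("bad news", level=ERROR)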
+
+# MultiFileLogger {{{1
+class MultiFileLogger(BaseLogger):
+ """Subclass of the BaseLogger class. Create a log per log level in log_dir.
+ Possibly also output to the terminal and a raw log (no prepending of level or date)
+ """
+
+ def __init__(
+ self,
+ logger_name="Multi",
+ log_format="%(asctime)s %(levelname)8s - %(message)s",
+ log_dir="logs",
+ log_to_raw=True,
+ **kwargs
+ ):
+ """MultiFileLogger constructor. Calls its superclass constructor,
+ creates a new logger instance and logs an init message.
+
+ Args:
+ log_format (str, optional): message format string to instantiate a
+ `logging.Formatter`. Defaults to
+ '%(asctime)s %(levelname)8s - %(message)s'
+ logger_name (str, optional): name for the logger instance.
+ Defaults to 'Multi'
+ log_dir (str, optional): directory location to store the log files.
+ Defaults to 'logs'
+ log_to_raw (bool, optional): set to True in order to create a *raw.log
+ file. Defaults to True.
+ **kwargs: Arbitrary keyword arguments passed to the BaseLogger constructor
+ """
+
+ BaseLogger.__init__(
+ self,
+ logger_name=logger_name,
+ log_format=log_format,
+ log_to_raw=log_to_raw,
+ log_dir=log_dir,
+ **kwargs
+ )
+
+ self.new_logger()
+ self.init_message()
+
+ def new_logger(self):
+ """calls the BaseLogger.new_logger method and adds a file handler per
+ logging level in the `LEVELS` class attribute.
+ """
+
+ BaseLogger.new_logger(self)
+ min_logger_level = self.get_logger_level(self.log_level)
+ for level in list(self.LEVELS.keys()):
+ if self.get_logger_level(level) >= min_logger_level:
+ self.log_files[level] = "%s_%s.log" % (self.log_name, level)
+ self.add_file_handler(
+ os.path.join(self.abs_log_dir, self.log_files[level]),
+ log_level=level,
+ )
+
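+# Illustrative result (assuming the usual lowercase mozharness level names):
+# with log_name='test' and log_level=INFO this creates one file per level at
+# or above INFO (e.g. test_info.log, test_error.log, ...), plus test_raw.log
+# since log_to_raw defaults to True here.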
+
+# ConsoleLogger {{{1
+class ConsoleLogger(BaseLogger):
+ """Subclass of the BaseLogger.
+
+ Output logs to stderr.
+ """
+
+ def __init__(
+ self,
+ log_format="%(levelname)8s - %(message)s",
+ log_date_format="%H:%M:%S",
+ logger_name="Console",
+ **kwargs
+ ):
+ """ConsoleLogger constructor. Calls its superclass constructor,
+ creates a new logger instance and logs an init message.
+
+ Args:
+ log_format (str, optional): message format string to instantiate a
+ `logging.Formatter`. Defaults to
+ '%(levelname)8s - %(message)s'
+ **kwargs: Arbitrary keyword arguments passed to the BaseLogger
+ constructor
+ """
+
+ BaseLogger.__init__(
+ self, logger_name=logger_name, log_format=log_format, **kwargs
+ )
+ self.new_logger()
+ self.init_message()
+
+ def new_logger(self):
+ """Create a new logger based on the ROOT_LOGGER instance. By default
+ there are no handlers. The new logger becomes a member variable of the
+ current instance as `self.logger`.
+ """
+ self.logger = ROOT_LOGGER
+ self.logger.setLevel(self.get_logger_level())
+ self._clear_handlers()
+ self.add_console_handler()
+
+
+def numeric_log_level(level):
+ """Converts a mozharness log level (string) to the corresponding logger
+ level (number). This function makes it possible to set the log level
+ in functions that do not inherit from LogMixin.
+
+ Args:
+ level (str): log level name to convert.
+
+ Returns:
+ int: numeric value of the log level name.
+ """
+ return LOG_LEVELS[level]
+
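+# Example (illustrative, assuming the standard mozharness level names):
+#
+#     numeric_log_level("info")  # -> logging.INFO, i.e. 20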
+
+# __main__ {{{1
+if __name__ == "__main__":
+ """Useless comparison, due to the `pass` keyword on its body"""
+ pass
diff --git a/testing/mozharness/mozharness/base/parallel.py b/testing/mozharness/mozharness/base/parallel.py
new file mode 100755
index 0000000000..678dadeede
--- /dev/null
+++ b/testing/mozharness/mozharness/base/parallel.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Generic ways to parallelize jobs.
+"""
+
+
+# ChunkingMixin {{{1
+class ChunkingMixin(object):
+ """Generic Chunking helper methods."""
+
+ def query_chunked_list(self, possible_list, this_chunk, total_chunks, sort=False):
+ """Split a list of items into a certain number of chunks and
+ return the subset that will occur in this chunk.
+
+ Ported from build.l10n.getLocalesForChunk in build/tools.
+ """
+ if sort:
+ possible_list = sorted(possible_list)
+ else:
+ # Copy to prevent altering
+ possible_list = possible_list[:]
+ length = len(possible_list)
+ for c in range(1, total_chunks + 1):
+ n = length // total_chunks
+ # If the total number of items isn't evenly divisible by the
+ # number of chunks, we need to append one more onto some chunks
+ if c <= (length % total_chunks):
+ n += 1
+ if c == this_chunk:
+ return possible_list[0:n]
+ del possible_list[0:n]
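+
+
+# A worked example (illustrative): five items over three chunks gives the
+# remainder to the earliest chunks:
+#
+#     ChunkingMixin().query_chunked_list(["a", "b", "c", "d", "e"], 1, 3)
+#     # -> ["a", "b"]; chunk 2 -> ["c", "d"]; chunk 3 -> ["e"]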
diff --git a/testing/mozharness/mozharness/base/python.py b/testing/mozharness/mozharness/base/python.py
new file mode 100644
index 0000000000..ba2514c926
--- /dev/null
+++ b/testing/mozharness/mozharness/base/python.py
@@ -0,0 +1,1186 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Python usage, esp. virtualenv.
+"""
+
+import errno
+import json
+import os
+import shutil
+import site
+import socket
+import subprocess
+import sys
+import traceback
+from pathlib import Path
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+import mozharness
+from mozharness.base.errors import VirtualenvErrorList
+from mozharness.base.log import FATAL, WARNING
+from mozharness.base.script import (
+ PostScriptAction,
+ PostScriptRun,
+ PreScriptAction,
+ ScriptMixin,
+)
+from six import string_types
+
+external_tools_path = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
+ "external_tools",
+)
+
+
+def get_tlsv1_post():
+ # Monkeypatch to work around SSL errors in non-bleeding-edge Python.
+ # Taken from https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
+ import ssl
+
+ import requests
+ from requests.packages.urllib3.poolmanager import PoolManager
+
+ class TLSV1Adapter(requests.adapters.HTTPAdapter):
+ def init_poolmanager(self, connections, maxsize, block=False):
+ self.poolmanager = PoolManager(
+ num_pools=connections,
+ maxsize=maxsize,
+ block=block,
+ ssl_version=ssl.PROTOCOL_TLSv1,
+ )
+
+ s = requests.Session()
+ s.mount("https://", TLSV1Adapter())
+ return s.post
+
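+# Illustrative usage; the return value is a bound `requests.Session.post`
+# (the URL below is hypothetical):
+#
+#     post = get_tlsv1_post()
+#     post("https://example.test/submit", data={"key": "value"})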
+
+# Virtualenv {{{1
+virtualenv_config_options = [
+ [
+ ["--virtualenv-path"],
+ {
+ "action": "store",
+ "dest": "virtualenv_path",
+ "default": "venv",
+ "help": "Specify the path to the virtualenv top level directory",
+ },
+ ],
+ [
+ ["--find-links"],
+ {
+ "action": "extend",
+ "dest": "find_links",
+ "default": ["https://pypi.pub.build.mozilla.org/pub/"],
+ "help": "URL to look for packages at",
+ },
+ ],
+ [
+ ["--pip-index"],
+ {
+ "action": "store_true",
+ "default": False,
+ "dest": "pip_index",
+ "help": "Use pip indexes",
+ },
+ ],
+ [
+ ["--no-pip-index"],
+ {
+ "action": "store_false",
+ "dest": "pip_index",
+ "help": "Don't use pip indexes (default)",
+ },
+ ],
+]
+
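+# These options surface on the command line of any script using
+# VirtualenvMixin, e.g. (hypothetical invocation):
+#
+#     python script.py --virtualenv-path venv \
+#         --find-links https://pypi.pub.build.mozilla.org/pub/ --no-pip-index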
+
+class VirtualenvMixin(object):
+ """BaseScript mixin, designed to create and use virtualenvs.
+
+ Config items:
+ * virtualenv_path points to the virtualenv location on disk.
+ * virtualenv_modules lists the module names.
+ * MODULE_url list points to the module URLs (optional)
+ Requires virtualenv to be in PATH.
+ Depends on ScriptMixin
+ """
+
+ python_paths = {}
+ site_packages_path = None
+
+ def __init__(self, *args, **kwargs):
+ self._virtualenv_modules = []
+ super(VirtualenvMixin, self).__init__(*args, **kwargs)
+
+ def register_virtualenv_module(
+ self,
+ name=None,
+ url=None,
+ method=None,
+ requirements=None,
+ optional=False,
+ two_pass=False,
+ editable=False,
+ ):
+ """Register a module to be installed with the virtualenv.
+
+ This method can be called up until create_virtualenv() to register
+ modules that should be installed in the virtualenv.
+
+ See the documentation for install_module for how the arguments are
+ applied.
+ """
+ self._virtualenv_modules.append(
+ (name, url, method, requirements, optional, two_pass, editable)
+ )
+
+ def query_virtualenv_path(self):
+ """Determine the absolute path to the virtualenv."""
+ dirs = self.query_abs_dirs()
+
+ if "abs_virtualenv_dir" in dirs:
+ return dirs["abs_virtualenv_dir"]
+
+ p = self.config["virtualenv_path"]
+ if not p:
+ self.fatal(
+ "virtualenv_path config option not set; " "this should never happen"
+ )
+
+ if os.path.isabs(p):
+ return p
+ else:
+ return os.path.join(dirs["abs_work_dir"], p)
+
+ def query_python_path(self, binary="python"):
+ """Return the path of a binary inside the virtualenv, if
+ c['virtualenv_path'] is set; otherwise return the binary name.
+ Otherwise return None
+ """
+ if binary not in self.python_paths:
+ bin_dir = "bin"
+ if self._is_windows():
+ bin_dir = "Scripts"
+ virtualenv_path = self.query_virtualenv_path()
+ self.python_paths[binary] = os.path.abspath(
+ os.path.join(virtualenv_path, bin_dir, binary)
+ )
+
+ return self.python_paths[binary]
+
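+ # Example (illustrative): with virtualenv_path 'venv' on Linux,
+ # query_python_path("pip") resolves to '<abs_work_dir>/venv/bin/pip';
+ # on Windows the 'bin' segment becomes 'Scripts'.
+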
+ def query_python_site_packages_path(self):
+ if self.site_packages_path:
+ return self.site_packages_path
+ python = self.query_python_path()
+ self.site_packages_path = self.get_output_from_command(
+ [
+ python,
+ "-c",
+ "from distutils.sysconfig import get_python_lib; "
+ + "print(get_python_lib())",
+ ]
+ )
+ return self.site_packages_path
+
+ def package_versions(
+ self, pip_freeze_output=None, error_level=WARNING, log_output=False
+ ):
+ """
+ reads packages from `pip freeze` output and returns a dict of
+ {package_name: 'version'}
+ """
+ packages = {}
+
+ if pip_freeze_output is None:
+ # get the output from `pip freeze`
+ pip = self.query_python_path("pip")
+ if not pip:
+ self.log("package_versions: Program pip not in path", level=error_level)
+ return {}
+ pip_freeze_output = self.get_output_from_command(
+ [pip, "list", "--format", "freeze", "--no-index"],
+ silent=True,
+ ignore_errors=True,
+ )
+ if not isinstance(pip_freeze_output, string_types):
+ self.fatal(
+ "package_versions: Error encountered running `pip freeze`: "
+ + pip_freeze_output
+ )
+
+ for line in pip_freeze_output.splitlines():
+ # parse the output into package, version
+ line = line.strip()
+ if not line:
+ # whitespace
+ continue
+ if line.startswith("-"):
+ # not a package, probably like '-e http://example.com/path#egg=package-dev'
+ continue
+ if "==" not in line:
+ self.fatal("pip_freeze_packages: Unrecognized output line: %s" % line)
+ package, version = line.split("==", 1)
+ packages[package] = version
+
+ if log_output:
+ self.info("Current package versions:")
+ for package in sorted(packages):
+ self.info(" %s == %s" % (package, packages[package]))
+
+ return packages
+
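+ # Example (illustrative): given `pip freeze`-style output
+ #
+ #     mozlog==7.1.0
+ #     six==1.16.0
+ #
+ # this returns {'mozlog': '7.1.0', 'six': '1.16.0'}.
+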
+ def is_python_package_installed(self, package_name, error_level=WARNING):
+ """
+ Return whether the package is installed
+ """
+ # pylint --py3k W1655
+ package_versions = self.package_versions(error_level=error_level)
+ return package_name.lower() in [package.lower() for package in package_versions]
+
+ def install_module(
+ self,
+ module=None,
+ module_url=None,
+ install_method=None,
+ requirements=(),
+ optional=False,
+ global_options=[],
+ no_deps=False,
+ editable=False,
+ ):
+ """
+ Install module via pip.
+
+ module_url can be a url to a python package tarball, a path to
+ a directory containing a setup.py (absolute or relative to work_dir)
+ or None, in which case it will default to the module name.
+
+ requirements is a list of pip requirements files. If specified, these
+ will be combined with the module_url (if any), like so:
+
+ pip install -r requirements1.txt -r requirements2.txt module_url
+ """
+ import http.client
+ import time
+ import urllib.error
+ import urllib.request
+
+ c = self.config
+ dirs = self.query_abs_dirs()
+ env = self.query_env()
+ venv_path = self.query_virtualenv_path()
+ self.info("Installing %s into virtualenv %s" % (module, venv_path))
+ if not module_url:
+ module_url = module
+ if install_method in (None, "pip"):
+ if not module_url and not requirements:
+ self.fatal("Must specify module and/or requirements")
+ pip = self.query_python_path("pip")
+ if c.get("verbose_pip"):
+ command = [pip, "-v", "install"]
+ else:
+ command = [pip, "install"]
+ if no_deps:
+ command += ["--no-deps"]
+ # To avoid timeouts with our pypi server, increase default timeout:
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=1007230#c802
+ command += ["--timeout", str(c.get("pip_timeout", 120))]
+ for requirement in requirements:
+ command += ["-r", requirement]
+ if c.get("find_links") and not c["pip_index"]:
+ command += ["--no-index"]
+ for opt in global_options:
+ command += ["--global-option", opt]
+ elif install_method == "easy_install":
+ if not module:
+ self.fatal(
+ "module parameter required with install_method='easy_install'"
+ )
+ if requirements:
+ # Install pip requirements files separately, since they're
+ # not understood by easy_install.
+ self.install_module(requirements=requirements, install_method="pip")
+ command = [self.query_python_path(), "-m", "easy_install"]
+ else:
+ self.fatal(
+ "install_module() doesn't understand an install_method of %s!"
+ % install_method
+ )
+
+ # find_links connection check while loop
+ find_links_added = 0
+ fl_retry_sleep_seconds = 10
+ fl_max_retry_minutes = 5
+ fl_retry_loops = (fl_max_retry_minutes * 60) / fl_retry_sleep_seconds
+ for link in c.get("find_links", []):
+ parsed = urlparse.urlparse(link)
+ dns_result = None
+ get_result = None
+ retry_counter = 0
+ while retry_counter < fl_retry_loops and (
+ dns_result is None or get_result is None
+ ):
+ try:
+ dns_result = socket.gethostbyname(parsed.hostname)
+ get_result = urllib.request.urlopen(link, timeout=10).read()
+ break
+ except socket.gaierror:
+ retry_counter += 1
+ self.warning(
+ "find_links: dns check failed for %s, sleeping %ss and retrying..."
+ % (parsed.hostname, fl_retry_sleep_seconds)
+ )
+ time.sleep(fl_retry_sleep_seconds)
+ except (
+ urllib.error.HTTPError,
+ urllib.error.URLError,
+ socket.timeout,
+ http.client.RemoteDisconnected,
+ ) as e:
+ retry_counter += 1
+ self.warning(
+ "find_links: connection check failed for %s, sleeping %ss and retrying..."
+ % (link, fl_retry_sleep_seconds)
+ )
+ self.warning("find_links: exception: %s" % e)
+ time.sleep(fl_retry_sleep_seconds)
+ # now that the connectivity check is good, add the link
+ if dns_result and get_result:
+ self.info("find_links: connection checks passed for %s, adding." % link)
+ find_links_added += 1
+ command.extend(["--find-links", link])
+ else:
+ self.warning(
+ "find_links: connection checks failed for %s"
+ ", but max retries reached. continuing..." % link
+ )
+
+ # TODO: make this fatal if we always see failures after this
+ if find_links_added == 0:
+ self.warning(
+ "find_links: no find_links added. pip installation will probably fail!"
+ )
+
+ # module_url can be None if only specifying requirements files
+ if module_url:
+ if editable:
+ if install_method in (None, "pip"):
+ command += ["-e"]
+ else:
+ self.fatal(
+ "editable installs not supported for install_method %s"
+ % install_method
+ )
+ command += [module_url]
+
+ # If we're only installing a single requirements file, use
+ # the file's directory as cwd, so relative paths work correctly.
+ cwd = dirs["abs_work_dir"]
+ if not module and len(requirements) == 1:
+ cwd = os.path.dirname(requirements[0])
+
+ # Allow for errors while building modules, but require a
+ # return status of 0.
+ self.retry(
+ self.run_command,
+ # None will cause default value to be used
+ attempts=1 if optional else None,
+ good_statuses=(0,),
+ error_level=WARNING if optional else FATAL,
+ error_message=("Could not install python package: failed all attempts."),
+ args=[
+ command,
+ ],
+ kwargs={
+ "error_list": VirtualenvErrorList,
+ "cwd": cwd,
+ "env": env,
+ # WARNING only since retry will raise final FATAL if all
+ # retry attempts are unsuccessful - and we only want
+ # an ERROR or FATAL if *no* retry attempt works
+ "error_level": WARNING,
+ },
+ )
+
+ def create_virtualenv(self, modules=(), requirements=()):
+ """
+ Create a python virtualenv.
+
+ This uses the copy of virtualenv that is vendored in mozharness.
+
+ virtualenv_modules can be a list of module names to install, e.g.
+
+ virtualenv_modules = ['module1', 'module2']
+
+ or it can be a heterogeneous list of module names and dicts that
+ define a module by its name, url-or-path, and a list of its global
+ options.
+
+ virtualenv_modules = [
+ {
+ 'name': 'module1',
+ 'url': None,
+ 'global_options': ['--opt', '--without-gcc']
+ },
+ {
+ 'name': 'module2',
+ 'url': 'http://url/to/package',
+ 'global_options': ['--use-clang']
+ },
+ {
+ 'name': 'module3',
+ 'url': os.path.join('path', 'to', 'setup_py', 'dir'),
+ 'global_options': []
+ },
+ 'module4'
+ ]
+
+ virtualenv_requirements is an optional list of pip requirements files to
+ use when invoking pip, e.g.,
+
+ virtualenv_requirements = [
+ '/path/to/requirements1.txt',
+ '/path/to/requirements2.txt'
+ ]
+ """
+ c = self.config
+ dirs = self.query_abs_dirs()
+ venv_path = self.query_virtualenv_path()
+ self.info("Creating virtualenv %s" % venv_path)
+
+ # Always use the virtualenv that is vendored since that is deterministic.
+ # base_work_dir is for when we're running with mozharness.zip, e.g. on
+ # test jobs
+ # abs_src_dir is for when we're running out of a checked out copy of
+ # the source code
+ vendor_search_dirs = [
+ os.path.join("{base_work_dir}", "mozharness"),
+ "{abs_src_dir}",
+ ]
+ if "abs_src_dir" not in dirs and "repo_path" in self.config:
+ dirs["abs_src_dir"] = os.path.normpath(self.config["repo_path"])
+ for d in vendor_search_dirs:
+ try:
+ src_dir = Path(d.format(**dirs))
+ except KeyError:
+ continue
+
+ pip_wheel_path = (
+ src_dir
+ / "third_party"
+ / "python"
+ / "_venv"
+ / "wheels"
+ / "pip-21.2.3-py3-none-any.whl"
+ )
+ setuptools_wheel_path = (
+ src_dir
+ / "third_party"
+ / "python"
+ / "_venv"
+ / "wheels"
+ / "setuptools-51.2.0-py3-none-any.whl"
+ )
+
+ if all(path.exists() for path in (pip_wheel_path, setuptools_wheel_path)):
+ break
+ else:
+ self.fatal("Can't find 'pip' and 'setuptools' wheels")
+
+ venv_python_bin = Path(self.query_python_path())
+
+ if venv_python_bin.exists():
+ self.info(
+ "Virtualenv %s appears to already exist; "
+ "skipping virtualenv creation." % self.query_python_path()
+ )
+ else:
+ self.run_command(
+ [sys.executable, "--version"],
+ )
+
+ # Temporary hack to get around a bug with venv in Python 3.7.3 in CI
+ # https://bugs.python.org/issue36441
+ if self._is_windows():
+ if sys.version_info[:3] == (3, 7, 3):
+ python_exe = Path(sys.executable)
+ debug_exe_dir = (
+ python_exe.parent / "lib" / "venv" / "scripts" / "nt"
+ )
+
+ if debug_exe_dir.exists():
+
+ for executable in {
+ "python.exe",
+ "python_d.exe",
+ "pythonw.exe",
+ "pythonw_d.exe",
+ }:
+ expected_python_debug_exe = debug_exe_dir / executable
+ if not expected_python_debug_exe.exists():
+ shutil.copy(
+ sys.executable, str(expected_python_debug_exe)
+ )
+
+ # We install "--without-pip" since the version of pip bundled with
+ # python is not consistent across versions/platforms and could be
+ # incompatible. We don't use "--upgrade" to get the newest pip
+ # since that would tie us to PyPI being available, which we don't want.
+ self.mkdir_p(dirs["abs_work_dir"])
+ self.run_command(
+ [sys.executable, "-m", "venv", "--without-pip", venv_path],
+ cwd=dirs["abs_work_dir"],
+ error_list=VirtualenvErrorList,
+ halt_on_failure=True,
+ )
+
+ self._ensure_python_exe(venv_python_bin.parent)
+
+ # We can work around a bug on some versions of Python 3.6 on
+ # Windows by copying the 'pyvenv.cfg' of the current venv
+ # to the new venv. This will make the new venv reference
+ # the original Python install instead of the current venv,
+ # which resolves the issue. There shouldn't be any harm in
+ # always doing this, but we'll play it safe and restrict it
+ # to Windows Python 3.6 anyway.
+ if self._is_windows() and sys.version_info[:2] == (3, 6):
+ this_venv = Path(sys.executable).parent.parent
+ this_venv_config = this_venv / "pyvenv.cfg"
+ if this_venv_config.exists():
+ new_venv_config = Path(venv_path) / "pyvenv.cfg"
+ shutil.copyfile(str(this_venv_config), str(new_venv_config))
+
+ # Since we didn't install pip, we can use the pip wheel directly
+ # to install pip itself, and setuptools afterwards. Doing this "self
+ # install" is faster than letting venv install the bundled pip only
+ # to uninstall it when it installs this vendored pip wheel. The path
+ # below points at the `pip` package inside the wheel, which Python can
+ # run directly since wheels are importable zip archives.
+ pip_path = pip_wheel_path / "pip"
+
+ self.run_command(
+ [
+ str(venv_python_bin),
+ str(pip_path),
+ "install",
+ "--only-binary",
+ ":all:",
+ "--disable-pip-version-check",
+ str(pip_wheel_path),
+ str(setuptools_wheel_path),
+ ],
+ cwd=dirs["abs_work_dir"],
+ error_list=VirtualenvErrorList,
+ halt_on_failure=True,
+ )
+
+ self.info(self.platform_name())
+ if self.platform_name().startswith("macos"):
+ tmp_path = "{}/bin/bak".format(venv_path)
+ self.info(
+ "Copying venv python binaries to {} to clear for re-sign".format(
+ tmp_path
+ )
+ )
+ subprocess.call("mkdir -p {}".format(tmp_path), shell=True)
+ subprocess.call(
+ "cp {}/bin/python* {}/".format(venv_path, tmp_path), shell=True
+ )
+ self.info("Replacing venv python binaries with reset copies")
+ subprocess.call(
+ "mv -f {}/* {}/bin/".format(tmp_path, venv_path), shell=True
+ )
+ self.info(
+ "codesign -s - --preserve-metadata=identifier,entitlements,flags,runtime "
+ "-f {}/bin/*".format(venv_path)
+ )
+ subprocess.call(
+ "codesign -s - --preserve-metadata=identifier,entitlements,flags,runtime -f "
+ "{}/bin/python*".format(venv_path),
+ shell=True,
+ )
+
+ if not modules:
+ modules = c.get("virtualenv_modules", [])
+ if not requirements:
+ requirements = c.get("virtualenv_requirements", [])
+ if not modules and requirements:
+ self.install_module(requirements=requirements, install_method="pip")
+ for module in modules:
+ module_url = module
+ global_options = []
+ if isinstance(module, dict):
+ if module.get("name", None):
+ module_name = module["name"]
+ else:
+ self.fatal(
+ "Can't install module without module name: %s" % str(module)
+ )
+ module_url = module.get("url", None)
+ global_options = module.get("global_options", [])
+ else:
+ module_url = self.config.get("%s_url" % module, module_url)
+ module_name = module
+ install_method = "pip"
+ if module_name in ("pywin32",):
+ install_method = "easy_install"
+ self.install_module(
+ module=module_name,
+ module_url=module_url,
+ install_method=install_method,
+ requirements=requirements,
+ global_options=global_options,
+ )
+
+ for (
+ module,
+ url,
+ method,
+ requirements,
+ optional,
+ two_pass,
+ editable,
+ ) in self._virtualenv_modules:
+ if two_pass:
+ self.install_module(
+ module=module,
+ module_url=url,
+ install_method=method,
+ requirements=requirements or (),
+ optional=optional,
+ no_deps=True,
+ editable=editable,
+ )
+ self.install_module(
+ module=module,
+ module_url=url,
+ install_method=method,
+ requirements=requirements or (),
+ optional=optional,
+ editable=editable,
+ )
+
+ self.info("Done creating virtualenv %s." % venv_path)
+
+ self.package_versions(log_output=True)
+
+ def activate_virtualenv(self):
+ """Import the virtualenv's packages into this Python interpreter."""
+ venv_root_dir = Path(self.query_virtualenv_path())
+ venv_name = venv_root_dir.name
+ bin_path = Path(self.query_python_path())
+ bin_dir = bin_path.parent
+
+ if self._is_windows():
+ site_packages_dir = venv_root_dir / "Lib" / "site-packages"
+ else:
+ site_packages_dir = (
+ venv_root_dir
+ / "lib"
+ / "python{}.{}".format(*sys.version_info)
+ / "site-packages"
+ )
+
+ os.environ["PATH"] = os.pathsep.join(
+ [str(bin_dir)] + os.environ.get("PATH", "").split(os.pathsep)
+ )
+ os.environ["VIRTUAL_ENV"] = venv_name
+
+ prev_path = set(sys.path)
+
+ site.addsitedir(str(site_packages_dir.resolve()))
+
+ new_path = list(sys.path)
+
+ sys.path[:] = [p for p in new_path if p not in prev_path] + [
+ p for p in new_path if p in prev_path
+ ]
+
+ sys.real_prefix = sys.prefix
+ sys.prefix = str(venv_root_dir)
+ sys.executable = str(bin_path)
+
+ def _ensure_python_exe(self, python_exe_root: Path):
+ """On some machines in CI venv does not behave consistently. Sometimes
+ only a "python3" executable is created, but we expect "python". Since
+ they are functionally identical, we can just copy "python3" to "python"
+ (and vice-versa) to solve the problem.
+ """
+ python3_exe_path = python_exe_root / "python3"
+ python_exe_path = python_exe_root / "python"
+
+ if self._is_windows():
+ python3_exe_path = python3_exe_path.with_suffix(".exe")
+ python_exe_path = python_exe_path.with_suffix(".exe")
+
+ if python3_exe_path.exists() and not python_exe_path.exists():
+ shutil.copy(str(python3_exe_path), str(python_exe_path))
+
+ if python_exe_path.exists() and not python3_exe_path.exists():
+ shutil.copy(str(python_exe_path), str(python3_exe_path))
+
+ if not python_exe_path.exists() and not python3_exe_path.exists():
+ raise Exception(
+ f'Neither a "{python_exe_path.name}" or "{python3_exe_path.name}" '
+ f"were found. This means something unexpected happened during the "
+ f"virtual environment creation and we cannot proceed."
+ )
+
+
+# This is (sadly) a mixin for logging methods.
+class PerfherderResourceOptionsMixin(ScriptMixin):
+ def perfherder_resource_options(self):
+ """Obtain a list of extraOptions values to identify the env."""
+ opts = []
+
+ if "TASKCLUSTER_INSTANCE_TYPE" in os.environ:
+ # Include the instance type so results can be grouped.
+ opts.append("taskcluster-%s" % os.environ["TASKCLUSTER_INSTANCE_TYPE"])
+ else:
+ # We assume !taskcluster => buildbot.
+ instance = "unknown"
+
+ # Try to load EC2 instance type from metadata file. This file
+ # may not exist in many scenarios (including when inside a chroot).
+ # So treat it as optional.
+ try:
+ # This file should exist on Linux in EC2.
+ with open("/etc/instance_metadata.json", "rb") as fh:
+ im = json.load(fh)
+ instance = im.get("aws_instance_type", "unknown").encode("ascii")
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ self.info(
+ "instance_metadata.json not found; unable to "
+ "determine instance type"
+ )
+ except Exception:
+ self.warning(
+ "error reading instance_metadata: %s" % traceback.format_exc()
+ )
+
+ opts.append("buildbot-%s" % instance)
+
+ return opts
+
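+# Illustrative results: on a Taskcluster worker with
+# TASKCLUSTER_INSTANCE_TYPE=m5.large this yields ['taskcluster-m5.large'];
+# outside Taskcluster, with no instance metadata file, ['buildbot-unknown'].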
+
+class ResourceMonitoringMixin(PerfherderResourceOptionsMixin):
+ """Provides resource monitoring capabilities to scripts.
+
+ When this class is in the inheritance chain, resource usage stats of the
+ executing script will be recorded.
+
+ This class requires the VirtualenvMixin in order to install a package used
+ for recording resource usage.
+
+ While we would like to record resource usage for the entirety of a script,
+ since we require an external package, we can only record resource usage
+ after that package is installed (as part of creating the virtualenv).
+ That's just the way things have to be.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(ResourceMonitoringMixin, self).__init__(*args, **kwargs)
+
+ self.register_virtualenv_module("psutil>=5.9.0", method="pip", optional=True)
+ self.register_virtualenv_module(
+ "mozsystemmonitor==1.0.1", method="pip", optional=True
+ )
+ self.register_virtualenv_module("jsonschema==2.5.1", method="pip")
+ self._resource_monitor = None
+
+ # 2-tuple of (name, options) to assign Perfherder resource monitor
+ # metrics to. This needs to be assigned by a script in order for
+ # Perfherder metrics to be reported.
+ self.resource_monitor_perfherder_id = None
+
+ @PostScriptAction("create-virtualenv")
+ def _start_resource_monitoring(self, action, success=None):
+ self.activate_virtualenv()
+
+ # Resource Monitor requires Python 2.7+; however, it's currently optional.
+ # Remove when all machines have had their Python version updated (bug 711299).
+ if sys.version_info[:2] < (2, 7):
+ self.warning(
+ "Resource monitoring will not be enabled! Python 2.7+ required."
+ )
+ return
+
+ try:
+ from mozsystemmonitor.resourcemonitor import SystemResourceMonitor
+
+ self.info("Starting resource monitoring.")
+ self._resource_monitor = SystemResourceMonitor(poll_interval=1.0)
+ self._resource_monitor.start()
+ except Exception:
+ self.warning(
+ "Unable to start resource monitor: %s" % traceback.format_exc()
+ )
+
+ @PreScriptAction
+ def _resource_record_pre_action(self, action):
+ # Resource monitor isn't available until after create-virtualenv.
+ if not self._resource_monitor:
+ return
+
+ self._resource_monitor.begin_phase(action)
+
+ @PostScriptAction
+ def _resource_record_post_action(self, action, success=None):
+ # Resource monitor isn't available until after create-virtualenv.
+ if not self._resource_monitor:
+ return
+
+ self._resource_monitor.finish_phase(action)
+
+ @PostScriptRun
+ def _resource_record_post_run(self):
+ if not self._resource_monitor:
+ return
+
+ # This should never raise an exception. This is a workaround until
+ # mozsystemmonitor is fixed. See bug 895388.
+ try:
+ self._resource_monitor.stop()
+ self._log_resource_usage()
+
+ # Upload a JSON file containing the raw resource data.
+ try:
+ upload_dir = self.query_abs_dirs()["abs_blob_upload_dir"]
+ if not os.path.exists(upload_dir):
+ os.makedirs(upload_dir)
+ with open(os.path.join(upload_dir, "resource-usage.json"), "w") as fh:
+ json.dump(
+ self._resource_monitor.as_dict(), fh, sort_keys=True, indent=4
+ )
+ except (AttributeError, KeyError):
+ self.exception("could not upload resource usage JSON", level=WARNING)
+
+ except Exception:
+ self.warning(
+ "Exception when reporting resource usage: %s" % traceback.format_exc()
+ )
+
+ def _log_resource_usage(self):
+ # Delay import because not available until virtualenv is populated.
+ import jsonschema
+
+ rm = self._resource_monitor
+
+ if rm.start_time is None:
+ return
+
+ def resources(phase):
+ cpu_percent = rm.aggregate_cpu_percent(phase=phase, per_cpu=False)
+ cpu_times = rm.aggregate_cpu_times(phase=phase, per_cpu=False)
+ io = rm.aggregate_io(phase=phase)
+
+ swap_in = sum(m.swap.sin for m in rm.measurements)
+ swap_out = sum(m.swap.sout for m in rm.measurements)
+
+ return cpu_percent, cpu_times, io, (swap_in, swap_out)
+
+ def log_usage(prefix, duration, cpu_percent, cpu_times, io):
+ message = (
+ "{prefix} - Wall time: {duration:.0f}s; "
+ "CPU: {cpu_percent}; "
+ "Read bytes: {io_read_bytes}; Write bytes: {io_write_bytes}; "
+ "Read time: {io_read_time}; Write time: {io_write_time}"
+ )
+
+ # XXX Some test harnesses are complaining about a string being
+ # fed into a 'f' formatter. This will help diagnose the
+ # issue.
+ if cpu_percent:
+ # pylint: disable=W1633
+ cpu_percent_str = str(round(cpu_percent)) + "%"
+ else:
+ cpu_percent_str = "Can't collect data"
+
+ try:
+ self.info(
+ message.format(
+ prefix=prefix,
+ duration=duration,
+ cpu_percent=cpu_percent_str,
+ io_read_bytes=io.read_bytes,
+ io_write_bytes=io.write_bytes,
+ io_read_time=io.read_time,
+ io_write_time=io.write_time,
+ )
+ )
+
+ except ValueError:
+ self.warning("Exception when formatting: %s" % traceback.format_exc())
+
+ cpu_percent, cpu_times, io, (swap_in, swap_out) = resources(None)
+ duration = rm.end_time - rm.start_time
+
+ # Write out Perfherder data if configured.
+ if self.resource_monitor_perfherder_id:
+ perfherder_name, perfherder_options = self.resource_monitor_perfherder_id
+
+ suites = []
+ overall = []
+
+ if cpu_percent:
+ overall.append(
+ {
+ "name": "cpu_percent",
+ "value": cpu_percent,
+ }
+ )
+
+ overall.extend(
+ [
+ {"name": "io_write_bytes", "value": io.write_bytes},
+ {"name": "io.read_bytes", "value": io.read_bytes},
+ {"name": "io_write_time", "value": io.write_time},
+ {"name": "io_read_time", "value": io.read_time},
+ ]
+ )
+
+ suites.append(
+ {
+ "name": "%s.overall" % perfherder_name,
+ "extraOptions": perfherder_options
+ + self.perfherder_resource_options(),
+ "subtests": overall,
+ }
+ )
+
+ for phase in rm.phases.keys():
+ phase_duration = rm.phases[phase][1] - rm.phases[phase][0]
+ subtests = [
+ {
+ "name": "time",
+ "value": phase_duration,
+ }
+ ]
+ cpu_percent = rm.aggregate_cpu_percent(phase=phase, per_cpu=False)
+ if cpu_percent is not None:
+ subtests.append(
+ {
+ "name": "cpu_percent",
+ "value": rm.aggregate_cpu_percent(
+ phase=phase, per_cpu=False
+ ),
+ }
+ )
+
+ # We don't report I/O during each step because measured I/O
+ # is system I/O and that I/O can be delayed (e.g. writes will
+ # buffer before being flushed and recorded in our metrics).
+ suites.append(
+ {
+ "name": "%s.%s" % (perfherder_name, phase),
+ "subtests": subtests,
+ }
+ )
+
+ data = {
+ "framework": {"name": "job_resource_usage"},
+ "suites": suites,
+ }
+
+ schema_path = os.path.join(
+ external_tools_path, "performance-artifact-schema.json"
+ )
+ with open(schema_path, "rb") as fh:
+ schema = json.load(fh)
+
+ # this will throw an exception that causes the job to fail if the
+ # perfherder data is not valid -- please don't change this
+ # behaviour, otherwise people will inadvertently break this
+ # functionality
+ self.info("Validating Perfherder data against %s" % schema_path)
+ jsonschema.validate(data, schema)
+ self.info("PERFHERDER_DATA: %s" % json.dumps(data))
+
+ log_usage("Total resource usage", duration, cpu_percent, cpu_times, io)
+
+ # Print special messages so usage shows up in Treeherder.
+ if cpu_percent:
+ self._tinderbox_print("CPU usage<br/>{:,.1f}%".format(cpu_percent))
+
+ self._tinderbox_print(
+ "I/O read bytes / time<br/>{:,} / {:,}".format(io.read_bytes, io.read_time)
+ )
+ self._tinderbox_print(
+ "I/O write bytes / time<br/>{:,} / {:,}".format(
+ io.write_bytes, io.write_time
+ )
+ )
+
+ # Print CPU components having >1%. "cpu_times" is a data structure
+ # whose attributes are measurements. Ideally we'd have an API that
+ # returned just the measurements as a dict or something.
+ cpu_attrs = []
+ for attr in sorted(dir(cpu_times)):
+ if attr.startswith("_"):
+ continue
+ if attr in ("count", "index"):
+ continue
+ cpu_attrs.append(attr)
+
+ cpu_total = sum(getattr(cpu_times, attr) for attr in cpu_attrs)
+
+ for attr in cpu_attrs:
+ value = getattr(cpu_times, attr)
+ # cpu_total can be 0.0. Guard against division by 0.
+ # pylint --py3k W1619
+ percent = value / cpu_total * 100.0 if cpu_total else 0.0
+
+ if percent > 1.00:
+ self._tinderbox_print(
+ "CPU {}<br/>{:,.1f} ({:,.1f}%)".format(attr, value, percent)
+ )
+
+ # Swap on Windows isn't reported by psutil.
+ if not self._is_windows():
+ self._tinderbox_print(
+ "Swap in / out<br/>{:,} / {:,}".format(swap_in, swap_out)
+ )
+
+ for phase in rm.phases.keys():
+ start_time, end_time = rm.phases[phase]
+ cpu_percent, cpu_times, io, swap = resources(phase)
+ log_usage(phase, end_time - start_time, cpu_percent, cpu_times, io)
+
+ def _tinderbox_print(self, message):
+ self.info("TinderboxPrint: %s" % message)
+
+
+# This needs to be inherited only if you have already inherited ScriptMixin
+class Python3Virtualenv(object):
+ """Support Python3.5+ virtualenv creation."""
+
+ py3_initialized_venv = False
+
+ def py3_venv_configuration(self, python_path, venv_path):
+ """We don't use __init__ to allow integrating with other mixins.
+
+ python_path - Path to Python 3 binary.
+ venv_path - Path to virtual environment to be created.
+ """
+ self.py3_initialized_venv = True
+ self.py3_python_path = os.path.abspath(python_path)
+ version = self.get_output_from_command(
+ [self.py3_python_path, "--version"], env=self.query_env()
+ ).split()[-1]
+ # Using -m venv is only supported on Python 3.5+; compare version
+ # components numerically, since a plain string comparison breaks for
+ # versions like "3.10.0".
+ assert tuple(int(x) for x in version.split(".")[:2]) >= (3, 5)
+ self.py3_venv_path = os.path.abspath(venv_path)
+ self.py3_pip_path = os.path.join(self.py3_path_to_executables(), "pip")
+
+ def py3_path_to_executables(self):
+ platform = self.platform_name()
+ if platform.startswith("win"):
+ return os.path.join(self.py3_venv_path, "Scripts")
+ else:
+ return os.path.join(self.py3_venv_path, "bin")
+
+ def py3_venv_initialized(func):
+ def call(self, *args, **kwargs):
+ if not self.py3_initialized_venv:
+ raise Exception(
+ "You need to call py3_venv_configuration() "
+ "before using this method."
+ )
+ func(self, *args, **kwargs)
+
+ return call
+
+ @py3_venv_initialized
+ def py3_create_venv(self):
+ """Create Python environment with python3 -m venv /path/to/venv."""
+ if os.path.exists(self.py3_venv_path):
+ self.info(
+ "Virtualenv %s appears to already exist; skipping "
+ "virtualenv creation." % self.py3_venv_path
+ )
+ else:
+ self.info("Running command...")
+ self.run_command(
+ "%s -m venv %s" % (self.py3_python_path, self.py3_venv_path),
+ error_list=VirtualenvErrorList,
+ halt_on_failure=True,
+ env=self.query_env(),
+ )
+
+ @py3_venv_initialized
+ def py3_install_modules(self, modules, use_mozharness_pip_config=True):
+ if not os.path.exists(self.py3_venv_path):
+ raise Exception("You need to call py3_create_venv() first.")
+
+ for m in modules:
+ cmd = [self.py3_pip_path, "install"]
+ if use_mozharness_pip_config:
+ cmd += self._mozharness_pip_args()
+ cmd += [m]
+ self.run_command(cmd, env=self.query_env())
+
+ def _mozharness_pip_args(self):
+ """We have information in Mozharness configs that apply to pip"""
+ c = self.config
+ pip_args = []
+ # To avoid timeouts with our pypi server, increase default timeout:
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=1007230#c802
+ pip_args += ["--timeout", str(c.get("pip_timeout", 120))]
+
+ if c.get("find_links") and not c["pip_index"]:
+ pip_args += ["--no-index"]
+
+ # Add --find-links pages to look at. Add --trusted-host automatically if
+ # the host isn't secure. This allows modern versions of pip to connect
+ # without requiring an override.
+ trusted_hosts = set()
+ for link in c.get("find_links", []):
+ parsed = urlparse.urlparse(link)
+
+ try:
+ socket.gethostbyname(parsed.hostname)
+ except socket.gaierror as e:
+ self.info("error resolving %s (ignoring): %s" % (parsed.hostname, e))
+ continue
+
+ pip_args += ["--find-links", link]
+ if parsed.scheme != "https":
+ trusted_hosts.add(parsed.hostname)
+
+ for host in sorted(trusted_hosts):
+ pip_args += ["--trusted-host", host]
+
+ return pip_args
+
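+ # Example output (illustrative): with the default pip_timeout, pip_index
+ # False, and one insecure mirror http://pypi.example.test/, this returns
+ #     ['--timeout', '120', '--no-index',
+ #      '--find-links', 'http://pypi.example.test/',
+ #      '--trusted-host', 'pypi.example.test'].
+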
+ @py3_venv_initialized
+ def py3_install_requirement_files(
+ self, requirements, pip_args=[], use_mozharness_pip_config=True
+ ):
+ """
+ requirements - You can specify multiple requirements paths
+ """
+ cmd = [self.py3_pip_path, "install"]
+ cmd += pip_args
+
+ if use_mozharness_pip_config:
+ cmd += self._mozharness_pip_args()
+
+ for requirement_path in requirements:
+ cmd += ["-r", requirement_path]
+
+ self.run_command(cmd, env=self.query_env())
+
+
+# __main__ {{{1
+
+if __name__ == "__main__":
+ """TODO: unit tests."""
+ pass
diff --git a/testing/mozharness/mozharness/base/script.py b/testing/mozharness/mozharness/base/script.py
new file mode 100644
index 0000000000..7e6ac7c92f
--- /dev/null
+++ b/testing/mozharness/mozharness/base/script.py
@@ -0,0 +1,2513 @@
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Generic script objects.
+
+script.py, along with config.py and log.py, represents the core of
+mozharness.
+"""
+
+import codecs
+import datetime
+import errno
+import fnmatch
+import functools
+import gzip
+import hashlib
+import inspect
+import itertools
+import os
+import platform
+import pprint
+import re
+import shutil
+import socket
+import ssl
+import subprocess
+import sys
+import tarfile
+import time
+import traceback
+import zipfile
+import zlib
+from contextlib import contextmanager
+from io import BytesIO
+
+import mozinfo
+import six
+from mozharness.base.config import BaseConfig
+from mozharness.base.log import (
+ DEBUG,
+ ERROR,
+ FATAL,
+ INFO,
+ WARNING,
+ ConsoleLogger,
+ LogMixin,
+ MultiFileLogger,
+ OutputParser,
+ SimpleFileLogger,
+)
+from mozprocess import ProcessHandler
+from six import binary_type
+
+try:
+ import httplib
+except ImportError:
+ import http.client as httplib
+try:
+ import simplejson as json
+except ImportError:
+ import json
+try:
+ from urllib2 import Request, quote, urlopen
+except ImportError:
+ from urllib.request import Request, quote, urlopen
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+if os.name == "nt":
+ import locale
+
+ try:
+ import win32api
+ import win32file
+
+ PYWIN32 = True
+ except ImportError:
+ PYWIN32 = False
+
+try:
+ from urllib2 import HTTPError, URLError
+except ImportError:
+ from urllib.error import HTTPError, URLError
+
+
+class ContentLengthMismatch(Exception):
+ pass
+
+
+def platform_name():
+ pm = PlatformMixin()
+
+ if pm._is_linux() and pm._is_64_bit():
+ return "linux64"
+ elif pm._is_linux() and not pm._is_64_bit():
+ return "linux"
+ elif pm._is_darwin():
+ return "macosx"
+ elif pm._is_windows() and pm._is_64_bit():
+ return "win64"
+ elif pm._is_windows() and not pm._is_64_bit():
+ return "win32"
+ else:
+ return None
+
+
+class PlatformMixin(object):
+ def _is_windows(self):
+ """check if the current operating system is Windows.
+
+ Returns:
+ bool: True if the current platform is Windows, False otherwise
+ """
+ system = platform.system()
+ if system in ("Windows", "Microsoft"):
+ return True
+ if system.startswith("CYGWIN"):
+ return True
+ if os.name == "nt":
+ return True
+ return False
+
+ def _is_darwin(self):
+ """check if the current operating system is Darwin.
+
+ Returns:
+ bool: True if the current platform is Darwin, False otherwise
+ """
+ if platform.system() == "Darwin":
+ return True
+ if sys.platform.startswith("darwin"):
+ return True
+ return False
+
+ def _is_linux(self):
+ """check if the current operating system is a Linux distribution.
+
+ Returns:
+ bool: True if the current platform is a Linux distro, False otherwise
+ """
+ if platform.system() == "Linux":
+ return True
+ if sys.platform.startswith("linux"):
+ return True
+ return False
+
+ def _is_debian(self):
+ """check if the current operating system is explicitly Debian.
+ This intentionally doesn't count Debian derivatives like Ubuntu.
+
+ Returns:
+ bool: True if the current platform is debian, False otherwise
+ """
+ if not self._is_linux():
+ return False
+ self.info(mozinfo.linux_distro)
+ re_debian_distro = re.compile("debian")
+ return re_debian_distro.match(mozinfo.linux_distro) is not None
+
+ def _is_redhat_based(self):
+ """check if the current operating system is a Redhat derived Linux distribution.
+
+ Returns:
+ bool: True if the current platform is a Redhat Linux distro, False otherwise
+ """
+ if not self._is_linux():
+ return False
+ re_redhat_distro = re.compile("Redhat|Fedora|CentOS|Oracle")
+ return re_redhat_distro.match(mozinfo.linux_distro) is not None
+
+ def _is_64_bit(self):
+ if self._is_darwin():
+ # On macOS, checking sys.maxsize is the reliable way to detect a
+ # 64-bit interpreter; see the link below for context.
+ return (
+ sys.maxsize > 2 ** 32
+ ) # context: https://docs.python.org/2/library/platform.html
+ else:
+ # Using machine() gives you the architecture of the host rather
+ # than the build type of the Python binary
+ return "64" in platform.machine()
+
+
+# ScriptMixin {{{1
+class ScriptMixin(PlatformMixin):
+ """This mixin contains simple filesystem commands and the like.
+
+ It also contains some very special but very complex methods that,
+ together with logging and config, provide the base for all scripts
+ in this harness.
+
+ WARNING !!!
+ This class depends entirely on `LogMixin` methods in such a way that it will
+ only work if a class inherits from both `ScriptMixin` and `LogMixin`
+ simultaneously.
+
+ Depends on self.config of some sort.
+
+ Attributes:
+ env (dict): a mapping object representing the string environment.
+ script_obj (ScriptMixin): reference to a ScriptMixin instance.
+ """
+
+ env = None
+ script_obj = None
+ ssl_context = None
+
+ def query_filesize(self, file_path):
+ self.info("Determining filesize for %s" % file_path)
+ length = os.path.getsize(file_path)
+ self.info(" %s" % str(length))
+ return length
+
+ # TODO this should be parallelized with the to-be-written BaseHelper!
+ def query_sha512sum(self, file_path):
+ self.info("Determining sha512sum for %s" % file_path)
+ m = hashlib.sha512()
+ contents = self.read_from_file(file_path, verbose=False, open_mode="rb")
+ m.update(contents)
+ sha512 = m.hexdigest()
+ self.info(" %s" % sha512)
+ return sha512
+
+ def platform_name(self):
+ """Return the platform name on which the script is running on.
+ Returns:
+ None: for failure to determine the platform.
+ str: The name of the platform (e.g. linux64)
+ """
+ return platform_name()
+
+ # Simple filesystem commands {{{2
+ def mkdir_p(self, path, error_level=ERROR):
+ """Create a directory if it doesn't exists.
+ This method also logs the creation, error or current existence of the
+ directory to be created.
+
+ Args:
+ path (str): path of the directory to be created.
+ error_level (str): log level name to be used in case of error.
+
+ Returns:
+ None: for success.
+ int: -1 on error
+ """
+
+ if not os.path.exists(path):
+ self.info("mkdir: %s" % path)
+ try:
+ os.makedirs(path)
+ except OSError:
+ self.log("Can't create directory %s!" % path, level=error_level)
+ return -1
+ else:
+ self.debug("mkdir_p: %s Already exists." % path)
+
+ def rmtree(self, path, log_level=INFO, error_level=ERROR, exit_code=-1):
+ """Delete an entire directory tree and log its result.
+ This method also logs the platform rmtree function, its retries, errors,
+ and current existence of the directory.
+
+ Args:
+ path (str): path to the directory tree root to remove.
+ log_level (str, optional): log level name to for this operation. Defaults
+ to `INFO`.
+ error_level (str, optional): log level name to use in case of error.
+ Defaults to `ERROR`.
+ exit_code (int, optional): unused parameter.
+ Defaults to -1
+
+ Returns:
+ None: for success
+ """
+
+ self.log("rmtree: %s" % path, level=log_level)
+ error_message = "Unable to remove %s!" % path
+ if self._is_windows():
+ # Call _rmtree_windows() directly, since even checking
+ # os.path.exists(path) will hang if path is longer than MAX_PATH.
+ self.info("Using _rmtree_windows ...")
+ return self.retry(
+ self._rmtree_windows,
+ error_level=error_level,
+ error_message=error_message,
+ args=(path,),
+ log_level=log_level,
+ )
+ if os.path.exists(path):
+ if os.path.isdir(path):
+ return self.retry(
+ shutil.rmtree,
+ error_level=error_level,
+ error_message=error_message,
+ retry_exceptions=(OSError,),
+ args=(path,),
+ log_level=log_level,
+ )
+ else:
+ return self.retry(
+ os.remove,
+ error_level=error_level,
+ error_message=error_message,
+ retry_exceptions=(OSError,),
+ args=(path,),
+ log_level=log_level,
+ )
+ else:
+ self.debug("%s doesn't exist." % path)
+
+ def query_msys_path(self, path):
+ """replaces the Windows harddrive letter path style with a linux
+ path style, e.g. C:// --> /C/
+ Note: method, not used in any script.
+
+ Args:
+ path (str?): path to convert to the linux path style.
+ Returns:
+ str: in case `path` is a string. The result is the path with the new notation.
+ type(path): `path` itself is returned in case `path` is not str type.
+ """
+ if not isinstance(path, six.string_types):
+ return path
+ path = path.replace("\\", "/")
+
+ def repl(m):
+ return "/%s/" % m.group(1)
+
+ path = re.sub(r"""^([a-zA-Z]):/""", repl, path)
+ return path
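+
+ # Example (illustrative):
+ #     query_msys_path("C:\\mozilla-build")  # -> "/C/mozilla-build"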
+
+ def _rmtree_windows(self, path):
+ """Windows-specific rmtree that handles path lengths longer than MAX_PATH.
+ Ported from clobberer.py.
+
+ Args:
+ path (str): directory path to remove.
+
+ Returns:
+ None: if the path doesn't exist.
+ int: the return code of calling `self.run_command`
+ int: in case the path specified is not a directory but a file.
+ 0 on success, non-zero on error. Note: The returned value
+ is the result of calling `win32file.DeleteFile`
+ """
+
+ assert self._is_windows()
+ path = os.path.realpath(path)
+ full_path = "\\\\?\\" + path
+ if not os.path.exists(full_path):
+ return
+ if not PYWIN32:
+ if not os.path.isdir(path):
+ return self.run_command('del /F /Q "%s"' % path)
+ else:
+ return self.run_command('rmdir /S /Q "%s"' % path)
+ # Make sure directory is writable
+ win32file.SetFileAttributesW("\\\\?\\" + path, win32file.FILE_ATTRIBUTE_NORMAL)
+ # rmtree() is sometimes called on a single file rather than a
+ # directory; handle that case directly.
+ if not os.path.isdir("\\\\?\\" + path):
+ return win32file.DeleteFile("\\\\?\\" + path)
+
+ for ffrec in win32api.FindFiles("\\\\?\\" + path + "\\*.*"):
+ file_attr = ffrec[0]
+ name = ffrec[8]
+ if name == "." or name == "..":
+ continue
+ full_name = os.path.join(path, name)
+
+ if file_attr & win32file.FILE_ATTRIBUTE_DIRECTORY:
+ self._rmtree_windows(full_name)
+ else:
+ try:
+ win32file.SetFileAttributesW(
+ "\\\\?\\" + full_name, win32file.FILE_ATTRIBUTE_NORMAL
+ )
+ win32file.DeleteFile("\\\\?\\" + full_name)
+ except Exception:
+ # DeleteFile fails on long paths, del /f /q works just fine
+ self.run_command('del /F /Q "%s"' % full_name)
+
+ win32file.RemoveDirectory("\\\\?\\" + path)
+
+ def get_filename_from_url(self, url):
+ """parse a filename base on an url.
+
+ Args:
+ url (str): url to parse for the filename
+
+ Returns:
+ str: filename parsed from the url, or `netloc` network location part
+ of the url.
+ """
+
+ parsed = urlparse.urlsplit(url.rstrip("/"))
+ if parsed.path != "":
+ return parsed.path.rsplit("/", 1)[-1]
+ else:
+ return parsed.netloc
+
+ def _urlopen(self, url, **kwargs):
+ """open the url `url` using `urllib2`.`
+ This method can be overwritten to extend its complexity
+
+ Args:
+ url (str | urllib.request.Request): url to open
+ kwargs: Arbitrary keyword arguments passed to the `urllib.request.urlopen` function.
+
+ Returns:
+ file-like: file-like object with additional methods as defined in
+ `urllib.request.urlopen`_.
+ None: None may be returned if no handler handles the request.
+
+ Raises:
+ urllib2.URLError: on errors
+
+ .. urllib.request.urlopen:
+ https://docs.python.org/2/library/urllib2.html#urllib2.urlopen
+ """
+ # http://bugs.python.org/issue13359 - urllib2 does not automatically quote the URL
+ url_quoted = quote(url, safe="%/:=&?~#+!$,;'@()*[]|")
+        # windows certificates need to be refreshed (https://bugs.python.org/issue36011)
+        # Note: platform.architecture() reports "64bit" (not "x64") on 64-bit Windows.
+        if self.platform_name() in ("win64",) and platform.architecture()[0] in (
+            "64bit",
+        ):
+ if self.ssl_context is None:
+ self.ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS)
+ self.ssl_context.load_default_certs()
+ return urlopen(url_quoted, context=self.ssl_context, **kwargs)
+ else:
+ return urlopen(url_quoted, **kwargs)
+
+ def fetch_url_into_memory(self, url):
+ """Downloads a file from a url into memory instead of disk.
+
+ Args:
+ url (str): URL path where the file to be downloaded is located.
+
+ Raises:
+ IOError: When the url points to a file on disk and cannot be found
+ ContentLengthMismatch: When the length of the retrieved content does not match the
+ Content-Length response header.
+ ValueError: When the scheme of a url is not what is expected.
+
+ Returns:
+ BytesIO: contents of url
+ """
+ self.info("Fetch {} into memory".format(url))
+ parsed_url = urlparse.urlparse(url)
+
+ if parsed_url.scheme in ("", "file"):
+ path = parsed_url.path
+ if not os.path.isfile(path):
+ raise IOError("Could not find file to extract: {}".format(url))
+
+ content_length = os.stat(path).st_size
+
+            # In case we're referencing a file without file://
+ if parsed_url.scheme == "":
+ url = "file://%s" % os.path.abspath(url)
+ parsed_url = urlparse.urlparse(url)
+
+ request = Request(url)
+ # When calling fetch_url_into_memory() you should retry when we raise
+ # one of these exceptions:
+ # * Bug 1300663 - HTTPError: HTTP Error 404: Not Found
+ # * Bug 1300413 - HTTPError: HTTP Error 500: Internal Server Error
+ # * Bug 1300943 - HTTPError: HTTP Error 503: Service Unavailable
+ # * Bug 1300953 - URLError: <urlopen error [Errno -2] Name or service not known>
+ # * Bug 1301594 - URLError: <urlopen error [Errno 10054] An existing connection was ...
+ # * Bug 1301597 - URLError: <urlopen error [Errno 8] _ssl.c:504: EOF occurred in ...
+ # * Bug 1301855 - URLError: <urlopen error [Errno 60] Operation timed out>
+ # * Bug 1302237 - URLError: <urlopen error [Errno 104] Connection reset by peer>
+ # * Bug 1301807 - BadStatusLine: ''
+ #
+ # Bug 1309912 - Adding timeout in hopes to solve blocking on response.read() (bug 1300413)
+ response = urlopen(request, timeout=30)
+
+ if parsed_url.scheme in ("http", "https"):
+ content_length = int(response.headers.get("Content-Length"))
+
+ response_body = response.read()
+ response_body_size = len(response_body)
+
+ self.info("Content-Length response header: {}".format(content_length))
+ self.info("Bytes received: {}".format(response_body_size))
+
+ if response_body_size != content_length:
+ raise ContentLengthMismatch(
+ "The retrieved Content-Length header declares a body length "
+ "of {} bytes, while we actually retrieved {} bytes".format(
+ content_length, response_body_size
+ )
+ )
+
+ if response.info().get("Content-Encoding") == "gzip":
+ self.info('Content-Encoding is "gzip", so decompressing response body')
+ # See http://www.zlib.net/manual.html#Advanced
+ # section "ZEXTERN int ZEXPORT inflateInit2 OF....":
+ # Add 32 to windowBits to enable zlib and gzip decoding with automatic
+ # header detection, or add 16 to decode only the gzip format (the zlib
+ # format will return a Z_DATA_ERROR).
+ # Adding 16 since we only wish to support gzip encoding.
+ file_contents = zlib.decompress(response_body, zlib.MAX_WBITS | 16)
+ else:
+ file_contents = response_body
+
+ # Use BytesIO instead of StringIO
+ # http://stackoverflow.com/questions/34162017/unzip-buffer-with-python/34162395#34162395
+ return BytesIO(file_contents)
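+
+    # Illustrative sketch (hypothetical URL): the returned BytesIO can be fed
+    # straight to zipfile/tarfile without touching disk, e.g.:
+    #   body = self.fetch_url_into_memory("https://example.com/tests.zip")
+    #   with zipfile.ZipFile(body) as bundle:
+    #       bundle.extractall(path=".")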
+
+ def _download_file(self, url, file_name):
+ """Helper function for download_file()
+ Additionaly this function logs all exceptions as warnings before
+ re-raising them
+
+ Args:
+ url (str): string containing the URL with the file location
+ file_name (str): name of the file where the downloaded file
+ is written.
+
+ Returns:
+ str: filename of the written file on disk
+
+ Raises:
+ urllib2.URLError: on incomplete download.
+            urllib2.HTTPError: on HTTP error code
+ socket.timeout: on connection timeout
+ socket.error: on socket error
+ """
+ # If our URLs look like files, prefix them with file:// so they can
+ # be loaded like URLs.
+ if not (url.startswith("http") or url.startswith("file://")):
+ if not os.path.isfile(url):
+ self.fatal("The file %s does not exist" % url)
+ url = "file://%s" % os.path.abspath(url)
+
+ try:
+ f_length = None
+ f = self._urlopen(url, timeout=30)
+
+ if f.info().get("content-length") is not None:
+ f_length = int(f.info()["content-length"])
+ got_length = 0
+ if f.info().get("Content-Encoding") == "gzip":
+ # Note, we'll download the full compressed content into its own
+ # file, since that allows the gzip library to seek through it.
+ # Once downloaded, we'll decompress it into the real target
+ # file, and delete the compressed version.
+ local_file = open(file_name + ".gz", "wb")
+ else:
+ local_file = open(file_name, "wb")
+ while True:
+ block = f.read(1024 ** 2)
+ if not block:
+ if f_length is not None and got_length != f_length:
+ raise URLError(
+ "Download incomplete; content-length was %d, "
+ "but only received %d" % (f_length, got_length)
+ )
+ break
+ local_file.write(block)
+ if f_length is not None:
+ got_length += len(block)
+ local_file.close()
+ if f.info().get("Content-Encoding") == "gzip":
+ # Decompress file into target location, then remove compressed version
+ with open(file_name, "wb") as f_out:
+ # On some execution paths, this could be called with python 2.6
+ # whereby gzip.open(...) cannot be used with a 'with' statement.
+ # So let's do this the python 2.6 way...
+ try:
+ f_in = gzip.open(file_name + ".gz", "rb")
+ shutil.copyfileobj(f_in, f_out)
+ finally:
+ f_in.close()
+ os.remove(file_name + ".gz")
+ return file_name
+ except HTTPError as e:
+ self.warning(
+ "Server returned status %s %s for %s" % (str(e.code), str(e), url)
+ )
+ raise
+ except URLError as e:
+ self.warning("URL Error: %s" % url)
+
+ # Failures due to missing local files won't benefit from retry.
+ # Raise the original OSError.
+ if isinstance(e.args[0], OSError) and e.args[0].errno == errno.ENOENT:
+ raise e.args[0]
+
+ raise
+ except socket.timeout as e:
+ self.warning("Timed out accessing %s: %s" % (url, str(e)))
+ raise
+ except socket.error as e:
+ self.warning("Socket error when accessing %s: %s" % (url, str(e)))
+ raise
+
+ def _retry_download(self, url, error_level, file_name=None, retry_config=None):
+ """Helper method to retry download methods.
+
+ This method calls `self.retry` on `self._download_file` using the passed
+ parameters if a file_name is specified. If no file is specified, we will
+ instead call `self._urlopen`, which grabs the contents of a url but does
+ not create a file on disk.
+
+ Args:
+ url (str): URL path where the file is located.
+ file_name (str): file_name where the file will be written to.
+ error_level (str): log level to use in case an error occurs.
+ retry_config (dict, optional): key-value pairs to be passed to
+ `self.retry`. Defaults to `None`
+
+ Returns:
+            str: `self._download_file`'s return value, on success.
+            unknown: on failure, `self.retry`'s `failure_status` is returned
+                (overridden here to default to `None`).
+ """
+ retry_args = dict(
+ failure_status=None,
+ retry_exceptions=(
+ HTTPError,
+ URLError,
+ httplib.HTTPException,
+ socket.timeout,
+ socket.error,
+ ),
+ error_message="Can't download from %s to %s!" % (url, file_name),
+ error_level=error_level,
+ )
+
+ if retry_config:
+ retry_args.update(retry_config)
+
+ download_func = self._urlopen
+ kwargs = {"url": url}
+ if file_name:
+ download_func = self._download_file
+ kwargs = {"url": url, "file_name": file_name}
+
+ return self.retry(download_func, kwargs=kwargs, **retry_args)
+
+ def _filter_entries(self, namelist, extract_dirs):
+ """Filter entries of the archive based on the specified list of to extract dirs."""
+ filter_partial = functools.partial(fnmatch.filter, namelist)
+ entries = itertools.chain(*map(filter_partial, extract_dirs or ["*"]))
+
+ for entry in entries:
+ yield entry
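+
+    # Illustrative example (hypothetical entry names): matching is fnmatch-style
+    # globbing, and a `None` extract_dirs falls back to the catch-all "*":
+    #   list(self._filter_entries(["bin/a", "doc/b"], ["bin/*"]))  # -> ["bin/a"]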
+
+ def unzip(self, compressed_file, extract_to, extract_dirs="*", verbose=False):
+ """This method allows to extract a zip file without writing to disk first.
+
+ Args:
+ compressed_file (object): File-like object with the contents of a compressed zip file.
+ extract_to (str): where to extract the compressed file.
+ extract_dirs (list, optional): directories inside the archive file to extract.
+ Defaults to '*'.
+ verbose (bool, optional): whether or not extracted content should be displayed.
+ Defaults to False.
+
+ Raises:
+ zipfile.BadZipfile: on contents of zipfile being invalid
+ """
+ with zipfile.ZipFile(compressed_file) as bundle:
+ entries = self._filter_entries(bundle.namelist(), extract_dirs)
+
+ for entry in entries:
+ if verbose:
+ self.info(" {}".format(entry))
+
+ # Exception to be retried:
+ # Bug 1301645 - BadZipfile: Bad CRC-32 for file ...
+ # http://stackoverflow.com/questions/5624669/strange-badzipfile-bad-crc-32-problem/5626098#5626098
+ # Bug 1301802 - error: Error -3 while decompressing: invalid stored block lengths
+ bundle.extract(entry, path=extract_to)
+
+ # ZipFile doesn't preserve permissions during extraction:
+ # http://bugs.python.org/issue15795
+ fname = os.path.realpath(os.path.join(extract_to, entry))
+ try:
+ # getinfo() can raise KeyError
+ mode = bundle.getinfo(entry).external_attr >> 16 & 0x1FF
+ # Only set permissions if attributes are available. Otherwise all
+ # permissions will be removed eg. on Windows.
+ if mode:
+ os.chmod(fname, mode)
+
+ except KeyError:
+ self.warning("{} was not found in the zip file".format(entry))
+
+ def deflate(self, compressed_file, mode, extract_to=".", *args, **kwargs):
+ """This method allows to extract a compressed file from a tar, tar.bz2 and tar.gz files.
+
+ Args:
+ compressed_file (object): File-like object with the contents of a compressed file.
+ mode (str): string of the form 'filemode[:compression]' (e.g. 'r:gz' or 'r:bz2')
+ extract_to (str, optional): where to extract the compressed file.
+ """
+        with tarfile.open(fileobj=compressed_file, mode=mode) as t:
+            t.extractall(path=extract_to)
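+
+    # Illustrative usage (assuming `body` is a file-like object): the mode
+    # string selects the compression, mirroring tarfile.open(), e.g.:
+    #   self.deflate(body, mode="r:gz")   # .tar.gz
+    #   self.deflate(body, mode="r:bz2")  # .tar.bz2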
+
+ def download_unpack(self, url, extract_to=".", extract_dirs="*", verbose=False):
+ """Generic method to download and extract a compressed file without writing it
+ to disk first.
+
+ Args:
+ url (str): URL where the file to be downloaded is located.
+ extract_to (str, optional): directory where the downloaded file will
+ be extracted to.
+ extract_dirs (list, optional): directories inside the archive to extract.
+ Defaults to `*`. It currently only applies to zip files.
+ verbose (bool, optional): whether or not extracted content should be displayed.
+ Defaults to False.
+
+ """
+
+ def _determine_extraction_method_and_kwargs(url):
+ EXTENSION_TO_MIMETYPE = {
+ "bz2": "application/x-bzip2",
+ "gz": "application/x-gzip",
+ "tar": "application/x-tar",
+ "zip": "application/zip",
+ }
+ MIMETYPES = {
+ "application/x-bzip2": {
+ "function": self.deflate,
+ "kwargs": {"mode": "r:bz2"},
+ },
+ "application/x-gzip": {
+ "function": self.deflate,
+ "kwargs": {"mode": "r:gz"},
+ },
+ "application/x-tar": {
+ "function": self.deflate,
+ "kwargs": {"mode": "r"},
+ },
+ "application/zip": {
+ "function": self.unzip,
+ },
+ "application/x-zip-compressed": {
+ "function": self.unzip,
+ },
+ }
+
+ filename = url.split("/")[-1]
+ # XXX: bz2/gz instead of tar.{bz2/gz}
+ extension = filename[filename.rfind(".") + 1 :]
+ mimetype = EXTENSION_TO_MIMETYPE[extension]
+ self.debug("Mimetype: {}".format(mimetype))
+
+ function = MIMETYPES[mimetype]["function"]
+ kwargs = {
+ "compressed_file": compressed_file,
+ "extract_to": extract_to,
+ "extract_dirs": extract_dirs,
+ "verbose": verbose,
+ }
+ kwargs.update(MIMETYPES[mimetype].get("kwargs", {}))
+
+ return function, kwargs
+
+ # Many scripts overwrite this method and set extract_dirs to None
+ extract_dirs = "*" if extract_dirs is None else extract_dirs
+ self.info(
+ "Downloading and extracting to {} these dirs {} from {}".format(
+ extract_to,
+ ", ".join(extract_dirs),
+ url,
+ )
+ )
+
+ # 1) Let's fetch the file
+ retry_args = dict(
+ retry_exceptions=(
+ HTTPError,
+ URLError,
+ httplib.HTTPException,
+ socket.timeout,
+ socket.error,
+ ContentLengthMismatch,
+ ),
+ sleeptime=30,
+ attempts=5,
+ error_message="Can't download from {}".format(url),
+ error_level=FATAL,
+ )
+ compressed_file = self.retry(
+ self.fetch_url_into_memory, kwargs={"url": url}, **retry_args
+ )
+
+        # 2) With error_level=FATAL we're guaranteed to have downloaded the file.
+        # Let's unpack it.
+ function, kwargs = _determine_extraction_method_and_kwargs(url)
+ try:
+ function(**kwargs)
+ except zipfile.BadZipfile:
+ # Dump the exception and exit
+ self.exception(level=FATAL)
+
+ def load_json_url(self, url, error_level=None, *args, **kwargs):
+ """Returns a json object from a url (it retries)."""
+ contents = self._retry_download(
+ url=url, error_level=error_level, *args, **kwargs
+ )
+ return json.loads(contents.read())
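+
+    # Illustrative example (hypothetical URL):
+    #   manifest = self.load_json_url("https://example.com/manifest.json")
+    # `manifest` is then whatever json.loads() decodes (usually a dict or list).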
+
+ # http://www.techniqal.com/blog/2008/07/31/python-file-read-write-with-urllib2/
+ # TODO thinking about creating a transfer object.
+ def download_file(
+ self,
+ url,
+ file_name=None,
+ parent_dir=None,
+ create_parent_dir=True,
+ error_level=ERROR,
+ exit_code=3,
+ retry_config=None,
+ ):
+ """Python wget.
+        Download the file at `url` into `file_name` under `parent_dir`.
+        On error, log at the specified `error_level`; on fatal errors, exit
+        with `exit_code`. Retries are controlled by the `retry_config` parameter.
+
+ Args:
+ url (str): URL path where the file to be downloaded is located.
+ file_name (str, optional): file_name where the file will be written to.
+                                       Defaults to the URL's filename.
+ parent_dir (str, optional): directory where the downloaded file will
+ be written to. Defaults to current working
+ directory
+ create_parent_dir (bool, optional): create the parent directory if it
+ doesn't exist. Defaults to `True`
+ error_level (str, optional): log level to use in case an error occurs.
+ Defaults to `ERROR`
+ retry_config (dict, optional): key-value pairs to be passed to
+ `self.retry`. Defaults to `None`
+
+ Returns:
+ str: filename where the downloaded file was written to.
+ unknown: on failure, `failure_status` is returned.
+ """
+ if not file_name:
+ try:
+ file_name = self.get_filename_from_url(url)
+ except AttributeError:
+ self.log(
+ "Unable to get filename from %s; bad url?" % url,
+ level=error_level,
+ exit_code=exit_code,
+ )
+ return
+ if parent_dir:
+ file_name = os.path.join(parent_dir, file_name)
+ if create_parent_dir:
+ self.mkdir_p(parent_dir, error_level=error_level)
+ self.info("Downloading %s to %s" % (url, file_name))
+ status = self._retry_download(
+ url=url,
+ error_level=error_level,
+ file_name=file_name,
+ retry_config=retry_config,
+ )
+ if status == file_name:
+ self.info("Downloaded %d bytes." % os.path.getsize(file_name))
+ return status
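+
+    # Illustrative example (hypothetical URL): download into the current
+    # working directory, with the default retry behavior:
+    #   path = self.download_file("https://example.com/build.zip")
+    # On success `path` is "build.zip"; on failure the retry failure_status
+    # (None here, unless overridden via retry_config) is returned.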
+
+ def move(self, src, dest, log_level=INFO, error_level=ERROR, exit_code=-1):
+ """recursively move a file or directory (src) to another location (dest).
+
+ Args:
+ src (str): file or directory path to move.
+ dest (str): file or directory path where to move the content to.
+ log_level (str): log level to use for normal operation. Defaults to
+ `INFO`
+ error_level (str): log level to use on error. Defaults to `ERROR`
+
+ Returns:
+ int: 0 on success. -1 on error.
+ """
+ self.log("Moving %s to %s" % (src, dest), level=log_level)
+ try:
+ shutil.move(src, dest)
+ # http://docs.python.org/tutorial/errors.html
+ except IOError as e:
+ self.log("IO error: %s" % str(e), level=error_level, exit_code=exit_code)
+ return -1
+ except shutil.Error as e:
+ # ERROR level ends up reporting the failure to treeherder &
+ # pollutes the failure summary list.
+ self.log("shutil error: %s" % str(e), level=WARNING, exit_code=exit_code)
+ return -1
+ return 0
+
+ def chmod(self, path, mode):
+ """change `path` mode to `mode`.
+
+ Args:
+            path (str): path whose mode will be modified.
+            mode (int): permission bits to apply, as accepted by `os.chmod`_
+
+        .. _os.chmod:
+            https://docs.python.org/2/library/os.html#os.chmod
+ """
+
+ self.info("Chmoding %s to %s" % (path, str(oct(mode))))
+ os.chmod(path, mode)
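+
+    # Illustrative example (hypothetical path): modes are plain os.chmod()
+    # permission bits, so octal literals or stat constants both work:
+    #   self.chmod("bin/run-tests.sh", 0o755)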
+
+ def copyfile(
+ self,
+ src,
+ dest,
+ log_level=INFO,
+ error_level=ERROR,
+ copystat=False,
+ compress=False,
+ ):
+ """copy or compress `src` into `dest`.
+
+ Args:
+ src (str): filepath to copy.
+ dest (str): filepath where to move the content to.
+ log_level (str, optional): log level to use for normal operation. Defaults to
+ `INFO`
+ error_level (str, optional): log level to use on error. Defaults to `ERROR`
+ copystat (bool, optional): whether or not to copy the files metadata.
+ Defaults to `False`.
+ compress (bool, optional): whether or not to compress the destination file.
+ Defaults to `False`.
+
+ Returns:
+ int: -1 on error
+ None: on success
+ """
+
+ if compress:
+ self.log("Compressing %s to %s" % (src, dest), level=log_level)
+ try:
+ infile = open(src, "rb")
+ outfile = gzip.open(dest, "wb")
+ outfile.writelines(infile)
+ outfile.close()
+ infile.close()
+ except IOError as e:
+ self.log(
+ "Can't compress %s to %s: %s!" % (src, dest, str(e)),
+ level=error_level,
+ )
+ return -1
+ else:
+ self.log("Copying %s to %s" % (src, dest), level=log_level)
+ try:
+ shutil.copyfile(src, dest)
+ except (IOError, shutil.Error) as e:
+ self.log(
+ "Can't copy %s to %s: %s!" % (src, dest, str(e)), level=error_level
+ )
+ return -1
+
+ if copystat:
+ try:
+ shutil.copystat(src, dest)
+ except (IOError, shutil.Error) as e:
+ self.log(
+ "Can't copy attributes of %s to %s: %s!" % (src, dest, str(e)),
+ level=error_level,
+ )
+ return -1
+
+ def copytree(
+ self, src, dest, overwrite="no_overwrite", log_level=INFO, error_level=ERROR
+ ):
+ """An implementation of `shutil.copytree` that allows for `dest` to exist
+ and implements different overwrite levels:
+        - 'no_overwrite' will keep any existing files in the destination tree
+        - 'overwrite_if_exists' will only overwrite destination paths that have
+                                the same path names relative to the root of the
+                                src and destination tree
+        - 'clobber' will replace the whole destination tree if it exists
+
+ Args:
+ src (str): directory path to move.
+ dest (str): directory path where to move the content to.
+ overwrite (str): string specifying the overwrite level.
+ log_level (str, optional): log level to use for normal operation. Defaults to
+ `INFO`
+ error_level (str, optional): log level to use on error. Defaults to `ERROR`
+
+ Returns:
+ int: -1 on error
+ None: on success
+ """
+
+ self.info("copying tree: %s to %s" % (src, dest))
+ try:
+ if overwrite == "clobber" or not os.path.exists(dest):
+ self.rmtree(dest)
+ shutil.copytree(src, dest)
+ elif overwrite == "no_overwrite" or overwrite == "overwrite_if_exists":
+ files = os.listdir(src)
+ for f in files:
+ abs_src_f = os.path.join(src, f)
+ abs_dest_f = os.path.join(dest, f)
+ if not os.path.exists(abs_dest_f):
+ if os.path.isdir(abs_src_f):
+ self.mkdir_p(abs_dest_f)
+ self.copytree(abs_src_f, abs_dest_f, overwrite="clobber")
+ else:
+ shutil.copy2(abs_src_f, abs_dest_f)
+ elif overwrite == "no_overwrite": # destination path exists
+ if os.path.isdir(abs_src_f) and os.path.isdir(abs_dest_f):
+ self.copytree(
+ abs_src_f, abs_dest_f, overwrite="no_overwrite"
+ )
+ else:
+ self.debug(
+ "ignoring path: %s as destination: \
+ %s exists"
+ % (abs_src_f, abs_dest_f)
+ )
+ else: # overwrite == 'overwrite_if_exists' and destination exists
+ self.debug("overwriting: %s with: %s" % (abs_dest_f, abs_src_f))
+ self.rmtree(abs_dest_f)
+
+ if os.path.isdir(abs_src_f):
+ self.mkdir_p(abs_dest_f)
+ self.copytree(
+ abs_src_f, abs_dest_f, overwrite="overwrite_if_exists"
+ )
+ else:
+ shutil.copy2(abs_src_f, abs_dest_f)
+ else:
+ self.fatal(
+ "%s is not a valid argument for param overwrite" % (overwrite)
+ )
+ except (IOError, shutil.Error):
+ self.exception(
+ "There was an error while copying %s to %s!" % (src, dest),
+ level=error_level,
+ )
+ return -1
+
+ def write_to_file(
+ self,
+ file_path,
+ contents,
+ verbose=True,
+ open_mode="w",
+ create_parent_dir=False,
+ error_level=ERROR,
+ ):
+ """Write `contents` to `file_path`, according to `open_mode`.
+
+ Args:
+ file_path (str): filepath where the content will be written to.
+ contents (str): content to write to the filepath.
+ verbose (bool, optional): whether or not to log `contents` value.
+ Defaults to `True`
+            open_mode (str, optional): open mode to use for opening the file.
+ Defaults to `w`
+ create_parent_dir (bool, optional): whether or not to create the
+ parent directory of `file_path`
+ error_level (str, optional): log level to use on error. Defaults to `ERROR`
+
+ Returns:
+ str: `file_path` on success
+ None: on error.
+ """
+ self.info("Writing to file %s" % file_path)
+ if verbose:
+ self.info("Contents:")
+ for line in contents.splitlines():
+ self.info(" %s" % line)
+ if create_parent_dir:
+ parent_dir = os.path.dirname(file_path)
+ self.mkdir_p(parent_dir, error_level=error_level)
+ try:
+            with open(file_path, open_mode) as fh:
+                try:
+                    fh.write(contents)
+                except UnicodeEncodeError:
+                    fh.write(contents.encode("utf-8", "replace"))
+            return file_path
+ except IOError:
+ self.log("%s can't be opened for writing!" % file_path, level=error_level)
+
+ @contextmanager
+ def opened(self, file_path, verbose=True, open_mode="r", error_level=ERROR):
+ """Create a context manager to use on a with statement.
+
+ Args:
+ file_path (str): filepath of the file to open.
+            verbose (bool, optional): unused parameter, kept for signature
+                                      compatibility. Defaults to True.
+            open_mode (str, optional): open mode to use for opening the file.
+ Defaults to `r`
+ error_level (str, optional): log level name to use on error.
+ Defaults to `ERROR`
+
+ Yields:
+ tuple: (file object, error) pair. In case of error `None` is yielded
+ as file object, together with the corresponding error.
+ If there is no error, `None` is returned as the error.
+ """
+ # See opened_w_error in http://www.python.org/dev/peps/pep-0343/
+ self.info("Reading from file %s" % file_path)
+ try:
+ fh = open(file_path, open_mode)
+ except IOError as err:
+ self.log(
+ "unable to open %s: %s" % (file_path, err.strerror), level=error_level
+ )
+ yield None, err
+ else:
+ try:
+ yield fh, None
+ finally:
+ fh.close()
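+
+    # Illustrative usage (hypothetical file): errors surface through the
+    # second element of the yielded pair instead of an exception:
+    #   with self.opened("application.ini") as (fh, err):
+    #       if err is None:
+    #           data = fh.read()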
+
+ def read_from_file(self, file_path, verbose=True, open_mode="r", error_level=ERROR):
+ """Use `self.opened` context manager to open a file and read its
+ content.
+
+ Args:
+ file_path (str): filepath of the file to read.
+ verbose (bool, optional): whether or not to log the file content.
+ Defaults to True.
+            open_mode (str, optional): open mode to use for opening the file.
+ Defaults to `r`
+ error_level (str, optional): log level name to use on error.
+ Defaults to `ERROR`
+
+ Returns:
+ None: on error.
+ str: file content on success.
+ """
+ with self.opened(file_path, verbose, open_mode, error_level) as (fh, err):
+ if err:
+ return None
+ contents = fh.read()
+ if verbose:
+ self.info("Contents:")
+ for line in contents.splitlines():
+ self.info(" %s" % line)
+ return contents
+
+ def chdir(self, dir_name):
+ self.log("Changing directory to %s." % dir_name)
+ os.chdir(dir_name)
+
+ def is_exe(self, fpath):
+ """
+ Determine if fpath is a file and if it is executable.
+ """
+ return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+ def which(self, program):
+ """OS independent implementation of Unix's which command
+
+ Args:
+ program (str): name or path to the program whose executable is
+ being searched.
+
+ Returns:
+ None: if the executable was not found.
+ str: filepath of the executable file.
+ """
+ if self._is_windows() and not program.endswith(".exe"):
+ program += ".exe"
+ fpath, fname = os.path.split(program)
+ if fpath:
+ if self.is_exe(program):
+ return program
+ else:
+ # If the exe file is defined in the configs let's use that
+ exe = self.query_exe(program)
+ if self.is_exe(exe):
+ return exe
+
+ # If not defined, let's look for it in the $PATH
+ env = self.query_env()
+ for path in env["PATH"].split(os.pathsep):
+ exe_file = os.path.join(path, program)
+ if self.is_exe(exe_file):
+ return exe_file
+ return None
+
+ # More complex commands {{{2
+ def retry(
+ self,
+ action,
+ attempts=None,
+ sleeptime=60,
+ max_sleeptime=5 * 60,
+ retry_exceptions=(Exception,),
+ good_statuses=None,
+ cleanup=None,
+ error_level=ERROR,
+ error_message="%(action)s failed after %(attempts)d tries!",
+ failure_status=-1,
+ log_level=INFO,
+ args=(),
+ kwargs={},
+ ):
+ """generic retry command. Ported from `util.retry`_
+
+ Args:
+ action (func): callable object to retry.
+            attempts (int, optional): maximum number of times to call `action`.
+                Defaults to `self.config.get('global_retries', 5)`
+            sleeptime (int, optional): number of seconds to wait between
+                attempts. Defaults to 60 and doubles each retry attempt, to
+                a maximum of `max_sleeptime`
+            max_sleeptime (int, optional): maximum value of sleeptime. Defaults
+                to 5 minutes
+            retry_exceptions (tuple, optional): Exceptions that should be caught.
+                If exceptions other than those listed in `retry_exceptions` are
+                raised from `action`, they will be raised immediately. Defaults
+                to (Exception,)
+ good_statuses (object, optional): return values which, if specified,
+ will result in retrying if the return value isn't listed.
+ Defaults to `None`.
+            cleanup (func, optional): If `cleanup` is provided and callable
+                it will be called immediately after an Exception is caught.
+                No arguments will be passed to it. If your cleanup function
+                requires arguments it is recommended that you wrap it in an
+                argumentless function.
+                Defaults to `None`.
+ error_level (str, optional): log level name in case of error.
+ Defaults to `ERROR`.
+            error_message (str, optional): string format to use in case
+                none of the attempts succeed. Defaults to
+                '%(action)s failed after %(attempts)d tries!'
+            failure_status (int, optional): value to return in case all the
+                retry attempts fail. Defaults to -1.
+ log_level (str, optional): log level name to use for normal activity.
+ Defaults to `INFO`.
+ args (tuple, optional): positional arguments to pass onto `action`.
+ kwargs (dict, optional): key-value arguments to pass onto `action`.
+
+        Returns:
+            object: return value of `action`.
+            int: `failure_status`, if all retry attempts fail.
+ """
+ if not callable(action):
+ self.fatal("retry() called with an uncallable method %s!" % action)
+ if cleanup and not callable(cleanup):
+ self.fatal("retry() called with an uncallable cleanup method %s!" % cleanup)
+ if not attempts:
+ attempts = self.config.get("global_retries", 5)
+ if max_sleeptime < sleeptime:
+ self.debug(
+ "max_sleeptime %d less than sleeptime %d" % (max_sleeptime, sleeptime)
+ )
+ n = 0
+ while n <= attempts:
+ retry = False
+ n += 1
+ try:
+ self.log(
+ "retry: Calling %s with args: %s, kwargs: %s, attempt #%d"
+ % (action.__name__, str(args), str(kwargs), n),
+ level=log_level,
+ )
+ status = action(*args, **kwargs)
+ if good_statuses and status not in good_statuses:
+ retry = True
+ except retry_exceptions as e:
+ retry = True
+ error_message = "%s\nCaught exception: %s" % (error_message, str(e))
+ self.log(
+ "retry: attempt #%d caught %s exception: %s"
+ % (n, type(e).__name__, str(e)),
+ level=INFO,
+ )
+
+ if not retry:
+ return status
+ else:
+ if cleanup:
+ cleanup()
+ if n == attempts:
+ self.log(
+ error_message % {"action": action, "attempts": n},
+ level=error_level,
+ )
+ return failure_status
+ if sleeptime > 0:
+ self.log(
+ "retry: Failed, sleeping %d seconds before retrying"
+ % sleeptime,
+ level=log_level,
+ )
+ time.sleep(sleeptime)
+ sleeptime = sleeptime * 2
+ if sleeptime > max_sleeptime:
+ sleeptime = max_sleeptime
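+
+    # Illustrative example (hypothetical arguments): retry a flaky download up
+    # to 3 times, doubling the sleep between attempts from 10 seconds:
+    #   self.retry(self._download_file, attempts=3, sleeptime=10,
+    #              retry_exceptions=(URLError,),
+    #              kwargs={"url": url, "file_name": "out.bin"})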
+
+ def query_env(
+ self,
+ partial_env=None,
+ replace_dict=None,
+ purge_env=(),
+ set_self_env=None,
+ log_level=DEBUG,
+ avoid_host_env=False,
+ ):
+ """Environment query/generation method.
+ The default, self.query_env(), will look for self.config['env']
+ and replace any special strings in there ( %(PATH)s ).
+ It will then store it as self.env for speeding things up later.
+
+ If you specify partial_env, partial_env will be used instead of
+ self.config['env'], and we don't save self.env as it's a one-off.
+
+
+ Args:
+ partial_env (dict, optional): key-value pairs of the name and value
+ of different environment variables. Defaults to an empty dictionary.
+ replace_dict (dict, optional): key-value pairs to replace the old
+ environment variables.
+ purge_env (list): environment names to delete from the final
+ environment dictionary.
+ set_self_env (boolean, optional): whether or not the environment
+ variables dictionary should be copied to `self`.
+ Defaults to True.
+ log_level (str, optional): log level name to use on normal operation.
+ Defaults to `DEBUG`.
+ avoid_host_env (boolean, optional): if set to True, we will not use
+ any environment variables set on the host except PATH.
+ Defaults to False.
+
+ Returns:
+ dict: environment variables names with their values.
+ """
+ if partial_env is None:
+ if self.env is not None:
+ return self.env
+ partial_env = self.config.get("env", None)
+ if partial_env is None:
+ partial_env = {}
+ if set_self_env is None:
+ set_self_env = True
+
+ env = {"PATH": os.environ["PATH"]} if avoid_host_env else os.environ.copy()
+
+ default_replace_dict = self.query_abs_dirs()
+ default_replace_dict["PATH"] = os.environ["PATH"]
+ if not replace_dict:
+ replace_dict = default_replace_dict
+ else:
+ for key in default_replace_dict:
+ if key not in replace_dict:
+ replace_dict[key] = default_replace_dict[key]
+ for key in partial_env.keys():
+ env[key] = partial_env[key] % replace_dict
+ self.log("ENV: %s is now %s" % (key, env[key]), level=log_level)
+ for k in purge_env:
+ if k in env:
+ del env[k]
+ if os.name == "nt":
+ pref_encoding = locale.getpreferredencoding()
+ for k, v in six.iteritems(env):
+ # When run locally on Windows machines, some environment
+ # variables may be unicode.
+ env[k] = six.ensure_str(v, pref_encoding)
+ if set_self_env:
+ self.env = env
+ return env
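+
+    # Illustrative example (hypothetical values): %()s placeholders in
+    # partial_env are expanded from replace_dict / query_abs_dirs(), so
+    #   self.query_env(partial_env={"PATH": "%(abs_work_dir)s/bin:%(PATH)s"})
+    # prepends the work dir's bin/ to the inherited PATH.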
+
+ def query_exe(
+ self,
+ exe_name,
+ exe_dict="exes",
+ default=None,
+ return_type=None,
+ error_level=FATAL,
+ ):
+ """One way to work around PATH rewrites.
+
+ By default, return exe_name, and we'll fall through to searching
+ os.environ["PATH"].
+ However, if self.config[exe_dict][exe_name] exists, return that.
+ This lets us override exe paths via config file.
+
+ If we need runtime setting, we can build in self.exes support later.
+
+ Args:
+ exe_name (str): name of the executable to search for.
+            exe_dict (str, optional): name of the dictionary of executables
+                present in `self.config`. Defaults to `exes`.
+ default (str, optional): default name of the executable to search
+ for. Defaults to `exe_name`.
+            return_type (str, optional): type into which the original return
+                value will be turned. Only 'list', 'string' and `None` are
+                supported. Defaults to `None`.
+ error_level (str, optional): log level name to use on error.
+
+ Returns:
+ list: in case return_type is 'list'
+ str: in case return_type is 'string'
+ None: in case return_type is `None`
+            Any: if the found executable is not a list, tuple, or str.
+ """
+ if default is None:
+ default = exe_name
+ exe = self.config.get(exe_dict, {}).get(exe_name, default)
+ repl_dict = {}
+ if hasattr(self.script_obj, "query_abs_dirs"):
+ # allow for 'make': '%(abs_work_dir)s/...' etc.
+ dirs = self.script_obj.query_abs_dirs()
+ repl_dict.update(dirs)
+ if isinstance(exe, dict):
+ found = False
+ # allow for searchable paths of the exe
+ for name, path in six.iteritems(exe):
+ if isinstance(path, list) or isinstance(path, tuple):
+ path = [x % repl_dict for x in path]
+ if all([os.path.exists(section) for section in path]):
+ found = True
+ elif isinstance(path, str):
+ path = path % repl_dict
+ if os.path.exists(path):
+ found = True
+ else:
+ self.log(
+ "a exes %s dict's value is not a string, list, or tuple. Got key "
+ "%s and value %s" % (exe_name, name, str(path)),
+ level=error_level,
+ )
+ if found:
+ exe = path
+ break
+ else:
+ self.log(
+ "query_exe was a searchable dict but an existing "
+ "path could not be determined. Tried searching in "
+ "paths: %s" % (str(exe)),
+ level=error_level,
+ )
+ return None
+ elif isinstance(exe, list) or isinstance(exe, tuple):
+ exe = [x % repl_dict for x in exe]
+ elif isinstance(exe, str):
+ exe = exe % repl_dict
+ else:
+ self.log(
+ "query_exe: %s is not a list, tuple, dict, or string: "
+ "%s!" % (exe_name, str(exe)),
+ level=error_level,
+ )
+ return exe
+ if return_type == "list":
+ if isinstance(exe, str):
+ exe = [exe]
+ elif return_type == "string":
+ if isinstance(exe, list):
+ exe = subprocess.list2cmdline(exe)
+ elif return_type is not None:
+ self.log(
+ "Unknown return_type type %s requested in query_exe!" % return_type,
+ level=error_level,
+ )
+ return exe
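+
+    # Illustrative config snippet (hypothetical paths): overriding exe lookups,
+    # optionally with %(abs_work_dir)s-style substitution:
+    #   config = {"exes": {"python": "/usr/bin/python3",
+    #                      "make": ["%(abs_work_dir)s/bin/make", "-j4"]}}
+    # self.query_exe("make", return_type="string") then yields the substituted
+    # command as a single shell-quoted string.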
+
+ def run_command(
+ self,
+ command,
+ cwd=None,
+ error_list=None,
+ halt_on_failure=False,
+ success_codes=None,
+ env=None,
+ partial_env=None,
+ return_type="status",
+ throw_exception=False,
+ output_parser=None,
+ output_timeout=None,
+ fatal_exit_code=2,
+ error_level=ERROR,
+ **kwargs
+ ):
+ """Run a command, with logging and error parsing.
+ TODO: context_lines
+
+ error_list example:
+        [{'regex': re.compile('^Error: LOL J/K'), 'level': IGNORE},
+         {'regex': re.compile('^Error:'), 'level': ERROR, 'context_lines': '5:5'},
+         {'substr': 'THE WORLD IS ENDING', 'level': FATAL, 'context_lines': '20:'}
+        ]
+ (context_lines isn't written yet)
+
+ Args:
+ command (str | list | tuple): command or sequence of commands to
+ execute and log.
+ cwd (str, optional): directory path from where to execute the
+ command. Defaults to `None`.
+ error_list (list, optional): list of errors to pass to
+ `mozharness.base.log.OutputParser`. Defaults to `None`.
+ halt_on_failure (bool, optional): whether or not to redefine the
+ log level as `FATAL` on errors. Defaults to False.
+            success_codes (list, optional): return codes that count as success.
+                Defaults to [0].
+ env (dict, optional): key-value of environment values to use to
+ run the command. Defaults to None.
+ partial_env (dict, optional): key-value of environment values to
+ replace from the current environment values. Defaults to None.
+ return_type (str, optional): if equal to 'num_errors' then the
+ amount of errors matched by `error_list` is returned. Defaults
+ to 'status'.
+ throw_exception (bool, optional): whether or not to raise an
+ exception if the return value of the command doesn't match
+ any of the `success_codes`. Defaults to False.
+ output_parser (OutputParser, optional): lets you provide an
+ instance of your own OutputParser subclass. Defaults to `OutputParser`.
+ output_timeout (int): amount of seconds to wait for output before
+ the process is killed.
+            fatal_exit_code (int, optional): exit code to pass to `self.fatal`
+                when halting on failure. Defaults to 2.
+ error_level (str, optional): log level name to use on error. Defaults
+ to `ERROR`.
+ **kwargs: Arbitrary keyword arguments.
+
+        Returns:
+            int: -1 on error.
+            int: the command's return code otherwise, or the number of parsed
+                errors if `return_type` is 'num_errors'.
+ """
+ if success_codes is None:
+ success_codes = [0]
+ if cwd is not None:
+ if not os.path.isdir(cwd):
+ level = error_level
+ if halt_on_failure:
+ level = FATAL
+ self.log(
+ "Can't run command %s in non-existent directory '%s'!"
+ % (command, cwd),
+ level=level,
+ )
+ return -1
+ self.info("Running command: %s in %s" % (command, cwd))
+ else:
+ self.info("Running command: %s" % (command,))
+        if isinstance(command, (list, tuple)):
+            self.info("Copy/paste: %s" % subprocess.list2cmdline(command))
+        shell = not isinstance(command, (list, tuple))
+ if env is None:
+ if partial_env:
+ self.info("Using partial env: %s" % pprint.pformat(partial_env))
+ env = self.query_env(partial_env=partial_env)
+ else:
+ if hasattr(self, "previous_env") and env == self.previous_env:
+ self.info("Using env: (same as previous command)")
+ else:
+ self.info("Using env: %s" % pprint.pformat(env))
+ self.previous_env = env
+
+ if output_parser is None:
+ parser = OutputParser(
+ config=self.config, log_obj=self.log_obj, error_list=error_list
+ )
+ else:
+ parser = output_parser
+
+ try:
+ if output_timeout:
+
+ def processOutput(line):
+ parser.add_lines(line)
+
+ def onTimeout():
+ self.info(
+ "Automation Error: mozprocess timed out after "
+ "%s seconds running %s" % (str(output_timeout), str(command))
+ )
+
+ p = ProcessHandler(
+ command,
+ shell=shell,
+ env=env,
+ cwd=cwd,
+ storeOutput=False,
+ onTimeout=(onTimeout,),
+ processOutputLine=[processOutput],
+ )
+ self.info(
+ "Calling %s with output_timeout %d" % (command, output_timeout)
+ )
+ p.run(outputTimeout=output_timeout)
+ p.wait()
+ if p.timedOut:
+ self.log(
+ "timed out after %s seconds of no output" % output_timeout,
+ level=error_level,
+ )
+ returncode = int(p.proc.returncode)
+ else:
+ p = subprocess.Popen(
+ command,
+ shell=shell,
+ stdout=subprocess.PIPE,
+ cwd=cwd,
+ stderr=subprocess.STDOUT,
+ env=env,
+ bufsize=0,
+ )
+ loop = True
+ while loop:
+ if p.poll() is not None:
+ """Avoid losing the final lines of the log?"""
+ loop = False
+ while True:
+ line = p.stdout.readline()
+ if not line:
+ break
+ parser.add_lines(line)
+ returncode = p.returncode
+ except KeyboardInterrupt:
+ level = error_level
+ if halt_on_failure:
+ level = FATAL
+ self.log(
+ "Process interrupted by the user, killing process with pid %s" % p.pid,
+ level=level,
+ )
+ p.kill()
+ return -1
+ except OSError as e:
+ level = error_level
+ if halt_on_failure:
+ level = FATAL
+ self.log(
+ "caught OS error %s: %s while running %s"
+ % (e.errno, e.strerror, command),
+ level=level,
+ )
+ return -1
+
+ return_level = INFO
+ if returncode not in success_codes:
+ return_level = error_level
+ if throw_exception:
+ raise subprocess.CalledProcessError(returncode, command)
+ self.log("Return code: %d" % returncode, level=return_level)
+
+ if halt_on_failure:
+ _fail = False
+ if returncode not in success_codes:
+ self.log(
+ "%s not in success codes: %s" % (returncode, success_codes),
+ level=error_level,
+ )
+ _fail = True
+ if parser.num_errors:
+ self.log("failures found while parsing output", level=error_level)
+ _fail = True
+ if _fail:
+ self.return_code = fatal_exit_code
+ self.fatal(
+ "Halting on failure while running %s" % (command,),
+ exit_code=fatal_exit_code,
+ )
+ if return_type == "num_errors":
+ return parser.num_errors
+ return returncode
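+
+    # Illustrative example (hypothetical command and pattern): fail the step
+    # when the command exits non-zero or prints a known error marker:
+    #   self.run_command(["make", "check"], cwd="build", halt_on_failure=True,
+    #                    error_list=[{"regex": re.compile("^FAIL"),
+    #                                 "level": ERROR}])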
+
+ def get_output_from_command(
+ self,
+ command,
+ cwd=None,
+ halt_on_failure=False,
+ env=None,
+ silent=False,
+ log_level=INFO,
+ tmpfile_base_path="tmpfile",
+ return_type="output",
+ save_tmpfiles=False,
+ throw_exception=False,
+ fatal_exit_code=2,
+ ignore_errors=False,
+ success_codes=None,
+ ):
+ """Similar to run_command, but where run_command is an
+ os.system(command) analog, get_output_from_command is a `command`
+ analog.
+
+ Less error checking by design, though if we figure out how to
+ do it without borking the output, great.
+
+ TODO: binary mode? silent is kinda like that.
+ TODO: since p.wait() can take a long time, optionally log something
+ every N seconds?
+ TODO: optionally only keep the first or last (N) line(s) of output?
+ TODO: optionally only return the tmp_stdout_filename?
+
+ ignore_errors=True is for the case where a command might produce standard
+ error output, but you don't particularly care; setting to True will
+ cause standard error to be logged at DEBUG rather than ERROR
+
+ Args:
+ command (str | list): command or list of commands to
+ execute and log.
+ cwd (str, optional): directory path from where to execute the
+ command. Defaults to `None`.
+ halt_on_failure (bool, optional): whether or not to redefine the
+ log level as `FATAL` on error. Defaults to False.
+ env (dict, optional): key-value of environment values to use to
+ run the command. Defaults to None.
+ silent (bool, optional): whether or not to output the stdout of
+ executing the command. Defaults to False.
+ log_level (str, optional): log level name to use on normal execution.
+ Defaults to `INFO`.
+            tmpfile_base_path (str, optional): base path of the file to which
+                the output will be written. Defaults to 'tmpfile'.
+ return_type (str, optional): if equal to 'output' then the complete
+ output of the executed command is returned, otherwise the written
+ filenames are returned. Defaults to 'output'.
+ save_tmpfiles (bool, optional): whether or not to save the temporary
+ files created from the command output. Defaults to False.
+ throw_exception (bool, optional): whether or not to raise an
+ exception if the return value of the command is not zero.
+ Defaults to False.
+            fatal_exit_code (int, optional): exit code to pass to self.fatal
+                when halting on failure. Defaults to 2.
+            ignore_errors (bool, optional): whether to log stderr at DEBUG
+                rather than ERROR. Defaults to False.
+            success_codes (list, optional): return codes that count as success.
+                Defaults to [0].
+
+ Returns:
+ None: if the cwd is not a directory.
+ None: on IOError.
+ tuple: stdout and stderr filenames.
+ str: stdout output.
+ """
+ if cwd:
+ if not os.path.isdir(cwd):
+ level = ERROR
+ if halt_on_failure:
+ level = FATAL
+ self.log(
+ "Can't run command %s in non-existent directory %s!"
+ % (command, cwd),
+ level=level,
+ )
+ return None
+ self.info("Getting output from command: %s in %s" % (command, cwd))
+ else:
+ self.info("Getting output from command: %s" % command)
+ if isinstance(command, list):
+ self.info("Copy/paste: %s" % subprocess.list2cmdline(command))
+ # This could potentially return something?
+ tmp_stdout = None
+ tmp_stderr = None
+ tmp_stdout_filename = "%s_stdout" % tmpfile_base_path
+ tmp_stderr_filename = "%s_stderr" % tmpfile_base_path
+ if success_codes is None:
+ success_codes = [0]
+
+ # TODO probably some more elegant solution than 2 similar passes
+ try:
+ tmp_stdout = open(tmp_stdout_filename, "w")
+ except IOError:
+ level = ERROR
+ if halt_on_failure:
+ level = FATAL
+ self.log(
+ "Can't open %s for writing!" % tmp_stdout_filename + self.exception(),
+ level=level,
+ )
+ return None
+ try:
+ tmp_stderr = open(tmp_stderr_filename, "w")
+ except IOError:
+ level = ERROR
+ if halt_on_failure:
+ level = FATAL
+ self.log(
+ "Can't open %s for writing!" % tmp_stderr_filename + self.exception(),
+ level=level,
+ )
+ return None
+        shell = not isinstance(command, list)
+
+ p = subprocess.Popen(
+ command,
+ shell=shell,
+ stdout=tmp_stdout,
+ cwd=cwd,
+ stderr=tmp_stderr,
+ env=env,
+ bufsize=0,
+ )
+ # XXX: changed from self.debug to self.log due to this error:
+ # TypeError: debug() takes exactly 1 argument (2 given)
+ self.log(
+ "Temporary files: %s and %s" % (tmp_stdout_filename, tmp_stderr_filename),
+ level=DEBUG,
+ )
+ p.wait()
+ tmp_stdout.close()
+ tmp_stderr.close()
+ return_level = DEBUG
+ output = None
+ if return_type == "output" or not silent:
+ if os.path.exists(tmp_stdout_filename) and os.path.getsize(
+ tmp_stdout_filename
+ ):
+ output = self.read_from_file(tmp_stdout_filename, verbose=False)
+ if not silent:
+ self.log("Output received:", level=log_level)
+ output_lines = output.rstrip().splitlines()
+ for line in output_lines:
+ if not line or line.isspace():
+ continue
+ if isinstance(line, binary_type):
+ line = line.decode("utf-8")
+ self.log(" %s" % line, level=log_level)
+ output = "\n".join(output_lines)
+ if os.path.exists(tmp_stderr_filename) and os.path.getsize(tmp_stderr_filename):
+ if not ignore_errors:
+ return_level = ERROR
+ self.log("Errors received:", level=return_level)
+ errors = self.read_from_file(tmp_stderr_filename, verbose=False)
+ for line in errors.rstrip().splitlines():
+ if not line or line.isspace():
+ continue
+ if isinstance(line, binary_type):
+ line = line.decode("utf-8")
+ self.log(" %s" % line, level=return_level)
+ elif p.returncode not in success_codes and not ignore_errors:
+ return_level = ERROR
+ # Clean up.
+ if not save_tmpfiles:
+ self.rmtree(tmp_stderr_filename, log_level=DEBUG)
+ self.rmtree(tmp_stdout_filename, log_level=DEBUG)
+ if p.returncode and throw_exception:
+ raise subprocess.CalledProcessError(p.returncode, command)
+ self.log("Return code: %d" % p.returncode, level=return_level)
+ if halt_on_failure and return_level == ERROR:
+ self.return_code = fatal_exit_code
+ self.fatal(
+ "Halting on failure while running %s" % command,
+ exit_code=fatal_exit_code,
+ )
+ # Hm, options on how to return this? I bet often we'll want
+ # output_lines[0] with no newline.
+ if return_type != "output":
+ return (tmp_stdout_filename, tmp_stderr_filename)
+ else:
+ return output
+
+ def _touch_file(self, file_name, times=None, error_level=FATAL):
+ """touch a file.
+
+ Args:
+ file_name (str): name of the file to touch.
+ times (tuple, optional): 2-tuple as specified by `os.utime`_
+ Defaults to None.
+ error_level (str, optional): log level name in case of error.
+ Defaults to `FATAL`.
+
+ .. _`os.utime`:
+ https://docs.python.org/3.4/library/os.html?highlight=os.utime#os.utime
+ """
+ self.info("Touching: %s" % file_name)
+ try:
+ os.utime(file_name, times)
+ except OSError:
+ try:
+ open(file_name, "w").close()
+ except IOError as e:
+ msg = "I/O error(%s): %s" % (e.errno, e.strerror)
+                self.log(msg, level=error_level)
+ os.utime(file_name, times)
+
+ def unpack(
+ self,
+ filename,
+ extract_to,
+ extract_dirs=None,
+ error_level=ERROR,
+ fatal_exit_code=2,
+ verbose=False,
+ ):
+ """The method allows to extract a file regardless of its extension.
+
+ Args:
+ filename (str): filename of the compressed file.
+ extract_to (str): where to extract the compressed file.
+            extract_dirs (list, optional): directories inside the archive file to extract.
+                Defaults to `None`.
+            error_level (str, optional): log level name to use on error.
+                Defaults to `ERROR`.
+            fatal_exit_code (int, optional): exit code to use when logging at
+                a fatal level. Defaults to 2.
+            verbose (bool, optional): whether or not extracted content should be displayed.
+                Defaults to False.
+
+ Raises:
+ IOError: on `filename` file not found.
+
+ """
+ if not os.path.isfile(filename):
+ raise IOError("Could not find file to extract: %s" % filename)
+
+ if zipfile.is_zipfile(filename):
+ try:
+ self.info(
+ "Using ZipFile to extract {} to {}".format(filename, extract_to)
+ )
+ with zipfile.ZipFile(filename) as bundle:
+ for entry in self._filter_entries(bundle.namelist(), extract_dirs):
+ if verbose:
+ self.info(" %s" % entry)
+ bundle.extract(entry, path=extract_to)
+
+ # ZipFile doesn't preserve permissions during extraction:
+ # http://bugs.python.org/issue15795
+ fname = os.path.realpath(os.path.join(extract_to, entry))
+ mode = bundle.getinfo(entry).external_attr >> 16 & 0x1FF
+ # Only set permissions if attributes are available. Otherwise all
+ # permissions will be removed eg. on Windows.
+ if mode:
+ os.chmod(fname, mode)
+ except zipfile.BadZipfile as e:
+ self.log(
+ "%s (%s)" % (str(e), filename),
+ level=error_level,
+ exit_code=fatal_exit_code,
+ )
+
+ # Bug 1211882 - is_tarfile cannot be trusted for dmg files
+ elif tarfile.is_tarfile(filename) and not filename.lower().endswith(".dmg"):
+ try:
+ self.info(
+ "Using TarFile to extract {} to {}".format(filename, extract_to)
+ )
+ with tarfile.open(filename) as bundle:
+ for entry in self._filter_entries(bundle.getnames(), extract_dirs):
+ if verbose:
+ self.info(" %s" % entry)
+ bundle.extract(entry, path=extract_to)
+ except tarfile.TarError as e:
+ self.log(
+ "%s (%s)" % (str(e), filename),
+ level=error_level,
+ exit_code=fatal_exit_code,
+ )
+ else:
+ self.log(
+ "No extraction method found for: %s" % filename,
+ level=error_level,
+ exit_code=fatal_exit_code,
+ )
+
+ def is_taskcluster(self):
+ """Returns boolean indicating if we're running in TaskCluster."""
+ # This may need expanding in the future to work on
+ return "TASKCLUSTER_WORKER_TYPE" in os.environ
+
+
+def PreScriptRun(func):
+ """Decorator for methods that will be called before script execution.
+
+ Each method on a BaseScript having this decorator will be called at the
+ beginning of BaseScript.run().
+
+ The return value is ignored. Exceptions will abort execution.
+ """
+ func._pre_run_listener = True
+ return func
+
+
+def PostScriptRun(func):
+ """Decorator for methods that will be called after script execution.
+
+ This is similar to PreScriptRun except it is called at the end of
+ execution. The method will always be fired, even if execution fails.
+ """
+ func._post_run_listener = True
+ return func
+
+
+def PreScriptAction(action=None):
+ """Decorator for methods that will be called at the beginning of each action.
+
+ Each method on a BaseScript having this decorator will be called during
+ BaseScript.run() before an individual action is executed. The method will
+ receive the action's name as an argument.
+
+ If no values are passed to the decorator, it will be applied to every
+ action. If a string is passed, the decorated function will only be called
+ for the action of that name.
+
+ The return value of the method is ignored. Exceptions will abort execution.
+ """
+
+ def _wrapped(func):
+ func._pre_action_listener = action
+ return func
+
+ def _wrapped_none(func):
+ func._pre_action_listener = None
+ return func
+
+ if type(action) == type(_wrapped):
+ return _wrapped_none(action)
+
+ return _wrapped
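+
+
+# Illustrative usage (hypothetical script class):
+#
+#   class MyScript(BaseScript):
+#       @PreScriptAction("package")
+#       def _pre_package(self, action):
+#           self.info("about to run %s" % action)
+#
+# Omitting the argument (bare @PreScriptAction) registers the hook for every
+# action.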
+
+
+def PostScriptAction(action=None):
+ """Decorator for methods that will be called at the end of each action.
+
+ This behaves similarly to PreScriptAction. It varies in that it is called
+ after execution of the action.
+
+ The decorated method will receive the action name as a positional argument.
+ It will then receive the following named arguments:
+
+ success - Bool indicating whether the action finished successfully.
+
+ The decorated method will always be called, even if the action threw an
+ exception.
+
+ The return value is ignored.
+ """
+
+ def _wrapped(func):
+ func._post_action_listener = action
+ return func
+
+ def _wrapped_none(func):
+ func._post_action_listener = None
+ return func
+
+ if type(action) == type(_wrapped):
+ return _wrapped_none(action)
+
+ return _wrapped
+
+
+# BaseScript {{{1
+class BaseScript(ScriptMixin, LogMixin, object):
+ def __init__(
+ self,
+ config_options=None,
+ ConfigClass=BaseConfig,
+ default_log_level="info",
+ **kwargs
+ ):
+ self._return_code = 0
+ super(BaseScript, self).__init__()
+
+ self.log_obj = None
+ self.abs_dirs = None
+ if config_options is None:
+ config_options = []
+ self.summary_list = []
+ self.failures = []
+ rw_config = ConfigClass(config_options=config_options, **kwargs)
+ self.config = rw_config.get_read_only_config()
+ self.actions = tuple(rw_config.actions)
+ self.all_actions = tuple(rw_config.all_actions)
+ self.env = None
+ self.new_log_obj(default_log_level=default_log_level)
+ self.script_obj = self
+
+ # Indicate we're a source checkout if VCS directory is present at the
+ # appropriate place. This code will break if this file is ever moved
+ # to another directory.
+ self.topsrcdir = None
+
+ srcreldir = "testing/mozharness/mozharness/base"
+ here = os.path.normpath(os.path.dirname(__file__))
+ if here.replace("\\", "/").endswith(srcreldir):
+ topsrcdir = os.path.normpath(os.path.join(here, "..", "..", "..", ".."))
+ hg_dir = os.path.join(topsrcdir, ".hg")
+ git_dir = os.path.join(topsrcdir, ".git")
+ if os.path.isdir(hg_dir) or os.path.isdir(git_dir):
+ self.topsrcdir = topsrcdir
+
+ # Set self.config to read-only.
+ #
+ # We can create intermediate config info programmatically from
+ # this in a repeatable way, with logs; this is how we straddle the
+ # ideal-but-not-user-friendly static config and the
+ # easy-to-write-hard-to-debug writable config.
+ #
+ # To allow for other, script-specific configurations
+ # (e.g., props json parsing), before locking,
+ # call self._pre_config_lock(). If needed, this method can
+ # alter self.config.
+ self._pre_config_lock(rw_config)
+ self._config_lock()
+
+ self.info("Run as %s" % rw_config.command_line)
+ if self.config.get("dump_config_hierarchy"):
+ # we only wish to dump and display what self.config is made up of,
+ # against the current script + args, without actually running any
+ # actions
+ self._dump_config_hierarchy(rw_config.all_cfg_files_and_dicts)
+ if self.config.get("dump_config"):
+ self.dump_config(exit_on_finish=True)
+
+ # Collect decorated methods. We simply iterate over the attributes of
+ # the current class instance and look for signatures deposited by
+ # the decorators.
+ self._listeners = dict(
+ pre_run=[],
+ pre_action=[],
+ post_action=[],
+ post_run=[],
+ )
+ for k in dir(self):
+ try:
+ item = self._getattr(k)
+ except Exception as e:
+ item = None
+ self.warning(
+ "BaseScript collecting decorated methods: "
+ "failure to get attribute {}: {}".format(k, str(e))
+ )
+ if not item:
+ continue
+
+ # We only decorate methods, so ignore other types.
+ if not inspect.ismethod(item):
+ continue
+
+ if hasattr(item, "_pre_run_listener"):
+ self._listeners["pre_run"].append(k)
+
+ if hasattr(item, "_pre_action_listener"):
+ self._listeners["pre_action"].append((k, item._pre_action_listener))
+
+ if hasattr(item, "_post_action_listener"):
+ self._listeners["post_action"].append((k, item._post_action_listener))
+
+ if hasattr(item, "_post_run_listener"):
+ self._listeners["post_run"].append(k)
+
+ def _getattr(self, name):
+ # `getattr(self, k)` will call the method `k` for any property
+ # access. If the property depends upon a module which has not
+ # been imported at the time the BaseScript initializer is
+ # executed, this property access will result in an
+ # Exception. Until Python 3's `inspect.getattr_static` is
+ # available, the simplest approach is to ignore the specific
+ # properties which are known to cause issues. Currently
+ # adb_path and device are ignored since they require the
+        # availability of the mozdevice package which is not guaranteed
+ # when BaseScript is called.
+ property_list = set(["adb_path", "device"])
+ if six.PY2:
+ if name in property_list:
+ item = None
+ else:
+ item = getattr(self, name)
+ else:
+ item = inspect.getattr_static(self, name)
+ if type(item) == property:
+ item = None
+ else:
+ item = getattr(self, name)
+ return item
+
+ def _dump_config_hierarchy(self, cfg_files):
+ """interpret each config file used.
+
+ This will show which keys/values are being added or overwritten by
+ other config files depending on their hierarchy (when they were added).
+ """
+ # go through each config_file. We will start with the lowest and
+ # print its keys/values that are being used in self.config. If any
+ # keys/values are present in a config file with a higher precedence,
+ # ignore those.
+ dirs = self.query_abs_dirs()
+ cfg_files_dump_config = {} # we will dump this to file
+ # keep track of keys that did not come from a config file
+ keys_not_from_file = set(self.config.keys())
+ if not cfg_files:
+ cfg_files = []
+ self.info("Total config files: %d" % (len(cfg_files)))
+ if len(cfg_files):
+ self.info("cfg files used from lowest precedence to highest:")
+ for i, (target_file, target_dict) in enumerate(cfg_files):
+ unique_keys = set(target_dict.keys())
+ unique_dict = {}
+ # iterate through the target_dicts remaining 'higher' cfg_files
+            remaining_cfgs = cfg_files[i + 1 :]
+ # where higher == more precedent
+ for ii, (higher_file, higher_dict) in enumerate(remaining_cfgs):
+ # now only keep keys/values that are not overwritten by a
+ # higher config
+ unique_keys = unique_keys.difference(set(higher_dict.keys()))
+ # unique_dict we know now has only keys/values that are unique to
+ # this config file.
+ unique_dict = dict((key, target_dict.get(key)) for key in unique_keys)
+ cfg_files_dump_config[target_file] = unique_dict
+ self.action_message("Config File %d: %s" % (i + 1, target_file))
+ self.info(pprint.pformat(unique_dict))
+ # let's also find out which keys/values from self.config are not
+ # from each target config file dict
+ keys_not_from_file = keys_not_from_file.difference(set(target_dict.keys()))
+ not_from_file_dict = dict(
+ (key, self.config.get(key)) for key in keys_not_from_file
+ )
+ cfg_files_dump_config["not_from_cfg_file"] = not_from_file_dict
+ self.action_message(
+ "Not from any config file (default_config, " "cmd line options, etc)"
+ )
+ self.info(pprint.pformat(not_from_file_dict))
+
+ # finally, let's dump this output as JSON and exit early
+ self.dump_config(
+ os.path.join(dirs["abs_log_dir"], "localconfigfiles.json"),
+ cfg_files_dump_config,
+ console_output=False,
+ exit_on_finish=True,
+ )
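+
+    # Illustrative sketch (values hypothetical): given two config files
+    # where a later file overrides a key, e.g.
+    #
+    #   cfg_files = [("base.py", {"work_dir": "build", "log_level": "info"}),
+    #                ("override.py", {"log_level": "debug"})]
+    #
+    # the dumped localconfigfiles.json holds roughly
+    #
+    #   {"base.py": {"work_dir": "build"},
+    #    "override.py": {"log_level": "debug"},
+    #    "not_from_cfg_file": {...}}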
+
+ def _pre_config_lock(self, rw_config):
+ """This empty method can allow for config checking and manipulation
+ before the config lock, when overridden in scripts.
+ """
+ pass
+
+ def _config_lock(self):
+ """After this point, the config is locked and should not be
+ manipulated (based on mozharness.base.config.ReadOnlyDict)
+ """
+ self.config.lock()
+
+ def _possibly_run_method(self, method_name, error_if_missing=False):
+ """This is here for run()."""
+ if hasattr(self, method_name) and callable(self._getattr(method_name)):
+ return getattr(self, method_name)()
+ elif error_if_missing:
+ self.error("No such method %s!" % method_name)
+
+ def run_action(self, action):
+ if action not in self.actions:
+ self.action_message("Skipping %s step." % action)
+ return
+
+ method_name = action.replace("-", "_")
+ self.action_message("Running %s step." % action)
+
+ # An exception during a pre action listener should abort execution.
+ for fn, target in self._listeners["pre_action"]:
+ if target is not None and target != action:
+ continue
+
+ try:
+ self.info("Running pre-action listener: %s" % fn)
+ method = getattr(self, fn)
+ method(action)
+ except Exception:
+ self.error(
+ "Exception during pre-action for %s: %s"
+ % (action, traceback.format_exc())
+ )
+
+ for fn, target in self._listeners["post_action"]:
+ if target is not None and target != action:
+ continue
+
+ try:
+ self.info("Running post-action listener: %s" % fn)
+ method = getattr(self, fn)
+ method(action, success=False)
+ except Exception:
+ self.error(
+ "An additional exception occurred during "
+ "post-action for %s: %s" % (action, traceback.format_exc())
+ )
+
+ self.fatal("Aborting due to exception in pre-action listener.")
+
+ # We always run post action listeners, even if the main routine failed.
+ success = False
+ try:
+ self.info("Running main action method: %s" % method_name)
+ self._possibly_run_method("preflight_%s" % method_name)
+ self._possibly_run_method(method_name, error_if_missing=True)
+ self._possibly_run_method("postflight_%s" % method_name)
+ success = True
+ finally:
+ post_success = True
+ for fn, target in self._listeners["post_action"]:
+ if target is not None and target != action:
+ continue
+
+ try:
+ self.info("Running post-action listener: %s" % fn)
+ method = getattr(self, fn)
+ method(action, success=success and self.return_code == 0)
+ except Exception:
+ post_success = False
+ self.error(
+ "Exception during post-action for %s: %s"
+ % (action, traceback.format_exc())
+ )
+
+ step_result = "success" if success else "failed"
+ self.action_message("Finished %s step (%s)" % (action, step_result))
+
+ if not post_success:
+ self.fatal("Aborting due to failure in post-action listener.")
+
+ def run(self):
+ """Default run method.
+ This is the "do everything" method, based on actions and all_actions.
+
+ First run self.dump_config() if it exists.
+ Second, go through the list of all_actions.
+ If they're in the list of self.actions, try to run
+ self.preflight_ACTION(), self.ACTION(), and self.postflight_ACTION().
+
+ Preflight is sanity checking before doing anything time consuming or
+ destructive.
+
+ Postflight is quick testing for success after an action.
+
+ """
+ for fn in self._listeners["pre_run"]:
+ try:
+ self.info("Running pre-run listener: %s" % fn)
+ method = getattr(self, fn)
+ method()
+ except Exception:
+ self.error(
+ "Exception during pre-run listener: %s" % traceback.format_exc()
+ )
+
+ for fn in self._listeners["post_run"]:
+ try:
+ method = getattr(self, fn)
+ method()
+ except Exception:
+ self.error(
+ "An additional exception occurred during a "
+ "post-run listener: %s" % traceback.format_exc()
+ )
+
+ self.fatal("Aborting due to failure in pre-run listener.")
+
+ self.dump_config()
+ try:
+ for action in self.all_actions:
+ self.run_action(action)
+ except Exception:
+ self.fatal("Uncaught exception: %s" % traceback.format_exc())
+ finally:
+ post_success = True
+ for fn in self._listeners["post_run"]:
+ try:
+ self.info("Running post-run listener: %s" % fn)
+ method = getattr(self, fn)
+ method()
+ except Exception:
+ post_success = False
+ self.error(
+ "Exception during post-run listener: %s"
+ % traceback.format_exc()
+ )
+
+ if not post_success:
+ self.fatal("Aborting due to failure in post-run listener.")
+
+ return self.return_code
+
+ def run_and_exit(self):
+ """Runs the script and exits the current interpreter."""
+ rc = self.run()
+ if rc != 0:
+ self.warning("returning nonzero exit status %d" % rc)
+ sys.exit(rc)
+
+ def clobber(self):
+ """
+ Delete the working directory
+ """
+ dirs = self.query_abs_dirs()
+ self.rmtree(dirs["abs_work_dir"], error_level=FATAL)
+
+ def query_abs_dirs(self):
+ """We want to be able to determine where all the important things
+ are. Absolute paths lend themselves well to this, though I wouldn't
+ be surprised if this causes some issues somewhere.
+
+ This should be overridden in any script that has additional dirs
+ to query.
+
+ The query_* methods tend to set self.VAR variables as their
+ runtime cache.
+ """
+ if self.abs_dirs:
+ return self.abs_dirs
+ c = self.config
+ dirs = {}
+ dirs["base_work_dir"] = c["base_work_dir"]
+ dirs["abs_work_dir"] = os.path.join(c["base_work_dir"], c["work_dir"])
+ dirs["abs_log_dir"] = os.path.join(c["base_work_dir"], c.get("log_dir", "logs"))
+ if "GECKO_PATH" in os.environ:
+ dirs["abs_src_dir"] = os.environ["GECKO_PATH"]
+ self.abs_dirs = dirs
+ return self.abs_dirs
+
+ def dump_config(
+ self, file_path=None, config=None, console_output=True, exit_on_finish=False
+ ):
+ """Dump self.config to localconfig.json"""
+ config = config or self.config
+ dirs = self.query_abs_dirs()
+ if not file_path:
+ file_path = os.path.join(dirs["abs_log_dir"], "localconfig.json")
+ self.info("Dumping config to %s." % file_path)
+ self.mkdir_p(os.path.dirname(file_path))
+ json_config = json.dumps(config, sort_keys=True, indent=4)
+ fh = codecs.open(file_path, encoding="utf-8", mode="w+")
+ fh.write(json_config)
+ fh.close()
+ if console_output:
+ self.info(pprint.pformat(config))
+ if exit_on_finish:
+ sys.exit()
+
+ # logging {{{2
+ def new_log_obj(self, default_log_level="info"):
+ c = self.config
+ log_dir = os.path.join(c["base_work_dir"], c.get("log_dir", "logs"))
+ log_config = {
+ "logger_name": "Simple",
+ "log_name": "log",
+ "log_dir": log_dir,
+ "log_level": default_log_level,
+ "log_format": "%(asctime)s %(levelname)8s - %(message)s",
+ "log_to_console": True,
+ "append_to_log": False,
+ }
+ log_type = self.config.get("log_type", "console")
+ for key in log_config.keys():
+ value = self.config.get(key, None)
+ if value is not None:
+ log_config[key] = value
+ if log_type == "multi":
+ self.log_obj = MultiFileLogger(**log_config)
+ elif log_type == "simple":
+ self.log_obj = SimpleFileLogger(**log_config)
+ else:
+ self.log_obj = ConsoleLogger(**log_config)
+
+ def action_message(self, message):
+ self.info(
+ "[mozharness: %sZ] %s"
+ % (datetime.datetime.utcnow().isoformat(" "), message)
+ )
+
+ def summary(self):
+ """Print out all the summary lines added via add_summary()
+ throughout the script.
+
+ I'd like to revisit how to do this in a prettier fashion.
+ """
+ self.action_message("%s summary:" % self.__class__.__name__)
+ if self.summary_list:
+ for item in self.summary_list:
+ try:
+ self.log(item["message"], level=item["level"])
+ except ValueError:
+ """log is closed; print as a default. Ran into this
+ when calling from __del__()"""
+ print("### Log is closed! (%s)" % item["message"])
+
+ def add_summary(self, message, level=INFO):
+ self.summary_list.append({"message": message, "level": level})
+ # TODO write to a summary-only log?
+ # Summaries need a lot more love.
+ self.log(message, level=level)
+
+ def summarize_success_count(
+ self, success_count, total_count, message="%d of %d successful.", level=None
+ ):
+ if level is None:
+ level = INFO
+ if success_count < total_count:
+ level = ERROR
+ self.add_summary(message % (success_count, total_count), level=level)
+
+ def get_hash_for_file(self, file_path, hash_type="sha512"):
+ bs = 65536
+ hasher = hashlib.new(hash_type)
+ with open(file_path, "rb") as fh:
+ buf = fh.read(bs)
+ while len(buf) > 0:
+ hasher.update(buf)
+ buf = fh.read(bs)
+ return hasher.hexdigest()
+
+ @property
+ def return_code(self):
+ return self._return_code
+
+ @return_code.setter
+ def return_code(self, code):
+ old_return_code, self._return_code = self._return_code, code
+ if old_return_code != code:
+ self.warning("setting return code to %d" % code)
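A minimal sketch of how a script hooks into the listener machinery above
(illustrative only; the marker attributes are normally set by the
PreScriptAction/PostScriptAction/PostScriptRun decorators defined alongside
BaseScript, not by hand):

    class MyScript(BaseScript):
        def cleanup(self, action, success=None):
            # runs after every action; a non-None marker value would
            # restrict it to that single action
            self.info("finished %s (ok=%s)" % (action, success))

        # what the @PostScriptAction decorator effectively does:
        cleanup._post_action_listener = None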
diff --git a/testing/mozharness/mozharness/base/transfer.py b/testing/mozharness/mozharness/base/transfer.py
new file mode 100755
index 0000000000..610e93ecc9
--- /dev/null
+++ b/testing/mozharness/mozharness/base/transfer.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Generic ways to upload + download files.
+"""
+
+import pprint
+
+try:
+ from urllib2 import urlopen
+except ImportError:
+ from urllib.request import urlopen
+
+import json
+
+from mozharness.base.log import DEBUG
+
+
+# TransferMixin {{{1
+class TransferMixin(object):
+ """
+ Generic transfer methods.
+
+ Dependent on BaseScript.
+ """
+
+ def load_json_from_url(self, url, timeout=30, log_level=DEBUG):
+ self.log(
+ "Attempting to download %s; timeout=%i" % (url, timeout), level=log_level
+ )
+ try:
+ r = urlopen(url, timeout=timeout)
+ j = json.load(r)
+ self.log(pprint.pformat(j), level=log_level)
+ except BaseException:
+ self.exception(message="Unable to download %s!" % url)
+ raise
+ return j
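A hedged usage sketch for TransferMixin (class and URL hypothetical;
assumes the usual BaseScript wiring supplies logging and config):

    from mozharness.base.script import BaseScript

    class StatusScript(TransferMixin, BaseScript):
        def check_status(self):
            # logs the attempt, downloads, parses; re-raises on failure
            data = self.load_json_from_url(
                "https://example.com/status.json", timeout=10
            )
            self.info("status: %s" % data.get("status"))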
diff --git a/testing/mozharness/mozharness/base/vcs/__init__.py b/testing/mozharness/mozharness/base/vcs/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozharness/mozharness/base/vcs/__init__.py
diff --git a/testing/mozharness/mozharness/base/vcs/gittool.py b/testing/mozharness/mozharness/base/vcs/gittool.py
new file mode 100644
index 0000000000..e9d0c0e2c9
--- /dev/null
+++ b/testing/mozharness/mozharness/base/vcs/gittool.py
@@ -0,0 +1,107 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import re
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+from mozharness.base.errors import GitErrorList, VCSException
+from mozharness.base.log import LogMixin, OutputParser
+from mozharness.base.script import ScriptMixin
+
+
+class GittoolParser(OutputParser):
+ """
+ A class that extends OutputParser such that it can find the "Got revision"
+ string from gittool.py output
+ """
+
+ got_revision_exp = re.compile(r"Got revision (\w+)")
+ got_revision = None
+
+ def parse_single_line(self, line):
+ m = self.got_revision_exp.match(line)
+ if m:
+ self.got_revision = m.group(1)
+ super(GittoolParser, self).parse_single_line(line)
+
+
+class GittoolVCS(ScriptMixin, LogMixin):
+ def __init__(self, log_obj=None, config=None, vcs_config=None, script_obj=None):
+ super(GittoolVCS, self).__init__()
+
+ self.log_obj = log_obj
+ self.script_obj = script_obj
+ if config:
+ self.config = config
+ else:
+ self.config = {}
+ # vcs_config = {
+ # repo: repository,
+ # branch: branch,
+ # revision: revision,
+ # ssh_username: ssh_username,
+ # ssh_key: ssh_key,
+ # }
+ self.vcs_config = vcs_config
+ self.gittool = self.query_exe("gittool.py", return_type="list")
+
+ def ensure_repo_and_revision(self):
+        """Makes sure that `dest` has `revision` or `branch` checked out
+ from `repo`.
+
+ Do what it takes to make that happen, including possibly clobbering
+ dest.
+ """
+ c = self.vcs_config
+ for conf_item in ("dest", "repo"):
+ assert self.vcs_config[conf_item]
+ dest = os.path.abspath(c["dest"])
+ repo = c["repo"]
+ revision = c.get("revision")
+ branch = c.get("branch")
+ clean = c.get("clean")
+ share_base = c.get("vcs_share_base", os.environ.get("GIT_SHARE_BASE_DIR", None))
+ env = {"PATH": os.environ.get("PATH")}
+ env.update(c.get("env", {}))
+ if self._is_windows():
+ # git.exe is not in the PATH by default
+ env["PATH"] = "%s;C:/mozilla-build/Git/bin" % env["PATH"]
+ # SYSTEMROOT is needed for 'import random'
+ if "SYSTEMROOT" not in env:
+ env["SYSTEMROOT"] = os.environ.get("SYSTEMROOT")
+ if share_base is not None:
+ env["GIT_SHARE_BASE_DIR"] = share_base
+
+ cmd = self.gittool[:]
+ if branch:
+ cmd.extend(["-b", branch])
+ if revision:
+ cmd.extend(["-r", revision])
+ if clean:
+ cmd.append("--clean")
+
+ for base_mirror_url in self.config.get(
+ "gittool_base_mirror_urls", self.config.get("vcs_base_mirror_urls", [])
+ ):
+ bits = urlparse.urlparse(repo)
+ mirror_url = urlparse.urljoin(base_mirror_url, bits.path)
+ cmd.extend(["--mirror", mirror_url])
+
+ cmd.extend([repo, dest])
+ parser = GittoolParser(
+ config=self.config, log_obj=self.log_obj, error_list=GitErrorList
+ )
+ retval = self.run_command(
+ cmd, error_list=GitErrorList, env=env, output_parser=parser
+ )
+
+ if retval != 0:
+ raise VCSException("Unable to checkout")
+
+ return parser.got_revision
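A small sketch of the parser in isolation (in practice run_command feeds it
the command's output; the log_obj wiring is elided here):

    parser = GittoolParser(config={}, log_obj=log_obj)
    for line in ["Fetching repo", "Got revision 0123abc"]:
        parser.parse_single_line(line)
    assert parser.got_revision == "0123abc"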
diff --git a/testing/mozharness/mozharness/base/vcs/mercurial.py b/testing/mozharness/mozharness/base/vcs/mercurial.py
new file mode 100755
index 0000000000..63b0d27c34
--- /dev/null
+++ b/testing/mozharness/mozharness/base/vcs/mercurial.py
@@ -0,0 +1,478 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Mercurial VCS support.
+"""
+
+import hashlib
+import os
+import re
+import subprocess
+import sys
+from collections import namedtuple
+
+try:
+ from urlparse import urlsplit
+except ImportError:
+ from urllib.parse import urlsplit
+
+import mozharness
+from mozharness.base.errors import HgErrorList, VCSException
+from mozharness.base.log import LogMixin, OutputParser
+from mozharness.base.script import ScriptMixin
+from mozharness.base.transfer import TransferMixin
+
+sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.dirname(sys.path[0]))))
+
+
+external_tools_path = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
+ "external_tools",
+)
+
+
+HG_OPTIONS = ["--config", "ui.merge=internal:merge"]
+
+# MercurialVCS {{{1
+# TODO Make the remaining functions more mozharness-friendly.
+# TODO Add the various tag functionality that are currently in
+# build/tools/scripts to MercurialVCS -- generic tagging logic belongs here.
+REVISION, BRANCH = 0, 1
+
+
+class RepositoryUpdateRevisionParser(OutputParser):
+    """Parse `hg` output for the revision the repository was updated to."""
+
+ revision = None
+ RE_UPDATED = re.compile("^updated to ([a-f0-9]{40})$")
+
+ def parse_single_line(self, line):
+ m = self.RE_UPDATED.match(line)
+ if m:
+ self.revision = m.group(1)
+
+ return super(RepositoryUpdateRevisionParser, self).parse_single_line(line)
+
+
+def make_hg_url(hg_host, repo_path, protocol="http", revision=None, filename=None):
+ """Helper function.
+
+ Construct a valid hg url from a base hg url (hg.mozilla.org),
+ repo_path, revision and possible filename
+ """
+ base = "%s://%s" % (protocol, hg_host)
+ repo = "/".join(p.strip("/") for p in [base, repo_path])
+ if not filename:
+ if not revision:
+ return repo
+ else:
+ return "/".join([p.strip("/") for p in [repo, "rev", revision]])
+ else:
+ assert revision
+ return "/".join([p.strip("/") for p in [repo, "raw-file", revision, filename]])
+
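+# Illustrative examples (values hypothetical) of the URLs this helper builds:
+#
+#   make_hg_url("hg.mozilla.org", "mozilla-central")
+#       -> "http://hg.mozilla.org/mozilla-central"
+#   make_hg_url("hg.mozilla.org", "mozilla-central", revision="abc123")
+#       -> "http://hg.mozilla.org/mozilla-central/rev/abc123"
+#   make_hg_url("hg.mozilla.org", "m-c", protocol="https",
+#               revision="abc123", filename="README.txt")
+#       -> "https://hg.mozilla.org/m-c/raw-file/abc123/README.txt"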
+
+class MercurialVCS(ScriptMixin, LogMixin, TransferMixin):
+    # For the most part, scripts import mercurial and use update();
+    # tag-release.py also imports apply_and_push, update, get_revision,
+    # out, BRANCH, REVISION, get_branches, and cleanOutgoingRevs.
+
+ def __init__(self, log_obj=None, config=None, vcs_config=None, script_obj=None):
+ super(MercurialVCS, self).__init__()
+ self.can_share = None
+ self.log_obj = log_obj
+ self.script_obj = script_obj
+ if config:
+ self.config = config
+ else:
+ self.config = {}
+ # vcs_config = {
+ # hg_host: hg_host,
+ # repo: repository,
+ # branch: branch,
+ # revision: revision,
+ # ssh_username: ssh_username,
+ # ssh_key: ssh_key,
+ # }
+ self.vcs_config = vcs_config or {}
+ self.hg = self.query_exe("hg", return_type="list") + HG_OPTIONS
+
+ def _make_absolute(self, repo):
+ if repo.startswith("file://"):
+ path = repo[len("file://") :]
+ repo = "file://%s" % os.path.abspath(path)
+ elif "://" not in repo:
+ repo = os.path.abspath(repo)
+ return repo
+
+ def get_repo_name(self, repo):
+ return repo.rstrip("/").split("/")[-1]
+
+ def get_repo_path(self, repo):
+ repo = self._make_absolute(repo)
+ if repo.startswith("/"):
+ return repo.lstrip("/")
+ else:
+ return urlsplit(repo).path.lstrip("/")
+
+ def get_revision_from_path(self, path):
+        """Returns the revision currently checked out in directory `path`."""
+ return self.get_output_from_command(
+ self.hg + ["parent", "--template", "{node}"], cwd=path
+ )
+
+ def get_branch_from_path(self, path):
+ branch = self.get_output_from_command(self.hg + ["branch"], cwd=path)
+ return str(branch).strip()
+
+ def get_branches_from_path(self, path):
+ branches = []
+ for line in self.get_output_from_command(
+ self.hg + ["branches", "-c"], cwd=path
+ ).splitlines():
+ branches.append(line.split()[0])
+ return branches
+
+ def hg_ver(self):
+ """Returns the current version of hg, as a tuple of
+ (major, minor, build)"""
+ ver_string = self.get_output_from_command(self.hg + ["-q", "version"])
+ match = re.search(r"\(version ([0-9.]+)\)", ver_string)
+ if match:
+ bits = match.group(1).split(".")
+            # pad to (major, minor, build), e.g. "1.6" -> (1, 6, 0)
+            while len(bits) < 3:
+                bits.append(0)
+ ver = tuple(int(b) for b in bits)
+ else:
+ ver = (0, 0, 0)
+ self.debug("Running hg version %s" % str(ver))
+ return ver
+
+ def update(self, dest, branch=None, revision=None):
+ """Updates working copy `dest` to `branch` or `revision`.
+ If revision is set, branch will be ignored.
+ If neither is set then the working copy will be updated to the
+ latest revision on the current branch. Local changes will be
+ discarded.
+ """
+ # If we have a revision, switch to that
+ msg = "Updating %s" % dest
+ if branch:
+ msg += " to branch %s" % branch
+ if revision:
+ msg += " revision %s" % revision
+ self.info("%s." % msg)
+ if revision is not None:
+ cmd = self.hg + ["update", "-C", "-r", revision]
+ if self.run_command(cmd, cwd=dest, error_list=HgErrorList):
+ raise VCSException("Unable to update %s to %s!" % (dest, revision))
+ else:
+ # Check & switch branch
+ local_branch = self.get_branch_from_path(dest)
+
+ cmd = self.hg + ["update", "-C"]
+
+ # If this is different, checkout the other branch
+ if branch and branch != local_branch:
+ cmd.append(branch)
+
+ if self.run_command(cmd, cwd=dest, error_list=HgErrorList):
+ raise VCSException("Unable to update %s!" % dest)
+ return self.get_revision_from_path(dest)
+
+ def clone(self, repo, dest, branch=None, revision=None, update_dest=True):
+ """Clones hg repo and places it at `dest`, replacing whatever else
+ is there. The working copy will be empty.
+
+ If `revision` is set, only the specified revision and its ancestors
+ will be cloned. If revision is set, branch is ignored.
+
+ If `update_dest` is set, then `dest` will be updated to `revision`
+ if set, otherwise to `branch`, otherwise to the head of default.
+ """
+ msg = "Cloning %s to %s" % (repo, dest)
+ if branch:
+ msg += " on branch %s" % branch
+ if revision:
+ msg += " to revision %s" % revision
+ self.info("%s." % msg)
+ parent_dest = os.path.dirname(dest)
+ if parent_dest and not os.path.exists(parent_dest):
+ self.mkdir_p(parent_dest)
+ if os.path.exists(dest):
+ self.info("Removing %s before clone." % dest)
+ self.rmtree(dest)
+
+ cmd = self.hg + ["clone"]
+ if not update_dest:
+ cmd.append("-U")
+
+ if revision:
+ cmd.extend(["-r", revision])
+ elif branch:
+ # hg >= 1.6 supports -b branch for cloning
+ ver = self.hg_ver()
+ if ver >= (1, 6, 0):
+ cmd.extend(["-b", branch])
+
+ cmd.extend([repo, dest])
+ output_timeout = self.config.get(
+ "vcs_output_timeout", self.vcs_config.get("output_timeout")
+ )
+ if (
+ self.run_command(cmd, error_list=HgErrorList, output_timeout=output_timeout)
+ != 0
+ ):
+ raise VCSException("Unable to clone %s to %s!" % (repo, dest))
+
+ if update_dest:
+ return self.update(dest, branch, revision)
+
+ def common_args(self, revision=None, branch=None, ssh_username=None, ssh_key=None):
+ """Fill in common hg arguments, encapsulating logic checks that
+ depend on mercurial versions and provided arguments
+ """
+ args = []
+ if ssh_username or ssh_key:
+ opt = ["-e", "ssh"]
+ if ssh_username:
+ opt[1] += " -l %s" % ssh_username
+ if ssh_key:
+ opt[1] += " -i %s" % ssh_key
+ args.extend(opt)
+ if revision:
+ args.extend(["-r", revision])
+ elif branch:
+ if self.hg_ver() >= (1, 6, 0):
+ args.extend(["-b", branch])
+ return args
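+
+    # Illustrative (values hypothetical): the args composed above, e.g.
+    #
+    #   self.common_args(revision="abc", ssh_username="u", ssh_key="key")
+    #       -> ["-e", "ssh -l u -i key", "-r", "abc"]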
+
+ def pull(self, repo, dest, update_dest=True, **kwargs):
+        """Pulls changes from the hg repo and places them in `dest`.
+
+ If `revision` is set, only the specified revision and its ancestors
+ will be pulled.
+
+ If `update_dest` is set, then `dest` will be updated to `revision`
+ if set, otherwise to `branch`, otherwise to the head of default.
+ """
+ msg = "Pulling %s to %s" % (repo, dest)
+ if update_dest:
+ msg += " and updating"
+ self.info("%s." % msg)
+ if not os.path.exists(dest):
+ # Error or clone?
+ # If error, should we have a halt_on_error=False above?
+ self.error("Can't hg pull in nonexistent directory %s." % dest)
+ return -1
+ # Convert repo to an absolute path if it's a local repository
+ repo = self._make_absolute(repo)
+ cmd = self.hg + ["pull"]
+ cmd.extend(self.common_args(**kwargs))
+ cmd.append(repo)
+ output_timeout = self.config.get(
+ "vcs_output_timeout", self.vcs_config.get("output_timeout")
+ )
+ if (
+ self.run_command(
+ cmd, cwd=dest, error_list=HgErrorList, output_timeout=output_timeout
+ )
+ != 0
+ ):
+ raise VCSException("Can't pull in %s!" % dest)
+
+ if update_dest:
+ branch = self.vcs_config.get("branch")
+ revision = self.vcs_config.get("revision")
+ return self.update(dest, branch=branch, revision=revision)
+
+    # REVISION and BRANCH (defined above) give the positions of the
+    # attributes in the tuples returned by `out`.
+ def out(self, src, remote, **kwargs):
+ """Check for outgoing changesets present in a repo"""
+ self.info("Checking for outgoing changesets from %s to %s." % (src, remote))
+ cmd = self.hg + ["-q", "out", "--template", "{node} {branches}\n"]
+ cmd.extend(self.common_args(**kwargs))
+ cmd.append(remote)
+ if os.path.exists(src):
+ try:
+ revs = []
+ for line in (
+ self.get_output_from_command(cmd, cwd=src, throw_exception=True)
+ .rstrip()
+ .split("\n")
+ ):
+ try:
+ rev, branch = line.split()
+ # Mercurial displays no branch at all if the revision
+ # is on "default"
+ except ValueError:
+ rev = line.rstrip()
+ branch = "default"
+ revs.append((rev, branch))
+ return revs
+ except subprocess.CalledProcessError as inst:
+ # In some situations, some versions of Mercurial return "1"
+ # if no changes are found, so we need to ignore this return
+ # code
+ if inst.returncode == 1:
+ return []
+ raise
+
+ def push(self, src, remote, push_new_branches=True, **kwargs):
+ # This doesn't appear to work with hg_ver < (1, 6, 0).
+ # Error out, or let you try?
+ self.info("Pushing new changes from %s to %s." % (src, remote))
+ cmd = self.hg + ["push"]
+ cmd.extend(self.common_args(**kwargs))
+ if push_new_branches and self.hg_ver() >= (1, 6, 0):
+ cmd.append("--new-branch")
+ cmd.append(remote)
+ status = self.run_command(
+ cmd,
+ cwd=src,
+ error_list=HgErrorList,
+ success_codes=(0, 1),
+ return_type="num_errors",
+ )
+ if status:
+ raise VCSException("Can't push %s to %s!" % (src, remote))
+ return status
+
+ @property
+ def robustcheckout_path(self):
+ """Path to the robustcheckout extension."""
+ ext = os.path.join(external_tools_path, "robustcheckout.py")
+ if os.path.exists(ext):
+ return ext
+
+ def ensure_repo_and_revision(self):
+        """Makes sure that `dest` has `revision` or `branch` checked out
+ from `repo`.
+
+ Do what it takes to make that happen, including possibly clobbering
+ dest.
+ """
+ c = self.vcs_config
+ dest = c["dest"]
+ repo_url = c["repo"]
+ rev = c.get("revision")
+ branch = c.get("branch")
+ purge = c.get("clone_with_purge", False)
+ upstream = c.get("clone_upstream_url")
+
+ # The API here is kind of bad because we're relying on state in
+ # self.vcs_config instead of passing arguments. This confuses
+ # scripts that have multiple repos. This includes the clone_tools()
+ # step :(
+
+ if not rev and not branch:
+ self.warning('did not specify revision or branch; assuming "default"')
+ branch = "default"
+
+ share_base = c.get("vcs_share_base") or os.environ.get("HG_SHARE_BASE_DIR")
+ if share_base and c.get("use_vcs_unique_share"):
+ # Bug 1277041 - update migration scripts to support robustcheckout
+ # fake a share but don't really share
+ share_base = os.path.join(share_base, hashlib.md5(dest).hexdigest())
+
+        # We require that shared storage be configured because it guarantees
+        # we only have one local copy of the logical repo stores.
+        if not share_base:
+            raise VCSException(
+                "vcs share base not defined; refusing to operate sub-optimally"
+ )
+
+ if not self.robustcheckout_path:
+ raise VCSException("could not find the robustcheckout Mercurial extension")
+
+ # Log HG version and install info to aid debugging.
+ self.run_command(self.hg + ["--version"])
+ self.run_command(self.hg + ["debuginstall", "--config=ui.username=worker"])
+
+ args = self.hg + [
+ "--config",
+ "extensions.robustcheckout=%s" % self.robustcheckout_path,
+ "robustcheckout",
+ repo_url,
+ dest,
+ "--sharebase",
+ share_base,
+ ]
+ if purge:
+ args.append("--purge")
+ if upstream:
+ args.extend(["--upstream", upstream])
+
+ if rev:
+ args.extend(["--revision", rev])
+ if branch:
+ args.extend(["--branch", branch])
+
+ parser = RepositoryUpdateRevisionParser(
+ config=self.config, log_obj=self.log_obj
+ )
+ if self.run_command(args, output_parser=parser):
+ raise VCSException("repo checkout failed!")
+
+ if not parser.revision:
+ raise VCSException("could not identify revision updated to")
+
+ return parser.revision
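+
+    # Illustrative (values hypothetical): with a vcs_config such as
+    #
+    #   {"repo": "https://hg.mozilla.org/mozilla-central",
+    #    "dest": "/builds/src", "revision": "abc123",
+    #    "vcs_share_base": "/builds/hg-shared"}
+    #
+    # the command assembled above is roughly
+    #
+    #   hg --config ui.merge=internal:merge \
+    #      --config extensions.robustcheckout=<ext path> robustcheckout \
+    #      https://hg.mozilla.org/mozilla-central /builds/src \
+    #      --sharebase /builds/hg-shared --revision abc123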
+
+ def cleanOutgoingRevs(self, reponame, remote, username, sshKey):
+ # TODO retry
+ self.info("Wiping outgoing local changes from %s to %s." % (reponame, remote))
+ outgoingRevs = self.out(
+ src=reponame, remote=remote, ssh_username=username, ssh_key=sshKey
+ )
+ for r in reversed(outgoingRevs):
+ self.run_command(
+ self.hg + ["strip", "-n", r[REVISION]],
+ cwd=reponame,
+ error_list=HgErrorList,
+ )
+
+ def query_pushinfo(self, repository, revision):
+ """Query the pushdate and pushid of a repository/revision.
+ This is intended to be used on hg.mozilla.org/mozilla-central and
+ similar. It may or may not work for other hg repositories.
+ """
+ PushInfo = namedtuple("PushInfo", ["pushid", "pushdate"])
+
+ try:
+ url = "%s/json-pushes?changeset=%s" % (repository, revision)
+ self.info("Pushdate URL is: %s" % url)
+ contents = self.retry(self.load_json_from_url, args=(url,))
+
+ # The contents should be something like:
+ # {
+ # "28537": {
+ # "changesets": [
+ # "1d0a914ae676cc5ed203cdc05c16d8e0c22af7e5",
+ # ],
+ # "date": 1428072488,
+ # "user": "user@mozilla.com"
+ # }
+ # }
+ #
+ # So we grab the first element ("28537" in this case) and then pull
+ # out the 'date' field.
+            # dict views are not iterators; wrap in iter() before next()
+            pushid = next(iter(contents.keys()))
+ self.info("Pushid is: %s" % pushid)
+ pushdate = contents[pushid]["date"]
+ self.info("Pushdate is: %s" % pushdate)
+ return PushInfo(pushid, pushdate)
+
+ except Exception:
+ self.exception("Failed to get push info from hg.mozilla.org")
+ raise
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ pass
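A hedged end-to-end sketch of driving MercurialVCS directly (paths and
revision hypothetical), mirroring how VCSMixin constructs it in vcsbase.py:

    vcs = MercurialVCS(
        config={},
        vcs_config={
            "repo": "https://hg.mozilla.org/mozilla-central",
            "dest": "/builds/src",
            "revision": "abc123",
            "vcs_share_base": "/builds/hg-shared",
        },
    )
    node = vcs.ensure_repo_and_revision()  # the 40-char node checked out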
diff --git a/testing/mozharness/mozharness/base/vcs/vcsbase.py b/testing/mozharness/mozharness/base/vcs/vcsbase.py
new file mode 100755
index 0000000000..c587a8b1ca
--- /dev/null
+++ b/testing/mozharness/mozharness/base/vcs/vcsbase.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Generic VCS support.
+"""
+
+import os
+import sys
+from copy import deepcopy
+
+from mozharness.base.errors import VCSException
+from mozharness.base.log import FATAL
+from mozharness.base.script import BaseScript
+from mozharness.base.vcs.gittool import GittoolVCS
+from mozharness.base.vcs.mercurial import MercurialVCS
+
+sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.dirname(sys.path[0]))))
+
+
+# Update this with supported VCS name : VCS object
+VCS_DICT = {
+ "hg": MercurialVCS,
+ "gittool": GittoolVCS,
+}
+
+
+# VCSMixin {{{1
+class VCSMixin(object):
+ """Basic VCS methods that are vcs-agnostic.
+ The vcs_class handles all the vcs-specific tasks.
+ """
+
+ def query_dest(self, kwargs):
+ if "dest" in kwargs:
+ return kwargs["dest"]
+ dest = os.path.basename(kwargs["repo"])
+        # strip a trailing ".git" (git repos); replace() would also hit
+        # ".git" elsewhere in the name
+        if dest.endswith(".git"):
+            dest = dest[: -len(".git")]
+ return dest
+
+ def _get_revision(self, vcs_obj, dest):
+ try:
+ got_revision = vcs_obj.ensure_repo_and_revision()
+ if got_revision:
+ return got_revision
+ except VCSException:
+ self.rmtree(dest)
+ raise
+
+ def _get_vcs_class(self, vcs):
+ vcs = vcs or self.config.get("default_vcs", getattr(self, "default_vcs", None))
+ vcs_class = VCS_DICT.get(vcs)
+ return vcs_class
+
+ def vcs_checkout(self, vcs=None, error_level=FATAL, **kwargs):
+ """Check out a single repo."""
+ c = self.config
+ vcs_class = self._get_vcs_class(vcs)
+ if not vcs_class:
+ self.error("Running vcs_checkout with kwargs %s" % str(kwargs))
+ raise VCSException("No VCS set!")
+ # need a better way to do this.
+ if "dest" not in kwargs:
+ kwargs["dest"] = self.query_dest(kwargs)
+ if "vcs_share_base" not in kwargs:
+ kwargs["vcs_share_base"] = c.get(
+ "%s_share_base" % vcs, c.get("vcs_share_base")
+ )
+ vcs_obj = vcs_class(
+ log_obj=self.log_obj,
+ config=self.config,
+ vcs_config=kwargs,
+ script_obj=self,
+ )
+ return self.retry(
+ self._get_revision,
+ error_level=error_level,
+ error_message="Automation Error: Can't checkout %s!" % kwargs["repo"],
+ args=(vcs_obj, kwargs["dest"]),
+ )
+
+ def vcs_checkout_repos(
+ self, repo_list, parent_dir=None, tag_override=None, **kwargs
+ ):
+ """Check out a list of repos."""
+ orig_dir = os.getcwd()
+ c = self.config
+ if not parent_dir:
+ parent_dir = os.path.join(c["base_work_dir"], c["work_dir"])
+ self.mkdir_p(parent_dir)
+ self.chdir(parent_dir)
+ revision_dict = {}
+ kwargs_orig = deepcopy(kwargs)
+ for repo_dict in repo_list:
+ kwargs = deepcopy(kwargs_orig)
+ kwargs.update(repo_dict)
+ if tag_override:
+ kwargs["branch"] = tag_override
+ dest = self.query_dest(kwargs)
+ revision_dict[dest] = {"repo": kwargs["repo"]}
+ revision_dict[dest]["revision"] = self.vcs_checkout(**kwargs)
+ self.chdir(orig_dir)
+ return revision_dict
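+
+    # Illustrative (values hypothetical): a repo_list entry and the shape of
+    # the returned revision_dict:
+    #
+    #   repo_list = [{"repo": "https://hg.mozilla.org/build/tools",
+    #                 "branch": "default"}]
+    #   -> {"tools": {"repo": "https://hg.mozilla.org/build/tools",
+    #                 "revision": "<40-char node>"}}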
+
+ def vcs_query_pushinfo(self, repository, revision, vcs=None):
+ """Query the pushid/pushdate of a repository/revision
+ Returns a namedtuple with "pushid" and "pushdate" elements
+ """
+ vcs_class = self._get_vcs_class(vcs)
+ if not vcs_class:
+ raise VCSException("No VCS set in vcs_query_pushinfo!")
+ vcs_obj = vcs_class(
+ log_obj=self.log_obj,
+ config=self.config,
+ script_obj=self,
+ )
+ return vcs_obj.query_pushinfo(repository, revision)
+
+
+class VCSScript(VCSMixin, BaseScript):
+ def __init__(self, **kwargs):
+ super(VCSScript, self).__init__(**kwargs)
+
+ def pull(self, repos=None, parent_dir=None):
+ repos = repos or self.config.get("repos")
+ if not repos:
+ self.info("Pull has nothing to do!")
+ return
+ dirs = self.query_abs_dirs()
+ parent_dir = parent_dir or dirs["abs_work_dir"]
+ return self.vcs_checkout_repos(repos, parent_dir=parent_dir)
+
+
+# Specific VCS stubs {{{1
+# For ease of use.
+# This is here instead of mercurial.py because importing MercurialVCS into
+# vcsbase from mercurial, and importing VCSScript into mercurial from
+# vcsbase, was giving me issues.
+class MercurialScript(VCSScript):
+ default_vcs = "hg"
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ pass
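A minimal, hypothetical script built on these classes; the config keys
mirror those queried by pull() above, and run_and_exit() comes from
BaseScript:

    class CheckoutScript(MercurialScript):
        def __init__(self):
            super(CheckoutScript, self).__init__(
                config={
                    "repos": [{"repo": "https://hg.mozilla.org/build/tools"}],
                    "base_work_dir": ".",
                    "work_dir": "build",
                },
                all_actions=["pull"],
            )

    if __name__ == "__main__":
        CheckoutScript().run_and_exit()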
diff --git a/testing/mozharness/mozharness/lib/__init__.py b/testing/mozharness/mozharness/lib/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozharness/mozharness/lib/__init__.py
diff --git a/testing/mozharness/mozharness/lib/python/__init__.py b/testing/mozharness/mozharness/lib/python/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozharness/mozharness/lib/python/__init__.py
diff --git a/testing/mozharness/mozharness/lib/python/authentication.py b/testing/mozharness/mozharness/lib/python/authentication.py
new file mode 100644
index 0000000000..5d7330e357
--- /dev/null
+++ b/testing/mozharness/mozharness/lib/python/authentication.py
@@ -0,0 +1,60 @@
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+"""module for http authentication operations"""
+import getpass
+import os
+
+CREDENTIALS_PATH = os.path.expanduser("~/.mozilla/credentials.cfg")
+DIRNAME = os.path.dirname(CREDENTIALS_PATH)
+LDAP_PASSWORD = None
+
+
+def get_credentials():
+ """Returns http credentials.
+
+ The user's email address is stored on disk (for convenience in the future)
+ while the password is requested from the user on first invocation.
+ """
+ global LDAP_PASSWORD
+ if not os.path.exists(DIRNAME):
+ os.makedirs(DIRNAME)
+
+ if os.path.isfile(CREDENTIALS_PATH):
+ with open(CREDENTIALS_PATH, "r") as file_handler:
+ content = file_handler.read().splitlines()
+
+ https_username = content[0].strip()
+
+ if len(content) > 1:
+ # We want to remove files which contain the password
+ os.remove(CREDENTIALS_PATH)
+ else:
+ try:
+ # pylint: disable=W1609
+ input_method = raw_input
+ except NameError:
+ input_method = input
+
+ https_username = input_method("Please enter your full LDAP email address: ")
+
+ with open(CREDENTIALS_PATH, "w+") as file_handler:
+ file_handler.write("%s\n" % https_username)
+
+ os.chmod(CREDENTIALS_PATH, 0o600)
+
+ if not LDAP_PASSWORD:
+ print("Please enter your LDAP password (we won't store it):")
+ LDAP_PASSWORD = getpass.getpass()
+
+ return https_username, LDAP_PASSWORD
+
+
+def get_credentials_path():
+ if os.path.isfile(CREDENTIALS_PATH):
+ get_credentials()
+
+ return CREDENTIALS_PATH
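Usage is a single call (a sketch; the module prompts interactively on first
use and caches the password in LDAP_PASSWORD for the rest of the process):

    from mozharness.lib.python.authentication import get_credentials

    username, password = get_credentials()  # prompts only on first call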
diff --git a/testing/mozharness/mozharness/mozilla/__init__.py b/testing/mozharness/mozharness/mozilla/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/__init__.py
diff --git a/testing/mozharness/mozharness/mozilla/automation.py b/testing/mozharness/mozharness/mozilla/automation.py
new file mode 100644
index 0000000000..6beabc55b2
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/automation.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Code to integration with automation.
+"""
+
+try:
+ import simplejson as json
+
+ assert json
+except ImportError:
+ import json
+
+from mozharness.base.log import ERROR, INFO, WARNING
+
+TBPL_SUCCESS = "SUCCESS"
+TBPL_WARNING = "WARNING"
+TBPL_FAILURE = "FAILURE"
+TBPL_EXCEPTION = "EXCEPTION"
+TBPL_RETRY = "RETRY"
+TBPL_STATUS_DICT = {
+ TBPL_SUCCESS: INFO,
+ TBPL_WARNING: WARNING,
+ TBPL_FAILURE: ERROR,
+ TBPL_EXCEPTION: ERROR,
+ TBPL_RETRY: WARNING,
+}
+EXIT_STATUS_DICT = {
+ TBPL_SUCCESS: 0,
+ TBPL_WARNING: 1,
+ TBPL_FAILURE: 2,
+ TBPL_EXCEPTION: 3,
+ TBPL_RETRY: 4,
+}
+TBPL_WORST_LEVEL_TUPLE = (
+ TBPL_RETRY,
+ TBPL_EXCEPTION,
+ TBPL_FAILURE,
+ TBPL_WARNING,
+ TBPL_SUCCESS,
+)
+
+
+class AutomationMixin(object):
+ worst_status = TBPL_SUCCESS
+ properties = {}
+
+ def tryserver_email(self):
+ pass
+
+ def record_status(self, tbpl_status, level=None, set_return_code=True):
+ if tbpl_status not in TBPL_STATUS_DICT:
+ self.error("record_status() doesn't grok the status %s!" % tbpl_status)
+ else:
+ if not level:
+ level = TBPL_STATUS_DICT[tbpl_status]
+ self.worst_status = self.worst_level(
+ tbpl_status, self.worst_status, TBPL_WORST_LEVEL_TUPLE
+ )
+ if self.worst_status != tbpl_status:
+ self.info(
+ "Current worst status %s is worse; keeping it." % self.worst_status
+ )
+ self.add_summary("# TBPL %s #" % self.worst_status, level=level)
+ if set_return_code:
+ self.return_code = EXIT_STATUS_DICT[self.worst_status]
+
+ def add_failure(self, key, message="%(key)s failed.", level=ERROR):
+ if key not in self.failures:
+ self.failures.append(key)
+ self.add_summary(message % {"key": key}, level=level)
+ self.record_status(TBPL_FAILURE)
+
+ def query_failure(self, key):
+ return key in self.failures
+
+ def query_is_nightly(self):
+ """returns whether or not the script should run as a nightly build."""
+ return bool(self.config.get("nightly_build"))
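A sketch of the escalation semantics (hypothetical calls inside a script
mixing in AutomationMixin): statuses only ever get worse, per
TBPL_WORST_LEVEL_TUPLE, and the return code follows EXIT_STATUS_DICT:

    self.record_status(TBPL_WARNING)  # worst_status WARNING, return_code 1
    self.record_status(TBPL_FAILURE)  # worst_status FAILURE, return_code 2
    self.record_status(TBPL_SUCCESS)  # FAILURE kept; return_code stays 2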
diff --git a/testing/mozharness/mozharness/mozilla/bouncer/__init__.py b/testing/mozharness/mozharness/mozilla/bouncer/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/bouncer/__init__.py
diff --git a/testing/mozharness/mozharness/mozilla/bouncer/submitter.py b/testing/mozharness/mozharness/mozilla/bouncer/submitter.py
new file mode 100644
index 0000000000..e9289d2be6
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/bouncer/submitter.py
@@ -0,0 +1,134 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import base64
+import socket
+import sys
+import traceback
+from xml.dom.minidom import parseString
+
+from mozharness.base.log import FATAL
+
+try:
+ import httplib
+except ImportError:
+ import http.client as httplib
+try:
+ from urllib import quote, urlencode
+except ImportError:
+ from urllib.parse import quote, urlencode
+try:
+ from urllib2 import HTTPError, Request, URLError, urlopen
+except ImportError:
+ from urllib.request import HTTPError, Request, URLError, urlopen
+
+
+class BouncerSubmitterMixin(object):
+ def query_credentials(self):
+ if self.credentials:
+ return self.credentials
+ global_dict = {}
+ local_dict = {}
+        # read the credentials file explicitly so the handle gets closed
+        with open(self.config["credentials_file"], "rb") as fh:
+            source = fh.read()
+        exec(
+            compile(source, self.config["credentials_file"], "exec"),
+            global_dict,
+            local_dict,
+        )
+ self.credentials = (local_dict["tuxedoUsername"], local_dict["tuxedoPassword"])
+ return self.credentials
+
+ def api_call(self, route, data, error_level=FATAL, retry_config=None):
+ retry_args = dict(
+ failure_status=None,
+ retry_exceptions=(
+ HTTPError,
+ URLError,
+ httplib.BadStatusLine,
+ socket.timeout,
+ socket.error,
+ ),
+ error_message="call to %s failed" % (route),
+ error_level=error_level,
+ )
+
+ if retry_config:
+ retry_args.update(retry_config)
+
+ return self.retry(self._api_call, args=(route, data), **retry_args)
+
+ def _api_call(self, route, data):
+ api_prefix = self.config["bouncer-api-prefix"]
+ api_url = "%s/%s" % (api_prefix, route)
+ request = Request(api_url)
+ if data:
+ post_data = urlencode(data, doseq=True)
+            # Request.add_data() is Python 2 only; assigning .data works on
+            # both Python 2 and 3
+            request.data = post_data.encode("utf-8")
+ self.info("POST data: %s" % post_data)
+ credentials = self.query_credentials()
+ if credentials:
+            # base64.encodestring() requires bytes and was removed in
+            # Python 3.9; b64encode() is the portable spelling
+            auth = base64.b64encode(("%s:%s" % credentials).encode("utf-8"))
+            request.add_header("Authorization", "Basic %s" % auth.decode("ascii"))
+ try:
+ self.info("Submitting to %s" % api_url)
+ res = urlopen(request, timeout=60).read()
+ self.info("Server response")
+ self.info(res)
+ return res
+ except HTTPError as e:
+ self.warning("Cannot access %s" % api_url)
+ traceback.print_exc(file=sys.stdout)
+ self.warning("Returned page source:")
+ self.warning(e.read())
+ raise
+ except URLError:
+ traceback.print_exc(file=sys.stdout)
+ self.warning("Cannot access %s" % api_url)
+ raise
+ except socket.timeout as e:
+ self.warning("Timed out accessing %s: %s" % (api_url, e))
+ raise
+ except socket.error as e:
+ self.warning("Socket error when accessing %s: %s" % (api_url, e))
+ raise
+ except httplib.BadStatusLine as e:
+ self.warning("BadStatusLine accessing %s: %s" % (api_url, e))
+ raise
+
+ def product_exists(self, product_name):
+ self.info("Checking if %s already exists" % product_name)
+ res = self.api_call("product_show?product=%s" % quote(product_name), data=None)
+ try:
+ xml = parseString(res)
+ # API returns <products/> if the product doesn't exist
+ products_found = len(xml.getElementsByTagName("product"))
+ self.info("Products found: %s" % products_found)
+ return bool(products_found)
+ except Exception as e:
+ self.warning("Error parsing XML: %s" % e)
+ self.warning("Assuming %s does not exist" % product_name)
+ # ignore XML parsing errors
+ return False
+
+ def api_add_product(self, product_name, add_locales, ssl_only=False):
+ data = {
+ "product": product_name,
+ }
+ if self.locales and add_locales:
+ data["languages"] = self.locales
+ if ssl_only:
+ # Send "true" as a string
+ data["ssl_only"] = "true"
+ self.api_call("product_add/", data)
+
+ def api_add_location(self, product_name, bouncer_platform, path):
+ data = {
+ "product": product_name,
+ "os": bouncer_platform,
+ "path": path,
+ }
+ self.api_call("location_add/", data)
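For reference, the form-encoded payloads these helpers POST (product and
path values hypothetical):

    # product_add/
    {"product": "Firefox-99.0", "languages": ["en-US", "de"],
     "ssl_only": "true"}

    # location_add/
    {"product": "Firefox-99.0", "os": "win64",
     "path": "/firefox/releases/99.0/win64/:lang/firefox-99.0.exe"}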
diff --git a/testing/mozharness/mozharness/mozilla/building/__init__.py b/testing/mozharness/mozharness/mozilla/building/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/building/__init__.py
diff --git a/testing/mozharness/mozharness/mozilla/building/buildbase.py b/testing/mozharness/mozharness/mozilla/building/buildbase.py
new file mode 100755
index 0000000000..9bdc2b5b16
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/building/buildbase.py
@@ -0,0 +1,1522 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+""" buildbase.py.
+
+provides a base class for fx desktop builds
+author: Jordan Lund
+
+"""
+import copy
+import json
+import os
+import re
+import sys
+import time
+import uuid
+from datetime import datetime
+
+import six
+import yaml
+from mozharness.base.config import DEFAULT_CONFIG_PATH, BaseConfig, parse_config_file
+from mozharness.base.errors import MakefileErrorList
+from mozharness.base.log import ERROR, FATAL, OutputParser
+from mozharness.base.python import PerfherderResourceOptionsMixin, VirtualenvMixin
+from mozharness.base.script import PostScriptRun
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.automation import (
+ EXIT_STATUS_DICT,
+ TBPL_FAILURE,
+ TBPL_RETRY,
+ TBPL_STATUS_DICT,
+ TBPL_SUCCESS,
+ TBPL_WORST_LEVEL_TUPLE,
+ AutomationMixin,
+)
+from mozharness.mozilla.secrets import SecretsMixin
+from yaml import YAMLError
+
+AUTOMATION_EXIT_CODES = sorted(EXIT_STATUS_DICT.values())
+
+MISSING_CFG_KEY_MSG = "The key '%s' could not be determined. \
+Please add this to your config."
+
+ERROR_MSGS = {
+ "comments_undetermined": '"comments" could not be determined. This may be \
+because it was a forced build.',
+ "tooltool_manifest_undetermined": '"tooltool_manifest_src" not set, \
+Skipping run_tooltool...',
+}
+
+
+# Output Parsers
+
+TBPL_UPLOAD_ERRORS = [
+ {
+ "regex": re.compile("Connection timed out"),
+ "level": TBPL_RETRY,
+ },
+ {
+ "regex": re.compile("Connection reset by peer"),
+ "level": TBPL_RETRY,
+ },
+ {
+ "regex": re.compile("Connection refused"),
+ "level": TBPL_RETRY,
+ },
+]
+
+
+class MakeUploadOutputParser(OutputParser):
+ tbpl_error_list = TBPL_UPLOAD_ERRORS
+
+ def __init__(self, **kwargs):
+ super(MakeUploadOutputParser, self).__init__(**kwargs)
+ self.tbpl_status = TBPL_SUCCESS
+
+ def parse_single_line(self, line):
+ # let's check for retry errors which will give log levels:
+ # tbpl status as RETRY and mozharness status as WARNING
+ for error_check in self.tbpl_error_list:
+ if error_check["regex"].search(line):
+ self.num_warnings += 1
+ self.warning(line)
+ self.tbpl_status = self.worst_level(
+ error_check["level"],
+ self.tbpl_status,
+ levels=TBPL_WORST_LEVEL_TUPLE,
+ )
+ break
+ else:
+ self.info(line)
+
+
+class MozconfigPathError(Exception):
+ """
+ There was an error getting a mozconfig path from a mozharness config.
+ """
+
+
+def get_mozconfig_path(script, config, dirs):
+ """
+ Get the path to the mozconfig file to use from a mozharness config.
+
+ :param script: The object to interact with the filesystem through.
+    :type script: ScriptMixin
+
+ :param config: The mozharness config to inspect.
+ :type config: dict
+
+ :param dirs: The directories specified for this build.
+ :type dirs: dict
+ """
+ COMPOSITE_KEYS = {"mozconfig_variant", "app_name", "mozconfig_platform"}
+ have_composite_mozconfig = COMPOSITE_KEYS <= set(config.keys())
+ have_partial_composite_mozconfig = len(COMPOSITE_KEYS & set(config.keys())) > 0
+ have_src_mozconfig = "src_mozconfig" in config
+ have_src_mozconfig_manifest = "src_mozconfig_manifest" in config
+
+ # first determine the mozconfig path
+ if have_partial_composite_mozconfig and not have_composite_mozconfig:
+ raise MozconfigPathError(
+            "All or none of 'app_name', 'mozconfig_platform' and 'mozconfig_variant' must be "
+ "in the config in order to determine the mozconfig."
+ )
+ elif have_composite_mozconfig and have_src_mozconfig:
+ raise MozconfigPathError(
+ "'src_mozconfig' or 'mozconfig_variant' must be "
+ "in the config but not both in order to determine the mozconfig."
+ )
+ elif have_composite_mozconfig and have_src_mozconfig_manifest:
+ raise MozconfigPathError(
+ "'src_mozconfig_manifest' or 'mozconfig_variant' must be "
+ "in the config but not both in order to determine the mozconfig."
+ )
+ elif have_src_mozconfig and have_src_mozconfig_manifest:
+ raise MozconfigPathError(
+ "'src_mozconfig' or 'src_mozconfig_manifest' must be "
+ "in the config but not both in order to determine the mozconfig."
+ )
+ elif have_composite_mozconfig:
+ src_mozconfig = "%(app_name)s/config/mozconfigs/%(platform)s/%(variant)s" % {
+ "app_name": config["app_name"],
+ "platform": config["mozconfig_platform"],
+ "variant": config["mozconfig_variant"],
+ }
+ abs_mozconfig_path = os.path.join(dirs["abs_src_dir"], src_mozconfig)
+ elif have_src_mozconfig:
+ abs_mozconfig_path = os.path.join(
+ dirs["abs_src_dir"], config.get("src_mozconfig")
+ )
+ elif have_src_mozconfig_manifest:
+ manifest = os.path.join(dirs["abs_work_dir"], config["src_mozconfig_manifest"])
+ if not os.path.exists(manifest):
+ raise MozconfigPathError(
+ 'src_mozconfig_manifest: "%s" not found. Does it exist?' % (manifest,)
+ )
+ else:
+ with script.opened(manifest, error_level=ERROR) as (fh, err):
+ if err:
+ raise MozconfigPathError(
+                        "%s exists but could not be read" % manifest
+ )
+ abs_mozconfig_path = os.path.join(
+ dirs["abs_src_dir"], json.load(fh)["gecko_path"]
+ )
+ else:
+ raise MozconfigPathError(
+ "Must provide 'app_name', 'mozconfig_platform' and 'mozconfig_variant'; "
+ "or one of 'src_mozconfig' or 'src_mozconfig_manifest' in the config "
+ "in order to determine the mozconfig."
+ )
+
+ return abs_mozconfig_path
+
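+# Illustrative (values hypothetical): the composite form above composes
+#
+#   config = {"app_name": "browser", "mozconfig_platform": "linux64",
+#             "mozconfig_variant": "debug"}
+#       -> <abs_src_dir>/browser/config/mozconfigs/linux64/debug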
+
+class BuildingConfig(BaseConfig):
+ # TODO add nosetests for this class
+ def get_cfgs_from_files(self, all_config_files, options):
+ """
+ Determine the configuration from the normal options and from
+ `--branch`, `--build-pool`, and `--custom-build-variant-cfg`. If the
+ files for any of the latter options are also given with `--config-file`
+ or `--opt-config-file`, they are only parsed once.
+
+ The build pool has highest precedence, followed by branch, build
+ variant, and any normally-specified configuration files.
+ """
+ # override from BaseConfig
+
+ # this is what we will return. It will represent each config
+ # file name and its associated dict
+ # eg ('builds/branch_specifics.py', {'foo': 'bar'})
+ all_config_dicts = []
+ # important config files
+ variant_cfg_file = pool_cfg_file = ""
+
+ # we want to make the order in which the options were given
+ # not matter. ie: you can supply --branch before --build-pool
+ # or vice versa and the hierarchy will not be different
+
+ # ### The order from highest precedence to lowest is:
+ # # There can only be one of these...
+    # 1) build_pool: one of the staging, pre-prod, or prod cfgs
+ # 2) build_variant: these could be known like asan and debug
+ # or a custom config
+ #
+ # # There can be many of these
+ # 3) all other configs: these are any configs that are passed with
+    #                       --cfg and --opt-cfg. Their order is kept as
+    #                       passed on the cmd line. This behaviour
+    #                       maintains what happens by default in
+    #                       mozharness
+
+ # so, let's first assign the configs that hold a known position of
+ # importance (1 through 3)
+ for i, cf in enumerate(all_config_files):
+ if options.build_pool:
+ if cf == BuildOptionParser.build_pool_cfg_file:
+ pool_cfg_file = all_config_files[i]
+
+ if cf == options.build_variant:
+ variant_cfg_file = all_config_files[i]
+
+        # now remove these from the list if there were any.
+ # we couldn't pop() these in the above loop as mutating a list while
+ # iterating through it causes spurious results :)
+ for cf in [pool_cfg_file, variant_cfg_file]:
+ if cf:
+ all_config_files.remove(cf)
+
+ # now let's update config with the remaining config files.
+ # this functionality is the same as the base class
+ all_config_dicts.extend(
+ super(BuildingConfig, self).get_cfgs_from_files(all_config_files, options)
+ )
+
+ # stack variant, branch, and pool cfg files on top of that,
+ # if they are present, in that order
+ if variant_cfg_file:
+ # take the whole config
+ all_config_dicts.append(
+ (variant_cfg_file, parse_config_file(variant_cfg_file))
+ )
+ config_paths = options.config_paths or ["."]
+ if pool_cfg_file:
+ # take only the specific pool. If we are here, the pool
+ # must be present
+ build_pool_configs = parse_config_file(
+ pool_cfg_file, search_path=config_paths + [DEFAULT_CONFIG_PATH]
+ )
+ all_config_dicts.append(
+ (pool_cfg_file, build_pool_configs[options.build_pool])
+ )
+ return all_config_dicts
+
+
+# noinspection PyUnusedLocal
+class BuildOptionParser(object):
+ # TODO add nosetests for this class
+ platform = None
+ bits = None
+
+ # add to this list and you can automagically do things like
+ # --custom-build-variant-cfg asan
+ # and the script will pull up the appropriate path for the config
+ # against the current platform and bits.
+ # *It will warn and fail if there is not a config for the current
+ # platform/bits
+ path_base = "builds/releng_sub_%s_configs/"
+ build_variants = {
+ "add-on-devel": path_base + "%s_add-on-devel.py",
+ "asan": path_base + "%s_asan.py",
+ "asan-tc": path_base + "%s_asan_tc.py",
+ "asan-reporter-tc": path_base + "%s_asan_reporter_tc.py",
+ "fuzzing-asan-tc": path_base + "%s_fuzzing_asan_tc.py",
+ "tsan-tc": path_base + "%s_tsan_tc.py",
+ "fuzzing-tsan-tc": path_base + "%s_fuzzing_tsan_tc.py",
+ "cross-debug": path_base + "%s_cross_debug.py",
+ "cross-debug-searchfox": path_base + "%s_cross_debug_searchfox.py",
+ "cross-noopt-debug": path_base + "%s_cross_noopt_debug.py",
+ "cross-fuzzing-asan": path_base + "%s_cross_fuzzing_asan.py",
+ "cross-fuzzing-debug": path_base + "%s_cross_fuzzing_debug.py",
+ "debug": path_base + "%s_debug.py",
+ "fuzzing-debug": path_base + "%s_fuzzing_debug.py",
+ "asan-and-debug": path_base + "%s_asan_and_debug.py",
+ "asan-tc-and-debug": path_base + "%s_asan_tc_and_debug.py",
+ "stat-and-debug": path_base + "%s_stat_and_debug.py",
+ "code-coverage-debug": path_base + "%s_code_coverage_debug.py",
+ "code-coverage-opt": path_base + "%s_code_coverage_opt.py",
+ "source": path_base + "%s_source.py",
+ "noopt-debug": path_base + "%s_noopt_debug.py",
+ "arm-gradle-dependencies": path_base
+ + "%s_arm_gradle_dependencies.py", # NOQA: E501
+ "arm": path_base + "%s_arm.py",
+ "arm-lite": path_base + "%s_arm_lite.py",
+ "arm-beta": path_base + "%s_arm_beta.py",
+ "arm-beta-debug": path_base + "%s_arm_beta_debug.py",
+ "arm-debug": path_base + "%s_arm_debug.py",
+ "arm-lite-debug": path_base + "%s_arm_debug_lite.py",
+ "arm-debug-ccov": path_base + "%s_arm_debug_ccov.py",
+ "arm-debug-searchfox": path_base + "%s_arm_debug_searchfox.py",
+ "arm-gradle": path_base + "%s_arm_gradle.py",
+ "rusttests": path_base + "%s_rusttests.py",
+ "rusttests-debug": path_base + "%s_rusttests_debug.py",
+ "x86": path_base + "%s_x86.py",
+ "x86-lite": path_base + "%s_x86_lite.py",
+ "x86-beta": path_base + "%s_x86_beta.py",
+ "x86-beta-debug": path_base + "%s_x86_beta_debug.py",
+ "x86-debug": path_base + "%s_x86_debug.py",
+ "x86-lite-debug": path_base + "%s_x86_debug_lite.py",
+ "x86-profile-generate": path_base + "%s_x86_profile_generate.py",
+ "x86_64": path_base + "%s_x86_64.py",
+ "x86_64-lite": path_base + "%s_x86_64_lite.py",
+ "x86_64-beta": path_base + "%s_x86_64_beta.py",
+ "x86_64-beta-debug": path_base + "%s_x86_64_beta_debug.py",
+ "x86_64-debug": path_base + "%s_x86_64_debug.py",
+ "x86_64-lite-debug": path_base + "%s_x86_64_debug_lite.py",
+ "x86_64-debug-isolated-process": path_base
+ + "%s_x86_64_debug_isolated_process.py",
+ "x86_64-profile-generate": path_base + "%s_x86_64_profile_generate.py",
+ "arm-partner-sample1": path_base + "%s_arm_partner_sample1.py",
+ "aarch64": path_base + "%s_aarch64.py",
+ "aarch64-lite": path_base + "%s_aarch64_lite.py",
+ "aarch64-beta": path_base + "%s_aarch64_beta.py",
+ "aarch64-beta-debug": path_base + "%s_aarch64_beta_debug.py",
+ "aarch64-pgo": path_base + "%s_aarch64_pgo.py",
+ "aarch64-debug": path_base + "%s_aarch64_debug.py",
+ "aarch64-lite-debug": path_base + "%s_aarch64_debug_lite.py",
+ "android-geckoview-docs": path_base + "%s_geckoview_docs.py",
+ "valgrind": path_base + "%s_valgrind.py",
+ }
+ build_pool_cfg_file = "builds/build_pool_specifics.py"
+
+ @classmethod
+ def _query_pltfrm_and_bits(cls, target_option, options):
+ """determine platform and bits
+
+        This can come either from a supplied --platform and --bits
+        or be parsed from the given config file names.
+ """
+ error_msg = (
+ "Whoops!\nYou are trying to pass a shortname for "
+ "%s. \nHowever, I need to know the %s to find the appropriate "
+ 'filename. You can tell me by passing:\n\t"%s" or a config '
+ 'filename via "--config" with %s in it. \nIn either case, these '
+ "option arguments must come before --custom-build-variant."
+ )
+ current_config_files = options.config_files or []
+ if not cls.bits:
+ # --bits has not been supplied
+ # lets parse given config file names for 32 or 64
+ for cfg_file_name in current_config_files:
+ if "32" in cfg_file_name:
+ cls.bits = "32"
+ break
+ if "64" in cfg_file_name:
+ cls.bits = "64"
+ break
+ else:
+ sys.exit(error_msg % (target_option, "bits", "--bits", '"32" or "64"'))
+
+ if not cls.platform:
+ # --platform has not been supplied
+ # lets parse given config file names for platform
+ for cfg_file_name in current_config_files:
+ if "windows" in cfg_file_name:
+ cls.platform = "windows"
+ break
+ if "mac" in cfg_file_name:
+ cls.platform = "mac"
+ break
+ if "linux" in cfg_file_name:
+ cls.platform = "linux"
+ break
+ if "android" in cfg_file_name:
+ cls.platform = "android"
+ break
+ else:
+ sys.exit(
+ error_msg
+ % (
+ target_option,
+ "platform",
+ "--platform",
+ '"linux", "windows", "mac", or "android"',
+ )
+ )
+ return cls.bits, cls.platform
+
+ @classmethod
+ def find_variant_cfg_path(cls, opt, value, parser):
+ valid_variant_cfg_path = None
+ # first let's see if we were given a valid short-name
+ if cls.build_variants.get(value):
+ bits, pltfrm = cls._query_pltfrm_and_bits(opt, parser.values)
+ prospective_cfg_path = cls.build_variants[value] % (pltfrm, bits)
+ else:
+ # this is either an incomplete path or an invalid key in
+ # build_variants
+ prospective_cfg_path = value
+
+ if os.path.exists(prospective_cfg_path):
+ # now let's see if we were given a valid pathname
+ valid_variant_cfg_path = value
+ else:
+ # FIXME: We should actually wait until we have parsed all arguments
+ # before looking at this, otherwise the behavior will depend on the
+ # order of arguments. But that isn't a problem as long as --extra-config-path
+ # is always passed first.
+ extra_config_paths = parser.values.config_paths or []
+ config_paths = extra_config_paths + [DEFAULT_CONFIG_PATH]
+ # let's take our prospective_cfg_path and see if we can
+ # determine an existing file
+ for path in config_paths:
+ if os.path.exists(os.path.join(path, prospective_cfg_path)):
+ # success! we found a config file
+ valid_variant_cfg_path = os.path.join(path, prospective_cfg_path)
+ break
+ return valid_variant_cfg_path, prospective_cfg_path
+
+ @classmethod
+ def set_build_variant(cls, option, opt, value, parser):
+ """sets an extra config file.
+
+ This is done by either taking an existing filepath or by taking a valid
+ shortname coupled with known platform/bits.
+ """
+ valid_variant_cfg_path, prospective_cfg_path = cls.find_variant_cfg_path(
+ "--custom-build-variant-cfg", value, parser
+ )
+
+ if not valid_variant_cfg_path:
+ # either the value was an indeterminable path or an invalid short
+ # name
+            sys.exit(
+                "Whoops!\n'--custom-build-variant-cfg' was passed but an "
+                "appropriate config file could not be determined. Tried "
+                "using: '%s' but it was not:"
+                "\n\t-- a valid shortname: %s "
+                "\n\t-- a valid variant for the given platform and bits."
+                % (prospective_cfg_path, str(list(cls.build_variants.keys())))
+            )
+ parser.values.config_files.append(valid_variant_cfg_path)
+        setattr(parser.values, option.dest, value)  # the build variant
+
+ @classmethod
+ def set_build_pool(cls, option, opt, value, parser):
+        # first, add the build pool file, which may contain pool-specific
+        # keys/values; then store the pool name
+ parser.values.config_files.append(cls.build_pool_cfg_file)
+ setattr(parser.values, option.dest, value) # the pool
+
+ @classmethod
+ def set_build_branch(cls, option, opt, value, parser):
+ # Store the branch name we are using
+ setattr(parser.values, option.dest, value) # the branch name
+
+ @classmethod
+ def set_platform(cls, option, opt, value, parser):
+ cls.platform = value
+ setattr(parser.values, option.dest, value)
+
+ @classmethod
+ def set_bits(cls, option, opt, value, parser):
+ cls.bits = value
+ setattr(parser.values, option.dest, value)
+
+
+# this global depends on BuildOptionParser and therefore cannot go at the
+# top of the file
+BUILD_BASE_CONFIG_OPTIONS = [
+ [
+ ["--developer-run"],
+ {
+ "action": "store_false",
+ "dest": "is_automation",
+ "default": True,
+ "help": "If this is running outside of Mozilla's build"
+ "infrastructure, use this option. It ignores actions"
+ "that are not needed and adds config checks.",
+ },
+ ],
+ [
+ ["--platform"],
+ {
+ "action": "callback",
+ "callback": BuildOptionParser.set_platform,
+ "type": "string",
+ "dest": "platform",
+ "help": "Sets the platform we are running this against"
+ " valid values: 'windows', 'mac', 'linux'",
+ },
+ ],
+ [
+ ["--bits"],
+ {
+ "action": "callback",
+ "callback": BuildOptionParser.set_bits,
+ "type": "string",
+ "dest": "bits",
+ "help": "Sets which bits we are building this against"
+ " valid values: '32', '64'",
+ },
+ ],
+ [
+ ["--custom-build-variant-cfg"],
+ {
+ "action": "callback",
+ "callback": BuildOptionParser.set_build_variant,
+ "type": "string",
+ "dest": "build_variant",
+ "help": "Sets the build type and will determine appropriate"
+ " additional config to use. Either pass a config path"
+ " or use a valid shortname from: "
+ "%s" % (list(BuildOptionParser.build_variants.keys()),),
+ },
+ ],
+ [
+ ["--build-pool"],
+ {
+ "action": "callback",
+ "callback": BuildOptionParser.set_build_pool,
+ "type": "string",
+ "dest": "build_pool",
+ "help": "This will update the config with specific pool"
+ " environment keys/values. The dicts for this are"
+ " in %s\nValid values: staging or"
+ " production" % ("builds/build_pool_specifics.py",),
+ },
+ ],
+ [
+ ["--branch"],
+ {
+ "action": "callback",
+ "callback": BuildOptionParser.set_build_branch,
+ "type": "string",
+ "dest": "branch",
+ "help": "This sets the branch we will be building this for.",
+ },
+ ],
+ [
+ ["--enable-nightly"],
+ {
+ "action": "store_true",
+ "dest": "nightly_build",
+ "default": False,
+ "help": "Sets the build to run in nightly mode",
+ },
+ ],
+ [
+ ["--who"],
+ {
+ "dest": "who",
+ "default": "",
+ "help": "stores who made the created the change.",
+ },
+ ],
+]
+
+
+def generate_build_ID():
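+    # Timestamp-based ID, e.g. "20240407092209" (illustrative value,
+    # local time).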
+ return time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
+
+
+def generate_build_UID():
+ return uuid.uuid4().hex
+
+
+class BuildScript(
+ AutomationMixin,
+ VirtualenvMixin,
+ MercurialScript,
+ SecretsMixin,
+ PerfherderResourceOptionsMixin,
+):
+ def __init__(self, **kwargs):
+ # objdir is referenced in _query_abs_dirs() so let's make sure we
+ # have that attribute before calling BaseScript.__init__
+ self.objdir = None
+ super(BuildScript, self).__init__(**kwargs)
+        # epoch is only here to represent the start of the build
+        # that this mozharness script came from. Until we can grab buildbot's
+        # status.build.gettime()[0], this will have to do as a rough estimate,
+        # although it is about 4s off from the time it would be if it were
+        # done through MBF.
+        # TODO: find out if that time diff matters or if we just use it to
+        # separate each build
+ self.epoch_timestamp = int(time.mktime(datetime.now().timetuple()))
+ self.branch = self.config.get("branch")
+ self.stage_platform = self.config.get("stage_platform")
+ if not self.branch or not self.stage_platform:
+ if not self.branch:
+ self.error("'branch' not determined and is required")
+ if not self.stage_platform:
+ self.error("'stage_platform' not determined and is required")
+ self.fatal("Please add missing items to your config")
+ self.client_id = None
+ self.access_token = None
+
+ # Call this before creating the virtualenv so that we can support
+ # substituting config values with other config values.
+ self.query_build_env()
+
+ # We need to create the virtualenv directly (without using an action) in
+ # order to use python modules in PreScriptRun/Action listeners
+ self.create_virtualenv()
+
+ def _pre_config_lock(self, rw_config):
+ c = self.config
+ cfg_files_and_dicts = rw_config.all_cfg_files_and_dicts
+ build_pool = c.get("build_pool", "")
+ build_variant = c.get("build_variant", "")
+ variant_cfg = ""
+ if build_variant:
+ variant_cfg = BuildOptionParser.build_variants[build_variant] % (
+ BuildOptionParser.platform,
+ BuildOptionParser.bits,
+ )
+ build_pool_cfg = BuildOptionParser.build_pool_cfg_file
+
+        cfg_match_msg = (
+            "Script was run with '%(option)s %(type)s' and '%(type)s' "
+            "matches a key in '%(type_config_file)s'. Updating self.config "
+            "with items from that key's value."
+        )
+
+        for target_file, _ in cfg_files_and_dicts:
+ if build_pool_cfg and build_pool_cfg in target_file:
+ self.info(
+ cfg_match_msg
+ % {
+ "option": "--build-pool",
+ "type": build_pool,
+ "type_config_file": build_pool_cfg,
+ }
+ )
+ if variant_cfg and variant_cfg in target_file:
+ self.info(
+ cfg_match_msg
+ % {
+ "option": "--custom-build-variant-cfg",
+ "type": build_variant,
+ "type_config_file": variant_cfg,
+ }
+ )
+ self.info(
+ "To generate a config file based upon options passed and "
+ "config files used, run script as before but extend options "
+ 'with "--dump-config"'
+ )
+ self.info(
+ "For a diff of where self.config got its items, "
+ "run the script again as before but extend options with: "
+ '"--dump-config-hierarchy"'
+ )
+ self.info(
+ "Both --dump-config and --dump-config-hierarchy don't "
+ "actually run any actions."
+ )
+
+ def _query_objdir(self):
+ if self.objdir:
+ return self.objdir
+
+ if not self.config.get("objdir"):
+ return self.fatal(MISSING_CFG_KEY_MSG % ("objdir",))
+ self.objdir = self.config["objdir"]
+ return self.objdir
+
+ def query_is_nightly_promotion(self):
+ platform_enabled = self.config.get("enable_nightly_promotion")
+ branch_enabled = self.branch in self.config.get("nightly_promotion_branches")
+ return platform_enabled and branch_enabled
+
+ def query_build_env(self, **kwargs):
+ c = self.config
+
+        # let's invoke the base query_env and make a copy of the result,
+        # as we don't always want every key below added to the same dict
+ env = copy.deepcopy(super(BuildScript, self).query_env(**kwargs))
+
+ if self.query_is_nightly() or self.query_is_nightly_promotion():
+ # taskcluster sets the update channel for shipping builds
+ # explicitly
+ if c.get("update_channel"):
+ update_channel = c["update_channel"]
+ if six.PY2 and isinstance(update_channel, six.text_type):
+ update_channel = update_channel.encode("utf-8")
+ env["MOZ_UPDATE_CHANNEL"] = update_channel
+ else: # let's just give the generic channel based on branch
+ env["MOZ_UPDATE_CHANNEL"] = "nightly-%s" % (self.branch,)
+ self.info("Update channel set to: {}".format(env["MOZ_UPDATE_CHANNEL"]))
+
+ return env
+
+ def query_mach_build_env(self, multiLocale=None):
+ c = self.config
+ if multiLocale is None and self.query_is_nightly():
+ multiLocale = c.get("multi_locale", False)
+ mach_env = {}
+ if c.get("upload_env"):
+ mach_env.update(c["upload_env"])
+
+ # this prevents taskcluster from overwriting the target files with
+ # the multilocale files. Put everything from the en-US build in a
+ # separate folder.
+ if multiLocale and self.config.get("taskcluster_nightly"):
+ if "UPLOAD_PATH" in mach_env:
+ mach_env["UPLOAD_PATH"] = os.path.join(mach_env["UPLOAD_PATH"], "en-US")
+ return mach_env
+
+ def _get_mozconfig(self):
+ """assign mozconfig."""
+ dirs = self.query_abs_dirs()
+
+ try:
+ abs_mozconfig_path = get_mozconfig_path(
+ script=self, config=self.config, dirs=dirs
+ )
+ except MozconfigPathError as e:
+ if six.PY2:
+ self.fatal(e.message)
+ else:
+ self.fatal(e.msg)
+
+ self.info("Use mozconfig: {}".format(abs_mozconfig_path))
+
+ # print its contents
+ content = self.read_from_file(abs_mozconfig_path, error_level=FATAL)
+ self.info("mozconfig content:")
+ self.info(content)
+
+        # finally, copy the mozconfig to the path where 'mach build'
+        # expects it
+ self.copyfile(
+ abs_mozconfig_path, os.path.join(dirs["abs_src_dir"], ".mozconfig")
+ )
+
+ def _run_tooltool(self):
+ env = self.query_build_env()
+ env.update(self.query_mach_build_env())
+
+ c = self.config
+ dirs = self.query_abs_dirs()
+ manifest_src = os.environ.get("TOOLTOOL_MANIFEST")
+ if not manifest_src:
+ manifest_src = c.get("tooltool_manifest_src")
+ if not manifest_src:
+ return self.warning(ERROR_MSGS["tooltool_manifest_undetermined"])
+ cmd = [
+ sys.executable,
+ "-u",
+ os.path.join(dirs["abs_src_dir"], "mach"),
+ "artifact",
+ "toolchain",
+ "-v",
+ "--retry",
+ "4",
+ "--artifact-manifest",
+ os.path.join(dirs["abs_src_dir"], "toolchains.json"),
+ ]
+        # manifest_src is guaranteed to be set here; we returned early
+        # above when it could not be determined.
+        cmd.extend(
+            [
+                "--tooltool-manifest",
+                os.path.join(dirs["abs_src_dir"], manifest_src),
+            ]
+        )
+ cache = c["env"].get("TOOLTOOL_CACHE")
+ if cache:
+ cmd.extend(["--cache-dir", cache])
+ self.info(str(cmd))
+ self.run_command(cmd, cwd=dirs["abs_src_dir"], halt_on_failure=True, env=env)
+
+ def _create_mozbuild_dir(self, mozbuild_path=None):
+ if not mozbuild_path:
+ env = self.query_build_env()
+ mozbuild_path = env.get("MOZBUILD_STATE_PATH")
+ if mozbuild_path:
+ self.mkdir_p(mozbuild_path)
+ else:
+            self.warning(
+                "mozbuild_path could not be determined; skipping creating it."
+            )
+
+ def preflight_build(self):
+ """set up machine state for a complete build."""
+ self._get_mozconfig()
+ self._run_tooltool()
+ self._create_mozbuild_dir()
+ self._ensure_upload_path()
+
+ def build(self):
+ """builds application."""
+
+ args = ["build", "-v"]
+
+ # This will error on non-0 exit code.
+ self._run_mach_command_in_build_env(args)
+
+ self._generate_build_stats()
+
+ def static_analysis_autotest(self):
+ """Run mach static-analysis autotest, in order to make sure we dont regress"""
+ self.preflight_build()
+ self._run_mach_command_in_build_env(["configure"])
+ self._run_mach_command_in_build_env(
+ ["static-analysis", "autotest", "--intree-tool"], use_subprocess=True
+ )
+
+ def _query_mach(self):
+ return [sys.executable, "mach"]
+
+ def _run_mach_command_in_build_env(self, args, use_subprocess=False):
+ """Run a mach command in a build context."""
+ env = self.query_build_env()
+ env.update(self.query_mach_build_env())
+
+ dirs = self.query_abs_dirs()
+
+ mach = self._query_mach()
+
+ # XXX See bug 1483883
+ # Work around an interaction between Gradle and mozharness
+ # Not using `subprocess` causes gradle to hang
+ if use_subprocess:
+ import subprocess
+
+ return_code = subprocess.call(
+ mach + ["--log-no-times"] + args, env=env, cwd=dirs["abs_src_dir"]
+ )
+ else:
+ return_code = self.run_command(
+ command=mach + ["--log-no-times"] + args,
+ cwd=dirs["abs_src_dir"],
+ env=env,
+ error_list=MakefileErrorList,
+ output_timeout=self.config.get("max_build_output_timeout", 60 * 40),
+ )
+
+ if return_code:
+ self.return_code = self.worst_level(
+ EXIT_STATUS_DICT[TBPL_FAILURE],
+ self.return_code,
+ AUTOMATION_EXIT_CODES[::-1],
+ )
+ self.fatal(
+ "'mach %s' did not run successfully. Please check "
+ "log for errors." % " ".join(args)
+ )
+
+ def multi_l10n(self):
+ if not self.query_is_nightly():
+ self.info("Not a nightly build, skipping multi l10n.")
+ return
+
+ dirs = self.query_abs_dirs()
+ base_work_dir = dirs["base_work_dir"]
+ work_dir = dirs["abs_work_dir"]
+ objdir = dirs["abs_obj_dir"]
+ branch = self.branch
+
+ # Building a nightly with the try repository fails because a
+ # config-file does not exist for try. Default to mozilla-central
+ # settings (arbitrarily).
+ if branch == "try":
+ branch = "mozilla-central"
+
+ multil10n_path = os.path.join(
+ dirs["abs_src_dir"],
+ "testing/mozharness/scripts/multil10n.py",
+ )
+
+ cmd = [
+ sys.executable,
+ multil10n_path,
+ "--work-dir",
+ work_dir,
+ "--config-file",
+ "multi_locale/android-mozharness-build.json",
+ "--pull-locale-source",
+ "--package-multi",
+ "--summary",
+ ]
+
+ self.run_command(
+ cmd, env=self.query_build_env(), cwd=base_work_dir, halt_on_failure=True
+ )
+
+ package_cmd = [
+ "make",
+ "echo-variable-PACKAGE",
+ "AB_CD=multi",
+ ]
+ package_filename = self.get_output_from_command(
+ package_cmd,
+ cwd=objdir,
+ )
+ if not package_filename:
+ self.fatal(
+ "Unable to determine the package filename for the multi-l10n build. "
+ "Was trying to run: %s" % package_cmd
+ )
+
+ self.info("Multi-l10n package filename is: %s" % package_filename)
+
+ parser = MakeUploadOutputParser(
+ config=self.config,
+ log_obj=self.log_obj,
+ )
+ upload_cmd = ["make", "upload", "AB_CD=multi"]
+ self.run_command(
+ upload_cmd,
+ partial_env=self.query_mach_build_env(multiLocale=False),
+ cwd=objdir,
+ halt_on_failure=True,
+ output_parser=parser,
+ )
+ upload_files_cmd = [
+ "make",
+ "echo-variable-UPLOAD_FILES",
+ "AB_CD=multi",
+ ]
+ self.get_output_from_command(
+ upload_files_cmd,
+ cwd=objdir,
+ )
+
+ def postflight_build(self):
+ """grabs properties from post build and calls ccache -s"""
+ # A list of argument lists. Better names gratefully accepted!
+ mach_commands = self.config.get("postflight_build_mach_commands", [])
+ for mach_command in mach_commands:
+ self._execute_postflight_build_mach_command(mach_command)
+
+ def _execute_postflight_build_mach_command(self, mach_command_args):
+ env = self.query_build_env()
+ env.update(self.query_mach_build_env())
+
+ command = [sys.executable, "mach", "--log-no-times"]
+ command.extend(mach_command_args)
+
+ self.run_command(
+ command=command,
+ cwd=self.query_abs_dirs()["abs_src_dir"],
+ env=env,
+ output_timeout=self.config.get("max_build_output_timeout", 60 * 20),
+ halt_on_failure=True,
+ )
+
+ def preflight_package_source(self):
+ self._get_mozconfig()
+
+ def package_source(self):
+ """generates source archives and uploads them"""
+ env = self.query_build_env()
+ env.update(self.query_mach_build_env())
+ dirs = self.query_abs_dirs()
+
+ self.run_command(
+ command=[sys.executable, "mach", "--log-no-times", "configure"],
+ cwd=dirs["abs_src_dir"],
+ env=env,
+ output_timeout=60 * 3,
+ halt_on_failure=True,
+ )
+ self.run_command(
+ command=[
+ "make",
+ "source-package",
+ "source-upload",
+ ],
+ cwd=dirs["abs_obj_dir"],
+ env=env,
+ output_timeout=60 * 45,
+ halt_on_failure=True,
+ )
+
+ def _is_configuration_shipped(self):
+ """Determine if the current build configuration is shipped to users.
+
+ This is used to drive alerting so we don't see alerts for build
+ configurations we care less about.
+ """
+        # Ideally this would be driven by a config option. However, our
+        # current inheritance mechanism of using a base config and then
+        # one-off configs for variants isn't conducive to this, since
+        # derived configs would need to reset it and we don't like
+        # requiring boilerplate in derived configs.
+
+ # Debug builds are never shipped.
+ if self.config.get("debug_build"):
+ return False
+
+ # OS X opt builds without a variant are shipped.
+ if self.config.get("platform") == "macosx64":
+ if not self.config.get("build_variant"):
+ return True
+
+ # Android opt builds without a variant are shipped.
+ if self.config.get("platform") == "android":
+ if not self.config.get("build_variant"):
+ return True
+
+ return False
+
+ def _load_build_resources(self):
+ p = self.config.get("build_resources_path") % self.query_abs_dirs()
+ if not os.path.exists(p):
+ self.info("%s does not exist; not loading build resources" % p)
+ return None
+
+ with open(p, "r") as fh:
+ resources = json.load(fh)
+
+ if "duration" not in resources:
+ self.info("resource usage lacks duration; ignoring")
+ return None
+
+        # We always want to collect metrics, but with sccache enabled
+        # we should disable automatic alerting.
+        should_alert = os.environ.get("USE_SCCACHE") != "1"
+
+ data = {
+ "name": "build times",
+ "value": resources["duration"],
+ "extraOptions": self.perfherder_resource_options(),
+ "shouldAlert": should_alert,
+ "subtests": [],
+ }
+
+ for phase in resources["phases"]:
+ if "duration" not in phase:
+ continue
+ data["subtests"].append(
+ {
+ "name": phase["name"],
+ "value": phase["duration"],
+ }
+ )
+
+ return data
+
+ def _load_sccache_stats(self):
+ stats_file = os.path.join(
+ self.query_abs_dirs()["abs_obj_dir"], "sccache-stats.json"
+ )
+ if not os.path.exists(stats_file):
+ self.info("%s does not exist; not loading sccache stats" % stats_file)
+ return
+
+ with open(stats_file, "r") as fh:
+ stats = json.load(fh)
+
+ def get_stat(key):
+ val = stats["stats"][key]
+ # Future versions of sccache will distinguish stats by language
+ # and store them as a dict.
+ if isinstance(val, dict):
+ val = sum(val["counts"].values())
+ return val
+
+ total = get_stat("requests_executed")
+ hits = get_stat("cache_hits")
+ if total > 0:
+ hits /= float(total)
+
+ yield {
+ "name": "sccache hit rate",
+ "value": hits,
+ "subtests": [],
+ "alertThreshold": 50.0,
+ "lowerIsBetter": False,
+ # We want to always collect metrics.
+ # But disable automatic alerting on it
+ "shouldAlert": False,
+ }
+
+ yield {
+ "name": "sccache cache_write_errors",
+ "value": stats["stats"]["cache_write_errors"],
+ "alertThreshold": 50.0,
+ "subtests": [],
+ }
+
+ yield {
+ "name": "sccache requests_not_cacheable",
+ "value": stats["stats"]["requests_not_cacheable"],
+ "alertThreshold": 50.0,
+ "subtests": [],
+ }
+
+ def _get_package_metrics(self):
+ import tarfile
+ import zipfile
+
+ dirs = self.query_abs_dirs()
+
+ dist_dir = os.path.join(dirs["abs_obj_dir"], "dist")
+ for ext in ["apk", "dmg", "tar.bz2", "zip"]:
+ name = "target." + ext
+ if os.path.exists(os.path.join(dist_dir, name)):
+ packageName = name
+ break
+ else:
+ self.fatal("could not determine packageName")
+
+ interests = ["libxul.so", "classes.dex", "omni.ja", "xul.dll"]
+ installer = os.path.join(dist_dir, packageName)
+ installer_size = 0
+ size_measurements = []
+
+ def paths_with_sizes(installer):
+ if zipfile.is_zipfile(installer):
+ with zipfile.ZipFile(installer, "r") as zf:
+ for zi in zf.infolist():
+ yield zi.filename, zi.file_size
+ elif tarfile.is_tarfile(installer):
+ with tarfile.open(installer, "r:*") as tf:
+ for ti in tf:
+ yield ti.name, ti.size
+
+ if os.path.exists(installer):
+ installer_size = self.query_filesize(installer)
+ self.info("Size of %s: %s bytes" % (packageName, installer_size))
+ try:
+ subtests = {}
+ for path, size in paths_with_sizes(installer):
+ name = os.path.basename(path)
+ if name in interests:
+ # We have to be careful here: desktop Firefox installers
+ # contain two omni.ja files: one for the general runtime,
+ # and one for the browser proper.
+ if name == "omni.ja":
+ containing_dir = os.path.basename(os.path.dirname(path))
+ if containing_dir == "browser":
+ name = "browser-omni.ja"
+ if name in subtests:
+ self.fatal(
+ "should not see %s (%s) multiple times!" % (name, path)
+ )
+ subtests[name] = size
+ for name in subtests:
+ self.info("Size of %s: %s bytes" % (name, subtests[name]))
+ size_measurements.append({"name": name, "value": subtests[name]})
+ except Exception:
+ self.info("Unable to search %s for component sizes." % installer)
+ size_measurements = []
+
+ if not installer_size and not size_measurements:
+ return
+
+        # We want to always collect metrics. But alerts for installer size
+        # are only used for builds we ship, so nix the alerts for builds we
+        # don't ship.
+ def filter_alert(alert):
+ if not self._is_configuration_shipped():
+ alert["shouldAlert"] = False
+
+ return alert
+
+ if installer.endswith(".apk"): # Android
+ yield filter_alert(
+ {
+ "name": "installer size",
+ "value": installer_size,
+ "alertChangeType": "absolute",
+ "alertThreshold": (200 * 1024),
+ "subtests": size_measurements,
+ }
+ )
+ else:
+ yield filter_alert(
+ {
+ "name": "installer size",
+ "value": installer_size,
+ "alertChangeType": "absolute",
+ "alertThreshold": (100 * 1024),
+ "subtests": size_measurements,
+ }
+ )
+
+ def _get_sections(self, file, filter=None):
+ """
+ Returns a dictionary of sections and their sizes.
+ """
+        # Check for `rust-size`, our cross-platform version of `size`. It
+        # should be fetched by run-task into $MOZ_FETCHES_DIR/rust-size/rust-size
+ rust_size = os.path.join(
+ os.environ["MOZ_FETCHES_DIR"], "rust-size", "rust-size"
+ )
+ size_prog = self.which(rust_size)
+ if not size_prog:
+ self.info("Couldn't find `rust-size` program")
+ return {}
+
+ self.info("Using %s" % size_prog)
+ cmd = [size_prog, file]
+ output = self.get_output_from_command(cmd)
+ if not output:
+ self.info("`rust-size` failed")
+ return {}
+
+ # Format is JSON:
+ # {
+ # "section_type": {
+ # "section_name": size, ....
+ # },
+ # ...
+ # }
+ try:
+ parsed = json.loads(output)
+ except ValueError:
+ self.info("`rust-size` failed: %s" % output)
+ return {}
+
+ sections = {}
+ for sec_type in list(parsed.values()):
+ for name, size in list(sec_type.items()):
+ if not filter or name in filter:
+ sections[name] = size
+
+ return sections
+
+ def _get_binary_metrics(self):
+ """
+        Provides metrics on interesting components of the built binaries.
+ Currently just the sizes of interesting sections.
+ """
+ lib_interests = {
+ "XUL": ("libxul.so", "xul.dll", "XUL"),
+ "NSS": ("libnss3.so", "nss3.dll", "libnss3.dylib"),
+ "NSPR": ("libnspr4.so", "nspr4.dll", "libnspr4.dylib"),
+ "avcodec": ("libmozavcodec.so", "mozavcodec.dll", "libmozavcodec.dylib"),
+ "avutil": ("libmozavutil.so", "mozavutil.dll", "libmozavutil.dylib"),
+ }
+ section_interests = (
+ ".text",
+ ".data",
+ ".rodata",
+ ".rdata",
+ ".cstring",
+ ".data.rel.ro",
+ ".bss",
+ )
+ lib_details = []
+
+ dirs = self.query_abs_dirs()
+ dist_dir = os.path.join(dirs["abs_obj_dir"], "dist")
+ bin_dir = os.path.join(dist_dir, "bin")
+
+ for lib_type, lib_names in list(lib_interests.items()):
+ for lib_name in lib_names:
+ lib = os.path.join(bin_dir, lib_name)
+ if os.path.exists(lib):
+ lib_size = 0
+ section_details = self._get_sections(lib, section_interests)
+ section_measurements = []
+ # Build up the subtests
+
+ # Lump rodata sections together
+ # - Mach-O separates out read-only string data as .cstring
+ # - PE really uses .rdata, but XUL at least has a .rodata as well
+ for ro_alias in (".cstring", ".rdata"):
+ if ro_alias in section_details:
+ if ".rodata" in section_details:
+ section_details[".rodata"] += section_details[ro_alias]
+ else:
+ section_details[".rodata"] = section_details[ro_alias]
+ del section_details[ro_alias]
+
+ for k, v in list(section_details.items()):
+ section_measurements.append({"name": k, "value": v})
+ lib_size += v
+ lib_details.append(
+ {
+ "name": lib_type,
+ "size": lib_size,
+ "sections": section_measurements,
+ }
+ )
+
+ for lib_detail in lib_details:
+ yield {
+ "name": "%s section sizes" % lib_detail["name"],
+ "value": lib_detail["size"],
+ "shouldAlert": False,
+ "subtests": lib_detail["sections"],
+ }
+
+ def _generate_build_stats(self):
+ """grab build stats following a compile.
+
+ This action handles all statistics from a build: 'count_ctors'
+ and then posts to graph server the results.
+ We only post to graph server for non nightly build
+ """
+ self.info("Collecting build metrics")
+
+ if os.environ.get("USE_ARTIFACT"):
+ self.info("Skipping due to forced artifact build.")
+ return
+
+ c = self.config
+
+ # Report some important file sizes for display in treeherder
+
+ perfherder_data = {
+ "framework": {"name": "build_metrics"},
+ "suites": [],
+ }
+
+ if not c.get("debug_build") and not c.get("disable_package_metrics"):
+ perfherder_data["suites"].extend(self._get_package_metrics())
+ perfherder_data["suites"].extend(self._get_binary_metrics())
+
+ # Extract compiler warnings count.
+ warnings = self.get_output_from_command(
+ command=[sys.executable, "mach", "warnings-list"],
+ cwd=self.query_abs_dirs()["abs_src_dir"],
+ env=self.query_build_env(),
+ # No need to pollute the log.
+ silent=True,
+ # Fail fast.
+ halt_on_failure=True,
+ )
+
+ if warnings is not None:
+ perfherder_data["suites"].append(
+ {
+ "name": "compiler warnings",
+ "value": len(warnings.strip().splitlines()),
+ "alertThreshold": 100.0,
+ "subtests": [],
+ }
+ )
+
+ build_metrics = self._load_build_resources()
+ if build_metrics:
+ perfherder_data["suites"].append(build_metrics)
+ perfherder_data["suites"].extend(self._load_sccache_stats())
+
+ # Ensure all extra options for this configuration are present.
+ for opt in os.environ.get("PERFHERDER_EXTRA_OPTIONS", "").split():
+ for suite in perfherder_data["suites"]:
+ if opt not in suite.get("extraOptions", []):
+ suite.setdefault("extraOptions", []).append(opt)
+
+ if self.query_is_nightly():
+ for suite in perfherder_data["suites"]:
+ suite.setdefault("extraOptions", []).insert(0, "nightly")
+
+ if perfherder_data["suites"]:
+ self.info("PERFHERDER_DATA: %s" % json.dumps(perfherder_data))
+
+ def valgrind_test(self):
+ """Execute mach's valgrind-test for memory leaks"""
+ env = self.query_build_env()
+ env.update(self.query_mach_build_env())
+
+ return_code = self.run_command(
+ command=[sys.executable, "mach", "valgrind-test"],
+ cwd=self.query_abs_dirs()["abs_src_dir"],
+ env=env,
+ output_timeout=self.config.get("max_build_output_timeout", 60 * 40),
+ )
+ if return_code:
+ self.return_code = self.worst_level(
+ EXIT_STATUS_DICT[TBPL_FAILURE],
+ self.return_code,
+ AUTOMATION_EXIT_CODES[::-1],
+ )
+ self.fatal(
+ "'mach valgrind-test' did not run successfully. Please check "
+ "log for errors."
+ )
+
+ def _ensure_upload_path(self):
+ env = self.query_mach_build_env()
+
+ # Some Taskcluster workers don't like it if an artifacts directory
+ # is defined but no artifacts are uploaded. Guard against this by always
+ # ensuring the artifacts directory exists.
+ if "UPLOAD_PATH" in env and not os.path.exists(env["UPLOAD_PATH"]):
+ self.mkdir_p(env["UPLOAD_PATH"])
+
+ def _post_fatal(self, message=None, exit_code=None):
+ if not self.return_code: # only overwrite return_code if it's 0
+ self.error("setting return code to 2 because fatal was called")
+ self.return_code = 2
+
+ @PostScriptRun
+ def _summarize(self):
+ """If this is run in automation, ensure the return code is valid and
+ set it to one if it's not. Finally, log any summaries we collected
+ from the script run.
+ """
+ if self.config.get("is_automation"):
+ # let's ignore all mention of tbpl status until this
+ # point so it will be easier to manage
+ if self.return_code not in AUTOMATION_EXIT_CODES:
+ self.error(
+ "Return code is set to: %s and is outside of "
+ "automation's known values. Setting to 2(failure). "
+ "Valid return codes %s" % (self.return_code, AUTOMATION_EXIT_CODES)
+ )
+ self.return_code = 2
+ for status, return_code in list(EXIT_STATUS_DICT.items()):
+ if return_code == self.return_code:
+ self.record_status(status, TBPL_STATUS_DICT[status])
+ self.summary()
+
+ @PostScriptRun
+ def _parse_build_tests_ccov(self):
+ if "MOZ_FETCHES_DIR" not in os.environ:
+ return
+
+ dirs = self.query_abs_dirs()
+ topsrcdir = dirs["abs_src_dir"]
+ base_work_dir = dirs["base_work_dir"]
+
+ env = self.query_build_env()
+
+ grcov_path = os.path.join(os.environ["MOZ_FETCHES_DIR"], "grcov", "grcov")
+ if not os.path.isabs(grcov_path):
+ grcov_path = os.path.join(base_work_dir, grcov_path)
+ if self._is_windows():
+ grcov_path += ".exe"
+ env["GRCOV_PATH"] = grcov_path
+
+ cmd = self._query_mach() + [
+ "python",
+ os.path.join("testing", "parse_build_tests_ccov.py"),
+ ]
+ self.run_command(command=cmd, cwd=topsrcdir, env=env, halt_on_failure=True)
+
+ @PostScriptRun
+ def _relocate_artifacts(self):
+ """Move certain artifacts out of the default upload directory.
+
+ These artifacts will be moved to a secondary directory called `cidata`.
+ Then they will be uploaded with different expiration values."""
+ dirs = self.query_abs_dirs()
+ topsrcdir = dirs["abs_src_dir"]
+ base_work_dir = dirs["base_work_dir"]
+
+ build_platform = os.environ.get("MOZ_ARTIFACT_PLATFORM")
+ if build_platform is not None:
+ build_platform = build_platform.lower()
+ else:
+ return
+ try:
+ upload_dir = os.environ["UPLOAD_DIR"]
+ except KeyError:
+ self.fatal("The env. var. UPLOAD_DIR is not set.")
+
+ artifact_yml_path = os.path.join(
+ topsrcdir, "taskcluster/gecko_taskgraph/transforms/artifacts.yml"
+ )
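+        # artifacts.yml is expected to map a platform family to artifact
+        # names, e.g. (illustrative, not the real list):
+        #   linux:
+        #     - target.crashreporter-symbols.zip
+        #     - target.jsshell.zip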
+
+ upload_short_dir = os.path.join(base_work_dir, "cidata")
+
+ # Choose artifacts based on build platform
+ if build_platform.startswith("win"):
+ main_platform = "win"
+ elif build_platform.startswith("linux"):
+ main_platform = "linux"
+ elif build_platform.startswith("mac"):
+ main_platform = "macos"
+ elif build_platform.startswith("android"):
+ if build_platform == "android-geckoview-docs":
+ return
+ main_platform = "android"
+ else:
+ err = "Build platform {} didn't start with 'mac', 'linux', 'win', or 'android'".format(
+ build_platform
+ )
+ self.fatal(err)
+        try:
+            with open(artifact_yml_path) as artfile:
+                platforms = yaml.safe_load(artfile)
+            arts = list(platforms[main_platform])
+ except FileNotFoundError:
+ self.fatal("Could not read artifacts.yml; file not found. Exiting.")
+ except PermissionError:
+ self.fatal("Could not read artifacts.yml; permission error.")
+ except YAMLError as ye:
+ self.fatal(f"Failed to parse artifacts.yml with error:\n{ye}")
+
+ try:
+ os.makedirs(upload_short_dir)
+ except FileExistsError:
+ pass
+ except PermissionError:
+ self.fatal(f'Failed to create dir. "{upload_short_dir}"; permission error.')
+
+ for art in arts:
+ source_file = os.path.join(upload_dir, art)
+ if not os.path.exists(source_file):
+                self.info(
+                    f"The artifact {source_file} is not present in this "
+                    "build; skipping."
+                )
+ continue
+ dest_file = os.path.join(upload_short_dir, art)
+ try:
+ os.rename(source_file, dest_file)
+ if os.path.exists(dest_file):
+ self.info(
+ f"Successfully moved artifact {source_file} to {dest_file}"
+ )
+ else:
+ self.fatal(
+ f"Move of {source_file} to {dest_file} was not successful."
+ )
+ except (PermissionError, FileNotFoundError) as err:
+ self.fatal(
+ f'Failed to move file "{art}" from {source_file} to {dest_file}:\n{err}'
+ )
+ continue
diff --git a/testing/mozharness/mozharness/mozilla/checksums.py b/testing/mozharness/mozharness/mozilla/checksums.py
new file mode 100644
index 0000000000..e7071d506a
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/checksums.py
@@ -0,0 +1,41 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import six
+
+
+def parse_checksums_file(checksums):
+ """
+ Parses checksums files that the build system generates and uploads:
+ https://hg.mozilla.org/mozilla-central/file/default/build/checksums.py
+ """
+ fileInfo = {}
+ for line in checksums.splitlines():
+ hash_, type_, size, file_ = line.split(None, 3)
+ type_ = six.ensure_str(type_)
+ file_ = six.ensure_str(file_)
+ size = int(size)
+ if size < 0:
+ raise ValueError("Found negative value (%d) for size." % size)
+ if file_ not in fileInfo:
+ fileInfo[file_] = {"hashes": {}}
+ # If the file already exists, make sure that the size matches the
+ # previous entry.
+ elif fileInfo[file_]["size"] != size:
+ raise ValueError(
+ "Found different sizes for same file %s (%s and %s)"
+ % (file_, fileInfo[file_]["size"], size)
+ )
+ # Same goes for the hash.
+ elif (
+ type_ in fileInfo[file_]["hashes"]
+ and fileInfo[file_]["hashes"][type_] != hash_
+ ):
+ raise ValueError(
+ "Found different %s hashes for same file %s (%s and %s)"
+ % (type_, file_, fileInfo[file_]["hashes"][type_], hash_)
+ )
+ fileInfo[file_]["size"] = size
+ fileInfo[file_]["hashes"][type_] = hash_
+ return fileInfo
diff --git a/testing/mozharness/mozharness/mozilla/firefox/__init__.py b/testing/mozharness/mozharness/mozilla/firefox/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/firefox/__init__.py
diff --git a/testing/mozharness/mozharness/mozilla/firefox/autoconfig.py b/testing/mozharness/mozharness/mozilla/firefox/autoconfig.py
new file mode 100644
index 0000000000..476277e661
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/firefox/autoconfig.py
@@ -0,0 +1,72 @@
+""" This module helps modifying Firefox with autoconfig files."""
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import os
+
+from mozharness.base.script import platform_name
+
+AUTOCONFIG_TEXT = """// Any comment. You must start the file with a comment!
+// This entry tells the browser to load a mozilla.cfg
+pref("general.config.sandbox_enabled", false);
+pref("general.config.filename", "mozilla.cfg");
+pref("general.config.obscure_value", 0);
+"""
+
+
+def write_autoconfig_files(
+ fx_install_dir, cfg_contents, autoconfig_contents=AUTOCONFIG_TEXT
+):
+ """Generate autoconfig files to modify Firefox's set up
+
+ Read documentation in here:
+ https://developer.mozilla.org/en-US/Firefox/Enterprise_deployment#Configuration
+
+ fx_install_dir - path to Firefox installation
+ cfg_contents - .cfg file containing JavaScript changes for Firefox
+ autoconfig_contents - autoconfig.js content to refer to .cfg gile
+ """
+ with open(_cfg_file_path(fx_install_dir), "w") as fd:
+ fd.write(cfg_contents)
+ with open(_autoconfig_path(fx_install_dir), "w") as fd:
+ fd.write(autoconfig_contents)
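+
+
+# Minimal usage sketch (path and pref contents are hypothetical):
+#
+#   write_autoconfig_files(
+#       fx_install_dir="/opt/firefox",
+#       cfg_contents=(
+#           "// mozilla.cfg must also start with a comment\n"
+#           'pref("browser.shell.checkDefaultBrowser", false);\n'
+#       ),
+#   )
+#
+# This writes mozilla.cfg and defaults/pref/autoconfig.js under the
+# install directory so Firefox applies the prefs at startup.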
+
+
+def read_autoconfig_file(fx_install_dir):
+ """Read autoconfig file that modifies Firefox startup
+
+ fx_install_dir - path to Firefox installation
+ """
+ with open(_cfg_file_path(fx_install_dir), "r") as fd:
+ return fd.read()
+
+
+def _autoconfig_path(fx_install_dir):
+ platform = platform_name()
+ if platform in ("win32", "win64"):
+ return os.path.join(fx_install_dir, "defaults", "pref", "autoconfig.js")
+ elif platform in ("linux", "linux64"):
+ return os.path.join(fx_install_dir, "defaults/pref/autoconfig.js")
+ elif platform in ("macosx"):
+ return os.path.join(
+ fx_install_dir, "Contents/Resources/defaults/pref/autoconfig.js"
+ )
+ else:
+ raise Exception("Invalid platform.")
+
+
+def _cfg_file_path(fx_install_dir):
+ """
+ Windows: defaults\pref
+ Mac: Firefox.app/Contents/Resources/defaults/pref
+ Linux: defaults/pref
+ """
+ platform = platform_name()
+ if platform in ("win32", "win64"):
+ return os.path.join(fx_install_dir, "mozilla.cfg")
+ elif platform in ("linux", "linux64"):
+ return os.path.join(fx_install_dir, "mozilla.cfg")
+ elif platform in ("macosx"):
+ return os.path.join(fx_install_dir, "Contents/Resources/mozilla.cfg")
+ else:
+ raise Exception("Invalid platform.")
diff --git a/testing/mozharness/mozharness/mozilla/l10n/__init__.py b/testing/mozharness/mozharness/mozilla/l10n/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/l10n/__init__.py
diff --git a/testing/mozharness/mozharness/mozilla/l10n/locales.py b/testing/mozharness/mozharness/mozilla/l10n/locales.py
new file mode 100755
index 0000000000..83fadd0133
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/l10n/locales.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Localization.
+"""
+
+import os
+import pprint
+
+from mozharness.base.config import parse_config_file
+
+
+# LocalesMixin {{{1
+class LocalesMixin(object):
+ def __init__(self, **kwargs):
+ """Mixins generally don't have an __init__.
+ This breaks super().__init__() for children.
+ However, this is needed to override the query_abs_dirs()
+ """
+ self.abs_dirs = None
+ self.locales = None
+ self.gecko_locale_revisions = None
+ self.l10n_revisions = {}
+
+ def query_locales(self):
+ if self.locales is not None:
+ return self.locales
+ c = self.config
+ ignore_locales = c.get("ignore_locales", [])
+ additional_locales = c.get("additional_locales", [])
+ # List of locales can be set by using different methods in the
+ # following order:
+ # 1. "MOZ_LOCALES" env variable: a string of locale:revision separated
+ # by space
+ # 2. self.config["locales"] which can be either coming from the config
+ # or from --locale command line argument
+ # 3. using self.config["locales_file"] l10n changesets file
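+        # For example (illustrative), MOZ_LOCALES="de it:0123abcd" selects
+        # de at the default revision and it pinned to revision 0123abcd.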
+ locales = None
+
+ # Environment variable
+ if not locales and "MOZ_LOCALES" in os.environ:
+ self.debug("Using locales from environment: %s" % os.environ["MOZ_LOCALES"])
+ locales = os.environ["MOZ_LOCALES"].split()
+
+ # Command line or config
+ if not locales and c.get("locales", []):
+ locales = c["locales"]
+ self.debug("Using locales from config/CLI: %s" % ", ".join(locales))
+
+ # parse locale:revision if set
+ if locales:
+            for locale_spec in locales:
+                if ":" in locale_spec:
+                    # revision specified in the locale string
+                    locale, revision = locale_spec.split(":", 1)
+                    self.debug("Using %s:%s" % (locale, revision))
+                    self.l10n_revisions[locale] = revision
+            # clean up locales by stripping revisions
+            locales = [spec.split(":")[0] for spec in locales]
+
+ if not locales and "locales_file" in c:
+ abs_dirs = self.query_abs_dirs()
+ locales_file = os.path.join(abs_dirs["abs_src_dir"], c["locales_file"])
+ locales = self.parse_locales_file(locales_file)
+
+ if not locales:
+ self.fatal("No locales set!")
+
+ for locale in ignore_locales:
+ if locale in locales:
+ self.debug("Ignoring locale %s." % locale)
+ locales.remove(locale)
+ if locale in self.l10n_revisions:
+ del self.l10n_revisions[locale]
+
+ for locale in additional_locales:
+ if locale not in locales:
+ self.debug("Adding locale %s." % locale)
+ locales.append(locale)
+
+ if not locales:
+ return None
+ self.locales = locales
+ return self.locales
+
+ def list_locales(self):
+ """Stub action method."""
+ self.info("Locale list: %s" % str(self.query_locales()))
+
+ def parse_locales_file(self, locales_file):
+ locales = []
+ c = self.config
+ self.info("Parsing locales file %s" % locales_file)
+        platform = c.get("locales_platform")
+
+ if locales_file.endswith("json"):
+ locales_json = parse_config_file(locales_file)
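+            # A JSON changesets file maps each locale to its details, e.g.
+            # (illustrative):
+            #   {"de": {"revision": "0123abcd", "platforms": ["linux"]}}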
+ for locale in sorted(locales_json.keys()):
+ if isinstance(locales_json[locale], dict):
+ if platform and platform not in locales_json[locale]["platforms"]:
+ continue
+ self.l10n_revisions[locale] = locales_json[locale]["revision"]
+ else:
+ # some other way of getting this?
+ self.l10n_revisions[locale] = "default"
+ locales.append(locale)
+ else:
+ locales = self.read_from_file(locales_file).split()
+ self.info("self.l10n_revisions: %s" % pprint.pformat(self.l10n_revisions))
+ self.info("locales: %s" % locales)
+ return locales
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(LocalesMixin, self).query_abs_dirs()
+ c = self.config
+ dirs = {}
+ dirs["abs_work_dir"] = os.path.join(c["base_work_dir"], c["work_dir"])
+ dirs["abs_l10n_dir"] = os.path.abspath(
+ os.path.join(abs_dirs["abs_src_dir"], "../l10n-central")
+ )
+ dirs["abs_locales_src_dir"] = os.path.join(
+ abs_dirs["abs_src_dir"],
+ c["locales_dir"],
+ )
+
+ dirs["abs_obj_dir"] = os.path.join(dirs["abs_work_dir"], c["objdir"])
+ dirs["abs_locales_dir"] = os.path.join(dirs["abs_obj_dir"], c["locales_dir"])
+
+ for key in list(dirs.keys()):
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ # This requires self to inherit a VCSMixin.
+ def pull_locale_source(self, hg_l10n_base=None, parent_dir=None, vcs="hg"):
+ c = self.config
+ if not hg_l10n_base:
+ hg_l10n_base = c["hg_l10n_base"]
+ if parent_dir is None:
+ parent_dir = self.query_abs_dirs()["abs_l10n_dir"]
+ self.mkdir_p(parent_dir)
+ # This block is to allow for pulling buildbot-configs in Fennec
+ # release builds, since we don't pull it in MBF anymore.
+ if c.get("l10n_repos"):
+ repos = c.get("l10n_repos")
+ self.vcs_checkout_repos(repos, tag_override=c.get("tag_override"))
+ # Pull locales
+ locales = self.query_locales()
+ locale_repos = []
+ for locale in locales:
+ tag = c.get("hg_l10n_tag", "default")
+ if self.l10n_revisions.get(locale):
+ tag = self.l10n_revisions[locale]
+ locale_repos.append(
+ {"repo": "%s/%s" % (hg_l10n_base, locale), "branch": tag, "vcs": vcs}
+ )
+ revs = self.vcs_checkout_repos(
+ repo_list=locale_repos,
+ parent_dir=parent_dir,
+ tag_override=c.get("tag_override"),
+ )
+ self.gecko_locale_revisions = revs
+
+
+# __main__ {{{1
+
+if __name__ == "__main__":
+ pass
diff --git a/testing/mozharness/mozharness/mozilla/l10n/multi_locale_build.py b/testing/mozharness/mozharness/mozilla/l10n/multi_locale_build.py
new file mode 100755
index 0000000000..6b1f8c4782
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/l10n/multi_locale_build.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""multi_locale_build.py
+
+This should be a mostly generic multilocale build script.
+"""
+
+import os
+import sys
+
+from mozharness.base.errors import MakefileErrorList
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.l10n.locales import LocalesMixin
+
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+
+# MultiLocaleBuild {{{1
+class MultiLocaleBuild(LocalesMixin, MercurialScript):
+ """This class targets Fennec multilocale builds.
+ We were considering this for potential Firefox desktop multilocale.
+ Now that we have a different approach for B2G multilocale,
+ it's most likely misnamed."""
+
+ config_options = [
+ [
+ ["--locale"],
+ {
+ "action": "extend",
+ "dest": "locales",
+ "type": "string",
+ "help": "Specify the locale(s) to repack",
+ },
+ ],
+ [
+ ["--objdir"],
+ {
+ "action": "store",
+ "dest": "objdir",
+ "type": "string",
+ "default": "objdir",
+ "help": "Specify the objdir",
+ },
+ ],
+ [
+ ["--l10n-base"],
+ {
+ "action": "store",
+ "dest": "hg_l10n_base",
+ "type": "string",
+ "help": "Specify the L10n repo base directory",
+ },
+ ],
+ [
+ ["--l10n-tag"],
+ {
+ "action": "store",
+ "dest": "hg_l10n_tag",
+ "type": "string",
+ "help": "Specify the L10n tag",
+ },
+ ],
+ [
+ ["--tag-override"],
+ {
+ "action": "store",
+ "dest": "tag_override",
+ "type": "string",
+ "help": "Override the tags set for all repos",
+ },
+ ],
+ ]
+
+ def __init__(self, require_config_file=True):
+ LocalesMixin.__init__(self)
+ MercurialScript.__init__(
+ self,
+ config_options=self.config_options,
+ all_actions=["pull-locale-source", "package-multi", "summary"],
+ require_config_file=require_config_file,
+ )
+
+ # pull_locale_source() defined in LocalesMixin.
+
+ def _run_mach_command(self, args):
+ dirs = self.query_abs_dirs()
+
+ mach = [sys.executable, "mach"]
+
+ return_code = self.run_command(
+ command=mach + ["--log-no-times"] + args,
+ cwd=dirs["abs_src_dir"],
+ )
+
+ if return_code:
+ self.fatal(
+ "'mach %s' did not run successfully. Please check "
+ "log for errors." % " ".join(args)
+ )
+
+ def package_multi(self):
+ dirs = self.query_abs_dirs()
+ objdir = dirs["abs_obj_dir"]
+
+ # This will error on non-0 exit code.
+        locales = sorted(self.query_locales())
+ self._run_mach_command(["package-multi-locale", "--locales"] + locales)
+
+ command = "make package-tests AB_CD=multi"
+ self.run_command(
+ command, cwd=objdir, error_list=MakefileErrorList, halt_on_failure=True
+ )
+ # TODO deal with buildsymbols
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ pass
diff --git a/testing/mozharness/mozharness/mozilla/merkle.py b/testing/mozharness/mozharness/mozilla/merkle.py
new file mode 100644
index 0000000000..dba780b73a
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/merkle.py
@@ -0,0 +1,190 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import struct
+
+
+def _round2(n):
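+    # Largest power of two strictly less than n (the RFC 6962-bis split
+    # point); e.g. _round2(5) == _round2(8) == 4.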
+ k = 1
+ while k < n:
+ k <<= 1
+ return k >> 1
+
+
+def _leaf_hash(hash_fn, leaf):
+ return hash_fn(b"\x00" + leaf).digest()
+
+
+def _pair_hash(hash_fn, left, right):
+ return hash_fn(b"\x01" + left + right).digest()
+
+
+class InclusionProof:
+ """
+ Represents a Merkle inclusion proof for purposes of serialization,
+ deserialization, and verification of the proof. The format for inclusion
+ proofs in RFC 6962-bis is as follows:
+
+ opaque LogID<2..127>;
+ opaque NodeHash<32..2^8-1>;
+
+ struct {
+ LogID log_id;
+ uint64 tree_size;
+ uint64 leaf_index;
+ NodeHash inclusion_path<1..2^16-1>;
+ } InclusionProofDataV2;
+
+ In other words:
+ - 1 + N octets of log_id (currently zero)
+ - 8 octets of tree_size = self.n
+ - 8 octets of leaf_index = m
+ - 2 octets of path length, followed by
+ * 1 + N octets of NodeHash
+ """
+
+ # Pre-generated 'log ID'. Not used by Firefox; it is only needed because
+ # there's a slot in the RFC 6962-bis format that requires a value at least
+ # two bytes long (plus a length byte).
+ LOG_ID = b"\x02\x00\x00"
+
+ def __init__(self, tree_size, leaf_index, path_elements):
+ self.tree_size = tree_size
+ self.leaf_index = leaf_index
+ self.path_elements = path_elements
+
+ @staticmethod
+ def from_rfc6962_bis(serialized):
+ start = 0
+ read = 1
+ if len(serialized) < start + read:
+ raise Exception("Inclusion proof too short for log ID header")
+ (log_id_len,) = struct.unpack("B", serialized[start : start + read])
+ start += read
+ start += log_id_len # Ignore the log ID itself
+
+ read = 8 + 8 + 2
+ if len(serialized) < start + read:
+ raise Exception("Inclusion proof too short for middle section")
+ tree_size, leaf_index, path_len = struct.unpack(
+ "!QQH", serialized[start : start + read]
+ )
+ start += read
+
+ path_elements = []
+ end = 1 + log_id_len + 8 + 8 + 2 + path_len
+ while start < end:
+ read = 1
+ if len(serialized) < start + read:
+ raise Exception("Inclusion proof too short for middle section")
+ (elem_len,) = struct.unpack("!B", serialized[start : start + read])
+ start += read
+
+ read = elem_len
+ if len(serialized) < start + read:
+ raise Exception("Inclusion proof too short for middle section")
+ if end < start + read:
+ raise Exception("Inclusion proof element exceeds declared length")
+ path_elements.append(serialized[start : start + read])
+ start += read
+
+ return InclusionProof(tree_size, leaf_index, path_elements)
+
+ def to_rfc6962_bis(self):
+ inclusion_path = b""
+ for step in self.path_elements:
+ step_len = struct.pack("B", len(step))
+ inclusion_path += step_len + step
+
+ middle = struct.pack(
+ "!QQH", self.tree_size, self.leaf_index, len(inclusion_path)
+ )
+ return self.LOG_ID + middle + inclusion_path
+
+ def _expected_head(self, hash_fn, leaf, leaf_index, tree_size):
+ node = _leaf_hash(hash_fn, leaf)
+
+ # Compute indicators of which direction the pair hashes should be done.
+ # Derived from the PATH logic in draft-ietf-trans-rfc6962-bis
+ lr = []
+ while tree_size > 1:
+ k = _round2(tree_size)
+ left = leaf_index < k
+ lr = [left] + lr
+
+ if left:
+ tree_size = k
+ else:
+ tree_size = tree_size - k
+ leaf_index = leaf_index - k
+
+ assert len(lr) == len(self.path_elements)
+ for i, elem in enumerate(self.path_elements):
+ if lr[i]:
+ node = _pair_hash(hash_fn, node, elem)
+ else:
+ node = _pair_hash(hash_fn, elem, node)
+
+ return node
+
+ def verify(self, hash_fn, leaf, leaf_index, tree_size, tree_head):
+ return self._expected_head(hash_fn, leaf, leaf_index, tree_size) == tree_head
+
+
+class MerkleTree:
+ """
+ Implements a Merkle tree on a set of data items following the
+ structure defined in RFC 6962-bis. This allows us to create a
+ single hash value that summarizes the data (the 'head'), and an
+ 'inclusion proof' for each element that connects it to the head.
+
+ https://tools.ietf.org/html/draft-ietf-trans-rfc6962-bis-24
+ """
+
+ def __init__(self, hash_fn, data):
+ self.n = len(data)
+ self.hash_fn = hash_fn
+
+ # We cache intermediate node values, as a dictionary of dictionaries,
+ # where the node representing data elements data[m:n] is represented by
+ # nodes[m][n]. This corresponds to the 'D[m:n]' notation in RFC
+ # 6962-bis. In particular, the leaves are stored in nodes[i][i+1] and
+ # the head is nodes[0][n].
+ self.nodes = {}
+ for i in range(self.n):
+ self.nodes[i, i + 1] = _leaf_hash(self.hash_fn, data[i])
+
+ def _node(self, start, end):
+ if (start, end) in self.nodes:
+ return self.nodes[start, end]
+
+ k = _round2(end - start)
+ left = self._node(start, start + k)
+ right = self._node(start + k, end)
+ node = _pair_hash(self.hash_fn, left, right)
+
+ self.nodes[start, end] = node
+ return node
+
+ def head(self):
+ return self._node(0, self.n)
+
+ def _relative_proof(self, target, start, end):
+ n = end - start
+ k = _round2(n)
+
+ if n == 1:
+ return []
+ elif target - start < k:
+ return self._relative_proof(target, start, start + k) + [
+ self._node(start + k, end)
+ ]
+ elif target - start >= k:
+ return self._relative_proof(target, start + k, end) + [
+ self._node(start, start + k)
+ ]
+
+ def inclusion_proof(self, leaf_index):
+ path_elements = self._relative_proof(leaf_index, 0, self.n)
+ return InclusionProof(self.n, leaf_index, path_elements)
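+
+
+# Minimal usage sketch (data values are illustrative):
+if __name__ == "__main__":
+    import hashlib
+
+    data = [b"a", b"b", b"c"]
+    tree = MerkleTree(hashlib.sha256, data)
+    head = tree.head()
+    proof = tree.inclusion_proof(1)
+    assert proof.verify(hashlib.sha256, data[1], 1, len(data), head)
+    # The proof round-trips through the RFC 6962-bis wire format.
+    parsed = InclusionProof.from_rfc6962_bis(proof.to_rfc6962_bis())
+    assert parsed.verify(hashlib.sha256, data[1], 1, len(data), head)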
diff --git a/testing/mozharness/mozharness/mozilla/mozbase.py b/testing/mozharness/mozharness/mozilla/mozbase.py
new file mode 100644
index 0000000000..552ffd850c
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/mozbase.py
@@ -0,0 +1,32 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+from mozharness.base.script import PreScriptAction
+
+
+class MozbaseMixin(object):
+ """Automatically set virtualenv requirements to use mozbase
+ from test package.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(MozbaseMixin, self).__init__(*args, **kwargs)
+
+ @PreScriptAction("create-virtualenv")
+ def _install_mozbase(self, action):
+ dirs = self.query_abs_dirs()
+
+ requirements = os.path.join(
+ dirs["abs_test_install_dir"],
+ "config",
+ self.config.get("mozbase_requirements", "mozbase_requirements.txt"),
+ )
+ if not os.path.isfile(requirements):
+ self.fatal(
+ "Could not find mozbase requirements file: {}".format(requirements)
+ )
+
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
diff --git a/testing/mozharness/mozharness/mozilla/repo_manipulation.py b/testing/mozharness/mozharness/mozilla/repo_manipulation.py
new file mode 100644
index 0000000000..3a5712fadb
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/repo_manipulation.py
@@ -0,0 +1,222 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import six
+
+# pylint --py3k: W1648
+if six.PY2:
+ from ConfigParser import ConfigParser
+else:
+ from configparser import ConfigParser
+
+import json
+import os
+
+from mozharness.base.errors import HgErrorList
+from mozharness.base.log import FATAL, INFO
+from mozharness.base.vcs.mercurial import MercurialVCS
+
+
+class MercurialRepoManipulationMixin(object):
+ def get_version(self, repo_root, version_file="browser/config/version.txt"):
+ version_path = os.path.join(repo_root, version_file)
+ contents = self.read_from_file(version_path, error_level=FATAL)
+        lines = [
+            line
+            for line in contents.splitlines()
+            if line and not line.startswith("#")
+        ]
+ return lines[-1].split(".")
+
+ def replace(self, file_name, from_, to_):
+ """Replace text in a file."""
+ text = self.read_from_file(file_name, error_level=FATAL)
+ new_text = text.replace(from_, to_)
+ if text == new_text:
+ self.fatal("Cannot replace '%s' to '%s' in '%s'" % (from_, to_, file_name))
+ self.write_to_file(file_name, new_text, error_level=FATAL)
+
+ def query_hg_revision(self, path):
+ """Avoid making 'pull' a required action every run, by being able
+ to fall back to figuring out the revision from the cloned repo
+ """
+ m = MercurialVCS(log_obj=self.log_obj, config=self.config)
+ revision = m.get_revision_from_path(path)
+ return revision
+
+ def hg_commit(self, cwd, message, user=None, ignore_no_changes=False):
+ """Commit changes to hg."""
+ cmd = self.query_exe("hg", return_type="list") + ["commit", "-m", message]
+ if user:
+ cmd.extend(["-u", user])
+ success_codes = [0]
+ if ignore_no_changes:
+ success_codes.append(1)
+ self.run_command(
+ cmd,
+ cwd=cwd,
+ error_list=HgErrorList,
+ halt_on_failure=True,
+ success_codes=success_codes,
+ )
+ return self.query_hg_revision(cwd)
+
+ def clean_repos(self):
+ """We may end up with contaminated local repos at some point, but
+ we don't want to have to clobber and reclone from scratch every
+ time.
+
+ This is an attempt to clean up the local repos without needing a
+ clobber.
+ """
+ dirs = self.query_abs_dirs()
+ hg = self.query_exe("hg", return_type="list")
+ hg_repos = self.query_repos()
+ hg_strip_error_list = [
+ {
+ "substr": r"""abort: empty revision set""",
+ "level": INFO,
+ "explanation": "Nothing to clean up; we're good!",
+ }
+ ] + HgErrorList
+ for repo_config in hg_repos:
+ repo_name = repo_config["dest"]
+ repo_path = os.path.join(dirs["abs_work_dir"], repo_name)
+ if os.path.exists(repo_path):
+ # hg up -C to discard uncommitted changes
+ self.run_command(
+ hg + ["up", "-C", "-r", repo_config["branch"]],
+ cwd=repo_path,
+ error_list=HgErrorList,
+ halt_on_failure=True,
+ )
+ # discard unpushed commits
+ status = self.retry(
+ self.run_command,
+ args=(
+ hg
+ + [
+ "--config",
+ "extensions.mq=",
+ "strip",
+ "--no-backup",
+ "outgoing()",
+ ],
+ ),
+ kwargs={
+ "cwd": repo_path,
+ "error_list": hg_strip_error_list,
+ "return_type": "num_errors",
+ "success_codes": (0, 255),
+ },
+ )
+ if status not in [0, 255]:
+ self.fatal("Issues stripping outgoing revisions!")
+ # 2nd hg up -C to make sure we're not on a stranded head
+ # which can happen when reverting debugsetparents
+ self.run_command(
+ hg + ["up", "-C", "-r", repo_config["branch"]],
+ cwd=repo_path,
+ error_list=HgErrorList,
+ halt_on_failure=True,
+ )
+
+ def commit_changes(self):
+ """Do the commit."""
+ hg = self.query_exe("hg", return_type="list")
+ for cwd in self.query_commit_dirs():
+ self.run_command(hg + ["diff"], cwd=cwd)
+ self.hg_commit(
+ cwd,
+ user=self.config["hg_user"],
+ message=self.query_commit_message(),
+ ignore_no_changes=self.config.get("ignore_no_changes", False),
+ )
+ self.info(
+ "Now verify |hg out| and |hg out --patch| if you're paranoid, and --push"
+ )
+
+ def hg_tag(
+ self,
+ cwd,
+ tags,
+ user=None,
+ message=None,
+ revision=None,
+ force=None,
+ halt_on_failure=True,
+ ):
+ if isinstance(tags, six.string_types):
+ tags = [tags]
+ cmd = self.query_exe("hg", return_type="list") + ["tag"]
+ if not message:
+ message = "No bug - Tagging %s" % os.path.basename(cwd)
+ if revision:
+ message = "%s %s" % (message, revision)
+ message = "%s with %s" % (message, ", ".join(tags))
+ message += " a=release DONTBUILD CLOSED TREE"
+ self.info(message)
+ cmd.extend(["-m", message])
+ if user:
+ cmd.extend(["-u", user])
+ if revision:
+ cmd.extend(["-r", revision])
+ if force:
+ cmd.append("-f")
+ cmd.extend(tags)
+ return self.run_command(
+ cmd, cwd=cwd, halt_on_failure=halt_on_failure, error_list=HgErrorList
+ )
+
+ def query_existing_tags(self, cwd, halt_on_failure=True):
+ cmd = self.query_exe("hg", return_type="list") + ["tags"]
+ existing_tags = {}
+ output = self.get_output_from_command(
+ cmd, cwd=cwd, halt_on_failure=halt_on_failure
+ )
+ for line in output.splitlines():
+ parts = line.split(" ")
+ if len(parts) > 1:
+ # existing_tags = {TAG: REVISION, ...}
+ existing_tags[parts[0]] = parts[-1].split(":")[-1]
+ self.info(
+ "existing_tags:\n{}".format(
+ json.dumps(existing_tags, sort_keys=True, indent=4)
+ )
+ )
+ return existing_tags
+
+ def push(self):
+ """"""
+ error_message = """Push failed! If there was a push race, try rerunning
+the script (--clean-repos --pull --migrate). The second run will be faster."""
+ hg = self.query_exe("hg", return_type="list")
+ for cwd in self.query_push_dirs():
+ if not cwd:
+ self.warning("Skipping %s" % cwd)
+ continue
+ push_cmd = hg + ["push"] + self.query_push_args(cwd)
+ if self.config.get("push_dest"):
+ push_cmd.append(self.config["push_dest"])
+ status = self.run_command(
+ push_cmd,
+ cwd=cwd,
+ error_list=HgErrorList,
+ success_codes=[0, 1],
+ )
+ if status == 1:
+ self.warning("No changes for %s!" % cwd)
+ elif status:
+ self.fatal(error_message)
+
+ def edit_repo_hg_rc(self, cwd, section, key, value):
+ hg_rc = self.read_repo_hg_rc(cwd)
+ hg_rc.set(section, key, value)
+
+ with open(self._get_hg_rc_path(cwd), "wb") as f:
+ hg_rc.write(f)
+
+ def read_repo_hg_rc(self, cwd):
+ hg_rc = ConfigParser()
+ hg_rc.read(self._get_hg_rc_path(cwd))
+ return hg_rc
+
+ def _get_hg_rc_path(self, cwd):
+ return os.path.join(cwd, ".hg", "hgrc")
diff --git a/testing/mozharness/mozharness/mozilla/secrets.py b/testing/mozharness/mozharness/mozilla/secrets.py
new file mode 100644
index 0000000000..7ec4c8a2e9
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/secrets.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Support for fetching secrets from the secrets API
+"""
+
+import json
+import os
+
+import six
+from six.moves import urllib
+
+
+class SecretsMixin(object):
+ def _fetch_secret(self, secret_name):
+ self.info("fetching secret {} from API".format(secret_name))
+ # fetch from TASKCLUSTER_PROXY_URL, which points to the taskcluster proxy
+ # within a taskcluster task. Outside of that environment, do not
+ # use this action.
+ proxy = os.environ.get("TASKCLUSTER_PROXY_URL", "http://taskcluster")
+ proxy = proxy.rstrip("/")
+ url = proxy + "/secrets/v1/secret/" + secret_name
+ res = urllib.request.urlopen(url)
+ if res.getcode() != 200:
+ self.fatal("Error fetching from secrets API:" + res.read())
+
+ return json.loads(six.ensure_str(res.read()))["secret"]["content"]
+
+ def get_secrets(self):
+ """
+ Get the secrets specified by the `secret_files` configuration. This is
+ a list of dictionaries, one for each secret. The `secret_name` key
+ names the key in the TaskCluster secrets API to fetch (see
+ http://docs.taskcluster.net/services/secrets/). It can contain
+ %-substitutions based on the `subst` dictionary below.
+
+ Since secrets must be JSON objects, the `content` property of the
+ secret is used as the value to be written to disk.
+
+ The `filename` key in the dictionary gives the filename to which the
+ secret should be written.
+
+ The optional `min_scm_level` key gives a minimum SCM level at which
+ this secret is required. For lower levels, the value of the `default`
+ key or the contents of the file specified by `default-file` is used, or
+ no secret is written.
+
+ The optional `mode` key allows a mode change (chmod) after the file is written.
+ """
+ dirs = self.query_abs_dirs()
+ secret_files = self.config.get("secret_files", [])
+
+ scm_level = int(os.environ.get("MOZ_SCM_LEVEL", "1"))
+ subst = {
+ "scm-level": scm_level,
+ }
+
+ for sf in secret_files:
+ filename = os.path.abspath(sf["filename"])
+ secret_name = sf["secret_name"] % subst
+ min_scm_level = sf.get("min_scm_level", 0)
+ if scm_level < min_scm_level:
+ if "default" in sf:
+ self.info("Using default value for " + filename)
+ secret = sf["default"]
+ elif "default-file" in sf:
+ default_path = sf["default-file"].format(**dirs)
+ with open(default_path, "r") as f:
+ secret = f.read()
+ else:
+ self.info("No default for secret; not writing " + filename)
+ continue
+ else:
+ secret = self._fetch_secret(secret_name)
+
+ open(filename, "w").write(secret)
+
+ if sf.get("mode"):
+ os.chmod(filename, sf["mode"])
diff --git a/testing/mozharness/mozharness/mozilla/structuredlog.py b/testing/mozharness/mozharness/mozilla/structuredlog.py
new file mode 100644
index 0000000000..6193038796
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/structuredlog.py
@@ -0,0 +1,309 @@
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+import json
+from collections import defaultdict, namedtuple
+
+from mozharness.base import log
+from mozharness.base.log import ERROR, INFO, WARNING, OutputParser
+from mozharness.mozilla.automation import (
+ TBPL_FAILURE,
+ TBPL_RETRY,
+ TBPL_SUCCESS,
+ TBPL_WARNING,
+ TBPL_WORST_LEVEL_TUPLE,
+)
+from mozharness.mozilla.testing.errors import TinderBoxPrintRe
+from mozharness.mozilla.testing.unittest import tbox_print_summary
+
+
+class StructuredOutputParser(OutputParser):
+ # The script class using this must inherit the MozbaseMixin to ensure
+ # that mozlog is available.
+ def __init__(self, **kwargs):
+ """Object that tracks the overall status of the test run"""
+ # The 'strict' argument dictates whether the presence of output
+ # from the harness process other than line-delimited json indicates
+ # failure. If it does not, the errors_list parameter may be used
+ # to detect additional failure output from the harness process.
+ if "strict" in kwargs:
+ self.strict = kwargs.pop("strict")
+ else:
+ self.strict = True
+
+ self.suite_category = kwargs.pop("suite_category", None)
+
+ tbpl_compact = kwargs.pop("log_compact", False)
+ self.allow_crashes = kwargs.pop("allow_crashes", False)
+ super(StructuredOutputParser, self).__init__(**kwargs)
+
+ mozlog = self._get_mozlog_module()
+ self.formatter = mozlog.formatters.TbplFormatter(compact=tbpl_compact)
+ self.handler = mozlog.handlers.StatusHandler()
+ self.log_actions = mozlog.structuredlog.log_actions()
+
+ self.worst_log_level = INFO
+ self.tbpl_status = TBPL_SUCCESS
+ self.harness_retry_re = TinderBoxPrintRe["harness_error"]["retry_regex"]
+ self.prev_was_unstructured = False
+
+ def _get_mozlog_module(self):
+ try:
+ import mozlog
+ except ImportError:
+ self.fatal(
+ "A script class using structured logging must inherit "
+ "from the MozbaseMixin to ensure that mozlog is available."
+ )
+ return mozlog
+
+ def _handle_unstructured_output(self, line, log_output=True):
+ self.log_output = log_output
+ return super(StructuredOutputParser, self).parse_single_line(line)
+
+ def parse_single_line(self, line):
+ """Parses a line of log output from the child process and passes
+ it to mozlog to update the overall status of the run.
+ Re-emits the logged line in human-readable format.
+ """
+ level = INFO
+ tbpl_level = TBPL_SUCCESS
+
+ data = None
+ try:
+ candidate_data = json.loads(line)
+ if (
+ isinstance(candidate_data, dict)
+ and "action" in candidate_data
+ and candidate_data["action"] in self.log_actions
+ ):
+ data = candidate_data
+ except ValueError:
+ pass
+
+ if data is None:
+ if self.strict:
+ if not self.prev_was_unstructured:
+ self.critical(
+ (
+ "Test harness output was not a valid structured log message: "
+ "\n%s"
+ )
+ % line
+ )
+ else:
+ self.critical(line)
+ self.update_levels(TBPL_FAILURE, log.CRITICAL)
+ self.prev_was_unstructured = True
+ else:
+ self._handle_unstructured_output(line)
+ return
+
+ self.prev_was_unstructured = False
+
+ self.handler(data)
+
+ action = data["action"]
+ if action in ("log", "process_output"):
+ if action == "log":
+ message = data["message"]
+ level = getattr(log, data["level"].upper())
+ else:
+ message = data["data"]
+
+ # Run log and process_output actions through the error lists, but make sure
+ # the super parser doesn't print them to stdout (they should go through the
+ # log formatter).
+ error_level = self._handle_unstructured_output(message, log_output=False)
+ if error_level is not None:
+ level = self.worst_level(error_level, level)
+
+ if self.harness_retry_re.search(message):
+ self.update_levels(TBPL_RETRY, log.CRITICAL)
+ tbpl_level = TBPL_RETRY
+ level = log.CRITICAL
+
+ log_data = self.formatter(data)
+ if log_data is not None:
+ self.log(log_data, level=level)
+ self.update_levels(tbpl_level, level)
+
+ def _subtract_tuples(self, old, new):
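+ # Illustrative: old={"PASS": 1}, new={"PASS": 3, "FAIL": 1} yields
+ # {"PASS": 2, "FAIL": 1}; non-positive deltas are dropped entirely.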
+ items = set(list(old.keys()) + list(new.keys()))
+ merged = defaultdict(int)
+ for item in items:
+ merged[item] = new.get(item, 0) - old.get(item, 0)
+ if merged[item] <= 0:
+ del merged[item]
+ return merged
+
+ def evaluate_parser(self, return_code, success_codes=None, previous_summary=None):
+ success_codes = success_codes or [0]
+ summary = self.handler.summarize()
+
+ """
+ We can run evaluate_parser multiple times, it will duplicate failures
+ and status which can mean that future tests will fail if a previous test fails.
+ When we have a previous summary, we want to do 2 things:
+ 1) Remove previous data from the new summary to only look at new data
+ 2) Build a joined summary to include the previous + new data
+ """
+ RunSummary = namedtuple(
+ "RunSummary",
+ (
+ "unexpected_statuses",
+ "expected_statuses",
+ "known_intermittent_statuses",
+ "log_level_counts",
+ "action_counts",
+ ),
+ )
+ if previous_summary == {}:
+ previous_summary = RunSummary(
+ defaultdict(int),
+ defaultdict(int),
+ defaultdict(int),
+ defaultdict(int),
+ defaultdict(int),
+ )
+ if previous_summary:
+ # Always preserve retry status: if any failure triggers retry, the script
+ # must exit with TBPL_RETRY to trigger task retry.
+ if self.tbpl_status != TBPL_RETRY:
+ self.tbpl_status = TBPL_SUCCESS
+ joined_summary = summary
+
+ # Remove previously known status messages
+ if "ERROR" in summary.log_level_counts:
+ summary.log_level_counts["ERROR"] -= self.handler.no_tests_run_count
+
+ summary = RunSummary(
+ self._subtract_tuples(
+ previous_summary.unexpected_statuses, summary.unexpected_statuses
+ ),
+ self._subtract_tuples(
+ previous_summary.expected_statuses, summary.expected_statuses
+ ),
+ self._subtract_tuples(
+ previous_summary.known_intermittent_statuses,
+ summary.known_intermittent_statuses,
+ ),
+ self._subtract_tuples(
+ previous_summary.log_level_counts, summary.log_level_counts
+ ),
+ summary.action_counts,
+ )
+
+ # If we have previous data to ignore,
+ # cache it so we don't parse the log multiple times
+ self.summary = summary
+ else:
+ joined_summary = summary
+
+ fail_pair = TBPL_WARNING, WARNING
+ error_pair = TBPL_FAILURE, ERROR
+
+ # These are warning/orange statuses.
+ failure_conditions = [
+ (sum(summary.unexpected_statuses.values()), 0, "statuses", False),
+ (
+ summary.action_counts.get("crash", 0),
+ summary.expected_statuses.get("CRASH", 0),
+ "crashes",
+ self.allow_crashes,
+ ),
+ (
+ summary.action_counts.get("valgrind_error", 0),
+ 0,
+ "valgrind errors",
+ False,
+ ),
+ ]
+ for value, limit, type_name, allow in failure_conditions:
+ if value > limit:
+ msg = "%d unexpected %s" % (value, type_name)
+ if limit != 0:
+ msg += " expected at most %d" % (limit)
+ if not allow:
+ self.update_levels(*fail_pair)
+ msg = "Got " + msg
+ self.error(msg)
+ else:
+ msg = "Ignored " + msg
+ self.warning(msg)
+
+ # These are error/red statuses. A message is output here every time something
+ # wouldn't otherwise be highlighted in the UI.
+ required_actions = {
+ "suite_end": "No suite end message was emitted by this harness.",
+ "test_end": "No checks run.",
+ }
+ for action, diagnostic_message in required_actions.items():
+ if action not in summary.action_counts:
+ self.log(diagnostic_message, ERROR)
+ self.update_levels(*error_pair)
+
+ failure_log_levels = ["ERROR", "CRITICAL"]
+ for level in failure_log_levels:
+ if level in summary.log_level_counts:
+ self.update_levels(*error_pair)
+
+ # If a superclass was used to detect errors with a regex based output parser,
+ # this will be reflected in the status here.
+ if self.num_errors:
+ self.update_levels(*error_pair)
+
+ # Harnesses typically return non-zero on test failure, so don't promote
+ # to error if we already have a failing status.
+ if return_code not in success_codes and self.tbpl_status == TBPL_SUCCESS:
+ self.update_levels(*error_pair)
+
+ return self.tbpl_status, self.worst_log_level, joined_summary
+
+ def update_levels(self, tbpl_level, log_level):
+ self.worst_log_level = self.worst_level(log_level, self.worst_log_level)
+ self.tbpl_status = self.worst_level(
+ tbpl_level, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+
+ def print_summary(self, suite_name):
+ # Summary text is provided for compatibility. Counts were previously
+ # reported as <pass count>/<fail count>/<todo count>; from a structured
+ # log, <expected count>/<unexpected count>/<expected fail count> yields
+ # the equivalent information (the fail count from the prior
+ # implementation includes unexpected passes from "todo" assertions).
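+ # Illustrative output (hypothetical counts):
+ #   "TinderboxPrint: mochitest-plain<br/>2337/0/12"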
+ try:
+ summary = self.summary
+ except AttributeError:
+ summary = self.handler.summarize()
+
+ unexpected_count = sum(summary.unexpected_statuses.values())
+ expected_count = sum(summary.expected_statuses.values())
+ expected_failures = summary.expected_statuses.get("FAIL", 0)
+
+ if unexpected_count:
+ fail_text = '<em class="testfail">%s</em>' % unexpected_count
+ else:
+ fail_text = "0"
+
+ text_summary = "%s/%s/%s" % (expected_count, fail_text, expected_failures)
+ self.info("TinderboxPrint: %s<br/>%s\n" % (suite_name, text_summary))
+
+ def append_tinderboxprint_line(self, suite_name):
+ try:
+ summary = self.summary
+ except AttributeError:
+ summary = self.handler.summarize()
+
+ unexpected_count = sum(summary.unexpected_statuses.values())
+ expected_count = sum(summary.expected_statuses.values())
+ expected_failures = summary.expected_statuses.get("FAIL", 0)
+ crashed = 0
+ if "crash" in summary.action_counts:
+ crashed = summary.action_counts["crash"]
+ text_summary = tbox_print_summary(
+ expected_count, unexpected_count, expected_failures, crashed > 0, False
+ )
+ self.info("TinderboxPrint: %s<br/>%s\n" % (suite_name, text_summary))
diff --git a/testing/mozharness/mozharness/mozilla/testing/__init__.py b/testing/mozharness/mozharness/mozilla/testing/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/__init__.py
diff --git a/testing/mozharness/mozharness/mozilla/testing/android.py b/testing/mozharness/mozharness/mozilla/testing/android.py
new file mode 100644
index 0000000000..0635f025a2
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/android.py
@@ -0,0 +1,723 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import datetime
+import errno
+import functools
+import glob
+import os
+import posixpath
+import re
+import signal
+import subprocess
+import tempfile
+import time
+from threading import Timer
+
+import six
+from mozharness.base.script import PostScriptAction, PreScriptAction
+from mozharness.mozilla.automation import EXIT_STATUS_DICT, TBPL_RETRY
+
+
+def ensure_dir(dir):
+ """Ensures the given directory exists"""
+ if dir and not os.path.exists(dir):
+ try:
+ os.makedirs(dir)
+ except OSError as error:
+ if error.errno != errno.EEXIST:
+ raise
+
+
+class AndroidMixin(object):
+ """
+ Mixin class used by Android test scripts.
+ """
+
+ def __init__(self, **kwargs):
+ self._adb_path = None
+ self._device = None
+ self.app_name = None
+ self.device_name = os.environ.get("DEVICE_NAME", None)
+ self.device_serial = os.environ.get("DEVICE_SERIAL", None)
+ self.device_ip = os.environ.get("DEVICE_IP", None)
+ self.logcat_proc = None
+ self.logcat_file = None
+ self.use_gles3 = False
+ self.xre_path = None
+ super(AndroidMixin, self).__init__(**kwargs)
+
+ @property
+ def adb_path(self):
+ """Get the path to the adb executable."""
+ self.activate_virtualenv()
+ if not self._adb_path:
+ self._adb_path = self.query_exe("adb")
+ return self._adb_path
+
+ @property
+ def device(self):
+ if not self._device:
+ # We must access the adb_path property to activate the
+ # virtualenv before importing mozdevice in order to
+ # import the mozdevice installed into the virtualenv and
+ # not any system-wide installation of mozdevice.
+ adb = self.adb_path
+ import mozdevice
+
+ self._device = mozdevice.ADBDeviceFactory(
+ adb=adb, device=self.device_serial
+ )
+ return self._device
+
+ @property
+ def is_android(self):
+ c = self.config
+ installer_url = c.get("installer_url", None)
+ return (
+ self.device_serial is not None
+ or self.is_emulator
+ or (
+ installer_url is not None
+ and (installer_url.endswith(".apk") or installer_url.endswith(".aab"))
+ )
+ )
+
+ @property
+ def is_emulator(self):
+ c = self.config
+ return True if c.get("emulator_avd_name") else False
+
+ def _get_repo_url(self, path):
+ """
+ Return a url for a file (typically a tooltool manifest) in this hg repo
+ and using this revision (or mozilla-central/default if repo/rev cannot
+ be determined).
+
+ :param path specifies the directory path to the file of interest.
+ """
+ if "GECKO_HEAD_REPOSITORY" in os.environ and "GECKO_HEAD_REV" in os.environ:
+ # probably taskcluster
+ repo = os.environ["GECKO_HEAD_REPOSITORY"]
+ revision = os.environ["GECKO_HEAD_REV"]
+ else:
+ # something unexpected!
+ repo = "https://hg.mozilla.org/mozilla-central"
+ revision = "default"
+ self.warning(
+ "Unable to find repo/revision for manifest; "
+ "using mozilla-central/default"
+ )
+ url = "%s/raw-file/%s/%s" % (repo, revision, path)
+ return url
+
+ def _tooltool_fetch(self, url, dir):
+ c = self.config
+ manifest_path = self.download_file(
+ url, file_name="releng.manifest", parent_dir=dir
+ )
+ if not os.path.exists(manifest_path):
+ self.fatal(
+ "Could not retrieve manifest needed to retrieve "
+ "artifacts from %s" % manifest_path
+ )
+ # from TooltoolMixin, included in TestingMixin
+ self.tooltool_fetch(
+ manifest_path, output_dir=dir, cache=c.get("tooltool_cache", None)
+ )
+
+ def _launch_emulator(self):
+ env = self.query_env()
+
+ # Write a default ddms.cfg to avoid unwanted prompts
+ avd_home_dir = self.abs_dirs["abs_avds_dir"]
+ DDMS_FILE = os.path.join(avd_home_dir, "ddms.cfg")
+ with open(DDMS_FILE, "w") as f:
+ f.write("pingOptIn=false\npingId=0\n")
+ self.info("wrote dummy %s" % DDMS_FILE)
+
+ # Delete emulator auth file, so it doesn't prompt
+ AUTH_FILE = os.path.join(
+ os.path.expanduser("~"), ".emulator_console_auth_token"
+ )
+ if os.path.exists(AUTH_FILE):
+ try:
+ os.remove(AUTH_FILE)
+ self.info("deleted %s" % AUTH_FILE)
+ except Exception:
+ self.warning("failed to remove %s" % AUTH_FILE)
+
+ env["ANDROID_EMULATOR_HOME"] = avd_home_dir
+ avd_path = os.path.join(avd_home_dir, "avd")
+ if os.path.exists(avd_path):
+ env["ANDROID_AVD_HOME"] = avd_path
+ self.info("Found avds at %s" % avd_path)
+ else:
+ self.warning("AVDs missing? Not found at %s" % avd_path)
+
+ if "deprecated_sdk_path" in self.config:
+ sdk_path = os.path.abspath(os.path.join(avd_home_dir, ".."))
+ else:
+ sdk_path = self.abs_dirs["abs_sdk_dir"]
+ if os.path.exists(sdk_path):
+ env["ANDROID_SDK_HOME"] = sdk_path
+ env["ANDROID_SDK_ROOT"] = sdk_path
+ self.info("Found sdk at %s" % sdk_path)
+ else:
+ self.warning("Android sdk missing? Not found at %s" % sdk_path)
+
+ avd_config_path = os.path.join(
+ avd_path, "%s.ini" % self.config["emulator_avd_name"]
+ )
+ avd_folder = os.path.join(avd_path, "%s.avd" % self.config["emulator_avd_name"])
+ if os.path.isfile(avd_config_path):
+ # The ini file points to the absolute path to the emulator folder,
+ # which might be different, so we need to update it.
+ old_config = ""
+ with open(avd_config_path, "r") as config_file:
+ old_config = config_file.readlines()
+ self.info("Old Config: %s" % old_config)
+ with open(avd_config_path, "w") as config_file:
+ for line in old_config:
+ if line.startswith("path="):
+ config_file.write("path=%s\n" % avd_folder)
+ self.info("Updating path from: %s" % line)
+ else:
+ # lines from readlines() already end with a newline
+ config_file.write(line)
+ else:
+ self.warning("Could not find config path at %s" % avd_config_path)
+
+ # Toggle GLES 3.0 support (GLESDynamicVersion) in advancedFeatures.ini
+ AF_FILE = os.path.join(avd_home_dir, "advancedFeatures.ini")
+ with open(AF_FILE, "w") as f:
+ if self.use_gles3:
+ f.write("GLESDynamicVersion=on\n")
+ else:
+ f.write("GLESDynamicVersion=off\n")
+
+ # extra diagnostics for kvm acceleration
+ emu = self.config.get("emulator_process_name")
+ if os.path.exists("/dev/kvm") and emu and "x86" in emu:
+ try:
+ self.run_command(["ls", "-l", "/dev/kvm"])
+ self.run_command(["kvm-ok"])
+ self.run_command(["emulator", "-accel-check"], env=env)
+ except Exception as e:
+ self.warning("Extra kvm diagnostics failed: %s" % str(e))
+
+ self.info("emulator env: %s" % str(env))
+ command = ["emulator", "-avd", self.config["emulator_avd_name"]]
+ if "emulator_extra_args" in self.config:
+ command += self.config["emulator_extra_args"]
+
+ dir = self.query_abs_dirs()["abs_blob_upload_dir"]
+ tmp_file = tempfile.NamedTemporaryFile(
+ mode="w", prefix="emulator-", suffix=".log", dir=dir, delete=False
+ )
+ self.info("Launching the emulator with: %s" % " ".join(command))
+ self.info("Writing log to %s" % tmp_file.name)
+ proc = subprocess.Popen(
+ command, stdout=tmp_file, stderr=tmp_file, env=env, bufsize=0
+ )
+ return proc
+
+ def _verify_emulator(self):
+ boot_ok = self._retry(
+ 30,
+ 10,
+ self.is_boot_completed,
+ "Verify Android boot completed",
+ max_time=330,
+ )
+ if not boot_ok:
+ self.warning("Unable to verify Android boot completion")
+ return False
+ return True
+
+ def _verify_emulator_and_restart_on_fail(self):
+ emulator_ok = self._verify_emulator()
+ if not emulator_ok:
+ self.device_screenshot("screenshot-emulator-start")
+ self.kill_processes(self.config["emulator_process_name"])
+ subprocess.check_call(["ps", "-ef"])
+ # remove emulator tmp files
+ for dir in glob.glob("/tmp/android-*"):
+ self.rmtree(dir)
+ time.sleep(5)
+ self.emulator_proc = self._launch_emulator()
+ return emulator_ok
+
+ def _retry(self, max_attempts, interval, func, description, max_time=0):
+ """
+ Execute func until it returns True, up to max_attempts times, waiting for
+ interval seconds between each attempt. description is logged on each attempt.
+ If max_time is specified, no further attempts will be made once max_time
+ seconds have elapsed; this provides some protection for the case where
+ the run-time for func is long or highly variable.
+ """
+ status = False
+ attempts = 0
+ if max_time > 0:
+ end_time = datetime.datetime.now() + datetime.timedelta(seconds=max_time)
+ else:
+ end_time = None
+ while attempts < max_attempts and not status:
+ if (end_time is not None) and (datetime.datetime.now() > end_time):
+ self.info(
+ "Maximum retry run-time of %d seconds exceeded; "
+ "remaining attempts abandoned" % max_time
+ )
+ break
+ if attempts != 0:
+ self.info("Sleeping %d seconds" % interval)
+ time.sleep(interval)
+ attempts += 1
+ self.info(
+ ">> %s: Attempt #%d of %d" % (description, attempts, max_attempts)
+ )
+ status = func()
+ return status
+
+ def dump_perf_info(self):
+ """
+ Dump some host and android device performance-related information
+ to an artifact file, to help understand task performance.
+ """
+ dir = self.query_abs_dirs()["abs_blob_upload_dir"]
+ perf_path = os.path.join(dir, "android-performance.log")
+ with open(perf_path, "w") as f:
+ f.write("\n\nHost cpufreq/scaling_governor:\n")
+ cpus = glob.glob("/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor")
+ for cpu in cpus:
+ out = subprocess.check_output(["cat", cpu], universal_newlines=True)
+ f.write("%s: %s" % (cpu, out))
+
+ f.write("\n\nHost /proc/cpuinfo:\n")
+ out = subprocess.check_output(
+ ["cat", "/proc/cpuinfo"], universal_newlines=True
+ )
+ f.write(out)
+
+ f.write("\n\nHost /proc/meminfo:\n")
+ out = subprocess.check_output(
+ ["cat", "/proc/meminfo"], universal_newlines=True
+ )
+ f.write(out)
+
+ f.write("\n\nHost process list:\n")
+ out = subprocess.check_output(["ps", "-ef"], universal_newlines=True)
+ f.write(out)
+
+ f.write("\n\nDevice /proc/cpuinfo:\n")
+ cmd = "cat /proc/cpuinfo"
+ out = self.shell_output(cmd)
+ f.write(out)
+ cpuinfo = out
+
+ f.write("\n\nDevice /proc/meminfo:\n")
+ cmd = "cat /proc/meminfo"
+ out = self.shell_output(cmd)
+ f.write(out)
+
+ f.write("\n\nDevice process list:\n")
+ cmd = "ps"
+ out = self.shell_output(cmd)
+ f.write(out)
+
+ # Search android cpuinfo for "BogoMIPS"; if found and < (minimum), retry
+ # this task, in hopes of getting a higher-powered environment.
+ # (Carry on silently if BogoMIPS is not found -- this may vary by
+ # Android implementation -- no big deal.)
+ # See bug 1321605: Sometimes the emulator is really slow, and
+ # low bogomips can be a good predictor of that condition.
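+ # Illustrative cpuinfo line (format varies by Android implementation):
+ #   "BogoMIPS        : 250.00"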
+ bogomips_minimum = int(self.config.get("bogomips_minimum") or 0)
+ for line in cpuinfo.split("\n"):
+ m = re.match("BogoMIPS.*: (\d*)", line, re.IGNORECASE)
+ if m:
+ bogomips = int(m.group(1))
+ if bogomips_minimum > 0 and bogomips < bogomips_minimum:
+ self.fatal(
+ "INFRA-ERROR: insufficient Android bogomips (%d < %d)"
+ % (bogomips, bogomips_minimum),
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+ self.info("Found Android bogomips: %d" % bogomips)
+ break
+
+ def logcat_path(self):
+ logcat_filename = "logcat-%s.log" % self.device_serial
+ return os.path.join(
+ self.query_abs_dirs()["abs_blob_upload_dir"], logcat_filename
+ )
+
+ def logcat_start(self):
+ """
+ Start recording logcat. Writes logcat to the upload directory.
+ """
+ # Start logcat for the device. The adb process runs until the
+ # corresponding device is stopped. Output is written directly to
+ # the blobber upload directory so that it is uploaded automatically
+ # at the end of the job.
+ self.logcat_file = open(self.logcat_path(), "w")
+ logcat_cmd = [
+ self.adb_path,
+ "-s",
+ self.device_serial,
+ "logcat",
+ "-v",
+ "threadtime",
+ "Trace:S",
+ "StrictMode:S",
+ "ExchangeService:S",
+ ]
+ self.info(" ".join(logcat_cmd))
+ self.logcat_proc = subprocess.Popen(
+ logcat_cmd, stdout=self.logcat_file, stdin=subprocess.PIPE
+ )
+
+ def logcat_stop(self):
+ """
+ Stop logcat process started by logcat_start.
+ """
+ if self.logcat_proc:
+ self.info("Killing logcat pid %d." % self.logcat_proc.pid)
+ self.logcat_proc.kill()
+ self.logcat_file.close()
+
+ def _install_android_app_retry(self, app_path, replace):
+ import mozdevice
+
+ try:
+ if app_path.endswith(".aab"):
+ self.device.install_app_bundle(
+ self.query_abs_dirs()["abs_bundletool_path"], app_path, timeout=120
+ )
+ self.device.run_as_package = self.query_package_name()
+ else:
+ self.device.run_as_package = self.device.install_app(
+ app_path, replace=replace, timeout=120
+ )
+ return True
+ except (
+ mozdevice.ADBError,
+ mozdevice.ADBProcessError,
+ mozdevice.ADBTimeoutError,
+ ) as e:
+ self.info(
+ "Failed to install %s on %s: %s %s"
+ % (app_path, self.device_name, type(e).__name__, e)
+ )
+ return False
+
+ def install_android_app(self, app_path, replace=False):
+ """
+ Install the specified app.
+ """
+ app_installed = self._retry(
+ 5,
+ 10,
+ functools.partial(self._install_android_app_retry, app_path, replace),
+ "Install app",
+ )
+
+ if not app_installed:
+ self.fatal(
+ "INFRA-ERROR: Failed to install %s" % os.path.basename(app_path),
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+
+ def uninstall_android_app(self):
+ """
+ Uninstall the app associated with the configured package name, if it is
+ installed.
+ """
+ import mozdevice
+
+ try:
+ package_name = self.query_package_name()
+ self.device.uninstall_app(package_name)
+ except (
+ mozdevice.ADBError,
+ mozdevice.ADBProcessError,
+ mozdevice.ADBTimeoutError,
+ ) as e:
+ self.info(
+ "Failed to uninstall %s from %s: %s %s"
+ % (package_name, self.device_name, type(e).__name__, e)
+ )
+ self.fatal(
+ "INFRA-ERROR: %s Failed to uninstall %s"
+ % (type(e).__name__, package_name),
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+
+ def is_boot_completed(self):
+ import mozdevice
+
+ try:
+ return self.device.is_device_ready(timeout=30)
+ except (ValueError, mozdevice.ADBError, mozdevice.ADBTimeoutError):
+ pass
+ return False
+
+ def shell_output(self, cmd, enable_run_as=False):
+ import mozdevice
+
+ try:
+ return self.device.shell_output(
+ cmd, timeout=30, enable_run_as=enable_run_as
+ )
+ except mozdevice.ADBTimeoutError as e:
+ self.info(
+ "Failed to run shell command %s from %s: %s %s"
+ % (cmd, self.device_name, type(e).__name__, e)
+ )
+ self.fatal(
+ "INFRA-ERROR: %s Failed to run shell command %s"
+ % (type(e).__name__, cmd),
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+
+ def device_screenshot(self, prefix):
+ """
+ On emulator, save a screenshot of the entire screen to the upload directory;
+ otherwise, save a screenshot of the device to the upload directory.
+
+ :param prefix specifies a filename prefix for the screenshot
+ """
+ from mozscreenshot import dump_device_screen, dump_screen
+
+ reset_dir = False
+ if not os.environ.get("MOZ_UPLOAD_DIR", None):
+ dirs = self.query_abs_dirs()
+ os.environ["MOZ_UPLOAD_DIR"] = dirs["abs_blob_upload_dir"]
+ reset_dir = True
+ if self.is_emulator:
+ if self.xre_path:
+ dump_screen(self.xre_path, self, prefix=prefix)
+ else:
+ self.info("Not saving screenshot: no XRE configured")
+ else:
+ dump_device_screen(self.device, self, prefix=prefix)
+ if reset_dir:
+ del os.environ["MOZ_UPLOAD_DIR"]
+
+ def download_hostutils(self, xre_dir):
+ """
+ Download and install hostutils from tooltool.
+ """
+ xre_path = None
+ self.rmtree(xre_dir)
+ self.mkdir_p(xre_dir)
+ if self.config["hostutils_manifest_path"]:
+ url = self._get_repo_url(self.config["hostutils_manifest_path"])
+ self._tooltool_fetch(url, xre_dir)
+ for p in glob.glob(os.path.join(xre_dir, "host-utils-*")):
+ if os.path.isdir(p) and os.path.isfile(os.path.join(p, "xpcshell")):
+ xre_path = p
+ if not xre_path:
+ self.fatal("xre path not found in %s" % xre_dir)
+ else:
+ self.fatal("configure hostutils_manifest_path!")
+ return xre_path
+
+ def query_package_name(self):
+ if self.app_name is None:
+ # For convenience, assume geckoview.test/geckoview_example when install
+ # target looks like geckoview.
+ if "androidTest" in self.installer_path:
+ self.app_name = "org.mozilla.geckoview.test"
+ elif "test_runner" in self.installer_path:
+ self.app_name = "org.mozilla.geckoview.test_runner"
+ elif "geckoview" in self.installer_path:
+ self.app_name = "org.mozilla.geckoview_example"
+ if self.app_name is None:
+ # Find appname from package-name.txt - assumes download-and-extract
+ # has completed successfully.
+ # The app/package name will typically be org.mozilla.fennec,
+ # but org.mozilla.firefox for release builds, and there may be
+ # other variations. 'aapt dump badging <apk>' could be used as an
+ # alternative to package-name.txt, but introduces a dependency
+ # on aapt, found currently in the Android SDK build-tools component.
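+ # package-name.txt holds a single line with the package name, e.g.
+ # "org.mozilla.fennec" (illustrative).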
+ app_dir = self.abs_dirs["abs_work_dir"]
+ self.app_path = os.path.join(app_dir, self.installer_path)
+ unzip = self.query_exe("unzip")
+ package_path = os.path.join(app_dir, "package-name.txt")
+ unzip_cmd = [unzip, "-q", "-o", self.app_path]
+ self.run_command(unzip_cmd, cwd=app_dir, halt_on_failure=True)
+ self.app_name = str(
+ self.read_from_file(package_path, verbose=True)
+ ).rstrip()
+ return self.app_name
+
+ def kill_processes(self, process_name):
+ self.info("Killing every process called %s" % process_name)
+ process_name = six.ensure_binary(process_name)
+ out = subprocess.check_output(["ps", "-A"])
+ for line in out.splitlines():
+ if process_name in line:
+ pid = int(line.split(None, 1)[0])
+ self.info("Killing pid %d." % pid)
+ os.kill(pid, signal.SIGKILL)
+
+ def delete_ANRs(self):
+ remote_dir = self.device.stack_trace_dir
+ try:
+ if not self.device.is_dir(remote_dir):
+ self.device.mkdir(remote_dir)
+ self.info("%s created" % remote_dir)
+ return
+ self.device.chmod(remote_dir, recursive=True)
+ for trace_file in self.device.ls(remote_dir, recursive=True):
+ trace_path = posixpath.join(remote_dir, trace_file)
+ if self.device.is_file(trace_path):
+ self.device.rm(trace_path)
+ self.info("%s deleted" % trace_path)
+ except Exception as e:
+ self.info(
+ "failed to delete %s: %s %s" % (remote_dir, type(e).__name__, str(e))
+ )
+
+ def check_for_ANRs(self):
+ """
+ Copy ANR (stack trace) files from device to upload directory.
+ """
+ dirs = self.query_abs_dirs()
+ remote_dir = self.device.stack_trace_dir
+ try:
+ if not self.device.is_dir(remote_dir):
+ self.info("%s not found; ANR check skipped" % remote_dir)
+ return
+ self.device.chmod(remote_dir, recursive=True)
+ self.device.pull(remote_dir, dirs["abs_blob_upload_dir"])
+ self.delete_ANRs()
+ except Exception as e:
+ self.info(
+ "failed to pull %s: %s %s" % (remote_dir, type(e).__name__, str(e))
+ )
+
+ def delete_tombstones(self):
+ remote_dir = "/data/tombstones"
+ try:
+ if not self.device.is_dir(remote_dir):
+ self.device.mkdir(remote_dir)
+ self.info("%s created" % remote_dir)
+ return
+ self.device.chmod(remote_dir, recursive=True)
+ for trace_file in self.device.ls(remote_dir, recursive=True):
+ trace_path = posixpath.join(remote_dir, trace_file)
+ if self.device.is_file(trace_path):
+ self.device.rm(trace_path)
+ self.info("%s deleted" % trace_path)
+ except Exception as e:
+ self.info(
+ "failed to delete %s: %s %s" % (remote_dir, type(e).__name__, str(e))
+ )
+
+ def check_for_tombstones(self):
+ """
+ Copy tombstone files from device to upload directory.
+ """
+ dirs = self.query_abs_dirs()
+ remote_dir = "/data/tombstones"
+ try:
+ if not self.device.is_dir(remote_dir):
+ self.info("%s not found; tombstone check skipped" % remote_dir)
+ return
+ self.device.chmod(remote_dir, recursive=True)
+ self.device.pull(remote_dir, dirs["abs_blob_upload_dir"])
+ self.delete_tombstones()
+ except Exception as e:
+ self.info(
+ "failed to pull %s: %s %s" % (remote_dir, type(e).__name__, str(e))
+ )
+
+ # Script actions
+
+ def start_emulator(self):
+ """
+ Starts the emulator
+ """
+ if not self.is_emulator:
+ return
+
+ dirs = self.query_abs_dirs()
+ ensure_dir(dirs["abs_work_dir"])
+ ensure_dir(dirs["abs_blob_upload_dir"])
+
+ if not os.path.isfile(self.adb_path):
+ self.fatal("The adb binary '%s' is not a valid file!" % self.adb_path)
+ self.kill_processes("xpcshell")
+ self.emulator_proc = self._launch_emulator()
+
+ def verify_device(self):
+ """
+ Check to see if the emulator can be contacted via adb.
+ If any communication attempt fails, kill the emulator, re-launch, and re-check.
+ """
+ if not self.is_android:
+ return
+
+ if self.is_emulator:
+ max_restarts = 5
+ emulator_ok = self._retry(
+ max_restarts,
+ 10,
+ self._verify_emulator_and_restart_on_fail,
+ "Check emulator",
+ )
+ if not emulator_ok:
+ self.fatal(
+ "INFRA-ERROR: Unable to start emulator after %d attempts"
+ % max_restarts,
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+
+ self.mkdir_p(self.query_abs_dirs()["abs_blob_upload_dir"])
+ self.dump_perf_info()
+ self.logcat_start()
+ self.delete_ANRs()
+ self.delete_tombstones()
+ self.info("verify_device complete")
+
+ @PreScriptAction("run-tests")
+ def timed_screenshots(self, action, success=None):
+ """
+ If configured, start screenshot timers.
+ """
+ if not self.is_android:
+ return
+
+ def take_screenshot(seconds):
+ self.device_screenshot("screenshot-%ss-" % str(seconds))
+ self.info("timed (%ss) screenshot complete" % str(seconds))
+
+ self.timers = []
+ for seconds in self.config.get("screenshot_times", []):
+ self.info("screenshot requested %s seconds from now" % str(seconds))
+ t = Timer(int(seconds), take_screenshot, [seconds])
+ t.start()
+ self.timers.append(t)
+
+ @PostScriptAction("run-tests")
+ def stop_device(self, action, success=None):
+ """
+ Stop logcat and kill the emulator, if necessary.
+ """
+ if not self.is_android:
+ return
+
+ for t in self.timers:
+ t.cancel()
+ if self.worst_status != TBPL_RETRY:
+ self.check_for_ANRs()
+ self.check_for_tombstones()
+ else:
+ self.info("ANR and tombstone checks skipped due to TBPL_RETRY")
+ self.logcat_stop()
+ if self.is_emulator:
+ self.kill_processes(self.config["emulator_process_name"])
diff --git a/testing/mozharness/mozharness/mozilla/testing/codecoverage.py b/testing/mozharness/mozharness/mozilla/testing/codecoverage.py
new file mode 100644
index 0000000000..ec86cc3f14
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/codecoverage.py
@@ -0,0 +1,663 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import errno
+import json
+import os
+import posixpath
+import shutil
+import sys
+import tempfile
+import uuid
+import zipfile
+
+import mozinfo
+from mozharness.base.script import PostScriptAction, PreScriptAction
+from mozharness.mozilla.testing.per_test_base import SingleTestMixin
+
+code_coverage_config_options = [
+ [
+ ["--code-coverage"],
+ {
+ "action": "store_true",
+ "dest": "code_coverage",
+ "default": False,
+ "help": "Whether gcov c++ code coverage should be run.",
+ },
+ ],
+ [
+ ["--per-test-coverage"],
+ {
+ "action": "store_true",
+ "dest": "per_test_coverage",
+ "default": False,
+ "help": "Whether per-test coverage should be collected.",
+ },
+ ],
+ [
+ ["--disable-ccov-upload"],
+ {
+ "action": "store_true",
+ "dest": "disable_ccov_upload",
+ "default": False,
+ "help": "Whether test run should package and upload code coverage data.",
+ },
+ ],
+ [
+ ["--java-code-coverage"],
+ {
+ "action": "store_true",
+ "dest": "java_code_coverage",
+ "default": False,
+ "help": "Whether Java code coverage should be run.",
+ },
+ ],
+]
+
+
+class CodeCoverageMixin(SingleTestMixin):
+ """
+ Mixin for setting GCOV_PREFIX during test execution, packaging up
+ the resulting .gcda files and uploading them to blobber.
+ """
+
+ gcov_dir = None
+ grcov_dir = None
+ grcov_bin = None
+ jsvm_dir = None
+ prefix = None
+ per_test_reports = {}
+
+ def __init__(self, **kwargs):
+ if mozinfo.os == "linux" or mozinfo.os == "mac":
+ self.grcov_bin = "grcov"
+ elif mozinfo.os == "win":
+ self.grcov_bin = "grcov.exe"
+ else:
+ raise Exception("Unexpected OS: {}".format(mozinfo.os))
+
+ super(CodeCoverageMixin, self).__init__(**kwargs)
+
+ @property
+ def code_coverage_enabled(self):
+ try:
+ return bool(self.config.get("code_coverage"))
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ @property
+ def per_test_coverage(self):
+ try:
+ return bool(self.config.get("per_test_coverage"))
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ @property
+ def ccov_upload_disabled(self):
+ try:
+ return bool(self.config.get("disable_ccov_upload"))
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ @property
+ def jsd_code_coverage_enabled(self):
+ try:
+ return bool(self.config.get("jsd_code_coverage"))
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ @property
+ def java_code_coverage_enabled(self):
+ try:
+ return bool(self.config.get("java_code_coverage"))
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ def _setup_cpp_js_coverage_tools(self):
+ fetches_dir = os.environ["MOZ_FETCHES_DIR"]
+ with open(os.path.join(fetches_dir, "target.mozinfo.json"), "r") as f:
+ build_mozinfo = json.load(f)
+
+ self.prefix = build_mozinfo["topsrcdir"]
+
+ strip_count = len(list(filter(None, self.prefix.split("/"))))
+ os.environ["GCOV_PREFIX_STRIP"] = str(strip_count)
+
+ # Download the gcno archive from the build machine.
+ url_to_gcno = self.query_build_dir_url("target.code-coverage-gcno.zip")
+ self.download_file(url_to_gcno, parent_dir=self.grcov_dir)
+
+ # Download the chrome-map.json file from the build machine.
+ url_to_chrome_map = self.query_build_dir_url("chrome-map.json")
+ self.download_file(url_to_chrome_map, parent_dir=self.grcov_dir)
+
+ def _setup_java_coverage_tools(self):
+ # Download and extract jacoco-cli from the build task.
+ url_to_jacoco = self.query_build_dir_url("target.jacoco-cli.jar")
+ self.jacoco_jar = os.path.join(tempfile.mkdtemp(), "target.jacoco-cli.jar")
+ self.download_file(url_to_jacoco, self.jacoco_jar)
+
+ # Download and extract class files from the build task.
+ self.classfiles_dir = tempfile.mkdtemp()
+ for archive in ["target.geckoview_classfiles.zip", "target.app_classfiles.zip"]:
+ url_to_classfiles = self.query_build_dir_url(archive)
+ classfiles_zip_path = os.path.join(self.classfiles_dir, archive)
+ self.download_file(url_to_classfiles, classfiles_zip_path)
+ with zipfile.ZipFile(classfiles_zip_path, "r") as z:
+ z.extractall(self.classfiles_dir)
+ os.remove(classfiles_zip_path)
+
+ # Create the directory where the emulator coverage file will be placed.
+ self.java_coverage_output_dir = tempfile.mkdtemp()
+
+ @PostScriptAction("download-and-extract")
+ def setup_coverage_tools(self, action, success=None):
+ if not self.code_coverage_enabled and not self.java_code_coverage_enabled:
+ return
+
+ self.grcov_dir = os.path.join(os.environ["MOZ_FETCHES_DIR"], "grcov")
+ if not os.path.isfile(os.path.join(self.grcov_dir, self.grcov_bin)):
+ raise Exception(
+ "File not found: {}".format(
+ os.path.join(self.grcov_dir, self.grcov_bin)
+ )
+ )
+
+ if self.code_coverage_enabled:
+ self._setup_cpp_js_coverage_tools()
+
+ if self.java_code_coverage_enabled:
+ self._setup_java_coverage_tools()
+
+ @PostScriptAction("download-and-extract")
+ def find_tests_for_coverage(self, action, success=None):
+ """
+ For each file modified on this push, determine if the modified file
+ is a test, by searching test manifests. Populate self.verify_suites
+ with test files, organized by suite.
+
+ This depends on test manifests, so can only run after test zips have
+ been downloaded and extracted.
+ """
+ if not self.per_test_coverage:
+ return
+
+ self.find_modified_tests()
+
+ # TODO: Add tests that haven't been run for a while (a week? N pushes?)
+
+ # Add baseline code coverage collection tests
+ baseline_tests_by_ext = {
+ ".html": {
+ "test": "testing/mochitest/baselinecoverage/plain/test_baselinecoverage.html",
+ "suite": "mochitest-plain",
+ },
+ ".js": {
+ "test": "testing/mochitest/baselinecoverage/browser_chrome/browser_baselinecoverage.js", # NOQA: E501
+ "suite": "mochitest-browser-chrome",
+ },
+ ".xhtml": {
+ "test": "testing/mochitest/baselinecoverage/chrome/test_baselinecoverage.xhtml",
+ "suite": "mochitest-chrome",
+ },
+ }
+
+ baseline_tests_by_suite = {
+ "mochitest-browser-chrome": "testing/mochitest/baselinecoverage/browser_chrome/"
+ "browser_baselinecoverage_browser-chrome.js"
+ }
+
+ wpt_baseline_test = "tests/web-platform/mozilla/tests/baselinecoverage/wpt_baselinecoverage.html" # NOQA: E501
+ if self.config.get("per_test_category") == "web-platform":
+ if "testharness" not in self.suites:
+ self.suites["testharness"] = []
+ if wpt_baseline_test not in self.suites["testharness"]:
+ self.suites["testharness"].append(wpt_baseline_test)
+ return
+
+ # Go through all the tests and find all
+ # the baseline tests that are needed.
+ tests_to_add = {}
+ for suite in self.suites:
+ if len(self.suites[suite]) == 0:
+ continue
+ if suite in baseline_tests_by_suite:
+ if suite not in tests_to_add:
+ tests_to_add[suite] = []
+ tests_to_add[suite].append(baseline_tests_by_suite[suite])
+ continue
+
+ # Default to file types if the suite has no baseline
+ for test in self.suites[suite]:
+ _, test_ext = os.path.splitext(test)
+
+ if test_ext not in baseline_tests_by_ext:
+ # Add the '.js' test as a default baseline
+ # if none other exists.
+ test_ext = ".js"
+ baseline_test_suite = baseline_tests_by_ext[test_ext]["suite"]
+ baseline_test_name = baseline_tests_by_ext[test_ext]["test"]
+
+ if baseline_test_suite not in tests_to_add:
+ tests_to_add[baseline_test_suite] = []
+ if baseline_test_name not in tests_to_add[baseline_test_suite]:
+ tests_to_add[baseline_test_suite].append(baseline_test_name)
+
+ # Add all baseline tests needed
+ for suite in tests_to_add:
+ for test in tests_to_add[suite]:
+ if suite not in self.suites:
+ self.suites[suite] = []
+ if test not in self.suites[suite]:
+ self.suites[suite].append(test)
+
+ @property
+ def coverage_args(self):
+ return []
+
+ def set_coverage_env(self, env, is_baseline_test=False):
+ # Set the GCOV directory.
+ self.gcov_dir = tempfile.mkdtemp()
+ env["GCOV_PREFIX"] = self.gcov_dir
+
+ # Set the GCOV/JSVM directories where counters will be dumped in per-test mode.
+ if self.per_test_coverage and not is_baseline_test:
+ env["GCOV_RESULTS_DIR"] = tempfile.mkdtemp()
+ env["JSVM_RESULTS_DIR"] = tempfile.mkdtemp()
+
+ # Set JSVM directory.
+ self.jsvm_dir = tempfile.mkdtemp()
+ env["JS_CODE_COVERAGE_OUTPUT_DIR"] = self.jsvm_dir
+
+ @PreScriptAction("run-tests")
+ def _set_gcov_prefix(self, action):
+ if not self.code_coverage_enabled:
+ return
+
+ if self.per_test_coverage:
+ return
+
+ self.set_coverage_env(os.environ)
+
+ def parse_coverage_artifacts(
+ self,
+ gcov_dir,
+ jsvm_dir,
+ merge=False,
+ output_format="lcov",
+ filter_covered=False,
+ ):
+ jsvm_output_file = "jsvm_lcov_output.info"
+ grcov_output_file = "grcov_lcov_output.info"
+
+ dirs = self.query_abs_dirs()
+
+ sys.path.append(dirs["abs_test_install_dir"])
+ sys.path.append(os.path.join(dirs["abs_test_install_dir"], "mozbuild"))
+
+ from codecoverage.lcov_rewriter import LcovFileRewriter
+
+ jsvm_files = [os.path.join(jsvm_dir, e) for e in os.listdir(jsvm_dir)]
+ rewriter = LcovFileRewriter(os.path.join(self.grcov_dir, "chrome-map.json"))
+ rewriter.rewrite_files(jsvm_files, jsvm_output_file, "")
+
+ # Run grcov on the zipped .gcno and .gcda files.
+ grcov_command = [
+ os.path.join(self.grcov_dir, self.grcov_bin),
+ "-t",
+ output_format,
+ "-p",
+ self.prefix,
+ "--ignore",
+ "**/fetches/*",
+ os.path.join(self.grcov_dir, "target.code-coverage-gcno.zip"),
+ gcov_dir,
+ ]
+
+ if "coveralls" in output_format:
+ grcov_command += ["--token", "UNUSED", "--commit-sha", "UNUSED"]
+
+ if merge:
+ grcov_command += [jsvm_output_file]
+
+ if mozinfo.os == "win" or mozinfo.os == "mac":
+ grcov_command += ["--llvm"]
+
+ if filter_covered:
+ grcov_command += ["--filter", "covered"]
+
+ # get_output_from_command returns a tuple: the first element is the path
+ # to the grcov output, the second the path to its standard error output.
+ tmp_output_file, _ = self.get_output_from_command(
+ grcov_command,
+ silent=True,
+ save_tmpfiles=True,
+ return_type="files",
+ throw_exception=True,
+ )
+ shutil.move(tmp_output_file, grcov_output_file)
+
+ shutil.rmtree(gcov_dir)
+ shutil.rmtree(jsvm_dir)
+
+ if merge:
+ os.remove(jsvm_output_file)
+ return grcov_output_file
+ else:
+ return grcov_output_file, jsvm_output_file
+
+ def add_per_test_coverage_report(self, env, suite, test):
+ gcov_dir = env.get("GCOV_RESULTS_DIR", self.gcov_dir)
+ jsvm_dir = env.get("JSVM_RESULTS_DIR", self.jsvm_dir)
+
+ grcov_file = self.parse_coverage_artifacts(
+ gcov_dir,
+ jsvm_dir,
+ merge=True,
+ output_format="coveralls",
+ filter_covered=True,
+ )
+
+ report_file = str(uuid.uuid4()) + ".json"
+ shutil.move(grcov_file, report_file)
+
+ # Get the test path relative to topsrcdir.
+ # This mapping is constructed by self.find_modified_tests().
+ test = self.test_src_path.get(test.replace(os.sep, posixpath.sep), test)
+
+ # Log a warning if the test path is still an absolute path.
+ if os.path.isabs(test):
+ self.warn("Found absolute path for test: {}".format(test))
+
+ if suite not in self.per_test_reports:
+ self.per_test_reports[suite] = {}
+ assert test not in self.per_test_reports[suite]
+ self.per_test_reports[suite][test] = report_file
+
+ if "GCOV_RESULTS_DIR" in env:
+ assert "JSVM_RESULTS_DIR" in env
+ # In this case, parse_coverage_artifacts has removed GCOV_RESULTS_DIR and
+ # JSVM_RESULTS_DIR so we need to remove GCOV_PREFIX and JS_CODE_COVERAGE_OUTPUT_DIR.
+ shutil.rmtree(self.gcov_dir)
+ shutil.rmtree(self.jsvm_dir)
+
+ def is_covered(self, sf):
+ # For C/C++ source files, we can consider a file as being uncovered
+ # when all its source lines are uncovered.
+ all_lines_uncovered = all(c is None or c == 0 for c in sf["coverage"])
+ if all_lines_uncovered:
+ return False
+
+ # For JavaScript files, we can't do the same, as the top-level is always
+ # executed, even if it just contains declarations. So, we need to check if
+ # all its functions, except the top-level, are uncovered.
+ functions = sf["functions"] if "functions" in sf else []
+ all_functions_uncovered = all(
+ not f["exec"] or f["name"] == "top-level" for f in functions
+ )
+ if all_functions_uncovered and len(functions) > 1:
+ return False
+
+ return True
+
+ @PostScriptAction("run-tests")
+ def _package_coverage_data(self, action, success=None):
+ dirs = self.query_abs_dirs()
+
+ if not self.code_coverage_enabled:
+ return
+
+ if self.per_test_coverage:
+ if not self.per_test_reports:
+ self.info("No tests were found...not saving coverage data.")
+ return
+
+ # Get the baseline tests that were run.
+ baseline_tests_ext_cov = {}
+ baseline_tests_suite_cov = {}
+ for suite, data in self.per_test_reports.items():
+ for test, grcov_file in data.items():
+ if "baselinecoverage" not in test:
+ continue
+
+ # TODO: Optimize this part which loads JSONs
+ # with a size of about 40Mb into memory for diffing later.
+ # Bug 1460064 is filed for this.
+ with open(grcov_file, "r") as f:
+ data = json.load(f)
+
+ if suite in os.path.split(test)[-1]:
+ baseline_tests_suite_cov[suite] = data
+ else:
+ _, baseline_filetype = os.path.splitext(test)
+ baseline_tests_ext_cov[baseline_filetype] = data
+
+ dest = os.path.join(
+ dirs["abs_blob_upload_dir"], "per-test-coverage-reports.zip"
+ )
+ with zipfile.ZipFile(dest, "w", zipfile.ZIP_DEFLATED) as z:
+ for suite, data in self.per_test_reports.items():
+ for test, grcov_file in data.items():
+ if "baselinecoverage" in test:
+ # Don't keep the baseline coverage
+ continue
+ else:
+ # Get test coverage
+ with open(grcov_file, "r") as f:
+ report = json.load(f)
+
+ # Remove uncovered files, as they are unneeded for per-test
+ # coverage purposes.
+ report["source_files"] = [
+ sf
+ for sf in report["source_files"]
+ if self.is_covered(sf)
+ ]
+
+ # Get baseline coverage
+ baseline_coverage = {}
+ if suite in baseline_tests_suite_cov:
+ baseline_coverage = baseline_tests_suite_cov[suite]
+ elif self.config.get("per_test_category") == "web-platform":
+ baseline_coverage = baseline_tests_ext_cov[".html"]
+ else:
+ for file_type in baseline_tests_ext_cov:
+ if not test.endswith(file_type):
+ continue
+ baseline_coverage = baseline_tests_ext_cov[
+ file_type
+ ]
+ break
+
+ if not baseline_coverage:
+ # Default to the '.js' baseline as it is the largest
+ self.info("Did not find a baseline test for: " + test)
+ baseline_coverage = baseline_tests_ext_cov[".js"]
+
+ unique_coverage = rm_baseline_cov(baseline_coverage, report)
+
+ with open(grcov_file, "w") as f:
+ json.dump(
+ {
+ "test": test,
+ "suite": suite,
+ "report": unique_coverage,
+ },
+ f,
+ )
+
+ z.write(grcov_file)
+ return
+
+ del os.environ["GCOV_PREFIX_STRIP"]
+ del os.environ["GCOV_PREFIX"]
+ del os.environ["JS_CODE_COVERAGE_OUTPUT_DIR"]
+
+ if not self.ccov_upload_disabled:
+ grcov_output_file, jsvm_output_file = self.parse_coverage_artifacts(
+ self.gcov_dir, self.jsvm_dir
+ )
+
+ try:
+ os.makedirs(dirs["abs_blob_upload_dir"])
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ # Zip the grcov output and upload it.
+ grcov_zip_path = os.path.join(
+ dirs["abs_blob_upload_dir"], "code-coverage-grcov.zip"
+ )
+ with zipfile.ZipFile(grcov_zip_path, "w", zipfile.ZIP_DEFLATED) as z:
+ z.write(grcov_output_file)
+
+ # Zip the JSVM coverage data and upload it.
+ jsvm_zip_path = os.path.join(
+ dirs["abs_blob_upload_dir"], "code-coverage-jsvm.zip"
+ )
+ with zipfile.ZipFile(jsvm_zip_path, "w", zipfile.ZIP_DEFLATED) as z:
+ z.write(jsvm_output_file)
+
+ shutil.rmtree(self.grcov_dir)
+
+ @PostScriptAction("run-tests")
+ def process_java_coverage_data(self, action, success=None):
+ """
+ Run JaCoCo on the coverage.ec file in order to get a XML report.
+ After that, run grcov on the XML report to get a lcov report.
+ Finally, archive the lcov file and upload it, as process_coverage_data is doing.
+ """
+ if not self.java_code_coverage_enabled:
+ return
+
+ # If the emulator became unresponsive, the task has failed and we don't
+ # have any coverage report file, so stop running this function and
+ # allow the task to be retried automatically.
+ if not success and not os.listdir(self.java_coverage_output_dir):
+ return
+
+ report_files = [
+ os.path.join(self.java_coverage_output_dir, f)
+ for f in os.listdir(self.java_coverage_output_dir)
+ ]
+ assert len(report_files) > 0, "JaCoCo coverage data files were not found."
+
+ dirs = self.query_abs_dirs()
+ xml_path = tempfile.mkdtemp()
+ jacoco_command = (
+ ["java", "-jar", self.jacoco_jar, "report"]
+ + report_files
+ + [
+ "--classfiles",
+ self.classfiles_dir,
+ "--name",
+ "geckoview-junit",
+ "--xml",
+ os.path.join(xml_path, "geckoview-junit.xml"),
+ ]
+ )
+ self.run_command(jacoco_command, halt_on_failure=True)
+
+ grcov_command = [
+ os.path.join(self.grcov_dir, self.grcov_bin),
+ "-t",
+ "lcov",
+ xml_path,
+ ]
+ tmp_output_file, _ = self.get_output_from_command(
+ grcov_command,
+ silent=True,
+ save_tmpfiles=True,
+ return_type="files",
+ throw_exception=True,
+ )
+
+ if not self.ccov_upload_disabled:
+ grcov_zip_path = os.path.join(
+ dirs["abs_blob_upload_dir"], "code-coverage-grcov.zip"
+ )
+ with zipfile.ZipFile(grcov_zip_path, "w", zipfile.ZIP_DEFLATED) as z:
+ z.write(tmp_output_file, "grcov_lcov_output.info")
+
+
+def rm_baseline_cov(baseline_coverage, test_coverage):
+ """
+ Returns the difference between test_coverage and
+ baseline_coverage, such that what is returned
+ is the unique coverage for the test in question.
+ """
+
+ # Get all files into a quicker search format
+ unique_test_coverage = test_coverage
+ baseline_files = {el["name"]: el for el in baseline_coverage["source_files"]}
+ test_files = {el["name"]: el for el in test_coverage["source_files"]}
+
+ # Perform the difference and find everything
+ # unique to the test.
+ unique_file_coverage = {}
+ for test_file in test_files:
+ if test_file not in baseline_files:
+ unique_file_coverage[test_file] = test_files[test_file]
+ continue
+
+ if len(test_files[test_file]["coverage"]) != len(
+ baseline_files[test_file]["coverage"]
+ ):
+ # File has line number differences due to gcov bug:
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=1410217
+ continue
+
+ # TODO: Attempt to rewrite this section to remove one of the two
+ # iterations over a test's source file's coverage for optimization.
+ # Bug 1460064 was filed for this.
+
+ # Get line numbers and the differences
+ file_coverage = {
+ i
+ for i, cov in enumerate(test_files[test_file]["coverage"])
+ if cov is not None and cov > 0
+ }
+
+ baseline = {
+ i
+ for i, cov in enumerate(baseline_files[test_file]["coverage"])
+ if cov is not None and cov > 0
+ }
+
+ unique_coverage = file_coverage - baseline
+
+ if len(unique_coverage) > 0:
+ unique_file_coverage[test_file] = test_files[test_file]
+
+            # Restore the data to its original format so the unique
+            # coverage is returned within the test_coverage data object.
+ fmt_unique_coverage = []
+ for i, cov in enumerate(unique_file_coverage[test_file]["coverage"]):
+ if cov is None:
+ fmt_unique_coverage.append(None)
+ continue
+
+ # TODO: Bug 1460061, determine if hit counts
+ # need to be considered.
+ if cov > 0:
+ # If there is a count
+ if i in unique_coverage:
+ # Only add the count if it's unique
+ fmt_unique_coverage.append(
+ unique_file_coverage[test_file]["coverage"][i]
+ )
+ continue
+ # Zero out everything that is not unique
+ fmt_unique_coverage.append(0)
+ unique_file_coverage[test_file]["coverage"] = fmt_unique_coverage
+
+ # Reformat to original test_coverage list structure
+ unique_test_coverage["source_files"] = list(unique_file_coverage.values())
+
+ return unique_test_coverage
diff --git a/testing/mozharness/mozharness/mozilla/testing/errors.py b/testing/mozharness/mozharness/mozilla/testing/errors.py
new file mode 100644
index 0000000000..84c00b0a8b
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/errors.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Mozilla error lists for running tests.
+
+Error lists are used to parse output in mozharness.base.log.OutputParser.
+
+Each line of output is matched against each substring or regular expression
+in the error list. On a match, we determine the 'level' of that line,
+whether IGNORE, DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL.
+
+"""
+
+import re
+
+from mozharness.base.log import ERROR, INFO, WARNING
+
+# ErrorLists {{{1
+_mochitest_summary = {
+ "regex": re.compile(
+ r"""(\d+ INFO (Passed|Failed|Todo):\ +(\d+)|\t(Passed|Failed|Todo): (\d+))"""
+ ), # NOQA: E501
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": "Todo",
+}
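+# _mochitest_summary matches summary lines such as
+# "2748 INFO Passed: 2739" or "\tFailed: 0" (counts illustrative).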
+
+_reftest_summary = {
+ "regex": re.compile(
+ r"""REFTEST INFO \| (Successful|Unexpected|Known problems): (\d+) \("""
+ ), # NOQA: E501
+ "pass_group": "Successful",
+ "fail_group": "Unexpected",
+ "known_fail_group": "Known problems",
+}
+
+TinderBoxPrintRe = {
+ "mochitest-chrome_summary": _mochitest_summary,
+ "mochitest-webgl1-core_summary": _mochitest_summary,
+ "mochitest-webgl1-ext_summary": _mochitest_summary,
+ "mochitest-webgl2-core_summary": _mochitest_summary,
+ "mochitest-webgl2-ext_summary": _mochitest_summary,
+ "mochitest-webgl2-deqp_summary": _mochitest_summary,
+ "mochitest-webgpu_summary": _mochitest_summary,
+ "mochitest-media_summary": _mochitest_summary,
+ "mochitest-plain_summary": _mochitest_summary,
+ "mochitest-plain-gpu_summary": _mochitest_summary,
+ "marionette_summary": {
+ "regex": re.compile(r"""(passed|failed|todo):\ +(\d+)"""),
+ "pass_group": "passed",
+ "fail_group": "failed",
+ "known_fail_group": "todo",
+ },
+ "reftest_summary": _reftest_summary,
+ "reftest-qr_summary": _reftest_summary,
+ "crashtest_summary": _reftest_summary,
+ "crashtest-qr_summary": _reftest_summary,
+ "xpcshell_summary": {
+ "regex": re.compile(r"""INFO \| (Passed|Failed|Todo): (\d+)"""),
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": "Todo",
+ },
+ "jsreftest_summary": _reftest_summary,
+ "instrumentation_summary": _mochitest_summary,
+ "cppunittest_summary": {
+ "regex": re.compile(r"""cppunittests INFO \| (Passed|Failed): (\d+)"""),
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": None,
+ },
+ "gtest_summary": {
+ "regex": re.compile(r"""(Passed|Failed): (\d+)"""),
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": None,
+ },
+ "jittest_summary": {
+ "regex": re.compile(r"""(Passed|Failed): (\d+)"""),
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": None,
+ },
+ "mozbase_summary": {
+ "regex": re.compile(r"""(OK)|(FAILED) \(errors=(\d+)"""),
+ "pass_group": "OK",
+ "fail_group": "FAILED",
+ "known_fail_group": None,
+ },
+ "geckoview_summary": {
+ "regex": re.compile(r"""(Passed|Failed): (\d+)"""),
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": None,
+ },
+ "geckoview-junit_summary": {
+ "regex": re.compile(r"""(Passed|Failed): (\d+)"""),
+ "pass_group": "Passed",
+ "fail_group": "Failed",
+ "known_fail_group": None,
+ },
+ "harness_error": {
+ "full_regex": re.compile(
+ r"(?:TEST-UNEXPECTED-FAIL|PROCESS-CRASH) \| .* \|[^\|]* (application crashed|missing output line for total leaks!|negative leaks caught!|\d+ bytes leaked)" # NOQA: E501
+ ),
+ "minimum_regex": re.compile(r"""(TEST-UNEXPECTED|PROCESS-CRASH)"""),
+ "retry_regex": re.compile(
+ r"""(FAIL-SHOULD-RETRY|No space left on device|ADBError|ADBProcessError|ADBTimeoutError|program finished with exit code 80|INFRA-ERROR)""" # NOQA: E501
+ ),
+ },
+}
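+# A minimal sketch of how a consumer might use these summaries; the
+# OutputParser wiring is assumed and not part of this module:
+#
+#   summary = TinderBoxPrintRe["mochitest-plain_summary"]
+#   m = summary["regex"].search(line)
+#   if m and summary["pass_group"] in m.group(0):
+#       ...  # tally the captured count for the pass group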
+
+TestPassed = [
+ {
+        "regex": re.compile(r"""(TEST-INFO|TEST-KNOWN-FAIL|TEST-PASS|INFO \| )"""),
+ "level": INFO,
+ },
+]
+
+BaseHarnessErrorList = [
+ {
+ "substr": "TEST-UNEXPECTED",
+ "level": ERROR,
+ },
+ {
+ "substr": "PROCESS-CRASH",
+ "level": ERROR,
+ },
+ {
+ "regex": re.compile("""ERROR: (Address|Leak)Sanitizer"""),
+ "level": ERROR,
+ },
+ {
+ "regex": re.compile("""thread '([^']+)' panicked"""),
+ "level": ERROR,
+ },
+ {
+ "substr": "pure virtual method called",
+ "level": ERROR,
+ },
+ {
+ "substr": "Pure virtual function called!",
+ "level": ERROR,
+ },
+]
+
+HarnessErrorList = BaseHarnessErrorList + [
+ {
+ "substr": "A content process crashed",
+ "level": ERROR,
+ },
+]
+
+# wpt can have expected crashes, so we can't always turn Treeherder orange in those cases
+WptHarnessErrorList = BaseHarnessErrorList
+
+LogcatErrorList = [
+ {
+ "substr": "Fatal signal 11 (SIGSEGV)",
+ "level": ERROR,
+ "explanation": "This usually indicates the B2G process has crashed",
+ },
+ {
+ "substr": "Fatal signal 7 (SIGBUS)",
+ "level": ERROR,
+ "explanation": "This usually indicates the B2G process has crashed",
+ },
+ {"substr": "[JavaScript Error:", "level": WARNING},
+ {
+ "substr": "seccomp sandbox violation",
+ "level": ERROR,
+ "explanation": "A content process has violated the system call sandbox (bug 790923)",
+ },
+]
diff --git a/testing/mozharness/mozharness/mozilla/testing/per_test_base.py b/testing/mozharness/mozharness/mozilla/testing/per_test_base.py
new file mode 100644
index 0000000000..97906abca8
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/per_test_base.py
@@ -0,0 +1,541 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import itertools
+import json
+import math
+import os
+import posixpath
+import sys
+
+import mozinfo
+from manifestparser import TestManifest
+
+
+class SingleTestMixin(object):
+ """Utility functions for per-test testing like test verification and per-test coverage."""
+
+ def __init__(self, **kwargs):
+ super(SingleTestMixin, self).__init__(**kwargs)
+
+ self.suites = {}
+ self.tests_downloaded = False
+ self.reftest_test_dir = None
+ self.jsreftest_test_dir = None
+ # Map from full test path on the test machine to a relative path in the source checkout.
+ # Use self._map_test_path_to_source(test_machine_path, source_path) to add a mapping.
+ self.test_src_path = {}
+ self.per_test_log_index = 1
+
+ def _map_test_path_to_source(self, test_machine_path, source_path):
+ test_machine_path = test_machine_path.replace(os.sep, posixpath.sep)
+ source_path = source_path.replace(os.sep, posixpath.sep)
+ self.test_src_path[test_machine_path] = source_path
+
+ def _is_gpu_suite(self, suite):
+ if suite and (suite == "gpu" or suite.startswith("webgl")):
+ return True
+ return False
+
+ def _find_misc_tests(self, dirs, changed_files, gpu=False):
+ manifests = [
+ (
+ os.path.join(dirs["abs_mochitest_dir"], "tests", "mochitest.ini"),
+ "mochitest-plain",
+ ),
+ (
+ os.path.join(dirs["abs_mochitest_dir"], "chrome", "chrome.ini"),
+ "mochitest-chrome",
+ ),
+ (
+ os.path.join(
+ dirs["abs_mochitest_dir"], "browser", "browser-chrome.ini"
+ ),
+ "mochitest-browser-chrome",
+ ),
+ (
+ os.path.join(dirs["abs_mochitest_dir"], "a11y", "a11y.ini"),
+ "mochitest-a11y",
+ ),
+ (
+ os.path.join(dirs["abs_xpcshell_dir"], "tests", "xpcshell.ini"),
+ "xpcshell",
+ ),
+ ]
+ is_fission = "fission.autostart=true" in self.config.get("extra_prefs", [])
+ tests_by_path = {}
+ all_disabled = []
+ for (path, suite) in manifests:
+ if os.path.exists(path):
+ man = TestManifest([path], strict=False)
+ active = man.active_tests(
+ exists=False, disabled=True, filters=[], **mozinfo.info
+ )
+ # Remove disabled tests. Also, remove tests with the same path as
+ # disabled tests, even if they are not disabled, since per-test mode
+ # specifies tests by path (it cannot distinguish between two or more
+ # tests with the same path specified in multiple manifests).
+ disabled = [t["relpath"] for t in active if "disabled" in t]
+ all_disabled += disabled
+ new_by_path = {
+ t["relpath"]: (suite, t.get("subsuite"), None)
+ for t in active
+ if "disabled" not in t and t["relpath"] not in disabled
+ }
+ tests_by_path.update(new_by_path)
+ self.info(
+ "Per-test run updated with manifest %s (%d active, %d skipped)"
+ % (path, len(new_by_path), len(disabled))
+ )
+
+ ref_manifests = [
+ (
+ os.path.join(
+ dirs["abs_reftest_dir"],
+ "tests",
+ "layout",
+ "reftests",
+ "reftest.list",
+ ),
+ "reftest",
+ "gpu",
+ ), # gpu
+ (
+ os.path.join(
+ dirs["abs_reftest_dir"],
+ "tests",
+ "testing",
+ "crashtest",
+ "crashtests.list",
+ ),
+ "crashtest",
+ None,
+ ),
+ ]
+ sys.path.append(dirs["abs_reftest_dir"])
+ import manifest
+
+ self.reftest_test_dir = os.path.join(dirs["abs_reftest_dir"], "tests")
+ for (path, suite, subsuite) in ref_manifests:
+ if os.path.exists(path):
+ man = manifest.ReftestManifest()
+ man.load(path)
+ for t in man.tests:
+ relpath = os.path.relpath(t["path"], self.reftest_test_dir)
+ referenced = (
+ t["referenced-test"] if "referenced-test" in t else None
+ )
+ tests_by_path[relpath] = (suite, subsuite, referenced)
+ self._map_test_path_to_source(t["path"], relpath)
+ self.info(
+ "Per-test run updated with manifest %s (%d tests)"
+ % (path, len(man.tests))
+ )
+
+ suite = "jsreftest"
+ self.jsreftest_test_dir = os.path.join(
+ dirs["abs_test_install_dir"], "jsreftest", "tests"
+ )
+ path = os.path.join(self.jsreftest_test_dir, "jstests.list")
+ if os.path.exists(path):
+ man = manifest.ReftestManifest()
+ man.load(path)
+ for t in man.files:
+ # expect manifest test to look like:
+ # ".../tests/jsreftest/tests/jsreftest.html?test=test262/.../some_test.js"
+ # while the test is in mercurial at:
+ # js/src/tests/test262/.../some_test.js
+ epos = t.find("=")
+ if epos > 0:
+ relpath = t[epos + 1 :]
+ test_path = os.path.join(self.jsreftest_test_dir, relpath)
+ relpath = os.path.join("js", "src", "tests", relpath)
+ self._map_test_path_to_source(test_path, relpath)
+ tests_by_path.update({relpath: (suite, None, None)})
+ else:
+ self.warning("unexpected jsreftest test format: %s" % str(t))
+ self.info(
+ "Per-test run updated with manifest %s (%d tests)"
+ % (path, len(man.files))
+ )
+
+ # for each changed file, determine if it is a test file, and what suite it is in
+ for file in changed_files:
+ # manifest paths use os.sep (like backslash on Windows) but
+ # automation-relevance uses posixpath.sep
+ file = file.replace(posixpath.sep, os.sep)
+ entry = tests_by_path.get(file)
+ if not entry:
+ if file in all_disabled:
+ self.info("'%s' has been skipped on this platform." % file)
+ if os.environ.get("MOZHARNESS_TEST_PATHS", None) is not None:
+ self.info("Per-test run could not find requested test '%s'" % file)
+ continue
+
+ if gpu and not self._is_gpu_suite(entry[1]):
+ self.info(
+ "Per-test run (gpu) discarded non-gpu test %s (%s)"
+ % (file, entry[1])
+ )
+ continue
+ elif not gpu and self._is_gpu_suite(entry[1]):
+ self.info(
+ "Per-test run (non-gpu) discarded gpu test %s (%s)"
+ % (file, entry[1])
+ )
+ continue
+
+ if is_fission and (
+ (entry[0] == "mochitest-a11y") or (entry[0] == "mochitest-chrome")
+ ):
+ self.info(
+ "Per-test run (fission) discarded non-e10s test %s (%s)"
+ % (file, entry[0])
+ )
+ continue
+
+ if entry[2] is not None:
+ # Test name substitution, for reftest reference file handling:
+ # - if both test and reference modified, run the test file
+ # - if only reference modified, run the test file
+ test_file = os.path.join(
+ os.path.dirname(file), os.path.basename(entry[2])
+ )
+ self.info("Per-test run substituting %s for %s" % (test_file, file))
+ file = test_file
+
+ self.info("Per-test run found test %s (%s/%s)" % (file, entry[0], entry[1]))
+ subsuite_mapping = {
+ # Map (<suite>, <subsuite>): <full-suite>
+ # <suite> is associated with a manifest, explicitly in code above
+ # <subsuite> comes from "subsuite" tags in some manifest entries
+ # <full-suite> is a unique id for the suite, matching desktop mozharness configs
+ (
+ "mochitest-browser-chrome",
+ "a11y",
+ None,
+ ): "mochitest-browser-a11y",
+ (
+ "mochitest-browser-chrome",
+ "media-bc",
+ None,
+ ): "mochitest-browser-media",
+ (
+ "mochitest-browser-chrome",
+ "devtools",
+ None,
+ ): "mochitest-devtools-chrome",
+ ("mochitest-browser-chrome", "remote", None): "mochitest-remote",
+ (
+ "mochitest-browser-chrome",
+ "screenshots",
+ None,
+ ): "mochitest-browser-chrome-screenshots", # noqa
+ ("mochitest-plain", "media", None): "mochitest-media",
+ # below should be on test-verify-gpu job
+ ("mochitest-chrome", "gpu", None): "mochitest-chrome-gpu",
+ ("mochitest-plain", "gpu", None): "mochitest-plain-gpu",
+ ("mochitest-plain", "webgl1-core", None): "mochitest-webgl1-core",
+ ("mochitest-plain", "webgl1-ext", None): "mochitest-webgl1-ext",
+ ("mochitest-plain", "webgl2-core", None): "mochitest-webgl2-core",
+ ("mochitest-plain", "webgl2-ext", None): "mochitest-webgl2-ext",
+ ("mochitest-plain", "webgl2-deqp", None): "mochitest-webgl2-deqp",
+ ("mochitest-plain", "webgpu", None): "mochitest-webgpu",
+ }
+ if entry in subsuite_mapping:
+ suite = subsuite_mapping[entry]
+ else:
+ suite = entry[0]
+ suite_files = self.suites.get(suite)
+ if not suite_files:
+ suite_files = []
+ if file not in suite_files:
+ suite_files.append(file)
+ self.suites[suite] = suite_files
+
+ def _find_wpt_tests(self, dirs, changed_files):
+        # Set up sys.path to include all the dependencies required to import
+ # the web-platform-tests manifest parser. web-platform-tests provides
+ # the localpaths.py to do the path manipulation, which we load,
+ # providing the __file__ variable so it can resolve the relative
+ # paths correctly.
+ paths_file = os.path.join(
+ dirs["abs_wpttest_dir"], "tests", "tools", "localpaths.py"
+ )
+ with open(paths_file, "r") as f:
+ exec(f.read(), {"__file__": paths_file})
+ import manifest as wptmanifest
+
+ tests_root = os.path.join(dirs["abs_wpttest_dir"], "tests")
+
+ for extra in ("", "mozilla"):
+ base_path = os.path.join(dirs["abs_wpttest_dir"], extra)
+ man_path = os.path.join(base_path, "meta", "MANIFEST.json")
+ man = wptmanifest.manifest.load(tests_root, man_path)
+ self.info("Per-test run updated with manifest %s" % man_path)
+
+ repo_tests_path = os.path.join("testing", "web-platform", extra, "tests")
+ tests_path = os.path.join("tests", "web-platform", extra, "tests")
+ for (type, path, test) in man:
+ if type not in ["testharness", "reftest", "wdspec"]:
+ continue
+ repo_path = os.path.join(repo_tests_path, path)
+ # manifest paths use os.sep (like backslash on Windows) but
+ # automation-relevance uses posixpath.sep
+ repo_path = repo_path.replace(os.sep, posixpath.sep)
+ if repo_path in changed_files:
+ self.info(
+ "Per-test run found web-platform test '%s', type %s"
+ % (path, type)
+ )
+ suite_files = self.suites.get(type)
+ if not suite_files:
+ suite_files = []
+ test_path = os.path.join(tests_path, path)
+ suite_files.append(test_path)
+ self.suites[type] = suite_files
+ self._map_test_path_to_source(test_path, repo_path)
+ changed_files.remove(repo_path)
+
+ if os.environ.get("MOZHARNESS_TEST_PATHS", None) is not None:
+ for file in changed_files:
+ self.info(
+ "Per-test run could not find requested web-platform test '%s'"
+ % file
+ )
+
+ def find_modified_tests(self):
+ """
+ For each file modified on this push, determine if the modified file
+ is a test, by searching test manifests. Populate self.suites
+ with test files, organized by suite.
+
+        This depends on test manifests, so it can only run after test zips
+        have been downloaded and extracted.
+ """
+ repository = os.environ.get("GECKO_HEAD_REPOSITORY")
+ revision = os.environ.get("GECKO_HEAD_REV")
+ if not repository or not revision:
+ self.warning("unable to run tests in per-test mode: no repo or revision!")
+ self.suites = {}
+ self.tests_downloaded = True
+ return
+
+ def get_automationrelevance():
+ response = self.load_json_url(url)
+ return response
+
+ dirs = self.query_abs_dirs()
+ mozinfo.find_and_update_from_json(dirs["abs_test_install_dir"])
+ e10s = self.config.get("e10s", False)
+ mozinfo.update({"e10s": e10s})
+ is_fission = "fission.autostart=true" in self.config.get("extra_prefs", [])
+ mozinfo.update({"fission": is_fission})
+ headless = self.config.get("headless", False)
+ mozinfo.update({"headless": headless})
+ if mozinfo.info["buildapp"] == "mobile/android":
+ # extra android mozinfo normally comes from device queries, but this
+ # code may run before the device is ready, so rely on configuration
+ mozinfo.update(
+ {"android_version": str(self.config.get("android_version", 24))}
+ )
+ mozinfo.update({"is_fennec": self.config.get("is_fennec", False)})
+ mozinfo.update({"is_emulator": self.config.get("is_emulator", True)})
+ mozinfo.update({"verify": True})
+ self.info("Per-test run using mozinfo: %s" % str(mozinfo.info))
+
+ # determine which files were changed on this push
+ changed_files = set()
+ url = "%s/json-automationrelevance/%s" % (repository.rstrip("/"), revision)
+ contents = self.retry(get_automationrelevance, attempts=2, sleeptime=10)
+ for c in contents["changesets"]:
+ self.info(
+ " {cset} {desc}".format(
+ cset=c["node"][0:12],
+ desc=c["desc"].splitlines()[0].encode("ascii", "ignore"),
+ )
+ )
+ changed_files |= set(c["files"])
+ changed_files = list(changed_files)
+
+ # check specified test paths, as from 'mach try ... <path>'
+ if os.environ.get("MOZHARNESS_TEST_PATHS", None) is not None:
+ suite_to_paths = json.loads(os.environ["MOZHARNESS_TEST_PATHS"])
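+            # MOZHARNESS_TEST_PATHS is a JSON object mapping suite names to
+            # lists of source paths, e.g. (illustrative):
+            #   {"mochitest-browser-chrome": ["browser/base/content/test"]}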
+ specified_paths = itertools.chain.from_iterable(suite_to_paths.values())
+ specified_paths = list(specified_paths)
+ # filter the list of changed files to those found under the
+ # specified path(s)
+ changed_and_specified = set()
+ for changed in changed_files:
+ for specified in specified_paths:
+ if changed.startswith(specified):
+ changed_and_specified.add(changed)
+ break
+ if changed_and_specified:
+ changed_files = changed_and_specified
+ else:
+ # if specified paths do not match changed files, assume the
+ # specified paths are explicitly requested tests
+ changed_files = set()
+ changed_files.update(specified_paths)
+ self.info("Per-test run found explicit request in MOZHARNESS_TEST_PATHS:")
+ self.info(str(changed_files))
+
+ if self.config.get("per_test_category") == "web-platform":
+ self._find_wpt_tests(dirs, changed_files)
+ elif self.config.get("gpu_required", False) is not False:
+ self._find_misc_tests(dirs, changed_files, gpu=True)
+ else:
+ self._find_misc_tests(dirs, changed_files)
+
+        # In per-test mode we run specific tests from any given test suite;
+        # _find_*_tests organizes the tests to run into suites so that we
+        # can run one suite at a time.
+
+ # chunk files
+ total_tests = sum([len(self.suites[x]) for x in self.suites])
+
+ if total_tests == 0:
+ self.warning("No tests to verify.")
+ self.suites = {}
+ self.tests_downloaded = True
+ return
+
+ files_per_chunk = total_tests / float(self.config.get("total_chunks", 1))
+ files_per_chunk = int(math.ceil(files_per_chunk))
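+        # e.g. (illustrative): 10 tests with total_chunks=3 gives
+        # files_per_chunk=4, so chunk 1 runs tests 0-3, chunk 2 runs
+        # tests 4-7, and chunk 3 runs the remaining tests 8-9.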
+
+ chunk_number = int(self.config.get("this_chunk", 1))
+ suites = {}
+ start = (chunk_number - 1) * files_per_chunk
+ end = chunk_number * files_per_chunk
+ current = -1
+ for suite in self.suites:
+ for test in self.suites[suite]:
+ current += 1
+ if current >= start and current < end:
+ if suite not in suites:
+ suites[suite] = []
+ suites[suite].append(test)
+ if current >= end:
+ break
+
+ self.suites = suites
+ self.tests_downloaded = True
+
+ def query_args(self, suite):
+ """
+ For the specified suite, return an array of command line arguments to
+ be passed to test harnesses when running in per-test mode.
+
+ Each array element is an array of command line arguments for a modified
+ test in the suite.
+ """
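+        # Sketch of the shape of the return value in verify mode; the
+        # leading flags come from self.coverage_args/self.verify_args and
+        # the flag shown here is hypothetical:
+        #   [["--verify-flag", "dir/test1.html"],
+        #    ["--verify-flag", "dir/test2.html"]]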
+ # not in verify or per-test coverage mode: run once, with no additional args
+ if not self.per_test_coverage and not self.verify_enabled:
+ return [[]]
+
+ files = []
+ jsreftest_extra_dir = os.path.join("js", "src", "tests")
+ # For some suites, the test path needs to be updated before passing to
+ # the test harness.
+ for file in self.suites.get(suite):
+ if self.config.get("per_test_category") != "web-platform" and suite in [
+ "reftest",
+ "crashtest",
+ ]:
+ file = os.path.join(self.reftest_test_dir, file)
+ elif (
+ self.config.get("per_test_category") != "web-platform"
+ and suite == "jsreftest"
+ ):
+ file = os.path.relpath(file, jsreftest_extra_dir)
+ file = os.path.join(self.jsreftest_test_dir, file)
+
+ if file is None:
+ continue
+
+ file = file.replace(os.sep, posixpath.sep)
+ files.append(file)
+
+ self.info("Per-test file(s) for '%s': %s" % (suite, files))
+
+ args = []
+ for file in files:
+ cur = []
+
+ cur.extend(self.coverage_args)
+ cur.extend(self.verify_args)
+
+ cur.append(file)
+ args.append(cur)
+
+ return args
+
+ def query_per_test_category_suites(self, category, all_suites):
+ """
+ In per-test mode, determine which suites are active, for the given
+ suite category.
+ """
+ suites = None
+ if self.verify_enabled or self.per_test_coverage:
+ if self.config.get("per_test_category") == "web-platform":
+ suites = list(self.suites)
+ self.info("Per-test suites: %s" % suites)
+ elif all_suites and self.tests_downloaded:
+ suites = dict(
+ (key, all_suites.get(key))
+ for key in self.suites
+ if key in all_suites.keys()
+ )
+ self.info("Per-test suites: %s" % suites)
+ else:
+ # Until test zips are downloaded, manifests are not available,
+ # so it is not possible to determine which suites are active/
+ # required for per-test mode; assume all suites from supported
+ # suite categories are required.
+ if category in ["mochitest", "xpcshell", "reftest"]:
+ suites = all_suites
+ return suites
+
+ def log_per_test_status(self, test_name, tbpl_status, log_level):
+ """
+        Log the status of a single test. This will display in the Job Details
+        pane in Treeherder - a convenient summary of per-test mode.
+        Special test name formatting is needed because Treeherder truncates
+        lines that are too long, and may remove duplicates after truncation.
+ """
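+        # e.g. (illustrative): a name like
+        # "testing/web-platform/tests/css/css-grid/alignment/grid-gutters-001.html"
+        # is logged as ".../alignment/grid-gutters-001.html".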
+ max_test_name_len = 40
+ if len(test_name) > max_test_name_len:
+ head = test_name
+ new = ""
+ previous = None
+ max_test_name_len = max_test_name_len - len(".../")
+ while len(new) < max_test_name_len:
+ head, tail = os.path.split(head)
+ previous = new
+ new = os.path.join(tail, new)
+ test_name = os.path.join("...", previous or new)
+ test_name = test_name.rstrip(os.path.sep)
+ self.log(
+ "TinderboxPrint: Per-test run of %s<br/>: %s" % (test_name, tbpl_status),
+ level=log_level,
+ )
+
+ def get_indexed_logs(self, dir, test_suite):
+ """
+ Per-test tasks need distinct file names for the raw and errorsummary logs
+ on each run.
+ """
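+        # e.g. (illustrative): the third per-test run of mochitest-plain
+        # produces mochitest-plain-test3_raw.log and
+        # mochitest-plain-test3_errorsummary.log.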
+ index = ""
+ if self.verify_enabled or self.per_test_coverage:
+ index = "-test%d" % self.per_test_log_index
+ self.per_test_log_index += 1
+ raw_log_file = os.path.join(dir, "%s%s_raw.log" % (test_suite, index))
+ error_summary_file = os.path.join(
+ dir, "%s%s_errorsummary.log" % (test_suite, index)
+ )
+ return raw_log_file, error_summary_file
diff --git a/testing/mozharness/mozharness/mozilla/testing/raptor.py b/testing/mozharness/mozharness/mozilla/testing/raptor.py
new file mode 100644
index 0000000000..886ce2117f
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/raptor.py
@@ -0,0 +1,1415 @@
+#!/usr/bin/env python
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import copy
+import glob
+import multiprocessing
+import os
+import re
+import subprocess
+import sys
+import tempfile
+from shutil import copyfile, rmtree
+
+import mozharness
+from mozharness.base.errors import PythonErrorList
+from mozharness.base.log import CRITICAL, DEBUG, ERROR, INFO, OutputParser
+from mozharness.base.python import Python3Virtualenv
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.automation import (
+ EXIT_STATUS_DICT,
+ TBPL_RETRY,
+ TBPL_SUCCESS,
+ TBPL_WORST_LEVEL_TUPLE,
+)
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.errors import HarnessErrorList, TinderBoxPrintRe
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from six import string_types
+
+scripts_path = os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__)))
+external_tools_path = os.path.join(scripts_path, "external_tools")
+here = os.path.abspath(os.path.dirname(__file__))
+
+RaptorErrorList = (
+ PythonErrorList
+ + HarnessErrorList
+ + [
+ {"regex": re.compile(r"""run-as: Package '.*' is unknown"""), "level": DEBUG},
+ {"substr": r"""raptorDebug""", "level": DEBUG},
+ {
+            "regex": re.compile(r"""^raptor[a-zA-Z-]*( - )?( )?(?i:error)(:)?"""),
+ "level": ERROR,
+ },
+ {
+            "regex": re.compile(r"""^raptor[a-zA-Z-]*( - )?( )?(?i:critical)(:)?"""),
+ "level": CRITICAL,
+ },
+ {
+ "regex": re.compile(r"""No machine_name called '.*' can be found"""),
+ "level": CRITICAL,
+ },
+ {
+ "substr": r"""No such file or directory: 'browser_output.txt'""",
+ "level": CRITICAL,
+ "explanation": "Most likely the browser failed to launch, or the test otherwise "
+ "failed to start.",
+ },
+ ]
+)
+
+# When running raptor locally, we can attempt to make use of
+# the user's locally cached ffmpeg binary from when they
+# ran `./mach browsertime --setup`
+FFMPEG_LOCAL_CACHE = {
+ "mac": "ffmpeg-macos",
+ "linux": "ffmpeg-4.4.1-i686-static",
+ "win": "ffmpeg-4.4.1-full_build",
+}
+
+
+class Raptor(
+ TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin, Python3Virtualenv
+):
+ """
+ Install and run Raptor tests
+ """
+
+ # Options to Browsertime. Paths are expected to be absolute.
+ browsertime_options = [
+ [
+ ["--browsertime-node"],
+ {"dest": "browsertime_node", "default": None, "help": argparse.SUPPRESS},
+ ],
+ [
+ ["--browsertime-browsertimejs"],
+ {
+ "dest": "browsertime_browsertimejs",
+ "default": None,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime-vismet-script"],
+ {
+ "dest": "browsertime_vismet_script",
+ "default": None,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime-chromedriver"],
+ {
+ "dest": "browsertime_chromedriver",
+ "default": None,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime-ffmpeg"],
+ {"dest": "browsertime_ffmpeg", "default": None, "help": argparse.SUPPRESS},
+ ],
+ [
+ ["--browsertime-geckodriver"],
+ {
+ "dest": "browsertime_geckodriver",
+ "default": None,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime-video"],
+ {
+ "dest": "browsertime_video",
+ "action": "store_true",
+        Run JaCoCo on the coverage.ec file to produce an XML report,
+        then run grcov on the XML report to produce an lcov report.
+        Finally, archive and upload the lcov file, as process_coverage_data does.
+ ],
+ [
+ ["--browsertime-visualmetrics"],
+ {
+ "dest": "browsertime_visualmetrics",
+ "action": "store_true",
+ "default": False,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime-no-ffwindowrecorder"],
+ {
+ "dest": "browsertime_no_ffwindowrecorder",
+ "action": "store_true",
+ "default": False,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime-arg"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "browsertime_user_args",
+ "default": [],
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--browsertime"],
+ {
+ "dest": "browsertime",
+ "action": "store_true",
+ "default": True,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ ]
+
+ config_options = (
+ [
+ [
+ ["--test"],
+ {"action": "store", "dest": "test", "help": "Raptor test to run"},
+ ],
+ [
+ ["--app"],
+ {
+ "default": "firefox",
+ "choices": [
+ "firefox",
+ "chrome",
+ "chrome-m",
+ "chromium",
+ "fennec",
+ "geckoview",
+ "refbrow",
+ "fenix",
+ "safari",
+ ],
+ "dest": "app",
+ "help": "Name of the application we are testing (default: firefox).",
+ },
+ ],
+ [
+ ["--activity"],
+ {
+ "dest": "activity",
+ "help": "The Android activity used to launch the Android app. "
+ "e.g.: org.mozilla.fenix.browser.BrowserPerformanceTestActivity",
+ },
+ ],
+ [
+ ["--intent"],
+ {
+ "dest": "intent",
+ "help": "Name of the Android intent action used to launch the Android app",
+ },
+ ],
+ [
+ ["--is-release-build"],
+ {
+ "action": "store_true",
+ "dest": "is_release_build",
+                    "help": "Whether the build is a release build which requires workarounds "
+ "using MOZ_DISABLE_NONLOCAL_CONNECTIONS to support installing unsigned "
+ "webextensions. Defaults to False.",
+ },
+ ],
+ [
+ ["--add-option"],
+ {
+ "action": "extend",
+ "dest": "raptor_cmd_line_args",
+ "default": None,
+ "help": "Extra options to Raptor.",
+ },
+ ],
+ [
+ ["--device-name"],
+ {
+ "dest": "device_name",
+ "default": None,
+ "help": "Device name of mobile device.",
+ },
+ ],
+ [
+ ["--geckoProfile"],
+ {
+ "dest": "gecko_profile",
+ "action": "store_true",
+ "default": False,
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--geckoProfileInterval"],
+ {
+ "dest": "gecko_profile_interval",
+ "type": "int",
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--geckoProfileEntries"],
+ {
+ "dest": "gecko_profile_entries",
+ "type": "int",
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--geckoProfileFeatures"],
+ {
+ "dest": "gecko_profile_features",
+ "type": "str",
+ "help": argparse.SUPPRESS,
+ },
+ ],
+ [
+ ["--gecko-profile"],
+ {
+ "dest": "gecko_profile",
+ "action": "store_true",
+ "default": False,
+ "help": "Whether to profile the test run and save the profile results.",
+ },
+ ],
+ [
+ ["--gecko-profile-interval"],
+ {
+ "dest": "gecko_profile_interval",
+ "type": "int",
+ "help": "The interval between samples taken by the profiler (ms).",
+ },
+ ],
+ [
+ ["--gecko-profile-entries"],
+ {
+ "dest": "gecko_profile_entries",
+ "type": "int",
+ "help": "How many samples to take with the profiler.",
+ },
+ ],
+ [
+ ["--gecko-profile-threads"],
+ {
+ "dest": "gecko_profile_threads",
+ "type": "str",
+ "help": "Comma-separated list of threads to sample.",
+ },
+ ],
+ [
+ ["--gecko-profile-features"],
+ {
+ "dest": "gecko_profile_features",
+ "type": "str",
+ "help": "Features to enable in the profiler.",
+ },
+ ],
+ [
+ ["--extra-profiler-run"],
+ {
+ "dest": "extra_profiler_run",
+ "action": "store_true",
+ "default": False,
+ "help": "Run the tests again with profiler enabled after the main run.",
+ },
+ ],
+ [
+ ["--page-cycles"],
+ {
+ "dest": "page_cycles",
+ "type": "int",
+ "help": (
+ "How many times to repeat loading the test page (for page load "
+ "tests); for benchmark tests this is how many times the benchmark test "
+ "will be run."
+ ),
+ },
+ ],
+ [
+ ["--page-timeout"],
+ {
+ "dest": "page_timeout",
+ "type": "int",
+ "help": "How long to wait (ms) for one page_cycle to complete, before timing out.", # NOQA: E501
+ },
+ ],
+ [
+ ["--browser-cycles"],
+ {
+ "dest": "browser_cycles",
+ "type": "int",
+ "help": (
+ "The number of times a cold load test is repeated (for cold load tests "
+ "only, where the browser is shutdown and restarted between test "
+ "iterations)."
+ ),
+ },
+ ],
+ [
+ ["--project"],
+ {
+ "action": "store",
+ "dest": "project",
+ "default": "mozilla-central",
+ "type": "str",
+ "help": "Name of the project (try, mozilla-central, etc.)",
+ },
+ ],
+ [
+ ["--test-url-params"],
+ {
+ "action": "store",
+ "dest": "test_url_params",
+ "help": "Parameters to add to the test_url query string.",
+ },
+ ],
+ [
+ ["--host"],
+ {
+ "dest": "host",
+ "type": "str",
+ "default": "127.0.0.1",
+ "help": "Hostname from which to serve urls (default: 127.0.0.1). "
+ "The value HOST_IP will cause the value of host to be "
+                    "loaded from the environment variable HOST_IP.",
+ },
+ ],
+ [
+ ["--power-test"],
+ {
+ "dest": "power_test",
+ "action": "store_true",
+ "default": False,
+ "help": (
+ "Use Raptor to measure power usage on Android browsers (Geckoview "
+ "Example, Fenix, Refbrow, and Fennec) as well as on Intel-based MacOS "
+ "machines that have Intel Power Gadget installed."
+ ),
+ },
+ ],
+ [
+ ["--memory-test"],
+ {
+ "dest": "memory_test",
+ "action": "store_true",
+ "default": False,
+ "help": "Use Raptor to measure memory usage.",
+ },
+ ],
+ [
+ ["--cpu-test"],
+ {
+ "dest": "cpu_test",
+ "action": "store_true",
+ "default": False,
+ "help": "Use Raptor to measure CPU usage.",
+ },
+ ],
+ [
+ ["--disable-perf-tuning"],
+ {
+ "action": "store_true",
+ "dest": "disable_perf_tuning",
+ "default": False,
+ "help": "Disable performance tuning on android.",
+ },
+ ],
+ [
+ ["--conditioned-profile"],
+ {
+ "dest": "conditioned_profile",
+ "type": "str",
+ "default": None,
+ "help": (
+ "Name of conditioned profile to use. Prefix with `artifact:` "
+                        "if we should obtain the profile from CI."
+ ),
+ },
+ ],
+ [
+ ["--live-sites"],
+ {
+ "dest": "live_sites",
+ "action": "store_true",
+ "default": False,
+ "help": "Run tests using live sites instead of recorded sites.",
+ },
+ ],
+ [
+ ["--test-bytecode-cache"],
+ {
+ "dest": "test_bytecode_cache",
+ "action": "store_true",
+ "default": False,
+ "help": (
+ "If set, the pageload test will set the preference "
+ "`dom.script_loader.bytecode_cache.strategy=-1` and wait 20 seconds "
+ "after the first cold pageload to populate the bytecode cache before "
+ "running a warm pageload test. Only available if `--chimera` "
+ "is also provided."
+ ),
+ },
+ ],
+ [
+ ["--chimera"],
+ {
+ "dest": "chimera",
+ "action": "store_true",
+ "default": False,
+ "help": "Run tests in chimera mode. Each browser cycle will run a cold and warm test.", # NOQA: E501
+ },
+ ],
+ [
+ ["--debug-mode"],
+ {
+ "dest": "debug_mode",
+ "action": "store_true",
+ "default": False,
+ "help": "Run Raptor in debug mode (open browser console, limited page-cycles, etc.)", # NOQA: E501
+ },
+ ],
+ [
+ ["--noinstall"],
+ {
+ "dest": "noinstall",
+ "action": "store_true",
+ "default": False,
+ "help": "Do not offer to install Android APK.",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "dest": "e10s",
+ "action": "store_false",
+ "default": True,
+ "help": "Run without multiple processes (e10s).",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_false",
+ "dest": "fission",
+ "default": True,
+ "help": "Disable Fission (site isolation) in Gecko.",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Set a browser preference. May be used multiple times.",
+ },
+ ],
+ [
+ ["--setenv"],
+ {
+ "action": "append",
+ "metavar": "NAME=VALUE",
+ "dest": "environment",
+ "default": [],
+ "help": "Set a variable in the test environment. May be used multiple times.",
+ },
+ ],
+ [
+ ["--skip-preflight"],
+ {
+ "action": "store_true",
+ "dest": "skip_preflight",
+ "default": False,
+                    "help": "Skip preflight commands that prepare the machine.",
+ },
+ ],
+ [
+ ["--cold"],
+ {
+ "action": "store_true",
+ "dest": "cold",
+ "default": False,
+ "help": "Enable cold page-load for browsertime tp6",
+ },
+ ],
+ [
+ ["--verbose"],
+ {
+ "action": "store_true",
+ "dest": "verbose",
+ "default": False,
+ "help": "Verbose output",
+ },
+ ],
+ [
+ ["--enable-marionette-trace"],
+ {
+ "action": "store_true",
+ "dest": "enable_marionette_trace",
+ "default": False,
+ "help": "Enable marionette tracing",
+ },
+ ],
+ [
+ ["--clean"],
+ {
+ "action": "store_true",
+ "dest": "clean",
+ "default": False,
+ "help": (
+ "Clean the python virtualenv (remove, and rebuild) for "
+ "Raptor before running tests."
+ ),
+ },
+ ],
+ [
+ ["--webext"],
+ {
+ "action": "store_true",
+ "dest": "webext",
+ "default": False,
+ "help": (
+ "Whether to use webextension to execute pageload tests "
+                        "(WebExtension is being deprecated)."
+ ),
+ },
+ ],
+ [
+ ["--collect-perfstats"],
+ {
+ "action": "store_true",
+ "dest": "collect_perfstats",
+ "default": False,
+ "help": (
+ "If set, the test will collect perfstats in addition to "
+ "the regular metrics it gathers."
+ ),
+ },
+ ],
+ [
+ ["--extra-summary-methods"],
+ {
+ "action": "append",
+ "metavar": "OPTION",
+ "dest": "extra_summary_methods",
+ "default": [],
+ "help": (
+                        "Alternative methods for summarizing technical and visual "
+                        "pageload metrics. "
+ "Options: geomean, mean."
+ ),
+ },
+ ],
+ ]
+ + testing_config_options
+ + copy.deepcopy(code_coverage_config_options)
+ + browsertime_options
+ )
+
+ def __init__(self, **kwargs):
+ kwargs.setdefault("config_options", self.config_options)
+ kwargs.setdefault(
+ "all_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install-chrome-android",
+ "install-chromium-distribution",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault(
+ "default_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install-chromium-distribution",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault("config", {})
+ super(Raptor, self).__init__(**kwargs)
+
+ # Convenience
+ self.workdir = self.query_abs_dirs()["abs_work_dir"]
+
+ self.run_local = self.config.get("run_local")
+
+ # App (browser testing on) defaults to firefox
+ self.app = "firefox"
+
+ if self.run_local:
+            # Get the app from command-line args passed in from mach, inside
+            # 'raptor_cmd_line_args'. Command-line args can come in two formats
+            # depending on how the user entered them, e.g. "--app=geckoview" or
+            # separate as "--app", "geckoview", so we have to parse carefully.
+            # It's simplest to use `argparse` to parse partially.
+ self.app = "firefox"
+ if "raptor_cmd_line_args" in self.config:
+ sub_parser = argparse.ArgumentParser()
+ # It's not necessary to limit the allowed values: each value
+                # will be parsed and verified by raptor/raptor.py.
+ sub_parser.add_argument("--app", default=None, dest="app")
+ sub_parser.add_argument("-i", "--intent", default=None, dest="intent")
+ sub_parser.add_argument(
+ "-a", "--activity", default=None, dest="activity"
+ )
+
+ # We'd prefer to use `parse_known_intermixed_args`, but that's
+ # new in Python 3.7.
+ known, unknown = sub_parser.parse_known_args(
+ self.config["raptor_cmd_line_args"]
+ )
+
+ if known.app:
+ self.app = known.app
+ if known.intent:
+ self.intent = known.intent
+ if known.activity:
+ self.activity = known.activity
+ else:
+ # Raptor initiated in production via mozharness
+ self.test = self.config["test"]
+ self.app = self.config.get("app", "firefox")
+ self.binary_path = self.config.get("binary_path", None)
+
+ if self.app in ("refbrow", "fenix"):
+ self.app_name = self.binary_path
+
+ self.installer_url = self.config.get("installer_url")
+ self.raptor_json_url = self.config.get("raptor_json_url")
+ self.raptor_json = self.config.get("raptor_json")
+ self.raptor_json_config = self.config.get("raptor_json_config")
+ self.repo_path = self.config.get("repo_path")
+ self.obj_path = self.config.get("obj_path")
+ self.mozbuild_path = self.config.get("mozbuild_path")
+ self.test = None
+ self.gecko_profile = self.config.get(
+ "gecko_profile"
+ ) or "--geckoProfile" in self.config.get("raptor_cmd_line_args", [])
+ self.gecko_profile_interval = self.config.get("gecko_profile_interval")
+ self.gecko_profile_entries = self.config.get("gecko_profile_entries")
+ self.gecko_profile_threads = self.config.get("gecko_profile_threads")
+ self.gecko_profile_features = self.config.get("gecko_profile_features")
+ self.extra_profiler_run = self.config.get("extra_profiler_run")
+ self.test_packages_url = self.config.get("test_packages_url")
+ self.test_url_params = self.config.get("test_url_params")
+ self.host = self.config.get("host")
+ if self.host == "HOST_IP":
+ self.host = os.environ["HOST_IP"]
+ self.power_test = self.config.get("power_test")
+ self.memory_test = self.config.get("memory_test")
+ self.cpu_test = self.config.get("cpu_test")
+ self.live_sites = self.config.get("live_sites")
+ self.chimera = self.config.get("chimera")
+ self.disable_perf_tuning = self.config.get("disable_perf_tuning")
+ self.conditioned_profile = self.config.get("conditioned_profile")
+ self.extra_prefs = self.config.get("extra_prefs")
+ self.environment = self.config.get("environment")
+ self.is_release_build = self.config.get("is_release_build")
+ self.debug_mode = self.config.get("debug_mode", False)
+ self.chromium_dist_path = None
+ self.firefox_android_browsers = ["fennec", "geckoview", "refbrow", "fenix"]
+ self.android_browsers = self.firefox_android_browsers + ["chrome-m"]
+ self.browsertime_visualmetrics = self.config.get("browsertime_visualmetrics")
+ self.browsertime_node = self.config.get("browsertime_node")
+ self.browsertime_user_args = self.config.get("browsertime_user_args")
+ self.browsertime_video = False
+ self.enable_marionette_trace = self.config.get("enable_marionette_trace")
+ self.browser_cycles = self.config.get("browser_cycles")
+ self.clean = self.config.get("clean")
+
+ for (arg,), details in Raptor.browsertime_options:
+ # Allow overriding defaults on the `./mach raptor-test ...` command-line.
+ value = self.config.get(details["dest"])
+ if value and arg not in self.config.get("raptor_cmd_line_args", []):
+ setattr(self, details["dest"], value)
+
+ # We accept some configuration options from the try commit message in the
+ # format mozharness: <options>. Example try commit message: mozharness:
+ # --geckoProfile try: <stuff>
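+    # For example (illustrative): gecko_profile=True with
+    # gecko_profile_interval=1 yields
+    # ["--gecko-profile", "--gecko-profile-interval", "1"].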
+ def query_gecko_profile_options(self):
+ gecko_results = []
+ # If gecko_profile is set, we add that to Raptor's options
+ if self.gecko_profile:
+ gecko_results.append("--gecko-profile")
+ if self.gecko_profile_interval:
+ gecko_results.extend(
+ ["--gecko-profile-interval", str(self.gecko_profile_interval)]
+ )
+ if self.gecko_profile_entries:
+ gecko_results.extend(
+ ["--gecko-profile-entries", str(self.gecko_profile_entries)]
+ )
+ if self.gecko_profile_features:
+ gecko_results.extend(
+ ["--gecko-profile-features", self.gecko_profile_features]
+ )
+ if self.gecko_profile_threads:
+ gecko_results.extend(
+ ["--gecko-profile-threads", self.gecko_profile_threads]
+ )
+ else:
+ if self.extra_profiler_run:
+ gecko_results.append("--extra-profiler-run")
+ return gecko_results
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(Raptor, self).query_abs_dirs()
+ abs_dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ abs_dirs["abs_test_install_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "tests"
+ )
+
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def install_chrome_android(self):
+ """Install Google Chrome for Android in production from tooltool"""
+ if self.app != "chrome-m":
+ self.info("Google Chrome for Android not required")
+ return
+ if self.config.get("run_local"):
+ self.info(
+ "Google Chrome for Android will not be installed "
+ "from tooltool when running locally"
+ )
+ return
+ self.info("Fetching and installing Google Chrome for Android")
+
+ # Fetch the APK
+ tmpdir = tempfile.mkdtemp()
+ self.tooltool_fetch(
+ os.path.join(
+ self.raptor_path,
+ "raptor",
+ "tooltool-manifests",
+ "chrome-android",
+ "chrome87.manifest",
+ ),
+ output_dir=tmpdir,
+ )
+
+ # Find the downloaded APK
+ files = os.listdir(tmpdir)
+        if len(files) != 1:
+            raise Exception(
+                "Expected exactly one chrome APK file after tooltool download"
+            )
+ chromeapk = os.path.join(tmpdir, files[0])
+
+ # Disable verification and install the APK
+ self.device.shell_output("settings put global verifier_verify_adb_installs 0")
+ self.install_android_app(chromeapk, replace=True)
+
+ # Re-enable verification and delete the temporary directory
+ self.device.shell_output("settings put global verifier_verify_adb_installs 1")
+ rmtree(tmpdir)
+
+ self.info("Google Chrome for Android successfully installed")
+
+ def install_chromium_distribution(self):
+ """Install Google Chromium distribution in production"""
+ linux, mac, win = "linux", "mac", "win"
+ chrome, chromium = "chrome", "chromium"
+
+ available_chromium_dists = [chrome, chromium]
+ binary_location = {
+ chromium: {
+ linux: ["chrome-linux", "chrome"],
+ mac: ["chrome-mac", "Chromium.app", "Contents", "MacOS", "Chromium"],
+ win: ["chrome-win", "Chrome.exe"],
+ },
+ }
+
+ if self.app not in available_chromium_dists:
+ self.info("Google Chrome or Chromium distributions are not required.")
+ return
+
+ if self.app == "chrome":
+ self.info("Chrome should be preinstalled.")
+ if win in self.platform_name():
+ base_path = "C:\\%s\\Google\\Chrome\\Application\\chrome.exe"
+ self.chromium_dist_path = base_path % "Progra~1"
+ if not os.path.exists(self.chromium_dist_path):
+ self.chromium_dist_path = base_path % "Progra~2"
+ elif linux in self.platform_name():
+ self.chromium_dist_path = "/usr/bin/google-chrome"
+ elif mac in self.platform_name():
+ self.chromium_dist_path = (
+ "/Applications/Google Chrome.app/" "Contents/MacOS/Google Chrome"
+ )
+ else:
+ self.error(
+ "Chrome is not installed on the platform %s yet."
+ % self.platform_name()
+ )
+
+ if os.path.exists(self.chromium_dist_path):
+ self.info(
+ "Google Chrome found in expected location %s"
+ % self.chromium_dist_path
+ )
+ else:
+ self.error("Cannot find Google Chrome at %s" % self.chromium_dist_path)
+
+ return
+
+ chromium_dist = self.app
+
+ if self.config.get("run_local"):
+ self.info("Expecting %s to be pre-installed locally" % chromium_dist)
+ return
+
+ self.info("Getting fetched %s build" % chromium_dist)
+ self.chromium_dist_dest = os.path.normpath(
+ os.path.abspath(os.environ["MOZ_FETCHES_DIR"])
+ )
+
+ if mac in self.platform_name():
+ self.chromium_dist_path = os.path.join(
+ self.chromium_dist_dest, *binary_location[chromium_dist][mac]
+ )
+
+ elif linux in self.platform_name():
+ self.chromium_dist_path = os.path.join(
+ self.chromium_dist_dest, *binary_location[chromium_dist][linux]
+ )
+
+ else:
+ self.chromium_dist_path = os.path.join(
+ self.chromium_dist_dest, *binary_location[chromium_dist][win]
+ )
+
+ self.info("%s dest is: %s" % (chromium_dist, self.chromium_dist_dest))
+ self.info("%s path is: %s" % (chromium_dist, self.chromium_dist_path))
+
+ # Now ensure Chromium binary exists
+ if os.path.exists(self.chromium_dist_path):
+ self.info(
+ "Successfully installed %s to: %s"
+ % (chromium_dist, self.chromium_dist_path)
+ )
+ else:
+ self.info("Abort: failed to install %s" % chromium_dist)
+
+ def raptor_options(self, args=None, **kw):
+ """Return options to Raptor"""
+ options = []
+ kw_options = {}
+
+ # Get the APK location to be able to get the browser version
+ # through mozversion
+ if self.app in self.firefox_android_browsers and not self.run_local:
+ kw_options["installerpath"] = self.installer_path
+
+        # If testing on Firefox, the binary path already came from mozharness (production);
+        # otherwise the binary path is forwarded from the command-line args (raptor_cmd_line_args).
+ kw_options["app"] = self.app
+ if self.app == "firefox" or (
+ self.app in self.firefox_android_browsers and not self.run_local
+ ):
+ binary_path = self.binary_path or self.config.get("binary_path")
+ if not binary_path:
+ self.fatal("Raptor requires a path to the binary.")
+ kw_options["binary"] = binary_path
+ if self.app in self.firefox_android_browsers:
+ # In production ensure we have correct app name,
+ # i.e. fennec_aurora or fennec_release etc.
+ kw_options["binary"] = self.query_package_name()
+ self.info(
+ "Set binary to %s instead of %s"
+ % (kw_options["binary"], binary_path)
+ )
+ elif self.app == "safari" and not self.run_local:
+ binary_path = "/Applications/Safari.app/Contents/MacOS/Safari"
+ kw_options["binary"] = binary_path
+ else: # Running on Chromium
+ if not self.run_local:
+ # When running locally we already set the Chromium binary above, in init.
+ # In production, we already installed Chromium, so set the binary path
+ # to our install.
+ kw_options["binary"] = self.chromium_dist_path or ""
+
+ # Options overwritten from **kw
+ if "test" in self.config:
+ kw_options["test"] = self.config["test"]
+ if "binary" in self.config:
+ kw_options["binary"] = self.config["binary"]
+ if self.symbols_path:
+ kw_options["symbolsPath"] = self.symbols_path
+ if self.config.get("obj_path", None) is not None:
+ kw_options["obj-path"] = self.config["obj_path"]
+ if self.config.get("mozbuild_path", None) is not None:
+ kw_options["mozbuild-path"] = self.config["mozbuild_path"]
+ if self.test_url_params:
+ kw_options["test-url-params"] = self.test_url_params
+ if self.config.get("device_name") is not None:
+ kw_options["device-name"] = self.config["device_name"]
+ if self.config.get("activity") is not None:
+ kw_options["activity"] = self.config["activity"]
+ if self.config.get("conditioned_profile") is not None:
+ kw_options["conditioned-profile"] = self.config["conditioned_profile"]
+
+ kw_options.update(kw)
+ if self.host:
+ kw_options["host"] = self.host
+ # Configure profiling options
+ options.extend(self.query_gecko_profile_options())
+ # Extra arguments
+ if args is not None:
+ options += args
+
+ if self.config.get("run_local", False):
+ options.extend(["--run-local"])
+ if "raptor_cmd_line_args" in self.config:
+ options += self.config["raptor_cmd_line_args"]
+ if self.config.get("code_coverage", False):
+ options.extend(["--code-coverage"])
+ if self.config.get("is_release_build", False):
+ options.extend(["--is-release-build"])
+ if self.config.get("power_test", False):
+ options.extend(["--power-test"])
+ if self.config.get("memory_test", False):
+ options.extend(["--memory-test"])
+ if self.config.get("cpu_test", False):
+ options.extend(["--cpu-test"])
+ if self.config.get("live_sites", False):
+ options.extend(["--live-sites"])
+ if self.config.get("chimera", False):
+ options.extend(["--chimera"])
+ if self.config.get("disable_perf_tuning", False):
+ options.extend(["--disable-perf-tuning"])
+ if self.config.get("cold", False):
+ options.extend(["--cold"])
+ if not self.config.get("fission", True):
+ options.extend(["--disable-fission"])
+ if self.config.get("verbose", False):
+ options.extend(["--verbose"])
+ if self.config.get("extra_prefs"):
+ options.extend(
+ ["--setpref={}".format(i) for i in self.config.get("extra_prefs")]
+ )
+ if self.config.get("environment"):
+ options.extend(
+ ["--setenv={}".format(i) for i in self.config.get("environment")]
+ )
+ if self.config.get("enable_marionette_trace", False):
+ options.extend(["--enable-marionette-trace"])
+ if self.config.get("browser_cycles"):
+ options.extend(
+ ["--browser-cycles={}".format(self.config.get("browser_cycles"))]
+ )
+ if self.config.get("test_bytecode_cache", False):
+ options.extend(["--test-bytecode-cache"])
+ if self.config.get("collect_perfstats", False):
+ options.extend(["--collect-perfstats"])
+ if self.config.get("extra_summary_methods"):
+ options.extend(
+ [
+ "--extra-summary-methods={}".format(method)
+ for method in self.config.get("extra_summary_methods")
+ ]
+ )
+ if self.config.get("webext", False):
+ options.extend(["--webext"])
+ else:
+ for (arg,), details in Raptor.browsertime_options:
+ # Allow overriding defaults on the `./mach raptor-test ...` command-line
+ value = self.config.get(details["dest"])
+ if value is None or value != getattr(self, details["dest"], None):
+ # Check for modifications done to the instance variables
+ value = getattr(self, details["dest"], None)
+ if value and arg not in self.config.get("raptor_cmd_line_args", []):
+ if isinstance(value, string_types):
+ options.extend([arg, os.path.expandvars(value)])
+ elif isinstance(value, (tuple, list)):
+ for val in value:
+ options.extend([arg, val])
+ else:
+ options.extend([arg])
+
+ for key, value in kw_options.items():
+ options.extend(["--%s" % key, value])
+
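+        # kw_options are flattened into "--key value" pairs, e.g.
+        # (illustrative) {"app": "firefox"} contributes ["--app", "firefox"].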
+ return options
+
+ def populate_webroot(self):
+ """Populate the production test machines' webroots"""
+ self.raptor_path = os.path.join(
+ self.query_abs_dirs()["abs_test_install_dir"], "raptor"
+ )
+ if self.config.get("run_local"):
+ self.raptor_path = os.path.join(self.repo_path, "testing", "raptor")
+
+ def clobber(self):
+ # Recreate the upload directory for storing the logcat collected
+ # during APK installation.
+ super(Raptor, self).clobber()
+ upload_dir = self.query_abs_dirs()["abs_blob_upload_dir"]
+ if not os.path.isdir(upload_dir):
+ self.mkdir_p(upload_dir)
+
+ def install_android_app(self, apk, replace=False):
+ # Override AndroidMixin's install_android_app in order to capture
+ # logcat during the installation. If the installation fails,
+ # the logcat file will be left in the upload directory.
+ self.logcat_start()
+ try:
+ super(Raptor, self).install_android_app(apk, replace=replace)
+ finally:
+ self.logcat_stop()
+
+ def download_and_extract(self, extract_dirs=None, suite_categories=None):
+ # Use in-tree wptserve for Python 3.10 compatibility
+ extract_dirs = [
+ "tools/wptserve/*",
+ "tools/wpt_third_party/pywebsocket3/*",
+ ]
+ return super(Raptor, self).download_and_extract(
+ extract_dirs=extract_dirs, suite_categories=["common", "condprof", "raptor"]
+ )
+
+ def create_virtualenv(self, **kwargs):
+ """VirtualenvMixin.create_virtualenv() assumes we're using
+ self.config['virtualenv_modules']. Since we're installing
+ raptor from its source, we have to wrap that method here."""
+        # If the virtualenv already exists, just add it to the path instead of re-installing.
+        # We need it on the path to import jsonschema later when validating output for Perfherder.
+ _virtualenv_path = self.config.get("virtualenv_path")
+
+ if self.clean:
+ rmtree(_virtualenv_path, ignore_errors=True)
+
+ _python_interp = self.query_exe("python")
+ if "win" in self.platform_name() and os.path.exists(_python_interp):
+ multiprocessing.set_executable(_python_interp)
+
+ if self.run_local and os.path.exists(_virtualenv_path):
+ self.info("Virtualenv already exists, skipping creation")
+ # ffmpeg exists outside of this virtual environment so
+ # we re-add it to the platform environment on repeated
+ # local runs of browsertime visual metric tests
+ self.setup_local_ffmpeg()
+
+ if "win" in self.platform_name():
+ _path = os.path.join(_virtualenv_path, "Lib", "site-packages")
+ else:
+ _path = os.path.join(
+ _virtualenv_path,
+ "lib",
+ os.path.basename(_python_interp),
+ "site-packages",
+ )
+
+ sys.path.append(_path)
+ return
+
+ # virtualenv doesn't already exist so create it
+ # Install mozbase first, so we use in-tree versions
+ # Additionally, decide where to pull raptor requirements from.
+ if not self.run_local:
+ mozbase_requirements = os.path.join(
+ self.query_abs_dirs()["abs_test_install_dir"],
+ "config",
+ "mozbase_requirements.txt",
+ )
+ raptor_requirements = os.path.join(self.raptor_path, "requirements.txt")
+ else:
+ mozbase_requirements = os.path.join(
+ os.path.dirname(self.raptor_path),
+ "config",
+ "mozbase_source_requirements.txt",
+ )
+ raptor_requirements = os.path.join(
+ self.raptor_path, "source_requirements.txt"
+ )
+ self.register_virtualenv_module(
+ requirements=[mozbase_requirements],
+ two_pass=True,
+ editable=True,
+ )
+
+ modules = ["pip>=1.5"]
+
+ # Add modules required for visual metrics
+ py3_minor = sys.version_info.minor
+ if py3_minor <= 7:
+ modules.extend(
+ [
+ "numpy==1.16.1",
+ "Pillow==6.1.0",
+ "scipy==1.2.3",
+ "pyssim==0.4",
+ "opencv-python==4.5.4.60",
+ ]
+ )
+ else: # python version >= 3.8
+ modules.extend(
+ [
+ "numpy==1.22.0",
+ "Pillow==9.0.0",
+ "scipy==1.7.3",
+ "pyssim==0.4",
+ "opencv-python==4.5.4.60",
+ ]
+ )
+
+ if self.run_local:
+ self.setup_local_ffmpeg()
+
+ # Require pip >= 1.5 so pip will prefer .whl files to install
+ super(Raptor, self).create_virtualenv(modules=modules)
+
+ # Install Raptor dependencies
+ self.install_module(requirements=[raptor_requirements])
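+        # A minimal sketch (hypothetical paths) of the site-packages lookup in
+        # the early-exit branch above: with _virtualenv_path "/tmp/venv" and
+        # _python_interp "/usr/bin/python3.8", the path added to sys.path is
+        #   /tmp/venv/lib/python3.8/site-packages   (Linux/macOS)
+        #   /tmp/venv/Lib/site-packages             (Windows)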
+
+ def setup_local_ffmpeg(self):
+ """Make use of the users local ffmpeg when running browsertime visual
+ metrics tests.
+ """
+
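+        # Crude substring check: assume ffmpeg is already usable if "ffmpeg"
+        # appears anywhere in the PATH string.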
+ if "ffmpeg" in os.environ["PATH"]:
+ return
+
+ platform = self.platform_name()
+        btime_cache = os.path.join(self.config["mozbuild_path"], "browsertime")
+        path_to_ffmpeg = None
+ if "mac" in platform:
+ path_to_ffmpeg = os.path.join(
+ btime_cache,
+ FFMPEG_LOCAL_CACHE["mac"],
+ )
+ elif "linux" in platform:
+ path_to_ffmpeg = os.path.join(
+ btime_cache,
+ FFMPEG_LOCAL_CACHE["linux"],
+ )
+ elif "win" in platform:
+ path_to_ffmpeg = os.path.join(
+ btime_cache,
+ FFMPEG_LOCAL_CACHE["win"],
+ "bin",
+ )
+
+        if path_to_ffmpeg and os.path.exists(path_to_ffmpeg):
+ os.environ["PATH"] += os.pathsep + path_to_ffmpeg
+ self.browsertime_ffmpeg = path_to_ffmpeg
+ self.info(
+ "Added local ffmpeg found at: %s to environment." % path_to_ffmpeg
+ )
+ else:
+ raise Exception(
+ "No local ffmpeg binary found. Expected it to be here: %s"
+ % path_to_ffmpeg
+ )
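+        # Sketch (hypothetical values): if FFMPEG_LOCAL_CACHE["linux"] were
+        # "ffmpeg-4.4.1-i686-static" and mozbuild_path were "~/.mozbuild", the
+        # directory added to PATH would be
+        #   ~/.mozbuild/browsertime/ffmpeg-4.4.1-i686-static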
+
+ def install(self):
+ if not self.config.get("noinstall", False):
+ if self.app in self.firefox_android_browsers:
+ self.device.uninstall_app(self.binary_path)
+ self.install_android_app(self.installer_path)
+ else:
+ super(Raptor, self).install()
+
+ def _artifact_perf_data(self, src, dest):
+ if not os.path.isdir(os.path.dirname(dest)):
+ # create upload dir if it doesn't already exist
+ self.info("Creating dir: %s" % os.path.dirname(dest))
+ os.makedirs(os.path.dirname(dest))
+ self.info("Copying raptor results from %s to %s" % (src, dest))
+ try:
+ copyfile(src, dest)
+ except Exception as e:
+ self.critical("Error copying results %s to upload dir %s" % (src, dest))
+ self.info(str(e))
+
+ def run_tests(self, args=None, **kw):
+ """Run raptor tests"""
+
+ # Get Raptor options
+ options = self.raptor_options(args=args, **kw)
+
+ # Python version check
+ python = self.query_python_path()
+ self.run_command([python, "--version"])
+ parser = RaptorOutputParser(
+ config=self.config, log_obj=self.log_obj, error_list=RaptorErrorList
+ )
+ env = {}
+ env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ if not self.run_local:
+ env["MINIDUMP_STACKWALK"] = self.query_minidump_stackwalk()
+ env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "full"
+ if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
+ self.mkdir_p(env["MOZ_UPLOAD_DIR"])
+ env = self.query_env(partial_env=env, log_level=INFO)
+ # adjust PYTHONPATH to be able to use raptor as a python package
+ if "PYTHONPATH" in env:
+ env["PYTHONPATH"] = self.raptor_path + os.pathsep + env["PYTHONPATH"]
+ else:
+ env["PYTHONPATH"] = self.raptor_path
+
+ # mitmproxy needs path to mozharness when installing the cert, and tooltool
+ env["SCRIPTSPATH"] = scripts_path
+ env["EXTERNALTOOLSPATH"] = external_tools_path
+
+ # Needed to load unsigned Raptor WebExt on release builds
+ if self.is_release_build:
+ env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
+
+ if self.repo_path is not None:
+ env["MOZ_DEVELOPER_REPO_DIR"] = self.repo_path
+ if self.obj_path is not None:
+ env["MOZ_DEVELOPER_OBJ_DIR"] = self.obj_path
+ if self.mozbuild_path is not None:
+ env["MOZ_MOZBUILD_DIR"] = self.mozbuild_path
+
+ # Sets a timeout for how long Raptor should run without output
+ output_timeout = self.config.get("raptor_output_timeout", 3600)
+ # Run Raptor tests
+ run_tests = os.path.join(self.raptor_path, "raptor", "raptor.py")
+
+ mozlog_opts = ["--log-tbpl-level=debug"]
+ if not self.run_local and "suite" in self.config:
+ fname_pattern = "%s_%%s.log" % self.config["test"]
+ mozlog_opts.append(
+ "--log-errorsummary=%s"
+ % os.path.join(env["MOZ_UPLOAD_DIR"], fname_pattern % "errorsummary")
+ )
+ mozlog_opts.append(
+ "--log-raw=%s"
+ % os.path.join(env["MOZ_UPLOAD_DIR"], fname_pattern % "raw")
+ )
+
+ def launch_in_debug_mode(cmdline):
+ cmdline = set(cmdline)
+ debug_opts = {"--debug", "--debugger", "--debugger_args"}
+
+ return bool(debug_opts.intersection(cmdline))
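+        # e.g. launch_in_debug_mode(["python", "raptor.py", "--debug"]) -> True;
+        # a command line without any debugger flags -> False.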
+
+ if self.app in self.android_browsers:
+ self.logcat_start()
+
+ command = [python, run_tests] + options + mozlog_opts
+ if launch_in_debug_mode(command):
+ raptor_process = subprocess.Popen(command, cwd=self.workdir, env=env)
+ raptor_process.wait()
+ else:
+ self.return_code = self.run_command(
+ command,
+ cwd=self.workdir,
+ output_timeout=output_timeout,
+ output_parser=parser,
+ env=env,
+ )
+
+ if self.app in self.android_browsers:
+ self.logcat_stop()
+
+ if parser.minidump_output:
+ self.info("Looking at the minidump files for debugging purposes...")
+ for item in parser.minidump_output:
+ self.run_command(["ls", "-l", item])
+
+ elif not self.run_local:
+ # Copy results to upload dir so they are included as an artifact
+ self.info("Copying Raptor results to upload dir:")
+
+ src = os.path.join(self.query_abs_dirs()["abs_work_dir"], "raptor.json")
+ dest = os.path.join(env["MOZ_UPLOAD_DIR"], "perfherder-data.json")
+ self.info(str(dest))
+ self._artifact_perf_data(src, dest)
+
+ # Make individual perfherder data JSON's for each supporting data type
+ for file in glob.glob(
+ os.path.join(self.query_abs_dirs()["abs_work_dir"], "*")
+ ):
+ path, filename = os.path.split(file)
+
+ if not filename.startswith("raptor-"):
+ continue
+
+ # filename is expected to contain a unique data name
+ # i.e. raptor-os-baseline-power.json would result in
+ # the data name os-baseline-power
+ data_name = "-".join(filename.split("-")[1:])
+ data_name = ".".join(data_name.split(".")[:-1])
+
+ src = file
+ dest = os.path.join(
+ env["MOZ_UPLOAD_DIR"], "perfherder-data-%s.json" % data_name
+ )
+ self._artifact_perf_data(src, dest)
+
+ src = os.path.join(
+ self.query_abs_dirs()["abs_work_dir"], "screenshots.html"
+ )
+ if os.path.exists(src):
+ dest = os.path.join(env["MOZ_UPLOAD_DIR"], "screenshots.html")
+ self.info(str(dest))
+ self._artifact_perf_data(src, dest)
+
+ # Allow log failures to over-ride successful runs of the test harness and
+ # give log failures priority, so that, for instance, log failures resulting
+ # in TBPL_RETRY cause a retry rather than simply reporting an error.
+ if parser.tbpl_status != TBPL_SUCCESS:
+ parser_status = EXIT_STATUS_DICT[parser.tbpl_status]
+ self.info(
+ "return code %s changed to %s due to log output"
+ % (str(self.return_code), str(parser_status))
+ )
+ self.return_code = parser_status
+
+
+class RaptorOutputParser(OutputParser):
+ minidump_regex = re.compile(
+ r'''raptorError: "error executing: '(\S+) (\S+) (\S+)'"'''
+ )
+ RE_PERF_DATA = re.compile(r".*PERFHERDER_DATA:\s+(\{.*\})")
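+    # e.g. a log line like
+    #   raptorError: "error executing: '/path/minidump-stackwalk /tmp/a.dmp /tmp/syms'"
+    # matches minidump_regex; the three groups (typically the stackwalk binary,
+    # dump file, and symbols path) become self.minidump_output below.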
+
+ def __init__(self, **kwargs):
+ super(RaptorOutputParser, self).__init__(**kwargs)
+ self.minidump_output = None
+ self.found_perf_data = []
+ self.tbpl_status = TBPL_SUCCESS
+ self.worst_log_level = INFO
+ self.harness_retry_re = TinderBoxPrintRe["harness_error"]["retry_regex"]
+
+ def parse_single_line(self, line):
+ m = self.minidump_regex.search(line)
+ if m:
+ self.minidump_output = (m.group(1), m.group(2), m.group(3))
+
+ m = self.RE_PERF_DATA.match(line)
+ if m:
+ self.found_perf_data.append(m.group(1))
+
+ if self.harness_retry_re.search(line):
+ self.critical(" %s" % line)
+ self.worst_log_level = self.worst_level(CRITICAL, self.worst_log_level)
+ self.tbpl_status = self.worst_level(
+ TBPL_RETRY, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+ return # skip base parse_single_line
+ super(RaptorOutputParser, self).parse_single_line(line)
diff --git a/testing/mozharness/mozharness/mozilla/testing/talos.py b/testing/mozharness/mozharness/mozilla/testing/talos.py
new file mode 100755
index 0000000000..4ccd0ab3e6
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/talos.py
@@ -0,0 +1,896 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""
+run talos tests in a virtualenv
+"""
+
+import copy
+import io
+import json
+import multiprocessing
+import os
+import pprint
+import re
+import shutil
+import subprocess
+import sys
+
+import mozharness
+import six
+from mozharness.base.config import parse_config_file
+from mozharness.base.errors import PythonErrorList
+from mozharness.base.log import CRITICAL, DEBUG, ERROR, INFO, WARNING, OutputParser
+from mozharness.base.python import Python3Virtualenv
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.automation import (
+ TBPL_FAILURE,
+ TBPL_RETRY,
+ TBPL_SUCCESS,
+ TBPL_WARNING,
+ TBPL_WORST_LEVEL_TUPLE,
+)
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.errors import TinderBoxPrintRe
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.tooltool import TooltoolMixin
+
+scripts_path = os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__)))
+external_tools_path = os.path.join(scripts_path, "external_tools")
+
+TalosErrorList = PythonErrorList + [
+ {"regex": re.compile(r"""run-as: Package '.*' is unknown"""), "level": DEBUG},
+ {"substr": r"""FAIL: Graph server unreachable""", "level": CRITICAL},
+ {"substr": r"""FAIL: Busted:""", "level": CRITICAL},
+ {"substr": r"""FAIL: failed to cleanup""", "level": ERROR},
+ {"substr": r"""erfConfigurator.py: Unknown error""", "level": CRITICAL},
+ {"substr": r"""talosError""", "level": CRITICAL},
+ {
+ "regex": re.compile(r"""No machine_name called '.*' can be found"""),
+ "level": CRITICAL,
+ },
+ {
+ "substr": r"""No such file or directory: 'browser_output.txt'""",
+ "level": CRITICAL,
+ "explanation": "Most likely the browser failed to launch, or the test was otherwise "
+ "unsuccessful in even starting.",
+ },
+]
+
+GeckoProfilerSettings = (
+ "gecko_profile_interval",
+ "gecko_profile_entries",
+ "gecko_profile_features",
+ "gecko_profile_threads",
+)
+
+# TODO: check for running processes on script invocation
+
+
+class TalosOutputParser(OutputParser):
+ minidump_regex = re.compile(
+ r'''talosError: "error executing: '(\S+) (\S+) (\S+)'"'''
+ )
+ RE_PERF_DATA = re.compile(r".*PERFHERDER_DATA:\s+(\{.*\})")
+ worst_tbpl_status = TBPL_SUCCESS
+
+ def __init__(self, **kwargs):
+ super(TalosOutputParser, self).__init__(**kwargs)
+ self.minidump_output = None
+ self.found_perf_data = []
+
+ def update_worst_log_and_tbpl_levels(self, log_level, tbpl_level):
+ self.worst_log_level = self.worst_level(log_level, self.worst_log_level)
+ self.worst_tbpl_status = self.worst_level(
+ tbpl_level, self.worst_tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+
+ def parse_single_line(self, line):
+ """In Talos land, every line that starts with RETURN: needs to be
+ printed with a TinderboxPrint:"""
+ if line.startswith("RETURN:"):
+ line.replace("RETURN:", "TinderboxPrint:")
+ m = self.minidump_regex.search(line)
+ if m:
+ self.minidump_output = (m.group(1), m.group(2), m.group(3))
+
+ m = self.RE_PERF_DATA.match(line)
+ if m:
+ self.found_perf_data.append(m.group(1))
+
+ # now let's check if we should retry
+ harness_retry_re = TinderBoxPrintRe["harness_error"]["retry_regex"]
+ if harness_retry_re.search(line):
+ self.critical(" %s" % line)
+ self.update_worst_log_and_tbpl_levels(CRITICAL, TBPL_RETRY)
+ return # skip base parse_single_line
+ super(TalosOutputParser, self).parse_single_line(line)
+
+
+class Talos(
+ TestingMixin, MercurialScript, TooltoolMixin, Python3Virtualenv, CodeCoverageMixin
+):
+ """
+ install and run Talos tests
+ """
+
+ config_options = (
+ [
+ [
+ ["--use-talos-json"],
+ {
+ "action": "store_true",
+ "dest": "use_talos_json",
+ "default": False,
+ "help": "Use talos config from talos.json",
+ },
+ ],
+ [
+ ["--suite"],
+ {
+ "action": "store",
+ "dest": "suite",
+ "help": "Talos suite to run (from talos json)",
+ },
+ ],
+ [
+ ["--system-bits"],
+ {
+ "action": "store",
+ "dest": "system_bits",
+ "type": "choice",
+ "default": "32",
+ "choices": ["32", "64"],
+ "help": "Testing 32 or 64 (for talos json plugins)",
+ },
+ ],
+ [
+ ["--add-option"],
+ {
+ "action": "extend",
+ "dest": "talos_extra_options",
+ "default": None,
+ "help": "extra options to talos",
+ },
+ ],
+ [
+ ["--gecko-profile"],
+ {
+ "dest": "gecko_profile",
+ "action": "store_true",
+ "default": False,
+ "help": "Whether or not to profile the test run and save the profile results",
+ },
+ ],
+ [
+ ["--gecko-profile-interval"],
+ {
+ "dest": "gecko_profile_interval",
+ "type": "int",
+ "help": "The interval between samples taken by the profiler (milliseconds)",
+ },
+ ],
+ [
+ ["--gecko-profile-entries"],
+ {
+ "dest": "gecko_profile_entries",
+ "type": "int",
+ "help": "How many samples to take with the profiler",
+ },
+ ],
+ [
+ ["--gecko-profile-features"],
+ {
+ "dest": "gecko_profile_features",
+ "type": "str",
+ "default": None,
+ "help": "The features to enable in the profiler (comma-separated)",
+ },
+ ],
+ [
+ ["--gecko-profile-threads"],
+ {
+ "dest": "gecko_profile_threads",
+ "type": "str",
+ "help": "Comma-separated list of threads to sample.",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "dest": "e10s",
+ "action": "store_false",
+ "default": True,
+ "help": "Run without multiple processes (e10s).",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_false",
+ "dest": "fission",
+ "default": True,
+ "help": "Disable Fission (site isolation) in Gecko.",
+ },
+ ],
+ [
+ ["--project"],
+ {
+ "dest": "project",
+ "type": "str",
+ "help": "The project branch we're running tests on. Used for "
+ "disabling/skipping tests.",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Set a browser preference. May be used multiple times.",
+ },
+ ],
+ [
+ ["--skip-preflight"],
+ {
+ "action": "store_true",
+ "dest": "skip_preflight",
+ "default": False,
+ "help": "skip preflight commands to prepare machine.",
+ },
+ ],
+ ]
+ + testing_config_options
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
+ def __init__(self, **kwargs):
+ kwargs.setdefault("config_options", self.config_options)
+ kwargs.setdefault(
+ "all_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault(
+ "default_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault("config", {})
+ super(Talos, self).__init__(**kwargs)
+
+ self.workdir = self.query_abs_dirs()["abs_work_dir"] # convenience
+
+ self.run_local = self.config.get("run_local")
+ self.installer_url = self.config.get("installer_url")
+ self.test_packages_url = self.config.get("test_packages_url")
+ self.talos_json_url = self.config.get("talos_json_url")
+ self.talos_json = self.config.get("talos_json")
+ self.talos_json_config = self.config.get("talos_json_config")
+ self.repo_path = self.config.get("repo_path")
+ self.obj_path = self.config.get("obj_path")
+ self.tests = None
+ extra_opts = self.config.get("talos_extra_options", [])
+ self.gecko_profile = (
+ self.config.get("gecko_profile") or "--gecko-profile" in extra_opts
+ )
+ for setting in GeckoProfilerSettings:
+ value = self.config.get(setting)
+ arg = "--" + setting.replace("_", "-")
+ if value is None:
+ try:
+ value = extra_opts[extra_opts.index(arg) + 1]
+                except (IndexError, ValueError):
+                    pass  # Not found, or no value followed the flag
+ if value is not None:
+ setattr(self, setting, value)
+ if not self.gecko_profile:
+ self.warning("enabling Gecko profiler for %s setting!" % setting)
+ self.gecko_profile = True
+ self.pagesets_name = None
+ self.benchmark_zip = None
+ self.webextensions_zip = None
+
+ # We accept some configuration options from the try commit message in the format
+ # mozharness: <options>
+ # Example try commit message:
+ # mozharness: --gecko-profile try: <stuff>
+ def query_gecko_profile_options(self):
+ gecko_results = []
+ # finally, if gecko_profile is set, we add that to the talos options
+ if self.gecko_profile:
+ gecko_results.append("--gecko-profile")
+ for setting in GeckoProfilerSettings:
+ value = getattr(self, setting, None)
+ if value:
+ arg = "--" + setting.replace("_", "-")
+ gecko_results.extend([arg, str(value)])
+ return gecko_results
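+        # For example, with gecko_profile set and gecko_profile_interval == 1,
+        # this returns ["--gecko-profile", "--gecko-profile-interval", "1"].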
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(Talos, self).query_abs_dirs()
+ abs_dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ abs_dirs["abs_test_install_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "tests"
+ )
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def query_talos_json_config(self):
+ """Return the talos json config."""
+ if self.talos_json_config:
+ return self.talos_json_config
+ if not self.talos_json:
+ self.talos_json = os.path.join(self.talos_path, "talos.json")
+ self.talos_json_config = parse_config_file(self.talos_json)
+ self.info(pprint.pformat(self.talos_json_config))
+ return self.talos_json_config
+
+ def make_talos_domain(self, host):
+ return host + "-talos"
+
+ def split_path(self, path):
+ result = []
+ while True:
+ path, folder = os.path.split(path)
+ if folder:
+ result.append(folder)
+ continue
+ elif path:
+ result.append(path)
+ break
+
+ result.reverse()
+ return result
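+        # e.g. split_path("a/b/c") -> ["a", "b", "c"],
+        # and split_path("/a/b") -> ["/", "a", "b"].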
+
+ def merge_paths(self, lhs, rhs):
+ backtracks = 0
+ for subdir in rhs:
+ if subdir == "..":
+ backtracks += 1
+ else:
+ break
+        # lhs[:-backtracks] would wrongly be empty when backtracks == 0
+        return lhs[: len(lhs) - backtracks] + rhs[backtracks:]
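+        # e.g. merge_paths(["fis", "tp5n", "site"], ["..", "other", "page.html"])
+        # -> ["fis", "tp5n", "other", "page.html"]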
+
+ def replace_relative_iframe_paths(self, directory, filename):
+ """This will find iframes with relative paths and replace them with
+ absolute paths containing domains derived from the original source's
+ domain. This helps us better simulate real-world cases for fission
+ """
+ if not filename.endswith(".html"):
+ return
+
+ directory_pieces = self.split_path(directory)
+ while directory_pieces and directory_pieces[0] != "fis":
+ directory_pieces = directory_pieces[1:]
+ path = os.path.join(directory, filename)
+
+ # XXX: ugh, is there a better way to account for multiple encodings than just
+ # trying each of them?
+ encodings = ["utf-8", "latin-1"]
+ iframe_pattern = re.compile(r'(iframe.*")(\.\./.*\.html)"')
+ for encoding in encodings:
+ try:
+ with io.open(path, "r", encoding=encoding) as f:
+ content = f.read()
+
+ def replace_iframe_src(match):
+ src = match.group(2)
+ split = self.split_path(src)
+ merged = self.merge_paths(directory_pieces, split)
+ host = merged[3]
+ site_origin_hash = self.make_talos_domain(host)
+ new_url = 'http://%s/%s"' % (
+ site_origin_hash,
+ "/".join(merged), # pylint --py3k: W1649
+ )
+ self.info(
+ "Replacing %s with %s in iframe inside %s"
+ % (match.group(2), new_url, path)
+ )
+ return match.group(1) + new_url
+
+ content = re.sub(iframe_pattern, replace_iframe_src, content)
+ with io.open(path, "w", encoding=encoding) as f:
+ f.write(content)
+ break
+ except UnicodeDecodeError:
+ pass
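+    # Sketch of the rewrite above (hypothetical layout): a relative iframe such
+    # as <iframe src="../dom/page.html"> under the fis/ pageset copy is replaced
+    # with an absolute URL like http://<merged[3]>-talos/<joined merged path>,
+    # so the frame loads from a distinct "-talos" domain.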
+
+ def query_pagesets_name(self):
+ """Certain suites require external pagesets to be downloaded and
+ extracted.
+ """
+ if self.pagesets_name:
+ return self.pagesets_name
+ if self.query_talos_json_config() and self.suite is not None:
+ self.pagesets_name = self.talos_json_config["suites"][self.suite].get(
+ "pagesets_name"
+ )
+ self.pagesets_name_manifest = "tp5n-pageset.manifest"
+ return self.pagesets_name
+
+ def query_benchmark_zip(self):
+ """Certain suites require external benchmarks to be downloaded and
+ extracted.
+ """
+ if self.benchmark_zip:
+ return self.benchmark_zip
+ if self.query_talos_json_config() and self.suite is not None:
+ self.benchmark_zip = self.talos_json_config["suites"][self.suite].get(
+ "benchmark_zip"
+ )
+ self.benchmark_zip_manifest = "jetstream-benchmark.manifest"
+ return self.benchmark_zip
+
+ def query_webextensions_zip(self):
+ """Certain suites require external WebExtension sets to be downloaded and
+ extracted.
+ """
+ if self.webextensions_zip:
+ return self.webextensions_zip
+ if self.query_talos_json_config() and self.suite is not None:
+ self.webextensions_zip = self.talos_json_config["suites"][self.suite].get(
+ "webextensions_zip"
+ )
+ self.webextensions_zip_manifest = "webextensions.manifest"
+ return self.webextensions_zip
+
+ def get_suite_from_test(self):
+ """Retrieve the talos suite name from a given talos test name."""
+ # running locally, single test name provided instead of suite; go through tests and
+ # find suite name
+ suite_name = None
+ if self.query_talos_json_config():
+ if "-a" in self.config["talos_extra_options"]:
+ test_name_index = self.config["talos_extra_options"].index("-a") + 1
+ if "--activeTests" in self.config["talos_extra_options"]:
+ test_name_index = (
+ self.config["talos_extra_options"].index("--activeTests") + 1
+ )
+ if test_name_index < len(self.config["talos_extra_options"]):
+ test_name = self.config["talos_extra_options"][test_name_index]
+ for talos_suite in self.talos_json_config["suites"]:
+ if test_name in self.talos_json_config["suites"][talos_suite].get(
+ "tests"
+ ):
+ suite_name = talos_suite
+ if not suite_name:
+ # no suite found to contain the specified test, error out
+ self.fatal("Test name is missing or invalid")
+ else:
+ self.fatal("Talos json config not found, cannot verify suite")
+ return suite_name
+
+ def query_suite_extra_prefs(self):
+ if self.query_talos_json_config() and self.suite is not None:
+ return self.talos_json_config["suites"][self.suite].get("extra_prefs", [])
+
+ return []
+
+ def validate_suite(self):
+ """Ensure suite name is a valid talos suite."""
+ if self.query_talos_json_config() and self.suite is not None:
+ if self.suite not in self.talos_json_config.get("suites"):
+ self.fatal(
+ "Suite '%s' is not valid (not found in talos json config)"
+ % self.suite
+ )
+
+ def talos_options(self, args=None, **kw):
+ """return options to talos"""
+ # binary path
+ binary_path = self.binary_path or self.config.get("binary_path")
+ if not binary_path:
+ msg = """Talos requires a path to the binary. You can specify binary_path or add
+ download-and-extract to your action list."""
+ self.fatal(msg)
+
+ # talos options
+ options = []
+ # talos can't gather data if the process name ends with '.exe'
+ if binary_path.endswith(".exe"):
+ binary_path = binary_path[:-4]
+ # options overwritten from **kw
+ kw_options = {"executablePath": binary_path}
+ if "suite" in self.config:
+ kw_options["suite"] = self.config["suite"]
+ if self.config.get("title"):
+ kw_options["title"] = self.config["title"]
+ if self.symbols_path:
+ kw_options["symbolsPath"] = self.symbols_path
+ if self.config.get("project", None):
+ kw_options["project"] = self.config["project"]
+
+ kw_options.update(kw)
+ # talos expects tests to be in the format (e.g.) 'ts:tp5:tsvg'
+ tests = kw_options.get("activeTests")
+ if tests and not isinstance(tests, six.string_types):
+ tests = ":".join(tests) # Talos expects this format
+ kw_options["activeTests"] = tests
+ for key, value in kw_options.items():
+ options.extend(["--%s" % key, value])
+ # configure profiling options
+ options.extend(self.query_gecko_profile_options())
+ # extra arguments
+ if args is not None:
+ options += args
+ if "talos_extra_options" in self.config:
+ options += self.config["talos_extra_options"]
+ if self.config.get("code_coverage", False):
+ options.extend(["--code-coverage"])
+
+ # Add extra_prefs defined by individual test suites in talos.json
+ extra_prefs = self.query_suite_extra_prefs()
+ # Add extra_prefs from the configuration
+ if self.config["extra_prefs"]:
+ extra_prefs.extend(self.config["extra_prefs"])
+
+ options.extend(["--setpref={}".format(p) for p in extra_prefs])
+
+ # disabling fission can come from the --disable-fission cmd line argument; or in CI
+ # it comes from a taskcluster transform which adds a --setpref for fission.autostart
+ if (not self.config["fission"]) or "fission.autostart=false" in self.config[
+ "extra_prefs"
+ ]:
+ options.extend(["--disable-fission"])
+
+ return options
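+        # For example (hypothetical values), kw_options of
+        #   {"executablePath": "/opt/firefox/firefox", "suite": "tp5o",
+        #    "activeTests": ["ts_paint", "tp5o"]}
+        # expands to ["--executablePath", "/opt/firefox/firefox", "--suite",
+        # "tp5o", "--activeTests", "ts_paint:tp5o"], plus any profiler,
+        # extra-option, and pref arguments appended above.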
+
+ def populate_webroot(self):
+ """Populate the production test machines' webroots"""
+ self.talos_path = os.path.join(
+ self.query_abs_dirs()["abs_test_install_dir"], "talos"
+ )
+
+ # need to determine if talos pageset is required to be downloaded
+ if self.config.get("run_local") and "talos_extra_options" in self.config:
+ # talos initiated locally, get and verify test/suite from cmd line
+ self.talos_path = os.path.dirname(self.talos_json)
+ if (
+ "-a" in self.config["talos_extra_options"]
+ or "--activeTests" in self.config["talos_extra_options"]
+ ):
+ # test name (-a or --activeTests) specified, find out what suite it is a part of
+ self.suite = self.get_suite_from_test()
+ elif "--suite" in self.config["talos_extra_options"]:
+ # --suite specified, get suite from cmd line and ensure is valid
+ suite_name_index = (
+ self.config["talos_extra_options"].index("--suite") + 1
+ )
+ if suite_name_index < len(self.config["talos_extra_options"]):
+ self.suite = self.config["talos_extra_options"][suite_name_index]
+ self.validate_suite()
+ else:
+ self.fatal("Suite name not provided")
+ else:
+ # talos initiated in production via mozharness
+ self.suite = self.config["suite"]
+
+ tooltool_artifacts = []
+ src_talos_pageset_dest = os.path.join(self.talos_path, "talos", "tests")
+ # unfortunately this path has to be short and can't be descriptive, because
+ # on Windows we tend to already push the boundaries of the max path length
+ # constraint. This will contain the tp5 pageset, but adjusted to have
+ # absolute URLs on iframes for the purposes of better modeling things for
+ # fission.
+ src_talos_pageset_multidomain_dest = os.path.join(
+ self.talos_path, "talos", "fis"
+ )
+ webextension_dest = os.path.join(self.talos_path, "talos", "webextensions")
+
+ if self.query_pagesets_name():
+ tooltool_artifacts.append(
+ {
+ "name": self.pagesets_name,
+ "manifest": self.pagesets_name_manifest,
+ "dest": src_talos_pageset_dest,
+ }
+ )
+ tooltool_artifacts.append(
+ {
+ "name": self.pagesets_name,
+ "manifest": self.pagesets_name_manifest,
+ "dest": src_talos_pageset_multidomain_dest,
+ "postprocess": self.replace_relative_iframe_paths,
+ }
+ )
+
+ if self.query_benchmark_zip():
+ tooltool_artifacts.append(
+ {
+ "name": self.benchmark_zip,
+ "manifest": self.benchmark_zip_manifest,
+ "dest": src_talos_pageset_dest,
+ }
+ )
+
+ if self.query_webextensions_zip():
+ tooltool_artifacts.append(
+ {
+ "name": self.webextensions_zip,
+ "manifest": self.webextensions_zip_manifest,
+ "dest": webextension_dest,
+ }
+ )
+
+        # now that we have the suite name, check whether each artifact is required and, if so, download it
+ # the --no-download option will override this
+ for artifact in tooltool_artifacts:
+ if "--no-download" not in self.config.get("talos_extra_options", []):
+ self.info("Downloading %s with tooltool..." % artifact)
+
+ archive = os.path.join(artifact["dest"], artifact["name"])
+ output_dir_path = re.sub(r"\.zip$", "", archive)
+ if not os.path.exists(archive):
+ manifest_file = os.path.join(self.talos_path, artifact["manifest"])
+ self.tooltool_fetch(
+ manifest_file,
+ output_dir=artifact["dest"],
+ cache=self.config.get("tooltool_cache"),
+ )
+ unzip = self.query_exe("unzip")
+ unzip_cmd = [unzip, "-q", "-o", archive, "-d", artifact["dest"]]
+ self.run_command(unzip_cmd, halt_on_failure=True)
+
+ if "postprocess" in artifact:
+ for subdir, dirs, files in os.walk(output_dir_path):
+ for file in files:
+ artifact["postprocess"](subdir, file)
+ else:
+ self.info("%s already available" % artifact)
+
+ else:
+ self.info(
+ "Not downloading %s because the no-download option was specified"
+ % artifact
+ )
+
+ # if running webkit tests locally, need to copy webkit source into talos/tests
+ if self.config.get("run_local") and (
+ "stylebench" in self.suite or "motionmark" in self.suite
+ ):
+ self.get_webkit_source()
+
+ def get_webkit_source(self):
+ # in production the build system auto copies webkit source into place;
+ # but when run locally we need to do this manually, so that talos can find it
+ src = os.path.join(self.repo_path, "third_party", "webkit", "PerformanceTests")
+ dest = os.path.join(
+ self.talos_path, "talos", "tests", "webkit", "PerformanceTests"
+ )
+
+ if os.path.exists(dest):
+ shutil.rmtree(dest)
+
+ self.info("Copying webkit benchmarks from %s to %s" % (src, dest))
+ try:
+ shutil.copytree(src, dest)
+ except Exception:
+ self.critical("Error copying webkit benchmarks from %s to %s" % (src, dest))
+
+ # Action methods. {{{1
+ # clobber defined in BaseScript
+
+ def download_and_extract(self, extract_dirs=None, suite_categories=None):
+ # Use in-tree wptserve for Python 3.10 compatibility
+ extract_dirs = [
+ "tools/wptserve/*",
+ "tools/wpt_third_party/pywebsocket3/*",
+ ]
+ return super(Talos, self).download_and_extract(
+ extract_dirs=extract_dirs, suite_categories=["common", "talos"]
+ )
+
+ def create_virtualenv(self, **kwargs):
+ """VirtualenvMixin.create_virtualenv() assuemes we're using
+ self.config['virtualenv_modules']. Since we are installing
+ talos from its source, we have to wrap that method here."""
+ # if virtualenv already exists, just add to path and don't re-install, need it
+ # in path so can import jsonschema later when validating output for perfherder
+ _virtualenv_path = self.config.get("virtualenv_path")
+
+ _python_interp = self.query_exe("python")
+ if "win" in self.platform_name() and os.path.exists(_python_interp):
+ multiprocessing.set_executable(_python_interp)
+
+ if self.run_local and os.path.exists(_virtualenv_path):
+ self.info("Virtualenv already exists, skipping creation")
+
+ if "win" in self.platform_name():
+ _path = os.path.join(_virtualenv_path, "Lib", "site-packages")
+ else:
+ _path = os.path.join(
+ _virtualenv_path,
+ "lib",
+ os.path.basename(_python_interp),
+ "site-packages",
+ )
+
+ sys.path.append(_path)
+ return
+
+        # The virtualenv doesn't already exist, so create it.
+        # Install mozbase first, so we use in-tree versions.
+        # Additionally, decide where to pull talos requirements from.
+ if not self.run_local:
+ mozbase_requirements = os.path.join(
+ self.query_abs_dirs()["abs_test_install_dir"],
+ "config",
+ "mozbase_requirements.txt",
+ )
+ talos_requirements = os.path.join(self.talos_path, "requirements.txt")
+ else:
+ mozbase_requirements = os.path.join(
+ os.path.dirname(self.talos_path),
+ "config",
+ "mozbase_source_requirements.txt",
+ )
+ talos_requirements = os.path.join(
+ self.talos_path, "source_requirements.txt"
+ )
+ self.register_virtualenv_module(
+ requirements=[mozbase_requirements],
+ two_pass=True,
+ editable=True,
+ )
+ super(Talos, self).create_virtualenv()
+        # The talos harness also requires the packages
+        # listed in the talos requirements.txt file.
+ self.install_module(requirements=[talos_requirements])
+
+ def _validate_treeherder_data(self, parser):
+ # late import is required, because install is done in create_virtualenv
+ import jsonschema
+
+ if len(parser.found_perf_data) != 1:
+ self.critical(
+ "PERFHERDER_DATA was seen %d times, expected 1."
+ % len(parser.found_perf_data)
+ )
+ parser.update_worst_log_and_tbpl_levels(WARNING, TBPL_WARNING)
+ return
+
+ schema_path = os.path.join(
+ external_tools_path, "performance-artifact-schema.json"
+ )
+ self.info("Validating PERFHERDER_DATA against %s" % schema_path)
+ try:
+ with open(schema_path) as f:
+ schema = json.load(f)
+ data = json.loads(parser.found_perf_data[0])
+ jsonschema.validate(data, schema)
+ except Exception:
+ self.exception("Error while validating PERFHERDER_DATA")
+ parser.update_worst_log_and_tbpl_levels(WARNING, TBPL_WARNING)
+
+ def _artifact_perf_data(self, parser, dest):
+ src = os.path.join(self.query_abs_dirs()["abs_work_dir"], "local.json")
+ try:
+ shutil.copyfile(src, dest)
+ except Exception:
+ self.critical("Error copying results %s to upload dir %s" % (src, dest))
+ parser.update_worst_log_and_tbpl_levels(CRITICAL, TBPL_FAILURE)
+
+ def run_tests(self, args=None, **kw):
+ """run Talos tests"""
+
+ # get talos options
+ options = self.talos_options(args=args, **kw)
+
+ # XXX temporary python version check
+ python = self.query_python_path()
+ self.run_command([python, "--version"])
+ parser = TalosOutputParser(
+ config=self.config, log_obj=self.log_obj, error_list=TalosErrorList
+ )
+ env = {}
+ env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ if not self.run_local:
+ env["MINIDUMP_STACKWALK"] = self.query_minidump_stackwalk()
+ env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "full"
+ if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
+ self.mkdir_p(env["MOZ_UPLOAD_DIR"])
+ env = self.query_env(partial_env=env, log_level=INFO)
+ # adjust PYTHONPATH to be able to use talos as a python package
+ if "PYTHONPATH" in env:
+ env["PYTHONPATH"] = self.talos_path + os.pathsep + env["PYTHONPATH"]
+ else:
+ env["PYTHONPATH"] = self.talos_path
+
+ if self.repo_path is not None:
+ env["MOZ_DEVELOPER_REPO_DIR"] = self.repo_path
+ if self.obj_path is not None:
+ env["MOZ_DEVELOPER_OBJ_DIR"] = self.obj_path
+
+        # TODO: consider getting rid of this as we should default to stylo now
+ env["STYLO_FORCE_ENABLED"] = "1"
+
+ # sets a timeout for how long talos should run without output
+ output_timeout = self.config.get("talos_output_timeout", 3600)
+ # run talos tests
+ run_tests = os.path.join(self.talos_path, "talos", "run_tests.py")
+
+ mozlog_opts = ["--log-tbpl-level=debug"]
+ if not self.run_local and "suite" in self.config:
+ fname_pattern = "%s_%%s.log" % self.config["suite"]
+ mozlog_opts.append(
+ "--log-errorsummary=%s"
+ % os.path.join(env["MOZ_UPLOAD_DIR"], fname_pattern % "errorsummary")
+ )
+ mozlog_opts.append(
+ "--log-raw=%s"
+ % os.path.join(env["MOZ_UPLOAD_DIR"], fname_pattern % "raw")
+ )
+
+ def launch_in_debug_mode(cmdline):
+ cmdline = set(cmdline)
+ debug_opts = {"--debug", "--debugger", "--debugger_args"}
+
+ return bool(debug_opts.intersection(cmdline))
+
+ command = [python, run_tests] + options + mozlog_opts
+ if launch_in_debug_mode(command):
+ talos_process = subprocess.Popen(
+ command, cwd=self.workdir, env=env, bufsize=0
+ )
+ talos_process.wait()
+ else:
+ self.return_code = self.run_command(
+ command,
+ cwd=self.workdir,
+ output_timeout=output_timeout,
+ output_parser=parser,
+ env=env,
+ )
+ if parser.minidump_output:
+ self.info("Looking at the minidump files for debugging purposes...")
+ for item in parser.minidump_output:
+ self.run_command(["ls", "-l", item])
+
+ if self.return_code not in [0]:
+ # update the worst log level and tbpl status
+ log_level = ERROR
+ tbpl_level = TBPL_FAILURE
+ if self.return_code == 1:
+ log_level = WARNING
+ tbpl_level = TBPL_WARNING
+ if self.return_code == 4:
+ log_level = WARNING
+ tbpl_level = TBPL_RETRY
+
+ parser.update_worst_log_and_tbpl_levels(log_level, tbpl_level)
+ elif "--no-upload-results" not in options:
+ if not self.gecko_profile:
+ self._validate_treeherder_data(parser)
+ if not self.run_local:
+ # copy results to upload dir so they are included as an artifact
+ dest = os.path.join(env["MOZ_UPLOAD_DIR"], "perfherder-data.json")
+ self._artifact_perf_data(parser, dest)
+
+ self.record_status(parser.worst_tbpl_status, level=parser.worst_log_level)
diff --git a/testing/mozharness/mozharness/mozilla/testing/testbase.py b/testing/mozharness/mozharness/mozilla/testing/testbase.py
new file mode 100755
index 0000000000..18943a2028
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/testbase.py
@@ -0,0 +1,772 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import copy
+import json
+import os
+import platform
+import ssl
+
+from mozharness.base.errors import BaseErrorList
+from mozharness.base.log import FATAL, WARNING
+from mozharness.base.python import (
+ ResourceMonitoringMixin,
+ VirtualenvMixin,
+ virtualenv_config_options,
+)
+from mozharness.lib.python.authentication import get_credentials
+from mozharness.mozilla.automation import TBPL_WARNING, AutomationMixin
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.try_tools import TryToolsMixin, try_config_options
+from mozharness.mozilla.testing.unittest import DesktopUnittestOutputParser
+from mozharness.mozilla.testing.verify_tools import (
+ VerifyToolsMixin,
+ verify_config_options,
+)
+from mozharness.mozilla.tooltool import TooltoolMixin
+from six.moves import urllib
+from six.moves.urllib.parse import ParseResult, urlparse
+
+INSTALLER_SUFFIXES = (
+ ".apk", # Android
+ ".tar.bz2",
+ ".tar.gz", # Linux
+ ".dmg", # Mac
+ ".installer-stub.exe",
+ ".installer.exe",
+ ".exe",
+ ".zip", # Windows
+)
+
+# https://searchfox.org/mozilla-central/source/testing/config/tooltool-manifests
+TOOLTOOL_PLATFORM_DIR = {
+ "linux": "linux32",
+ "linux64": "linux64",
+ "win32": "win32",
+ "win64": "win32",
+ "macosx": "macosx64",
+}
+
+
+testing_config_options = (
+ [
+ [
+ ["--installer-url"],
+ {
+ "action": "store",
+ "dest": "installer_url",
+ "default": None,
+ "help": "URL to the installer to install",
+ },
+ ],
+ [
+ ["--installer-path"],
+ {
+ "action": "store",
+ "dest": "installer_path",
+ "default": None,
+ "help": "Path to the installer to install. "
+ "This is set automatically if run with --download-and-extract.",
+ },
+ ],
+ [
+ ["--binary-path"],
+ {
+ "action": "store",
+ "dest": "binary_path",
+ "default": None,
+ "help": "Path to installed binary. This is set automatically if run with --install.", # NOQA: E501
+ },
+ ],
+ [
+ ["--exe-suffix"],
+ {
+ "action": "store",
+ "dest": "exe_suffix",
+ "default": None,
+ "help": "Executable suffix for binaries on this platform",
+ },
+ ],
+ [
+ ["--test-url"],
+ {
+ "action": "store",
+ "dest": "test_url",
+ "default": None,
+ "help": "URL to the zip file containing the actual tests",
+ },
+ ],
+ [
+ ["--test-packages-url"],
+ {
+ "action": "store",
+ "dest": "test_packages_url",
+ "default": None,
+ "help": "URL to a json file describing which tests archives to download",
+ },
+ ],
+ [
+ ["--jsshell-url"],
+ {
+ "action": "store",
+ "dest": "jsshell_url",
+ "default": None,
+ "help": "URL to the jsshell to install",
+ },
+ ],
+ [
+ ["--download-symbols"],
+ {
+ "action": "store",
+ "dest": "download_symbols",
+ "type": "choice",
+ "choices": ["ondemand", "true"],
+ "help": "Download and extract crash reporter symbols.",
+ },
+ ],
+ ]
+ + copy.deepcopy(virtualenv_config_options)
+ + copy.deepcopy(try_config_options)
+ + copy.deepcopy(verify_config_options)
+)
+
+
+# TestingMixin {{{1
+class TestingMixin(
+ VirtualenvMixin,
+ AutomationMixin,
+ ResourceMonitoringMixin,
+ TooltoolMixin,
+ TryToolsMixin,
+ VerifyToolsMixin,
+):
+ """
+ The steps to identify + download the proper bits for [browser] unit
+ tests and Talos.
+ """
+
+ installer_url = None
+ installer_path = None
+ binary_path = None
+ test_url = None
+ test_packages_url = None
+ symbols_url = None
+ symbols_path = None
+ jsshell_url = None
+ minidump_stackwalk_path = None
+ ssl_context = None
+
+ def query_build_dir_url(self, file_name):
+ """
+ Resolve a file name to a potential url in the build upload directory where
+ that file can be found.
+ """
+ if self.test_packages_url:
+ reference_url = self.test_packages_url
+ elif self.installer_url:
+ reference_url = self.installer_url
+ else:
+ self.fatal(
+ "Can't figure out build directory urls without an installer_url "
+ "or test_packages_url!"
+ )
+
+ reference_url = urllib.parse.unquote(reference_url)
+ parts = list(urlparse(reference_url))
+
+ last_slash = parts[2].rfind("/")
+ parts[2] = "/".join([parts[2][:last_slash], file_name])
+
+ url = ParseResult(*parts).geturl()
+
+ return url
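+        # Sketch (hypothetical URL): with an installer_url of
+        #   https://example.com/pub/builds/firefox-110.0.1.en-US.linux-x86_64.tar.bz2
+        # query_build_dir_url("target.test_packages.json") resolves to
+        #   https://example.com/pub/builds/target.test_packages.json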
+
+ def query_prefixed_build_dir_url(self, suffix):
+ """Resolve a file name prefixed with platform and build details to a potential url
+ in the build upload directory where that file can be found.
+ """
+ if self.test_packages_url:
+ reference_suffixes = [".test_packages.json"]
+ reference_url = self.test_packages_url
+ elif self.installer_url:
+ reference_suffixes = INSTALLER_SUFFIXES
+ reference_url = self.installer_url
+ else:
+ self.fatal(
+ "Can't figure out build directory urls without an installer_url "
+ "or test_packages_url!"
+ )
+
+ url = None
+ for reference_suffix in reference_suffixes:
+ if reference_url.endswith(reference_suffix):
+ url = reference_url[: -len(reference_suffix)] + suffix
+ break
+
+ return url
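+        # Sketch (hypothetical URL): an installer_url ending in ".tar.bz2" with
+        # suffix ".crashreporter-symbols.zip" maps, e.g.,
+        #   .../firefox-110.0.1.en-US.linux-x86_64.tar.bz2
+        # to
+        #   .../firefox-110.0.1.en-US.linux-x86_64.crashreporter-symbols.zip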
+
+ def query_symbols_url(self, raise_on_failure=False):
+ if self.symbols_url:
+ return self.symbols_url
+
+ elif self.installer_url:
+ symbols_url = self.query_prefixed_build_dir_url(
+ ".crashreporter-symbols.zip"
+ )
+
+            # Check if the URL exists. If not, leave it as None so mozcrash can auto-check for symbols.
+ try:
+ if symbols_url:
+ self._urlopen(symbols_url, timeout=120)
+ self.symbols_url = symbols_url
+ except Exception as ex:
+ self.warning(
+ "Cannot open symbols url %s (installer url: %s): %s"
+ % (symbols_url, self.installer_url, ex)
+ )
+ if raise_on_failure:
+ raise
+
+ # If no symbols URL can be determined let minidump-stackwalk query the symbols.
+ # As of now this only works for Nightly and release builds.
+ if not self.symbols_url:
+ self.warning(
+ "No symbols_url found. Let minidump-stackwalk query for symbols."
+ )
+
+ return self.symbols_url
+
+ def _pre_config_lock(self, rw_config):
+ for i, (target_file, target_dict) in enumerate(
+ rw_config.all_cfg_files_and_dicts
+ ):
+ if "developer_config" in target_file:
+ self._developer_mode_changes(rw_config)
+
+ def _developer_mode_changes(self, rw_config):
+ """This function is called when you append the config called
+ developer_config.py. This allows you to run a job
+ outside of the Release Engineering infrastructure.
+
+        What this function accomplishes is:
+        * --installer-url is set
+        * --test-url is set if needed
+        * every url is substituted by one reachable from outside the
+          Release Engineering network
+ """
+ c = self.config
+ orig_config = copy.deepcopy(c)
+ self.actions = tuple(rw_config.actions)
+
+ def _replace_url(url, changes):
+ for from_, to_ in changes:
+ if url.startswith(from_):
+ new_url = url.replace(from_, to_)
+ self.info("Replacing url %s -> %s" % (url, new_url))
+ return new_url
+ return url
+
+ if c.get("installer_url") is None:
+ self.exception("You must use --installer-url with developer_config.py")
+ if c.get("require_test_zip"):
+ if not c.get("test_url") and not c.get("test_packages_url"):
+ self.exception(
+ "You must use --test-url or --test-packages-url with "
+ "developer_config.py"
+ )
+
+ c["installer_url"] = _replace_url(c["installer_url"], c["replace_urls"])
+ if c.get("test_url"):
+ c["test_url"] = _replace_url(c["test_url"], c["replace_urls"])
+ if c.get("test_packages_url"):
+ c["test_packages_url"] = _replace_url(
+ c["test_packages_url"], c["replace_urls"]
+ )
+
+ for key, value in self.config.items():
+            if isinstance(value, str) and value.startswith("http"):
+ self.config[key] = _replace_url(value, c["replace_urls"])
+
+        # Any change to c means that we need credentials
+        if c != orig_config:
+ get_credentials()
+
+ def _urlopen(self, url, **kwargs):
+ """
+        This function helps with downloading files when running outside
+        of the releng network.
+ """
+ # Code based on http://code.activestate.com/recipes/305288-http-basic-authentication
+ def _urlopen_basic_auth(url, **kwargs):
+ self.info("We want to download this file %s" % url)
+ if not hasattr(self, "https_username"):
+ self.info(
+ "NOTICE: Files downloaded from outside of "
+ "Release Engineering network require LDAP "
+ "credentials."
+ )
+
+ self.https_username, self.https_password = get_credentials()
+ # This creates a password manager
+ passman = urllib.request.HTTPPasswordMgrWithDefaultRealm()
+ # Because we have put None at the start it will use this username/password
+ # combination from here on
+ passman.add_password(None, url, self.https_username, self.https_password)
+ authhandler = urllib.request.HTTPBasicAuthHandler(passman)
+
+ return urllib.request.build_opener(authhandler).open(url, **kwargs)
+
+ # If we have the developer_run flag enabled then we will switch
+ # URLs to the right place and enable http authentication
+ if "developer_config.py" in self.config["config_files"]:
+ return _urlopen_basic_auth(url, **kwargs)
+ else:
+ # windows certificates need to be refreshed (https://bugs.python.org/issue36011)
+ if self.platform_name() in ("win64",) and platform.architecture()[0] in (
+ "x64",
+ ):
+ if self.ssl_context is None:
+ self.ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS)
+ self.ssl_context.load_default_certs()
+ return urllib.request.urlopen(url, context=self.ssl_context, **kwargs)
+ else:
+ return urllib.request.urlopen(url, **kwargs)
+
+ def _query_binary_version(self, regex, cmd):
+ output = self.get_output_from_command(cmd, silent=False)
+ return regex.search(output).group(0)
+
+ def preflight_download_and_extract(self):
+ message = ""
+ if not self.installer_url:
+ message += """installer_url isn't set!
+
+You can set this by specifying --installer-url URL
+"""
+ if (
+ self.config.get("require_test_zip")
+ and not self.test_url
+ and not self.test_packages_url
+ ):
+ message += """test_url isn't set!
+
+You can set this by specifying --test-url URL
+"""
+ if message:
+ self.fatal(message + "Can't run download-and-extract... exiting")
+
+ def _read_packages_manifest(self):
+ dirs = self.query_abs_dirs()
+ source = self.download_file(
+ self.test_packages_url, parent_dir=dirs["abs_work_dir"], error_level=FATAL
+ )
+
+ with self.opened(os.path.realpath(source)) as (fh, err):
+ package_requirements = json.load(fh)
+ if not package_requirements or err:
+ self.fatal(
+ "There was an error reading test package requirements from %s "
+ "requirements: `%s` - error: `%s`"
+ % (source, package_requirements or "None", err or "No error")
+ )
+ return package_requirements
+
+ def _download_test_packages(self, suite_categories, extract_dirs):
+ # Some platforms define more suite categories/names than others.
+        # This reflects a difference in the configs' naming conventions more
+        # than in how the tests are run, so we paper over those differences here.
+ aliases = {
+ "mochitest-chrome": "mochitest",
+ "mochitest-media": "mochitest",
+ "mochitest-plain": "mochitest",
+ "mochitest-plain-gpu": "mochitest",
+ "mochitest-webgl1-core": "mochitest",
+ "mochitest-webgl1-ext": "mochitest",
+ "mochitest-webgl2-core": "mochitest",
+ "mochitest-webgl2-ext": "mochitest",
+ "mochitest-webgl2-deqp": "mochitest",
+ "mochitest-webgpu": "mochitest",
+ "geckoview": "mochitest",
+ "geckoview-junit": "mochitest",
+ "reftest-qr": "reftest",
+ "crashtest": "reftest",
+ "crashtest-qr": "reftest",
+ "reftest-debug": "reftest",
+ "crashtest-debug": "reftest",
+ }
+ suite_categories = [aliases.get(name, name) for name in suite_categories]
+
+ dirs = self.query_abs_dirs()
+ test_install_dir = dirs.get(
+ "abs_test_install_dir", os.path.join(dirs["abs_work_dir"], "tests")
+ )
+ self.mkdir_p(test_install_dir)
+ package_requirements = self._read_packages_manifest()
+ target_packages = []
+ c = self.config
+ for category in suite_categories:
+ specified_suites = c.get("specified_{}_suites".format(category))
+ if specified_suites:
+ found = False
+ for specified_suite in specified_suites:
+ if specified_suite in package_requirements:
+ target_packages.extend(package_requirements[specified_suite])
+ found = True
+ if found:
+ continue
+
+ if category in package_requirements:
+ target_packages.extend(package_requirements[category])
+ else:
+                # If we don't have harness-specific requirements, assume the common zip
+ # has everything we need to run tests for this suite.
+ target_packages.extend(package_requirements["common"])
+
+ # eliminate duplicates -- no need to download anything twice
+ target_packages = list(set(target_packages))
+ self.info(
+ "Downloading packages: %s for test suite categories: %s"
+ % (target_packages, suite_categories)
+ )
+ for file_name in target_packages:
+ target_dir = test_install_dir
+ unpack_dirs = extract_dirs
+
+ if "common.tests" in file_name and isinstance(unpack_dirs, list):
+ # Ensure that the following files are always getting extracted
+ required_files = [
+ "mach",
+ "mozinfo.json",
+ ]
+ for req_file in required_files:
+ if req_file not in unpack_dirs:
+ self.info(
+ "Adding '{}' for extraction from common.tests archive".format(
+ req_file
+ )
+ )
+ unpack_dirs.append(req_file)
+
+ if "jsshell-" in file_name or file_name == "target.jsshell.zip":
+ self.info("Special-casing the jsshell zip file")
+ unpack_dirs = None
+ target_dir = dirs["abs_test_bin_dir"]
+
+ if "web-platform" in file_name:
+ self.info("Extracting everything from web-platform archive")
+ unpack_dirs = None
+
+ url = self.query_build_dir_url(file_name)
+ self.download_unpack(url, target_dir, extract_dirs=unpack_dirs)
+
+ def _download_test_zip(self, extract_dirs=None):
+ dirs = self.query_abs_dirs()
+ test_install_dir = dirs.get(
+ "abs_test_install_dir", os.path.join(dirs["abs_work_dir"], "tests")
+ )
+ self.download_unpack(self.test_url, test_install_dir, extract_dirs=extract_dirs)
+
+ def structured_output(self, suite_category):
+ """Defines whether structured logging is in use in this configuration. This
+ may need to be replaced with data from a different config at the resolution
+ of bug 1070041 and related bugs.
+ """
+ return (
+ "structured_suites" in self.config
+ and suite_category in self.config["structured_suites"]
+ )
+
+ def get_test_output_parser(
+ self,
+ suite_category,
+ strict=False,
+ fallback_parser_class=DesktopUnittestOutputParser,
+ **kwargs
+ ):
+ """Derive and return an appropriate output parser, either the structured
+ output parser or a fallback based on the type of logging in use as determined by
+ configuration.
+ """
+ if not self.structured_output(suite_category):
+ if fallback_parser_class is DesktopUnittestOutputParser:
+ return DesktopUnittestOutputParser(
+ suite_category=suite_category, **kwargs
+ )
+ return fallback_parser_class(**kwargs)
+ self.info("Structured output parser in use for %s." % suite_category)
+ return StructuredOutputParser(
+ suite_category=suite_category, strict=strict, **kwargs
+ )
+
+ def _download_installer(self):
+ file_name = None
+ if self.installer_path:
+ file_name = self.installer_path
+ dirs = self.query_abs_dirs()
+ source = self.download_file(
+ self.installer_url,
+ file_name=file_name,
+ parent_dir=dirs["abs_work_dir"],
+ error_level=FATAL,
+ )
+ self.installer_path = os.path.realpath(source)
+
+ def _download_and_extract_symbols(self):
+ dirs = self.query_abs_dirs()
+ if self.config.get("download_symbols") == "ondemand":
+ self.symbols_url = self.retry(
+ action=self.query_symbols_url,
+ kwargs={"raise_on_failure": True},
+ sleeptime=10,
+ failure_status=None,
+ )
+ self.symbols_path = self.symbols_url
+ return
+
+ else:
+            # In the 'ondemand' case we're OK to proceed without getting hold of the
+            # symbols right this moment; however, in other cases we need to at least retry
+            # before failing outright (e.g. debug tests need symbols)
+ self.symbols_url = self.retry(
+ action=self.query_symbols_url,
+ kwargs={"raise_on_failure": True},
+ sleeptime=20,
+ error_level=FATAL,
+ error_message="We can't proceed without downloading symbols.",
+ )
+ if not self.symbols_path:
+ self.symbols_path = os.path.join(dirs["abs_work_dir"], "symbols")
+
+ if self.symbols_url:
+ self.download_unpack(self.symbols_url, self.symbols_path)
+
+ def download_and_extract(self, extract_dirs=None, suite_categories=None):
+ """
+ download and extract test zip / download installer
+ """
+ # Swap plain http for https when we're downloading from ftp
+ # See bug 957502 and friends
+ from_ = "http://ftp.mozilla.org"
+ to_ = "https://ftp-ssl.mozilla.org"
+ for attr in "symbols_url", "installer_url", "test_packages_url", "test_url":
+ url = getattr(self, attr)
+ if url and url.startswith(from_):
+ new_url = url.replace(from_, to_)
+ self.info("Replacing url %s -> %s" % (url, new_url))
+ setattr(self, attr, new_url)
+
+ if "test_url" in self.config:
+ # A user has specified a test_url directly, any test_packages_url will
+ # be ignored.
+ if self.test_packages_url:
+ self.error(
+                    'Test data will be downloaded from "%s"; the specified test '
+                    'package data at "%s" will be ignored.'
+ % (self.config.get("test_url"), self.test_packages_url)
+ )
+
+ self._download_test_zip(extract_dirs)
+ else:
+ if not self.test_packages_url:
+ # The caller intends to download harness specific packages, but doesn't know
+ # where the packages manifest is located. This is the case when the
+ # test package manifest isn't set as a property, which is true
+ # for some self-serve jobs and platforms using parse_make_upload.
+ self.test_packages_url = self.query_prefixed_build_dir_url(
+ ".test_packages.json"
+ )
+
+ suite_categories = suite_categories or ["common"]
+ self._download_test_packages(suite_categories, extract_dirs)
+
+ self._download_installer()
+ if self.config.get("download_symbols"):
+ self._download_and_extract_symbols()
+
+ # create_virtualenv is in VirtualenvMixin.
+
+ def preflight_install(self):
+ if not self.installer_path:
+ if self.config.get("installer_path"):
+ self.installer_path = self.config["installer_path"]
+ else:
+ self.fatal(
+ """installer_path isn't set!
+
+You can set this by:
+
+1. specifying --installer-path PATH, or
+2. running the download-and-extract action
+"""
+ )
+ if not self.is_python_package_installed("mozInstall"):
+ self.fatal(
+ """Can't call install() without mozinstall!
+Did you run with --create-virtualenv? Is mozinstall in virtualenv_modules?"""
+ )
+
+ def install_app(self, app=None, target_dir=None, installer_path=None):
+ """Dependent on mozinstall"""
+ # install the application
+ cmd = [self.query_python_path("mozinstall")]
+ if app:
+ cmd.extend(["--app", app])
+ # Remove the below when we no longer need to support mozinstall 0.3
+ self.info("Detecting whether we're running mozinstall >=1.0...")
+ output = self.get_output_from_command(cmd + ["-h"])
+ if "--source" in output:
+ cmd.append("--source")
+ # End remove
+ dirs = self.query_abs_dirs()
+ if not target_dir:
+ target_dir = dirs.get(
+ "abs_app_install_dir", os.path.join(dirs["abs_work_dir"], "application")
+ )
+ self.mkdir_p(target_dir)
+ if not installer_path:
+ installer_path = self.installer_path
+ cmd.extend([installer_path, "--destination", target_dir])
+ # TODO we'll need some error checking here
+ return self.get_output_from_command(
+ cmd, halt_on_failure=True, fatal_exit_code=3
+ )
+
+ def install(self):
+ self.binary_path = self.install_app(app=self.config.get("application"))
+ self.install_dir = os.path.dirname(self.binary_path)
+
+ def uninstall_app(self, install_dir=None):
+ """Dependent on mozinstall"""
+ # uninstall the application
+ cmd = self.query_exe(
+ "mozuninstall",
+ default=self.query_python_path("mozuninstall"),
+ return_type="list",
+ )
+ dirs = self.query_abs_dirs()
+ if not install_dir:
+ install_dir = dirs.get(
+ "abs_app_install_dir", os.path.join(dirs["abs_work_dir"], "application")
+ )
+ cmd.append(install_dir)
+ # TODO we'll need some error checking here
+ self.get_output_from_command(cmd, halt_on_failure=True, fatal_exit_code=3)
+
+ def uninstall(self):
+ self.uninstall_app()
+
+ def query_minidump_stackwalk(self, manifest=None):
+ if self.minidump_stackwalk_path:
+ return self.minidump_stackwalk_path
+
+ minidump_stackwalk_path = None
+
+ if "MOZ_FETCHES_DIR" in os.environ:
+ minidump_stackwalk_path = os.path.join(
+ os.environ["MOZ_FETCHES_DIR"],
+ "minidump-stackwalk",
+ "minidump-stackwalk",
+ )
+
+ if self.platform_name() in ("win32", "win64"):
+ minidump_stackwalk_path += ".exe"
+
+ if not minidump_stackwalk_path or not os.path.isfile(minidump_stackwalk_path):
+ self.error("minidump-stackwalk path was not fetched?")
+ # don't burn the job but we should at least turn them orange so it is caught
+ self.record_status(TBPL_WARNING, WARNING)
+ return None
+
+ self.minidump_stackwalk_path = minidump_stackwalk_path
+ return self.minidump_stackwalk_path
+
+ def query_options(self, *args, **kwargs):
+ if "str_format_values" in kwargs:
+ str_format_values = kwargs.pop("str_format_values")
+ else:
+ str_format_values = {}
+
+ arguments = []
+
+ for arg in args:
+ if arg is not None:
+ arguments.extend(argument % str_format_values for argument in arg)
+
+ return arguments
+
+ def query_tests_args(self, *args, **kwargs):
+ if "str_format_values" in kwargs:
+ str_format_values = kwargs.pop("str_format_values")
+ else:
+ str_format_values = {}
+
+ arguments = []
+
+ for arg in reversed(args):
+ if arg:
+ arguments.append("--")
+ arguments.extend(argument % str_format_values for argument in arg)
+ break
+
+ return arguments
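+
+    # A minimal illustration (not from the original) of the two helpers above:
+    #   query_options(["--foo=%(bar)s"], str_format_values={"bar": "baz"})
+    #       -> ["--foo=baz"]
+    #   query_tests_args(["a"], ["b", "c"]) -> ["--", "b", "c"]
+    #   (only the last non-empty argument list is forwarded, prefixed by "--")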
+
+ def _run_cmd_checks(self, suites):
+ if not suites:
+ return
+ dirs = self.query_abs_dirs()
+ for suite in suites:
+            # XXX platform.architecture() may give incorrect values on some
+            # platforms, e.g. mac, where executables may be universal
+            # files containing multiple architectures
+ # NOTE 'enabled' is only here while we have unconsolidated configs
+ if not suite["enabled"]:
+ continue
+ if suite.get("architectures"):
+ arch = platform.architecture()[0]
+ if arch not in suite["architectures"]:
+ continue
+ cmd = suite["cmd"]
+ name = suite["name"]
+ self.info(
+ "Running pre test command %(name)s with '%(cmd)s'"
+ % {"name": name, "cmd": " ".join(cmd)}
+ )
+ self.run_command(
+ cmd,
+ cwd=dirs["abs_work_dir"],
+ error_list=BaseErrorList,
+ halt_on_failure=suite["halt_on_failure"],
+ fatal_exit_code=suite.get("fatal_exit_code", 3),
+ )
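+
+    # A hypothetical preflight_run_cmd_suites entry (invented for
+    # illustration), showing the keys _run_cmd_checks expects:
+    #   {
+    #       "name": "disable_screen_saver",
+    #       "cmd": ["xset", "s", "off", "s", "reset"],
+    #       "architectures": ["32bit", "64bit"],  # matched via platform.architecture()
+    #       "halt_on_failure": False,
+    #       "enabled": True,
+    #   }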
+
+ def preflight_run_tests(self):
+ """preflight commands for all tests"""
+ c = self.config
+ if c.get("skip_preflight"):
+ self.info("skipping preflight")
+ return
+
+ if c.get("run_cmd_checks_enabled"):
+ self._run_cmd_checks(c.get("preflight_run_cmd_suites", []))
+ elif c.get("preflight_run_cmd_suites"):
+ self.warning(
+ "Proceeding without running prerun test commands."
+ " These are often OS specific and disabling them may"
+ " result in spurious test results!"
+ )
+
+ def postflight_run_tests(self):
+ """preflight commands for all tests"""
+ c = self.config
+ if c.get("run_cmd_checks_enabled"):
+ self._run_cmd_checks(c.get("postflight_run_cmd_suites", []))
+
+ def query_abs_dirs(self):
+ abs_dirs = super(TestingMixin, self).query_abs_dirs()
+ if "MOZ_FETCHES_DIR" in os.environ:
+ abs_dirs["abs_fetches_dir"] = os.environ["MOZ_FETCHES_DIR"]
+ return abs_dirs
diff --git a/testing/mozharness/mozharness/mozilla/testing/try_tools.py b/testing/mozharness/mozharness/mozilla/testing/try_tools.py
new file mode 100644
index 0000000000..463bd1ad9c
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/try_tools.py
@@ -0,0 +1,245 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import argparse
+import os
+import re
+from collections import defaultdict
+
+import six
+from mozharness.base.script import PostScriptAction
+from mozharness.base.transfer import TransferMixin
+
+try_config_options = [
+ [
+ ["--try-message"],
+ {
+ "action": "store",
+ "dest": "try_message",
+ "default": None,
+ "help": "try syntax string to select tests to run",
+ },
+ ],
+]
+
+test_flavors = {
+ "browser-chrome": {},
+ "browser-a11y": {},
+ "browser-media": {},
+ "chrome": {},
+ "devtools-chrome": {},
+ "mochitest": {},
+ "xpcshell": {},
+ "reftest": {"path": lambda x: os.path.join("tests", "reftest", "tests", x)},
+ "crashtest": {"path": lambda x: os.path.join("tests", "reftest", "tests", x)},
+ "remote": {"path": lambda x: os.path.join("remote", "test", "browser", x)},
+ "web-platform-tests": {
+ "path": lambda x: os.path.join("tests", x.split("testing" + os.path.sep)[1])
+ },
+ "web-platform-tests-reftests": {
+ "path": lambda x: os.path.join("tests", x.split("testing" + os.path.sep)[1])
+ },
+ "web-platform-tests-wdspec": {
+ "path": lambda x: os.path.join("tests", x.split("testing" + os.path.sep)[1])
+ },
+}
+
+
+class TryToolsMixin(TransferMixin):
+ """Utility functions for an interface between try syntax and out test harnesses.
+ Requires log and script mixins."""
+
+ harness_extra_args = None
+ try_test_paths = {}
+ known_try_arguments = {
+ "--tag": (
+ {
+ "action": "append",
+ "dest": "tags",
+ "default": None,
+ },
+ (
+ "browser-chrome",
+ "browser-a11y",
+ "browser-media",
+ "chrome",
+ "devtools-chrome",
+ "marionette",
+ "mochitest",
+ "web-plaftform-tests",
+ "xpcshell",
+ ),
+ ),
+ }
+
+ def _extract_try_message(self):
+ msg = None
+ if "try_message" in self.config and self.config["try_message"]:
+ msg = self.config["try_message"]
+ elif "TRY_COMMIT_MSG" in os.environ:
+ msg = os.environ["TRY_COMMIT_MSG"]
+
+ if not msg:
+ self.warning("Try message not found.")
+ return msg
+
+ def _extract_try_args(self, msg):
+ """Returns a list of args from a try message, for parsing"""
+ if not msg:
+ return None
+ all_try_args = None
+ for line in msg.splitlines():
+ if "try: " in line:
+ # Autoland adds quotes to try strings that will confuse our
+ # args later on.
+ if line.startswith('"') and line.endswith('"'):
+ line = line[1:-1]
+ # Allow spaces inside of [filter expressions]
+ try_message = line.strip().split("try: ", 1)
+ all_try_args = re.findall(r"(?:\[.*?\]|\S)+", try_message[1])
+ break
+ if not all_try_args:
+ self.warning("Try syntax not found in: %s." % msg)
+ return all_try_args
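+
+    # Illustrative example (not from the original): for a commit message line
+    #   try: -b o -p linux64 -u mochitest-1 --tag foo
+    # this returns ["-b", "o", "-p", "linux64", "-u", "mochitest-1",
+    # "--tag", "foo"], keeping any [filter expressions] as single tokens.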
+
+ def try_message_has_flag(self, flag, message=None):
+ """
+ Returns True if --`flag` is present in message.
+ """
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--" + flag, action="store_true")
+ message = message or self._extract_try_message()
+ if not message:
+ return False
+ msg_list = self._extract_try_args(message)
+ args, _ = parser.parse_known_args(msg_list)
+ return getattr(args, flag, False)
+
+ def _is_try(self):
+ repo_path = None
+ get_branch = self.config.get("branch", repo_path)
+ if get_branch is not None:
+ on_try = "try" in get_branch or "Try" in get_branch
+ elif os.environ is not None:
+ on_try = "TRY_COMMIT_MSG" in os.environ
+ else:
+ on_try = False
+ return on_try
+
+ @PostScriptAction("download-and-extract")
+ def set_extra_try_arguments(self, action, success=None):
+ """Finds a commit message and parses it for extra arguments to pass to the test
+ harness command line and test paths used to filter manifests.
+
+        The logic for extracting arguments from a commit message is taken
+        directly from the try_parser.
+ """
+ if not self._is_try():
+ return
+
+ msg = self._extract_try_message()
+ if not msg:
+ return
+
+ all_try_args = self._extract_try_args(msg)
+ if not all_try_args:
+ return
+
+ parser = argparse.ArgumentParser(
+ description=(
+ "Parse an additional subset of arguments passed to try syntax"
+ " and forward them to the underlying test harness command."
+ )
+ )
+
+ label_dict = {}
+
+ def label_from_val(val):
+ if val in label_dict:
+ return label_dict[val]
+ return "--%s" % val.replace("_", "-")
+
+ for label, (opts, _) in six.iteritems(self.known_try_arguments):
+ if "action" in opts and opts["action"] not in (
+ "append",
+ "store",
+ "store_true",
+ "store_false",
+ ):
+ self.fatal(
+ "Try syntax does not support passing custom or store_const "
+ "arguments to the harness process."
+ )
+ if "dest" in opts:
+ label_dict[opts["dest"]] = label
+
+ parser.add_argument(label, **opts)
+
+ parser.add_argument("--try-test-paths", nargs="*")
+ (args, _) = parser.parse_known_args(all_try_args)
+ self.try_test_paths = self._group_test_paths(args.try_test_paths)
+ del args.try_test_paths
+
+ out_args = defaultdict(list)
+ # This is a pretty hacky way to echo arguments down to the harness.
+ # Hopefully this can be improved once we have a configuration system
+ # in tree for harnesses that relies less on a command line.
+ for arg, value in six.iteritems(vars(args)):
+ if value:
+ label = label_from_val(arg)
+ _, flavors = self.known_try_arguments[label]
+
+ for f in flavors:
+ if isinstance(value, bool):
+ # A store_true or store_false argument.
+ out_args[f].append(label)
+ elif isinstance(value, list):
+ out_args[f].extend(["%s=%s" % (label, el) for el in value])
+ else:
+ out_args[f].append("%s=%s" % (label, value))
+
+ self.harness_extra_args = dict(out_args)
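+        # A hedged sketch of the resulting structure: passing "--tag foo"
+        # through try syntax would yield something like
+        #   {"mochitest": ["--tag=foo"], "xpcshell": ["--tag=foo"], ...}
+        # i.e. one list of harness arguments per supported test flavor.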
+
+ def _group_test_paths(self, args):
+ rv = defaultdict(list)
+
+ if args is None:
+ return rv
+
+ for item in args:
+ suite, path = item.split(":", 1)
+ rv[suite].append(path)
+ return rv
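+
+    # For example (illustrative):
+    #   _group_test_paths(["mochitest:dom/base/test", "xpcshell:netwerk/test"])
+    #   -> {"mochitest": ["dom/base/test"], "xpcshell": ["netwerk/test"]}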
+
+ def try_args(self, flavor):
+ """Get arguments, test_list derived from try syntax to apply to a command"""
+ args = []
+ if self.harness_extra_args:
+ args = self.harness_extra_args.get(flavor, [])[:]
+
+ if self.try_test_paths.get(flavor):
+ self.info(
+ "TinderboxPrint: Tests will be run from the following "
+ "files: %s." % ",".join(self.try_test_paths[flavor])
+ )
+ args.extend(["--this-chunk=1", "--total-chunks=1"])
+
+ path_func = test_flavors[flavor].get("path", lambda x: x)
+ tests = [
+ path_func(os.path.normpath(item))
+ for item in self.try_test_paths[flavor]
+ ]
+ else:
+ tests = []
+
+ if args or tests:
+ self.info(
+ "TinderboxPrint: The following arguments were forwarded from mozharness "
+ "to the test command:\nTinderboxPrint: \t%s -- %s"
+ % (" ".join(args), " ".join(tests))
+ )
+
+ return args, tests
diff --git a/testing/mozharness/mozharness/mozilla/testing/unittest.py b/testing/mozharness/mozharness/mozilla/testing/unittest.py
new file mode 100755
index 0000000000..be144bbe1f
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/unittest.py
@@ -0,0 +1,255 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import os
+import re
+
+from mozharness.base.log import CRITICAL, ERROR, INFO, WARNING, OutputParser
+from mozharness.mozilla.automation import (
+ TBPL_FAILURE,
+ TBPL_RETRY,
+ TBPL_SUCCESS,
+ TBPL_WARNING,
+ TBPL_WORST_LEVEL_TUPLE,
+)
+from mozharness.mozilla.testing.errors import TinderBoxPrintRe
+
+SUITE_CATEGORIES = ["mochitest", "reftest", "xpcshell"]
+
+
+def tbox_print_summary(
+ pass_count, fail_count, known_fail_count=None, crashed=False, leaked=False
+):
+ emphasize_fail_text = '<em class="testfail">%s</em>'
+
+ if (
+ pass_count < 0
+ or fail_count < 0
+ or (known_fail_count is not None and known_fail_count < 0)
+ ):
+ summary = emphasize_fail_text % "T-FAIL"
+ elif (
+ pass_count == 0
+ and fail_count == 0
+ and (known_fail_count == 0 or known_fail_count is None)
+ ):
+ summary = emphasize_fail_text % "T-FAIL"
+ else:
+ str_fail_count = str(fail_count)
+ if fail_count > 0:
+ str_fail_count = emphasize_fail_text % str_fail_count
+ summary = "%d/%s" % (pass_count, str_fail_count)
+ if known_fail_count is not None:
+ summary += "/%d" % known_fail_count
+ # Format the crash status.
+ if crashed:
+ summary += "&nbsp;%s" % emphasize_fail_text % "CRASH"
+ # Format the leak status.
+ if leaked is not False:
+ summary += "&nbsp;%s" % emphasize_fail_text % ((leaked and "LEAK") or "L-FAIL")
+ return summary
+
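+# Example renderings (illustrative, not from the original):
+#   tbox_print_summary(100, 2, 5) -> '100/<em class="testfail">2</em>/5'
+#   crashed=True appends '&nbsp;<em class="testfail">CRASH</em>'
+#   leaked=None renders L-FAIL, leaked=True renders LEAK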
+
+class TestSummaryOutputParserHelper(OutputParser):
+ def __init__(self, regex=re.compile(r"(passed|failed|todo): (\d+)"), **kwargs):
+ self.regex = regex
+ self.failed = 0
+ self.passed = 0
+ self.todo = 0
+ self.last_line = None
+ self.tbpl_status = TBPL_SUCCESS
+ self.worst_log_level = INFO
+ super(TestSummaryOutputParserHelper, self).__init__(**kwargs)
+
+ def parse_single_line(self, line):
+ super(TestSummaryOutputParserHelper, self).parse_single_line(line)
+ self.last_line = line
+ m = self.regex.search(line)
+ if m:
+ try:
+ setattr(self, m.group(1), int(m.group(2)))
+ except ValueError:
+ # ignore bad values
+ pass
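+        # Illustrative: lines such as "passed: 30", "failed: 2" or "todo: 5"
+        # set self.passed, self.failed and self.todo respectively.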
+
+ def evaluate_parser(self, return_code, success_codes=None, previous_summary=None):
+ # TestSummaryOutputParserHelper is for Marionette, which doesn't support test-verify
+ # When it does we can reset the internal state variables as needed
+ joined_summary = previous_summary
+
+ if return_code == 0 and self.passed > 0 and self.failed == 0:
+ self.tbpl_status = TBPL_SUCCESS
+ elif return_code == 10 and self.failed > 0:
+ self.tbpl_status = TBPL_WARNING
+ else:
+ self.tbpl_status = TBPL_FAILURE
+ self.worst_log_level = ERROR
+
+ return (self.tbpl_status, self.worst_log_level, joined_summary)
+
+ def print_summary(self, suite_name):
+ # generate the TinderboxPrint line for TBPL
+ emphasize_fail_text = '<em class="testfail">%s</em>'
+ failed = "0"
+ if self.passed == 0 and self.failed == 0:
+ self.tsummary = emphasize_fail_text % "T-FAIL"
+ else:
+ if self.failed > 0:
+ failed = emphasize_fail_text % str(self.failed)
+ self.tsummary = "%d/%s/%d" % (self.passed, failed, self.todo)
+
+ self.info("TinderboxPrint: %s<br/>%s\n" % (suite_name, self.tsummary))
+
+ def append_tinderboxprint_line(self, suite_name):
+ self.print_summary(suite_name)
+
+
+class DesktopUnittestOutputParser(OutputParser):
+ """
+ A class that extends OutputParser such that it can parse the number of
+ passed/failed/todo tests from the output.
+ """
+
+ def __init__(self, suite_category, **kwargs):
+ # worst_log_level defined already in DesktopUnittestOutputParser
+ # but is here to make pylint happy
+ self.worst_log_level = INFO
+ super(DesktopUnittestOutputParser, self).__init__(**kwargs)
+ self.summary_suite_re = TinderBoxPrintRe.get("%s_summary" % suite_category, {})
+ self.harness_error_re = TinderBoxPrintRe["harness_error"]["minimum_regex"]
+ self.full_harness_error_re = TinderBoxPrintRe["harness_error"]["full_regex"]
+ self.harness_retry_re = TinderBoxPrintRe["harness_error"]["retry_regex"]
+ self.fail_count = -1
+ self.pass_count = -1
+ # known_fail_count does not exist for some suites
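+        # (the "and -1" below yields -1 when a known_fail_group is defined
+        # and None otherwise)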
+ self.known_fail_count = self.summary_suite_re.get("known_fail_group") and -1
+ self.crashed, self.leaked = False, False
+ self.tbpl_status = TBPL_SUCCESS
+
+ def parse_single_line(self, line):
+ if self.summary_suite_re:
+ summary_m = self.summary_suite_re["regex"].match(line) # pass/fail/todo
+ if summary_m:
+ message = " %s" % line
+ log_level = INFO
+ # remove all the none values in groups() so this will work
+ # with all suites including mochitest browser-chrome
+ summary_match_list = [
+ group for group in summary_m.groups() if group is not None
+ ]
+ r = summary_match_list[0]
+ if self.summary_suite_re["pass_group"] in r:
+ if len(summary_match_list) > 1:
+ self.pass_count = int(summary_match_list[-1])
+ else:
+ # This handles suites that either pass or report
+ # number of failures. We need to set both
+ # pass and fail count in the pass case.
+ self.pass_count = 1
+ self.fail_count = 0
+ elif self.summary_suite_re["fail_group"] in r:
+ self.fail_count = int(summary_match_list[-1])
+ if self.fail_count > 0:
+ message += "\n One or more unittests failed."
+ log_level = WARNING
+                # If self.summary_suite_re['known_fail_group'] is None,
+                # then r should not match it, so this test is fine as is.
+ elif self.summary_suite_re["known_fail_group"] in r:
+ self.known_fail_count = int(summary_match_list[-1])
+ self.log(message, log_level)
+ return # skip harness check and base parse_single_line
+ harness_match = self.harness_error_re.search(line)
+ if harness_match:
+ self.warning(" %s" % line)
+ self.worst_log_level = self.worst_level(WARNING, self.worst_log_level)
+ self.tbpl_status = self.worst_level(
+ TBPL_WARNING, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+ full_harness_match = self.full_harness_error_re.search(line)
+ if full_harness_match:
+ r = full_harness_match.group(1)
+ if r == "application crashed":
+ self.crashed = True
+ elif r == "missing output line for total leaks!":
+ self.leaked = None
+ else:
+ self.leaked = True
+ return # skip base parse_single_line
+ if self.harness_retry_re.search(line):
+ self.critical(" %s" % line)
+ self.worst_log_level = self.worst_level(CRITICAL, self.worst_log_level)
+ self.tbpl_status = self.worst_level(
+ TBPL_RETRY, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+ return # skip base parse_single_line
+ super(DesktopUnittestOutputParser, self).parse_single_line(line)
+
+ def evaluate_parser(self, return_code, success_codes=None, previous_summary=None):
+ success_codes = success_codes or [0]
+
+ if self.num_errors: # mozharness ran into a script error
+ self.tbpl_status = self.worst_level(
+ TBPL_FAILURE, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+
+ """
+ We can run evaluate_parser multiple times, it will duplicate failures
+ and status which can mean that future tests will fail if a previous test fails.
+ When we have a previous summary, we want to do:
+ 1) reset state so we only evaluate the current results
+ """
+ joined_summary = {"pass_count": self.pass_count}
+ if previous_summary:
+ self.tbpl_status = TBPL_SUCCESS
+ self.worst_log_level = INFO
+ self.crashed = False
+ self.leaked = False
+
+        # This check lives outside of parse_single_line because it tests not
+        # only whether fail_count is more than 0 but also whether it is still -1
+        # (no fail summary line was found).
+ if self.fail_count != 0:
+ self.worst_log_level = self.worst_level(WARNING, self.worst_log_level)
+ self.tbpl_status = self.worst_level(
+ TBPL_WARNING, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+
+ # Account for the possibility that no test summary was output.
+ if (
+ self.pass_count <= 0
+ and self.fail_count <= 0
+ and (self.known_fail_count is None or self.known_fail_count <= 0)
+ and os.environ.get("TRY_SELECTOR") != "coverage"
+ ):
+ self.error("No tests run or test summary not found")
+ self.worst_log_level = self.worst_level(WARNING, self.worst_log_level)
+ self.tbpl_status = self.worst_level(
+ TBPL_WARNING, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+
+ if return_code not in success_codes:
+ self.tbpl_status = self.worst_level(
+ TBPL_FAILURE, self.tbpl_status, levels=TBPL_WORST_LEVEL_TUPLE
+ )
+
+ # we can trust in parser.worst_log_level in either case
+ return (self.tbpl_status, self.worst_log_level, joined_summary)
+
+ def append_tinderboxprint_line(self, suite_name):
+        # We duplicate a condition (fail_count) from evaluate_parser and
+        # parse_single_line, but at little cost since we are not parsing
+        # the log more than once. This method stays isolated because it only
+        # produces the tbpl highlighted summary and is not part of the
+        # result status.
+ summary = tbox_print_summary(
+ self.pass_count,
+ self.fail_count,
+ self.known_fail_count,
+ self.crashed,
+ self.leaked,
+ )
+ self.info("TinderboxPrint: %s<br/>%s\n" % (suite_name, summary))
diff --git a/testing/mozharness/mozharness/mozilla/testing/verify_tools.py b/testing/mozharness/mozharness/mozilla/testing/verify_tools.py
new file mode 100644
index 0000000000..3cf19351c5
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/testing/verify_tools.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+from mozharness.base.script import PostScriptAction
+from mozharness.mozilla.testing.per_test_base import SingleTestMixin
+
+verify_config_options = [
+ [
+ ["--verify"],
+ {
+ "action": "store_true",
+ "dest": "verify",
+ "default": False,
+ "help": "Run additional verification on modified tests.",
+ },
+ ],
+]
+
+
+class VerifyToolsMixin(SingleTestMixin):
+ """Utility functions for test verification."""
+
+ def __init__(self):
+ super(VerifyToolsMixin, self).__init__()
+
+ @property
+ def verify_enabled(self):
+ try:
+ return bool(self.config.get("verify"))
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ @PostScriptAction("download-and-extract")
+ def find_tests_for_verification(self, action, success=None):
+ """
+ For each file modified on this push, determine if the modified file
+ is a test, by searching test manifests. Populate self.verify_suites
+ with test files, organized by suite.
+
+ This depends on test manifests, so can only run after test zips have
+ been downloaded and extracted.
+ """
+
+ if not self.verify_enabled:
+ return
+
+ self.find_modified_tests()
+
+ @property
+ def verify_args(self):
+ if not self.verify_enabled:
+ return []
+
+ # Limit each test harness run to 15 minutes, to avoid task timeouts
+ # when executing long-running tests.
+ MAX_TIME_PER_TEST = 900
+
+ if self.config.get("per_test_category") == "web-platform":
+ args = ["--verify-log-full"]
+ else:
+ args = ["--verify-max-time=%d" % MAX_TIME_PER_TEST]
+
+ args.append("--verify")
+
+ return args
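+
+    # Illustrative values (not from the original): for per_test_category
+    # "web-platform", verify_args is ["--verify-log-full", "--verify"];
+    # otherwise it is ["--verify-max-time=900", "--verify"].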
diff --git a/testing/mozharness/mozharness/mozilla/tooltool.py b/testing/mozharness/mozharness/mozilla/tooltool.py
new file mode 100644
index 0000000000..db43071e50
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/tooltool.py
@@ -0,0 +1,86 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""module for tooltool operations"""
+import os
+import sys
+
+from mozharness.base.errors import PythonErrorList
+from mozharness.base.log import ERROR, FATAL
+
+TooltoolErrorList = PythonErrorList + [{"substr": "ERROR - ", "level": ERROR}]
+
+
+_here = os.path.abspath(os.path.dirname(__file__))
+_external_tools_path = os.path.normpath(
+ os.path.join(_here, "..", "..", "external_tools")
+)
+
+
+class TooltoolMixin(object):
+ """Mixin class for handling tooltool manifests.
+ To use a tooltool server other than the Mozilla server, set
+ TOOLTOOL_HOST in the environment.
+ """
+
+ def tooltool_fetch(self, manifest, output_dir=None, privileged=False, cache=None):
+ """docstring for tooltool_fetch"""
+ if cache is None:
+ cache = os.environ.get("TOOLTOOL_CACHE")
+
+ for d in (output_dir, cache):
+ if d is not None and not os.path.exists(d):
+ self.mkdir_p(d)
+ if self.topsrcdir:
+ cmd = [
+ sys.executable,
+ "-u",
+ os.path.join(self.topsrcdir, "mach"),
+ "artifact",
+ "toolchain",
+ "-v",
+ ]
+ else:
+ cmd = [
+ sys.executable,
+ "-u",
+ os.path.join(_external_tools_path, "tooltool.py"),
+ ]
+
+ if self.topsrcdir:
+ cmd.extend(["--tooltool-manifest", manifest])
+ cmd.extend(
+ ["--artifact-manifest", os.path.join(self.topsrcdir, "toolchains.json")]
+ )
+ else:
+ cmd.extend(["fetch", "-m", manifest, "-o"])
+
+ if cache:
+ cmd.extend(["--cache-dir" if self.topsrcdir else "-c", cache])
+
+ timeout = self.config.get("tooltool_timeout", 10 * 60)
+
+ self.retry(
+ self.run_command,
+ args=(cmd,),
+ kwargs={
+ "cwd": output_dir,
+ "error_list": TooltoolErrorList,
+ "privileged": privileged,
+ "output_timeout": timeout,
+ },
+ good_statuses=(0,),
+ error_message="Tooltool %s fetch failed!" % manifest,
+ error_level=FATAL,
+ )
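+        # A hedged sketch of the two command shapes built above (paths
+        # illustrative):
+        #   with topsrcdir:
+        #     python -u <srcdir>/mach artifact toolchain -v \
+        #         --tooltool-manifest <manifest> \
+        #         --artifact-manifest <srcdir>/toolchains.json [--cache-dir <cache>]
+        #   without topsrcdir:
+        #     python -u .../external_tools/tooltool.py fetch -m <manifest> -o [-c <cache>]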
+
+ def create_tooltool_manifest(self, contents, path=None):
+ """Currently just creates a manifest, given the contents.
+ We may want a template and individual values in the future?
+ """
+ if path is None:
+ dirs = self.query_abs_dirs()
+ path = os.path.join(dirs["abs_work_dir"], "tooltool.tt")
+ self.write_to_file(path, contents, error_level=FATAL)
+ return path
diff --git a/testing/mozharness/mozharness/mozilla/vcstools.py b/testing/mozharness/mozharness/mozilla/vcstools.py
new file mode 100644
index 0000000000..974923b6ec
--- /dev/null
+++ b/testing/mozharness/mozharness/mozilla/vcstools.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""vcstools.py
+
+Author: Armen Zambrano G.
+"""
+import os
+
+from mozharness.base.script import PreScriptAction
+from mozharness.base.vcs.vcsbase import VCSScript
+
+VCS_TOOLS = ("gittool.py",)
+
+
+class VCSToolsScript(VCSScript):
+ """This script allows us to fetch gittool.py if
+    we're running the script in developer mode.
+ """
+
+ @PreScriptAction("checkout")
+ def _pre_checkout(self, action):
+ if self.config.get("developer_mode"):
+            # We put them in base_work_dir to prevent the clobber action
+            # from deleting them before we use them
+ for vcs_tool in VCS_TOOLS:
+ file_path = self.query_exe(vcs_tool)
+ if not os.path.exists(file_path):
+ self.download_file(
+ url=self.config[vcs_tool],
+ file_name=file_path,
+ parent_dir=os.path.dirname(file_path),
+ create_parent_dir=True,
+ )
+ self.chmod(file_path, 0o755)
+ else:
+            # We simply verify that everything is in order, and detect
+            # whether the user forgot to specify developer mode
+ for vcs_tool in VCS_TOOLS:
+ file_path = self.which(vcs_tool)
+
+ if not file_path:
+ file_path = self.query_exe(vcs_tool)
+
+                # If the tool is specified as a list, we're running on
+                # Windows, and we skip the check
+ if type(self.query_exe(vcs_tool)) is list:
+ continue
+
+ if file_path is None:
+ self.fatal(
+ "This machine is missing %s, if this is your "
+ "local machine you can use --cfg "
+ "developer_config.py" % vcs_tool
+ )
+ elif not self.is_exe(file_path):
+ self.critical("%s is not executable." % file_path)
diff --git a/testing/mozharness/requirements.txt b/testing/mozharness/requirements.txt
new file mode 100644
index 0000000000..280fb749e0
--- /dev/null
+++ b/testing/mozharness/requirements.txt
@@ -0,0 +1,26 @@
+# These packages are needed for mozharness unit tests.
+# Output from 'pip freeze'; we may be able to use other versions of the below packages.
+Cython==0.14.1
+Fabric==1.6.0
+PyYAML==6.0
+coverage==3.6
+distribute==0.6.35
+dulwich==0.19.6
+hg-git==0.4.0
+logilab-astng==0.24.2
+logilab-common==1.4.2
+mercurial==4.3.1
+mock==1.0.1
+nose==1.2.1
+ordereddict==1.1
+paramiko==1.10.0
+pycrypto==2.6.1
+pyflakes==0.6.1
+pylint==0.27.0
+simplejson==2.1.1
+unittest2==0.5.1
+virtualenv==1.5.1
+wsgiref==0.1.2
+urllib3==1.9.1
+google-api-python-client==1.5.1
+oauth2client==1.4.2
diff --git a/testing/mozharness/scripts/android_emulator_pgo.py b/testing/mozharness/scripts/android_emulator_pgo.py
new file mode 100644
index 0000000000..4c8309b303
--- /dev/null
+++ b/testing/mozharness/scripts/android_emulator_pgo.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import copy
+import glob
+import json
+import os
+import posixpath
+import subprocess
+import sys
+import time
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.script import BaseScript, PreScriptAction
+from mozharness.mozilla.automation import EXIT_STATUS_DICT, TBPL_RETRY
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+
+PAGES = [
+ "js-input/webkit/PerformanceTests/Speedometer/index.html",
+ "blueprint/sample.html",
+ "blueprint/forms.html",
+ "blueprint/grid.html",
+ "blueprint/elements.html",
+ "js-input/3d-thingy.html",
+ "js-input/crypto-otp.html",
+ "js-input/sunspider/3d-cube.html",
+ "js-input/sunspider/3d-morph.html",
+ "js-input/sunspider/3d-raytrace.html",
+ "js-input/sunspider/access-binary-trees.html",
+ "js-input/sunspider/access-fannkuch.html",
+ "js-input/sunspider/access-nbody.html",
+ "js-input/sunspider/access-nsieve.html",
+ "js-input/sunspider/bitops-3bit-bits-in-byte.html",
+ "js-input/sunspider/bitops-bits-in-byte.html",
+ "js-input/sunspider/bitops-bitwise-and.html",
+ "js-input/sunspider/bitops-nsieve-bits.html",
+ "js-input/sunspider/controlflow-recursive.html",
+ "js-input/sunspider/crypto-aes.html",
+ "js-input/sunspider/crypto-md5.html",
+ "js-input/sunspider/crypto-sha1.html",
+ "js-input/sunspider/date-format-tofte.html",
+ "js-input/sunspider/date-format-xparb.html",
+ "js-input/sunspider/math-cordic.html",
+ "js-input/sunspider/math-partial-sums.html",
+ "js-input/sunspider/math-spectral-norm.html",
+ "js-input/sunspider/regexp-dna.html",
+ "js-input/sunspider/string-base64.html",
+ "js-input/sunspider/string-fasta.html",
+ "js-input/sunspider/string-tagcloud.html",
+ "js-input/sunspider/string-unpack-code.html",
+ "js-input/sunspider/string-validate-input.html",
+]
+
+
+class AndroidProfileRun(TestingMixin, BaseScript, MozbaseMixin, AndroidMixin):
+ """
+ Mozharness script to generate an android PGO profile using the emulator
+ """
+
+ config_options = copy.deepcopy(testing_config_options)
+
+ def __init__(self, require_config_file=False):
+ super(AndroidProfileRun, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "download",
+ "create-virtualenv",
+ "start-emulator",
+ "verify-device",
+ "install",
+ "run-tests",
+ ],
+ require_config_file=require_config_file,
+ config={
+ "virtualenv_modules": [],
+ "virtualenv_requirements": [],
+ "require_test_zip": True,
+ "mozbase_requirements": "mozbase_source_requirements.txt",
+ },
+ )
+
+ # these are necessary since self.config is read only
+ c = self.config
+ self.installer_path = c.get("installer_path")
+ self.device_serial = "emulator-5554"
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(AndroidProfileRun, self).query_abs_dirs()
+ dirs = {}
+
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_src_dir"], "testing")
+ dirs["abs_xre_dir"] = os.path.join(abs_dirs["abs_work_dir"], "hostutils")
+ dirs["abs_blob_upload_dir"] = "/builds/worker/artifacts/blobber_upload_dir"
+ work_dir = os.environ.get("MOZ_FETCHES_DIR") or abs_dirs["abs_work_dir"]
+ dirs["abs_sdk_dir"] = os.path.join(work_dir, "android-sdk-linux")
+ dirs["abs_avds_dir"] = os.path.join(work_dir, "android-device")
+ dirs["abs_bundletool_path"] = os.path.join(work_dir, "bundletool.jar")
+
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ ##########################################
+ # Actions for AndroidProfileRun #
+ ##########################################
+
+ def preflight_install(self):
+ # in the base class, this checks for mozinstall, but we don't use it
+ pass
+
+ @PreScriptAction("create-virtualenv")
+ def pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+ self.register_virtualenv_module(
+ "marionette",
+ os.path.join(dirs["abs_test_install_dir"], "marionette", "client"),
+ )
+
+ def download(self):
+ """
+ Download host utilities
+ """
+ dirs = self.query_abs_dirs()
+ self.xre_path = self.download_hostutils(dirs["abs_xre_dir"])
+
+ def install(self):
+ """
+ Install APKs on the device.
+ """
+ assert (
+ self.installer_path is not None
+ ), "Either add installer_path to the config or use --installer-path."
+ self.install_android_app(self.installer_path)
+ self.info("Finished installing apps for %s" % self.device_serial)
+
+ def run_tests(self):
+ """
+ Generate the PGO profile data
+ """
+ from marionette_driver.marionette import Marionette
+ from mozdevice import ADBDeviceFactory, ADBTimeoutError
+ from mozhttpd import MozHttpd
+ from mozprofile import Preferences
+ from six import string_types
+
+ app = self.query_package_name()
+
+ IP = "10.0.2.2"
+ PORT = 8888
+
+ PATH_MAPPINGS = {
+ "/js-input/webkit/PerformanceTests": "third_party/webkit/PerformanceTests",
+ }
+
+ dirs = self.query_abs_dirs()
+ topsrcdir = dirs["abs_src_dir"]
+ adb = self.query_exe("adb")
+
+ path_mappings = {
+ k: os.path.join(topsrcdir, v) for k, v in PATH_MAPPINGS.items()
+ }
+ httpd = MozHttpd(
+ port=PORT,
+ docroot=os.path.join(topsrcdir, "build", "pgo"),
+ path_mappings=path_mappings,
+ )
+ httpd.start(block=False)
+
+ profile_data_dir = os.path.join(topsrcdir, "testing", "profiles")
+ with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh:
+ base_profiles = json.load(fh)["profileserver"]
+
+ prefpaths = [
+ os.path.join(profile_data_dir, profile, "user.js")
+ for profile in base_profiles
+ ]
+
+ prefs = {}
+ for path in prefpaths:
+ prefs.update(Preferences.read_prefs(path))
+
+ interpolation = {"server": "%s:%d" % httpd.httpd.server_address, "OOP": "false"}
+ for k, v in prefs.items():
+ if isinstance(v, string_types):
+ v = v.format(**interpolation)
+ prefs[k] = Preferences.cast(v)
+
+ outputdir = self.config.get("output_directory", "/sdcard/pgo_profile")
+ jarlog = posixpath.join(outputdir, "en-US.log")
+ profdata = posixpath.join(outputdir, "default_%p_random_%m.profraw")
+
+ env = {}
+ env["XPCOM_DEBUG_BREAK"] = "warn"
+ env["MOZ_IN_AUTOMATION"] = "1"
+ env["MOZ_JAR_LOG_FILE"] = jarlog
+ env["LLVM_PROFILE_FILE"] = profdata
+
+ if self.query_minidump_stackwalk():
+ os.environ["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
+ os.environ["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ if not self.symbols_path:
+ self.symbols_path = os.environ.get("MOZ_FETCHES_DIR")
+
+ # Force test_root to be on the sdcard for android pgo
+ # builds which fail for Android 4.3 when profiles are located
+ # in /data/local/tmp/test_root with
+ # E AndroidRuntime: FATAL EXCEPTION: Gecko
+ # E AndroidRuntime: java.lang.IllegalArgumentException: \
+ # Profile directory must be writable if specified: /data/local/tmp/test_root/profile
+ # This occurs when .can-write-sentinel is written to
+ # the profile in
+ # mobile/android/geckoview/src/main/java/org/mozilla/gecko/GeckoProfile.java.
+ # This is not a problem on later versions of Android. This
+ # over-ride of test_root should be removed when Android 4.3 is no
+ # longer supported.
+ sdcard_test_root = "/sdcard/test_root"
+ adbdevice = ADBDeviceFactory(
+ adb=adb, device="emulator-5554", test_root=sdcard_test_root
+ )
+ if adbdevice.test_root != sdcard_test_root:
+ # If the test_root was previously set and shared
+ # the initializer will not have updated the shared
+ # value. Force it to match the sdcard_test_root.
+ adbdevice.test_root = sdcard_test_root
+ adbdevice.mkdir(outputdir, parents=True)
+
+ try:
+ # Run Fennec a first time to initialize its profile
+ driver = Marionette(
+ app="fennec",
+ package_name=app,
+ adb_path=adb,
+ bin="geckoview-androidTest.apk",
+ prefs=prefs,
+ connect_to_running_emulator=True,
+ startup_timeout=1000,
+ env=env,
+ symbols_path=self.symbols_path,
+ )
+ driver.start_session()
+
+ # Now generate the profile and wait for it to complete
+ for page in PAGES:
+ driver.navigate("http://%s:%d/%s" % (IP, PORT, page))
+ timeout = 2
+ if "Speedometer/index.html" in page:
+ # The Speedometer test actually runs many tests internally in
+ # javascript, so it needs extra time to run through them. The
+ # emulator doesn't get very far through the whole suite, but
+                    # this extra time at least lets some of them complete.
+ timeout = 360
+ time.sleep(timeout)
+
+ driver.set_context("chrome")
+ driver.execute_script(
+ """
+ let cancelQuit = Components.classes["@mozilla.org/supports-PRBool;1"]
+ .createInstance(Components.interfaces.nsISupportsPRBool);
+ Services.obs.notifyObservers(cancelQuit, "quit-application-requested", null);
+ return cancelQuit.data;
+ """
+ )
+ driver.execute_script(
+ """
+ Services.startup.quit(Ci.nsIAppStartup.eAttemptQuit)
+ """
+ )
+
+ # There is a delay between execute_script() returning and the profile data
+ # actually getting written out, so poll the device until we get a profile.
+ for i in range(50):
+ if not adbdevice.process_exist(app):
+ break
+ time.sleep(2)
+ else:
+ raise Exception("Android App (%s) never quit" % app)
+
+ # Pull all the profraw files and en-US.log
+ adbdevice.pull(outputdir, "/builds/worker/workspace/")
+ except ADBTimeoutError:
+ self.fatal(
+ "INFRA-ERROR: Failed with an ADBTimeoutError",
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+
+ profraw_files = glob.glob("/builds/worker/workspace/*.profraw")
+ if not profraw_files:
+ self.fatal("Could not find any profraw files in /builds/worker/workspace")
+ merge_cmd = [
+ os.path.join(os.environ["MOZ_FETCHES_DIR"], "clang/bin/llvm-profdata"),
+ "merge",
+ "-o",
+ "/builds/worker/workspace/merged.profdata",
+ ] + profraw_files
+ rc = subprocess.call(merge_cmd)
+ if rc != 0:
+ self.fatal(
+ "INFRA-ERROR: Failed to merge profile data. Corrupt profile?",
+ EXIT_STATUS_DICT[TBPL_RETRY],
+ )
+
+ # tarfile doesn't support xz in this version of Python
+ tar_cmd = [
+ "tar",
+ "-acvf",
+ "/builds/worker/artifacts/profdata.tar.xz",
+ "-C",
+ "/builds/worker/workspace",
+ "merged.profdata",
+ "en-US.log",
+ ]
+ subprocess.check_call(tar_cmd)
+
+ httpd.stop()
+
+
+if __name__ == "__main__":
+ test = AndroidProfileRun()
+ test.run_and_exit()
diff --git a/testing/mozharness/scripts/android_emulator_unittest.py b/testing/mozharness/scripts/android_emulator_unittest.py
new file mode 100644
index 0000000000..44988ca683
--- /dev/null
+++ b/testing/mozharness/scripts/android_emulator_unittest.py
@@ -0,0 +1,550 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import copy
+import datetime
+import json
+import os
+import subprocess
+import sys
+
+# load modules from parent dir
+here = os.path.abspath(os.path.dirname(__file__))
+sys.path.insert(1, os.path.dirname(here))
+
+from mozharness.base.log import WARNING
+from mozharness.base.script import BaseScript, PreScriptAction
+from mozharness.mozilla.automation import TBPL_RETRY
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+
+SUITE_DEFAULT_E10S = ["geckoview-junit", "mochitest", "reftest"]
+SUITE_NO_E10S = ["cppunittest", "gtest", "jittest", "xpcshell"]
+SUITE_REPEATABLE = ["mochitest", "reftest"]
+
+
+class AndroidEmulatorTest(
+ TestingMixin, BaseScript, MozbaseMixin, CodeCoverageMixin, AndroidMixin
+):
+ """
+ A mozharness script for Android functional tests (like mochitests and reftests)
+ run on an Android emulator. This script starts and manages an Android emulator
+ for the duration of the required tests. This is like desktop_unittest.py, but
+ for Android emulator test platforms.
+ """
+
+ config_options = (
+ [
+ [
+ ["--test-suite"],
+ {"action": "store", "dest": "test_suite", "default": None},
+ ],
+ [
+ ["--total-chunk"],
+ {
+ "action": "store",
+ "dest": "total_chunks",
+ "default": None,
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--this-chunk"],
+ {
+ "action": "store",
+ "dest": "this_chunk",
+ "default": None,
+ "help": "Number of this chunk",
+ },
+ ],
+ [
+ ["--enable-xorigin-tests"],
+ {
+ "action": "store_true",
+ "dest": "enable_xorigin_tests",
+ "default": False,
+ "help": "Run tests in a cross origin iframe.",
+ },
+ ],
+ [
+ ["--gpu-required"],
+ {
+ "action": "store_true",
+ "dest": "gpu_required",
+ "default": False,
+ "help": "Run additional verification on modified tests using gpu instances.",
+ },
+ ],
+ [
+ ["--log-raw-level"],
+ {
+ "action": "store",
+ "dest": "log_raw_level",
+ "default": "info",
+ "help": "Set log level (debug|info|warning|error|critical|fatal)",
+ },
+ ],
+ [
+ ["--log-tbpl-level"],
+ {
+ "action": "store",
+ "dest": "log_tbpl_level",
+ "default": "info",
+ "help": "Set log level (debug|info|warning|error|critical|fatal)",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "action": "store_false",
+ "dest": "e10s",
+ "default": True,
+ "help": "Run tests without multiple processes (e10s).",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_true",
+ "dest": "disable_fission",
+ "default": False,
+ "help": "Run without Fission enabled.",
+ },
+ ],
+ [
+ ["--web-content-isolation-strategy"],
+ {
+ "action": "store",
+ "type": "int",
+ "dest": "web_content_isolation_strategy",
+ "help": "Strategy used to determine whether or not a particular site should"
+ "load into a webIsolated content process, see "
+ "fission.webContentIsolationStrategy.",
+ },
+ ],
+ [
+ ["--repeat"],
+ {
+ "action": "store",
+ "type": "int",
+ "dest": "repeat",
+ "default": 0,
+ "help": "Repeat the tests the given number of times. Supported "
+ "by mochitest, reftest, crashtest, ignored otherwise.",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ ]
+ + copy.deepcopy(testing_config_options)
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
+ def __init__(self, require_config_file=False):
+ super(AndroidEmulatorTest, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "start-emulator",
+ "verify-device",
+ "install",
+ "run-tests",
+ ],
+ require_config_file=require_config_file,
+ config={
+ "virtualenv_modules": [],
+ "virtualenv_requirements": [],
+ "require_test_zip": True,
+ },
+ )
+
+ # these are necessary since self.config is read only
+ c = self.config
+ self.installer_url = c.get("installer_url")
+ self.installer_path = c.get("installer_path")
+ self.test_url = c.get("test_url")
+ self.test_packages_url = c.get("test_packages_url")
+ self.test_manifest = c.get("test_manifest")
+ suite = c.get("test_suite")
+ self.test_suite = suite
+ self.this_chunk = c.get("this_chunk")
+ self.total_chunks = c.get("total_chunks")
+ self.xre_path = None
+ self.device_serial = "emulator-5554"
+ self.log_raw_level = c.get("log_raw_level")
+ self.log_tbpl_level = c.get("log_tbpl_level")
+ # AndroidMixin uses this when launching the emulator. We only want
+ # GLES3 if we're running WebRender (default)
+ self.use_gles3 = True
+ self.disable_e10s = c.get("disable_e10s")
+ self.disable_fission = c.get("disable_fission")
+ self.web_content_isolation_strategy = c.get("web_content_isolation_strategy")
+ self.extra_prefs = c.get("extra_prefs")
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(AndroidEmulatorTest, self).query_abs_dirs()
+ dirs = {}
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ dirs["abs_test_bin_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "tests", "bin"
+ )
+ dirs["abs_xre_dir"] = os.path.join(abs_dirs["abs_work_dir"], "hostutils")
+ dirs["abs_modules_dir"] = os.path.join(dirs["abs_test_install_dir"], "modules")
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ dirs["abs_mochitest_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "mochitest"
+ )
+ dirs["abs_reftest_dir"] = os.path.join(dirs["abs_test_install_dir"], "reftest")
+ dirs["abs_xpcshell_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "xpcshell"
+ )
+ work_dir = os.environ.get("MOZ_FETCHES_DIR") or abs_dirs["abs_work_dir"]
+ dirs["abs_sdk_dir"] = os.path.join(work_dir, "android-sdk-linux")
+ dirs["abs_avds_dir"] = os.path.join(work_dir, "android-device")
+ dirs["abs_bundletool_path"] = os.path.join(work_dir, "bundletool.jar")
+
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def _query_tests_dir(self, test_suite):
+ dirs = self.query_abs_dirs()
+ try:
+ test_dir = self.config["suite_definitions"][test_suite]["testsdir"]
+ except Exception:
+ test_dir = test_suite
+ return os.path.join(dirs["abs_test_install_dir"], test_dir)
+
+ def _get_mozharness_test_paths(self, suite):
+ test_paths = os.environ.get("MOZHARNESS_TEST_PATHS")
+ if not test_paths:
+ return
+
+ return json.loads(test_paths).get(suite)
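+
+    # Illustrative (not from the original): MOZHARNESS_TEST_PATHS is a JSON
+    # object mapping suite names to lists of test paths, e.g.
+    #   {"mochitest-plain": ["dom/base/test/test_foo.html"]}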
+
+ def _build_command(self):
+ c = self.config
+ dirs = self.query_abs_dirs()
+
+ if self.test_suite not in self.config["suite_definitions"]:
+ self.fatal("Key '%s' not defined in the config!" % self.test_suite)
+
+ cmd = [
+ self.query_python_path("python"),
+ "-u",
+ os.path.join(
+ self._query_tests_dir(self.test_suite),
+ self.config["suite_definitions"][self.test_suite]["run_filename"],
+ ),
+ ]
+
+ raw_log_file, error_summary_file = self.get_indexed_logs(
+ dirs["abs_blob_upload_dir"], self.test_suite
+ )
+
+ str_format_values = {
+ "device_serial": self.device_serial,
+ # IP address of the host as seen from the emulator
+ "remote_webserver": "10.0.2.2",
+ "xre_path": self.xre_path,
+ "utility_path": self.xre_path,
+ "http_port": "8854", # starting http port to use for the mochitest server
+ "ssl_port": "4454", # starting ssl port to use for the server
+ "certs_path": os.path.join(dirs["abs_work_dir"], "tests/certs"),
+ # TestingMixin._download_and_extract_symbols() will set
+ # self.symbols_path when downloading/extracting.
+ "symbols_path": self.symbols_path,
+ "modules_dir": dirs["abs_modules_dir"],
+ "installer_path": self.installer_path,
+ "raw_log_file": raw_log_file,
+ "log_tbpl_level": self.log_tbpl_level,
+ "log_raw_level": self.log_raw_level,
+ "error_summary_file": error_summary_file,
+ "xpcshell_extra": c.get("xpcshell_extra", ""),
+ "gtest_dir": os.path.join(dirs["abs_test_install_dir"], "gtest"),
+ }
+
+ user_paths = self._get_mozharness_test_paths(self.test_suite)
+
+ for option in self.config["suite_definitions"][self.test_suite]["options"]:
+ opt = option.split("=")[0]
+ # override configured chunk options with script args, if specified
+ if opt in ("--this-chunk", "--total-chunks"):
+ if (
+ user_paths
+ or getattr(self, opt.replace("-", "_").strip("_"), None) is not None
+ ):
+ continue
+
+ if "%(app)" in option:
+ # only query package name if requested
+ cmd.extend([option % {"app": self.query_package_name()}])
+ else:
+ option = option % str_format_values
+ if option:
+ cmd.extend([option])
+
+ if "mochitest" in self.test_suite:
+ category = "mochitest"
+ elif "reftest" in self.test_suite or "crashtest" in self.test_suite:
+ category = "reftest"
+ else:
+ category = self.test_suite
+ if c.get("repeat"):
+ if category in SUITE_REPEATABLE:
+ cmd.extend(["--repeat=%s" % c.get("repeat")])
+ else:
+ self.log("--repeat not supported in {}".format(category), level=WARNING)
+
+        # do not pass --disable-fission to suites that do not support it
+        # (gtest, cppunittest)
+ if c["disable_fission"] and category not in ["gtest", "cppunittest"]:
+ cmd.append("--disable-fission")
+
+ if "web_content_isolation_strategy" in c:
+ cmd.append(
+ "--web-content-isolation-strategy=%s"
+ % c["web_content_isolation_strategy"]
+ )
+ cmd.extend(["--setpref={}".format(p) for p in self.extra_prefs])
+
+ if not (self.verify_enabled or self.per_test_coverage):
+ if user_paths:
+ cmd.extend(user_paths)
+            else:
+ if self.this_chunk is not None:
+ cmd.extend(["--this-chunk", self.this_chunk])
+ if self.total_chunks is not None:
+ cmd.extend(["--total-chunks", self.total_chunks])
+
+ if category not in SUITE_NO_E10S:
+ if category in SUITE_DEFAULT_E10S and not c["e10s"]:
+ cmd.append("--disable-e10s")
+ elif category not in SUITE_DEFAULT_E10S and c["e10s"]:
+ cmd.append("--e10s")
+
+ if c.get("enable_xorigin_tests"):
+ cmd.extend(["--enable-xorigin-tests"])
+
+ try_options, try_tests = self.try_args(self.test_suite)
+ cmd.extend(try_options)
+ if not self.verify_enabled and not self.per_test_coverage:
+ cmd.extend(
+ self.query_tests_args(
+ self.config["suite_definitions"][self.test_suite].get("tests"),
+ None,
+ try_tests,
+ )
+ )
+
+ if self.java_code_coverage_enabled:
+ cmd.extend(
+ [
+ "--enable-coverage",
+ "--coverage-output-dir",
+ self.java_coverage_output_dir,
+ ]
+ )
+
+ return cmd
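+
+    # A hypothetical suite_definitions entry (names invented for
+    # illustration), matching what _build_command consumes:
+    #   "mochitest-plain": {
+    #       "run_filename": "runtestsremote.py",
+    #       "testsdir": "mochitest",
+    #       "install": True,
+    #       "options": ["--app=%(app)s",
+    #                   "--remote-webserver=%(remote_webserver)s"],
+    #   }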
+
+ def _query_suites(self):
+ if self.test_suite:
+ return [(self.test_suite, self.test_suite)]
+ # per-test mode: determine test suites to run
+
+ # For each test category, provide a list of supported sub-suites and a mapping
+ # between the per_test_base suite name and the android suite name.
+ all = [
+ (
+ "mochitest",
+ {
+ "mochitest-plain": "mochitest-plain",
+ "mochitest-media": "mochitest-media",
+ "mochitest-plain-gpu": "mochitest-plain-gpu",
+ },
+ ),
+ (
+ "reftest",
+ {
+ "reftest": "reftest",
+ "crashtest": "crashtest",
+ "jsreftest": "jsreftest",
+ },
+ ),
+ ("xpcshell", {"xpcshell": "xpcshell"}),
+ ]
+ suites = []
+ for (category, all_suites) in all:
+ cat_suites = self.query_per_test_category_suites(category, all_suites)
+ for k in cat_suites.keys():
+ suites.append((k, cat_suites[k]))
+ return suites
+
+ def _query_suite_categories(self):
+ if self.test_suite:
+ categories = [self.test_suite]
+ else:
+ # per-test mode
+ categories = ["mochitest", "reftest", "xpcshell"]
+ return categories
+
+ ##########################################
+ # Actions for AndroidEmulatorTest #
+ ##########################################
+
+ def preflight_install(self):
+ # in the base class, this checks for mozinstall, but we don't use it
+ pass
+
+ @PreScriptAction("create-virtualenv")
+ def pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+ requirements = None
+ suites = self._query_suites()
+ if ("mochitest-media", "mochitest-media") in suites:
+ # mochitest-media is the only thing that needs this
+ requirements = os.path.join(
+ dirs["abs_mochitest_dir"],
+ "websocketprocessbridge",
+ "websocketprocessbridge_requirements_3.txt",
+ )
+ if requirements:
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
+
+ def download_and_extract(self):
+ """
+ Download and extract product APK, tests.zip, and host utils.
+ """
+ super(AndroidEmulatorTest, self).download_and_extract(
+ suite_categories=self._query_suite_categories()
+ )
+ dirs = self.query_abs_dirs()
+ self.xre_path = self.download_hostutils(dirs["abs_xre_dir"])
+
+ def install(self):
+ """
+ Install APKs on the device.
+ """
+ install_needed = (not self.test_suite) or self.config["suite_definitions"][
+ self.test_suite
+ ].get("install")
+ if install_needed is False:
+ self.info("Skipping apk installation for %s" % self.test_suite)
+ return
+ assert (
+ self.installer_path is not None
+ ), "Either add installer_path to the config or use --installer-path."
+ self.install_android_app(self.installer_path)
+ self.info("Finished installing apps for %s" % self.device_serial)
+
+ def run_tests(self):
+ """
+ Run the tests
+ """
+ self.start_time = datetime.datetime.now()
+ max_per_test_time = datetime.timedelta(minutes=60)
+
+ per_test_args = []
+ suites = self._query_suites()
+ minidump = self.query_minidump_stackwalk()
+ for (per_test_suite, suite) in suites:
+ self.test_suite = suite
+
+ try:
+ cwd = self._query_tests_dir(self.test_suite)
+ except Exception:
+ self.fatal("Don't know how to run --test-suite '%s'!" % self.test_suite)
+
+ env = self.query_env()
+ if minidump:
+ env["MINIDUMP_STACKWALK"] = minidump
+ env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "full"
+ if self.config["nodejs_path"]:
+ env["MOZ_NODE_PATH"] = self.config["nodejs_path"]
+
+ summary = {}
+ for per_test_args in self.query_args(per_test_suite):
+ if (datetime.datetime.now() - self.start_time) > max_per_test_time:
+ # Running tests has run out of time. That is okay! Stop running
+ # them so that a task timeout is not triggered, and so that
+ # (partial) results are made available in a timely manner.
+ self.info(
+ "TinderboxPrint: Running tests took too long: "
+ "Not all tests were executed.<br/>"
+ )
+ # Signal per-test time exceeded, to break out of suites and
+ # suite categories loops also.
+ return
+
+ cmd = self._build_command()
+ final_cmd = copy.copy(cmd)
+ if len(per_test_args) > 0:
+ # in per-test mode, remove any chunk arguments from command
+                # (use a comprehension: removing items from a list while
+                # iterating over it can skip adjacent entries)
+                final_cmd = [
+                    arg
+                    for arg in final_cmd
+                    if "total-chunk" not in arg and "this-chunk" not in arg
+                ]
+ final_cmd.extend(per_test_args)
+
+ self.info("Running the command %s" % subprocess.list2cmdline(final_cmd))
+ self.info("##### %s log begins" % self.test_suite)
+
+ suite_category = self.test_suite
+ parser = self.get_test_output_parser(
+ suite_category,
+ config=self.config,
+ log_obj=self.log_obj,
+ error_list=[],
+ )
+ self.run_command(final_cmd, cwd=cwd, env=env, output_parser=parser)
+ tbpl_status, log_level, summary = parser.evaluate_parser(
+ 0, previous_summary=summary
+ )
+ parser.append_tinderboxprint_line(self.test_suite)
+
+ self.info("##### %s log ends" % self.test_suite)
+
+ if len(per_test_args) > 0:
+ self.record_status(tbpl_status, level=log_level)
+ self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
+ if tbpl_status == TBPL_RETRY:
+ self.info("Per-test run abandoned due to RETRY status")
+ return
+ else:
+ self.record_status(tbpl_status, level=log_level)
+ self.log(
+ "The %s suite: %s ran with return status: %s"
+ % (suite_category, suite, tbpl_status),
+ level=log_level,
+ )
+
+
+if __name__ == "__main__":
+ test = AndroidEmulatorTest()
+ test.run_and_exit()
diff --git a/testing/mozharness/scripts/android_hardware_unittest.py b/testing/mozharness/scripts/android_hardware_unittest.py
new file mode 100644
index 0000000000..a5ec2c7d3a
--- /dev/null
+++ b/testing/mozharness/scripts/android_hardware_unittest.py
@@ -0,0 +1,477 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import copy
+import datetime
+import json
+import os
+import subprocess
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.log import WARNING
+from mozharness.base.script import BaseScript, PreScriptAction
+from mozharness.mozilla.automation import TBPL_RETRY
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.codecoverage import CodeCoverageMixin
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+
+SUITE_DEFAULT_E10S = ["geckoview-junit", "mochitest", "reftest"]
+SUITE_NO_E10S = ["cppunittest", "gtest", "jittest"]
+SUITE_REPEATABLE = ["mochitest", "reftest"]
+
+
+class AndroidHardwareTest(
+ TestingMixin, BaseScript, MozbaseMixin, CodeCoverageMixin, AndroidMixin
+):
+ config_options = [
+ [["--test-suite"], {"action": "store", "dest": "test_suite", "default": None}],
+ [
+ ["--adb-path"],
+ {
+ "action": "store",
+ "dest": "adb_path",
+ "default": None,
+ "help": "Path to adb",
+ },
+ ],
+ [
+ ["--total-chunk"],
+ {
+ "action": "store",
+ "dest": "total_chunks",
+ "default": None,
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--this-chunk"],
+ {
+ "action": "store",
+ "dest": "this_chunk",
+ "default": None,
+ "help": "Number of this chunk",
+ },
+ ],
+ [
+ ["--log-raw-level"],
+ {
+ "action": "store",
+ "dest": "log_raw_level",
+ "default": "info",
+ "help": "Set log level (debug|info|warning|error|critical|fatal)",
+ },
+ ],
+ [
+ ["--log-tbpl-level"],
+ {
+ "action": "store",
+ "dest": "log_tbpl_level",
+ "default": "info",
+ "help": "Set log level (debug|info|warning|error|critical|fatal)",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "action": "store_false",
+ "dest": "e10s",
+ "default": True,
+ "help": "Run tests without multiple processes (e10s).",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_true",
+ "dest": "disable_fission",
+ "default": False,
+ "help": "Run with Fission disabled.",
+ },
+ ],
+ [
+ ["--repeat"],
+ {
+ "action": "store",
+ "type": "int",
+ "dest": "repeat",
+ "default": 0,
+ "help": "Repeat the tests the given number of times. Supported "
+ "by mochitest, reftest, crashtest, ignored otherwise.",
+ },
+ ],
+ [
+ [
+ "--setpref",
+ ],
+ {
+ "action": "append",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ [
+ ["--jittest-flags"],
+ {
+ "action": "store",
+ "dest": "jittest_flags",
+ "default": "debug",
+ "help": "Flags to run with jittest (all, debug, etc.).",
+ },
+ ],
+ ] + copy.deepcopy(testing_config_options)
+
+ def __init__(self, require_config_file=False):
+ super(AndroidHardwareTest, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "verify-device",
+ "install",
+ "run-tests",
+ ],
+ require_config_file=require_config_file,
+ config={
+ "virtualenv_modules": [],
+ "virtualenv_requirements": [],
+ "require_test_zip": True,
+ # IP address of the host as seen from the device.
+ "remote_webserver": os.environ["HOST_IP"],
+ },
+ )
+
+ # these are necessary since self.config is read only
+ c = self.config
+ self.installer_url = c.get("installer_url")
+ self.installer_path = c.get("installer_path")
+ self.test_url = c.get("test_url")
+ self.test_packages_url = c.get("test_packages_url")
+ self.test_manifest = c.get("test_manifest")
+ suite = c.get("test_suite")
+ self.test_suite = suite
+ self.this_chunk = c.get("this_chunk")
+ self.total_chunks = c.get("total_chunks")
+ self.xre_path = None
+ self.log_raw_level = c.get("log_raw_level")
+ self.log_tbpl_level = c.get("log_tbpl_level")
+ self.disable_e10s = c.get("disable_e10s")
+ self.disable_fission = c.get("disable_fission")
+ self.extra_prefs = c.get("extra_prefs")
+ self.jittest_flags = c.get("jittest_flags")
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(AndroidHardwareTest, self).query_abs_dirs()
+ dirs = {}
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ dirs["abs_test_bin_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "tests", "bin"
+ )
+ dirs["abs_xre_dir"] = os.path.join(abs_dirs["abs_work_dir"], "hostutils")
+ dirs["abs_modules_dir"] = os.path.join(dirs["abs_test_install_dir"], "modules")
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ dirs["abs_mochitest_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "mochitest"
+ )
+ dirs["abs_reftest_dir"] = os.path.join(dirs["abs_test_install_dir"], "reftest")
+ dirs["abs_xpcshell_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "xpcshell"
+ )
+
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def _query_tests_dir(self):
+ dirs = self.query_abs_dirs()
+ try:
+ test_dir = self.config["suite_definitions"][self.test_suite]["testsdir"]
+ except Exception:
+ test_dir = self.test_suite
+ return os.path.join(dirs["abs_test_install_dir"], test_dir)
+
+ def _build_command(self):
+ c = self.config
+ dirs = self.query_abs_dirs()
+
+ if self.test_suite not in self.config["suite_definitions"]:
+ self.fatal("Key '%s' not defined in the config!" % self.test_suite)
+
+ cmd = [
+ self.query_python_path("python"),
+ "-u",
+ os.path.join(
+ self._query_tests_dir(),
+ self.config["suite_definitions"][self.test_suite]["run_filename"],
+ ),
+ ]
+
+ raw_log_file, error_summary_file = self.get_indexed_logs(
+ dirs["abs_blob_upload_dir"], self.test_suite
+ )
+
+ str_format_values = {
+ "device_serial": self.device_serial,
+ "remote_webserver": c["remote_webserver"],
+ "xre_path": self.xre_path,
+ "utility_path": self.xre_path,
+ "http_port": "8854", # starting http port to use for the mochitest server
+ "ssl_port": "4454", # starting ssl port to use for the server
+ "certs_path": os.path.join(dirs["abs_work_dir"], "tests/certs"),
+ # TestingMixin._download_and_extract_symbols() will set
+ # self.symbols_path when downloading/extracting.
+ "symbols_path": self.symbols_path,
+ "modules_dir": dirs["abs_modules_dir"],
+ "installer_path": self.installer_path,
+ "raw_log_file": raw_log_file,
+ "log_tbpl_level": self.log_tbpl_level,
+ "log_raw_level": self.log_raw_level,
+ "error_summary_file": error_summary_file,
+ "xpcshell_extra": c.get("xpcshell_extra", ""),
+ "jittest_flags": self.jittest_flags,
+ }
+
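+        # MOZHARNESS_TEST_PATHS, when set, is a JSON object mapping suite
+        # names to lists of test paths, e.g. '{"mochitest": ["dom/base/test"]}'
+        # (the mapping shown is illustrative); when present it takes
+        # precedence over any chunking arguments.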
+ user_paths = json.loads(os.environ.get("MOZHARNESS_TEST_PATHS", '""'))
+
+ for option in self.config["suite_definitions"][self.test_suite]["options"]:
+ opt = option.split("=")[0]
+ # override configured chunk options with script args, if specified
+ if opt in ("--this-chunk", "--total-chunks"):
+ if (
+ user_paths
+ or getattr(self, opt.replace("-", "_").strip("_"), None) is not None
+ ):
+ continue
+
+ if "%(app)" in option:
+ # only query package name if requested
+ cmd.extend([option % {"app": self.query_package_name()}])
+ else:
+ option = option % str_format_values
+ if option:
+ cmd.extend([option])
+
+ if user_paths:
+ if self.test_suite in user_paths:
+ cmd.extend(user_paths[self.test_suite])
+ elif not self.verify_enabled:
+ if self.this_chunk is not None:
+ cmd.extend(["--this-chunk", self.this_chunk])
+ if self.total_chunks is not None:
+ cmd.extend(["--total-chunks", self.total_chunks])
+
+ if "mochitest" in self.test_suite:
+ category = "mochitest"
+ elif "reftest" in self.test_suite or "crashtest" in self.test_suite:
+ category = "reftest"
+ else:
+ category = self.test_suite
+ if c.get("repeat"):
+ if category in SUITE_REPEATABLE:
+ cmd.extend(["--repeat=%s" % c.get("repeat")])
+ else:
+ self.log("--repeat not supported in {}".format(category), level=WARNING)
+
+ if category not in SUITE_NO_E10S:
+ if category in SUITE_DEFAULT_E10S and not c["e10s"]:
+ cmd.append("--disable-e10s")
+ elif category not in SUITE_DEFAULT_E10S and c["e10s"]:
+ cmd.append("--e10s")
+
+ if self.disable_fission and category not in SUITE_NO_E10S:
+ cmd.append("--disable-fission")
+
+ cmd.extend(["--setpref={}".format(p) for p in self.extra_prefs])
+
+ try_options, try_tests = self.try_args(self.test_suite)
+ cmd.extend(try_options)
+ if not self.verify_enabled and not self.per_test_coverage:
+ cmd.extend(
+ self.query_tests_args(
+ self.config["suite_definitions"][self.test_suite].get("tests"),
+ None,
+ try_tests,
+ )
+ )
+
+ return cmd
+
+ def _query_suites(self):
+ if self.test_suite:
+ return [(self.test_suite, self.test_suite)]
+ # per-test mode: determine test suites to run
+        suite_defs = [
+ (
+ "mochitest",
+ {
+ "mochitest-plain": "mochitest-plain",
+ "mochitest-plain-gpu": "mochitest-plain-gpu",
+ },
+ ),
+ ("reftest", {"reftest": "reftest", "crashtest": "crashtest"}),
+ ("xpcshell", {"xpcshell": "xpcshell"}),
+ ]
+ suites = []
+        for (category, all_suites) in suite_defs:
+ cat_suites = self.query_per_test_category_suites(category, all_suites)
+ for k in cat_suites.keys():
+ suites.append((k, cat_suites[k]))
+ return suites
+
+ def _query_suite_categories(self):
+ if self.test_suite:
+ categories = [self.test_suite]
+ else:
+ # per-test mode
+ categories = ["mochitest", "reftest", "xpcshell"]
+ return categories
+
+ ##########################################
+ # Actions for AndroidHardwareTest #
+ ##########################################
+
+ def preflight_install(self):
+ # in the base class, this checks for mozinstall, but we don't use it
+ pass
+
+ @PreScriptAction("create-virtualenv")
+ def pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+ requirements = None
+ suites = self._query_suites()
+ if ("mochitest-media", "mochitest-media") in suites:
+ # mochitest-media is the only thing that needs this
+ requirements = os.path.join(
+ dirs["abs_mochitest_dir"],
+ "websocketprocessbridge",
+ "websocketprocessbridge_requirements_3.txt",
+ )
+ if requirements:
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
+
+ def download_and_extract(self):
+ """
+ Download and extract product APK, tests.zip, and host utils.
+ """
+ super(AndroidHardwareTest, self).download_and_extract(
+ suite_categories=self._query_suite_categories()
+ )
+ dirs = self.query_abs_dirs()
+ self.xre_path = self.download_hostutils(dirs["abs_xre_dir"])
+
+ def install(self):
+ """
+ Install APKs on the device.
+ """
+ install_needed = (not self.test_suite) or self.config["suite_definitions"][
+ self.test_suite
+ ].get("install")
+ if install_needed is False:
+ self.info("Skipping apk installation for %s" % self.test_suite)
+ return
+ assert (
+ self.installer_path is not None
+ ), "Either add installer_path to the config or use --installer-path."
+ self.uninstall_android_app()
+ self.install_android_app(self.installer_path)
+ self.info("Finished installing apps for %s" % self.device_name)
+
+ def run_tests(self):
+ """
+ Run the tests
+ """
+ self.start_time = datetime.datetime.now()
+ max_per_test_time = datetime.timedelta(minutes=60)
+
+ per_test_args = []
+ suites = self._query_suites()
+ minidump = self.query_minidump_stackwalk()
+ for (per_test_suite, suite) in suites:
+ self.test_suite = suite
+
+ try:
+ cwd = self._query_tests_dir()
+ except Exception:
+ self.fatal("Don't know how to run --test-suite '%s'!" % self.test_suite)
+ env = self.query_env()
+ if minidump:
+ env["MINIDUMP_STACKWALK"] = minidump
+ env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "full"
+
+ summary = None
+ for per_test_args in self.query_args(per_test_suite):
+ if (datetime.datetime.now() - self.start_time) > max_per_test_time:
+ # Running tests has run out of time. That is okay! Stop running
+ # them so that a task timeout is not triggered, and so that
+ # (partial) results are made available in a timely manner.
+ self.info(
+ "TinderboxPrint: Running tests took too long: "
+ "Not all tests were executed.<br/>"
+ )
+ # Signal per-test time exceeded, to break out of suites and
+ # suite categories loops also.
+ return
+
+ cmd = self._build_command()
+ final_cmd = copy.copy(cmd)
+ if len(per_test_args) > 0:
+ # in per-test mode, remove any chunk arguments from command
+                    final_cmd = [
+                        arg
+                        for arg in final_cmd
+                        if "total-chunk" not in arg and "this-chunk" not in arg
+                    ]
+ final_cmd.extend(per_test_args)
+
+ self.info(
+ "Running on %s the command %s"
+ % (self.device_name, subprocess.list2cmdline(final_cmd))
+ )
+ self.info("##### %s log begins" % self.test_suite)
+
+ suite_category = self.test_suite
+ parser = self.get_test_output_parser(
+ suite_category,
+ config=self.config,
+ log_obj=self.log_obj,
+ error_list=[],
+ )
+ self.run_command(final_cmd, cwd=cwd, env=env, output_parser=parser)
+ tbpl_status, log_level, summary = parser.evaluate_parser(0, summary)
+ parser.append_tinderboxprint_line(self.test_suite)
+
+ self.info("##### %s log ends" % self.test_suite)
+
+ if len(per_test_args) > 0:
+ self.record_status(tbpl_status, level=log_level)
+ self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
+ if tbpl_status == TBPL_RETRY:
+ self.info("Per-test run abandoned due to RETRY status")
+ return
+ else:
+ self.record_status(tbpl_status, level=log_level)
+ self.log(
+ "The %s suite: %s ran with return status: %s"
+ % (suite_category, suite, tbpl_status),
+ level=log_level,
+ )
+
+
+if __name__ == "__main__":
+ test = AndroidHardwareTest()
+ test.run_and_exit()
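
A note on the option templating in _build_command() above: each suite-definition option string has its %(name)s placeholders filled from str_format_values, and any option that expands to the empty string (for example an unset xpcshell_extra) is dropped. A minimal sketch of that loop, with made-up values:

    str_format_values = {"remote_webserver": "10.0.2.2", "xpcshell_extra": ""}
    options = ["--remote-webserver=%(remote_webserver)s", "%(xpcshell_extra)s"]
    cmd = []
    for option in options:
        option = option % str_format_values  # fill in placeholders
        if option:  # options that expand to "" are dropped entirely
            cmd.append(option)
    assert cmd == ["--remote-webserver=10.0.2.2"]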
diff --git a/testing/mozharness/scripts/android_wrench.py b/testing/mozharness/scripts/android_wrench.py
new file mode 100644
index 0000000000..2162eb9df5
--- /dev/null
+++ b/testing/mozharness/scripts/android_wrench.py
@@ -0,0 +1,283 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import datetime
+import enum
+import os
+import subprocess
+import sys
+import tempfile
+import time
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.script import BaseScript
+from mozharness.mozilla.automation import EXIT_STATUS_DICT, TBPL_FAILURE
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.testbase import TestingMixin
+
+
+class TestMode(enum.Enum):
+ OPTIMIZED_SHADER_COMPILATION = 0
+ UNOPTIMIZED_SHADER_COMPILATION = 1
+ SHADER_TEST = 2
+ REFTEST = 3
+
+
+class AndroidWrench(TestingMixin, BaseScript, MozbaseMixin, AndroidMixin):
+ def __init__(self, require_config_file=False):
+ # code in BaseScript.__init__ iterates all the properties to attach
+ # pre- and post-flight listeners, so we need _is_emulator be defined
+ # before that happens. Doesn't need to be a real value though.
+ self._is_emulator = None
+
+ # Directory for wrench input and output files. Note that we hard-code
+ # the path here, rather than using something like self.device.test_root,
+ # because it needs to be kept in sync with the path hard-coded inside
+ # the wrench source code.
+ self.wrench_dir = (
+ "/storage/emulated/0/Android/data/org.mozilla.wrench/files/wrench"
+ )
+
+ super(AndroidWrench, self).__init__()
+ if self.device_serial is None:
+ # Running on an emulator.
+ self._is_emulator = True
+ self.device_serial = "emulator-5554"
+ self.use_gles3 = True
+ else:
+ # Running on a device, ensure self.is_emulator returns False.
+ # The adb binary is preinstalled on the bitbar image and is
+ # already on the $PATH.
+ self._is_emulator = False
+ self._adb_path = "adb"
+ self._errored = False
+
+ @property
+ def is_emulator(self):
+ """Overrides the is_emulator property on AndroidMixin."""
+ if self._is_emulator is None:
+ self._is_emulator = self.device_serial is None
+ return self._is_emulator
+
+ def activate_virtualenv(self):
+ """Overrides the method on AndroidMixin to be a no-op, because the
+ setup for wrench doesn't require a special virtualenv."""
+ pass
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+
+ abs_dirs = {}
+
+ abs_dirs["abs_work_dir"] = os.path.expanduser("~/.wrench")
+ if os.environ.get("MOZ_AUTOMATION", "0") == "1":
+ # In automation use the standard work dir if there is one
+ parent_abs_dirs = super(AndroidWrench, self).query_abs_dirs()
+ if "abs_work_dir" in parent_abs_dirs:
+ abs_dirs["abs_work_dir"] = parent_abs_dirs["abs_work_dir"]
+
+ abs_dirs["abs_blob_upload_dir"] = os.path.join(abs_dirs["abs_work_dir"], "logs")
+ abs_dirs["abs_apk_path"] = os.environ.get(
+ "WRENCH_APK", "gfx/wr/target/debug/apk/wrench.apk"
+ )
+ abs_dirs["abs_reftests_path"] = os.environ.get(
+ "WRENCH_REFTESTS", "gfx/wr/wrench/reftests"
+ )
+ if os.environ.get("MOZ_AUTOMATION", "0") == "1":
+ fetches_dir = os.environ.get("MOZ_FETCHES_DIR")
+ work_dir = (
+ fetches_dir
+ if fetches_dir and self.is_emulator
+ else abs_dirs["abs_work_dir"]
+ )
+ abs_dirs["abs_sdk_dir"] = os.path.join(work_dir, "android-sdk-linux")
+ abs_dirs["abs_avds_dir"] = os.path.join(work_dir, "android-device")
+ abs_dirs["abs_bundletool_path"] = os.path.join(work_dir, "bundletool.jar")
+ else:
+ mozbuild_path = os.environ.get(
+ "MOZBUILD_STATE_PATH", os.path.expanduser("~/.mozbuild")
+ )
+ mozbuild_sdk = os.environ.get(
+ "ANDROID_SDK_HOME", os.path.join(mozbuild_path, "android-sdk-linux")
+ )
+ abs_dirs["abs_sdk_dir"] = mozbuild_sdk
+ avds_dir = os.environ.get(
+ "ANDROID_EMULATOR_HOME", os.path.join(mozbuild_path, "android-device")
+ )
+ abs_dirs["abs_avds_dir"] = avds_dir
+ abs_dirs["abs_bundletool_path"] = os.path.join(
+ mozbuild_path, "bundletool.jar"
+ )
+
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def logcat_start(self):
+ """Overrides logcat_start in android.py - ensures any pre-existing logcat
+ is cleared before starting to record the new logcat. This is helpful
+ when running multiple times in a local emulator."""
+ logcat_cmd = [self.adb_path, "-s", self.device_serial, "logcat", "-c"]
+ self.info(" ".join(logcat_cmd))
+ subprocess.check_call(logcat_cmd)
+ super(AndroidWrench, self).logcat_start()
+
+ def wait_until_process_done(self, process_name, timeout):
+ """Waits until the specified process has exited. Polls the process list
+ every 5 seconds until the process disappears.
+
+ :param process_name: string containing the package name of the
+ application.
+ :param timeout: integer specifying the maximum time in seconds
+ to wait for the application to finish.
+ :returns: boolean - True if the process exited within the indicated
+ timeout, False if the process had not exited by the timeout.
+ """
+ end_time = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
+ while self.device.process_exist(process_name, timeout=timeout):
+ if datetime.datetime.now() > end_time:
+ stop_cmd = [
+ self.adb_path,
+ "-s",
+ self.device_serial,
+ "shell",
+ "am",
+ "force-stop",
+ process_name,
+ ]
+ subprocess.check_call(stop_cmd)
+ return False
+ time.sleep(5)
+
+ return True
+
+ def setup_sdcard(self, test_mode):
+ self.device.rm(self.wrench_dir, recursive=True, force=True)
+ self.device.mkdir(self.wrench_dir, parents=True)
+ if test_mode == TestMode.REFTEST:
+ self.device.push(
+ self.query_abs_dirs()["abs_reftests_path"],
+ self.wrench_dir + "/reftests",
+ )
+ args_file = os.path.join(self.query_abs_dirs()["abs_work_dir"], "wrench_args")
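+        # The args file read by wrench consists of optional "env: VAR=VALUE"
+        # lines followed by wrench's command-line arguments; e.g. for reftests
+        # on hardware it contains:
+        #   env: WRENCH_REFTEST_CONDITION_DEVICE=1
+        #   reftest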
+ with open(args_file, "w") as argfile:
+ if self.is_emulator:
+ argfile.write("env: WRENCH_REFTEST_CONDITION_EMULATOR=1\n")
+ else:
+ argfile.write("env: WRENCH_REFTEST_CONDITION_DEVICE=1\n")
+ if test_mode == TestMode.OPTIMIZED_SHADER_COMPILATION:
+ argfile.write("--precache test_init")
+ elif test_mode == TestMode.UNOPTIMIZED_SHADER_COMPILATION:
+ argfile.write("--precache --use-unoptimized-shaders test_init")
+ elif test_mode == TestMode.SHADER_TEST:
+ argfile.write("--precache test_shaders")
+ elif test_mode == TestMode.REFTEST:
+ argfile.write("reftest")
+ self.device.push(args_file, self.wrench_dir + "/args")
+
+ def run_tests(self, timeout):
+ self.timed_screenshots(None)
+ self.device.launch_application(
+ app_name="org.mozilla.wrench",
+ activity_name="android.app.NativeActivity",
+ intent=None,
+ )
+ self.info("App launched")
+ done = self.wait_until_process_done("org.mozilla.wrench", timeout=timeout)
+ if not done:
+ self._errored = True
+ self.error("Wrench still running after timeout")
+
+ def scrape_log(self):
+ """Wrench dumps stdout to a file rather than logcat because logcat
+ truncates long lines, and the base64 reftest images therefore get
+ truncated. In the past we split long lines and stitched them together
+ again, but this was unreliable. This scrapes the output file and dumps
+ it into our main log.
+ """
+ logfile = tempfile.NamedTemporaryFile()
+ self.device.pull(self.wrench_dir + "/stdout", logfile.name)
+ with open(logfile.name, "r", encoding="utf-8") as f:
+ self.info("=== scraped log output ===")
+ for line in f:
+ if "UNEXPECTED-FAIL" in line or "panicked" in line:
+ self._errored = True
+ self.error(line)
+ else:
+ self.info(line)
+ self.info("=== end scraped log output ===")
+
+ def setup_emulator(self):
+ avds_dir = self.query_abs_dirs()["abs_avds_dir"]
+ if not os.path.exists(avds_dir):
+ self.error("Unable to find android AVDs at %s" % avds_dir)
+ return
+
+ sdk_path = self.query_abs_dirs()["abs_sdk_dir"]
+ if not os.path.exists(sdk_path):
+ self.error("Unable to find android SDK at %s" % sdk_path)
+ return
+ self.start_emulator()
+
+ def do_test(self):
+ if self.is_emulator:
+ self.setup_emulator()
+
+ self.verify_device()
+ self.info("Logging device properties...")
+ self.info(self.shell_output("getprop"))
+ self.info("Installing APK...")
+ self.install_android_app(self.query_abs_dirs()["abs_apk_path"], replace=True)
+
+ if not self._errored:
+ self.info("Setting up SD card...")
+ self.setup_sdcard(TestMode.OPTIMIZED_SHADER_COMPILATION)
+ self.info("Running optimized shader compilation tests...")
+ self.run_tests(60)
+ self.info("Tests done; parsing log...")
+ self.scrape_log()
+
+ if not self._errored:
+ self.info("Setting up SD card...")
+ self.setup_sdcard(TestMode.UNOPTIMIZED_SHADER_COMPILATION)
+ self.info("Running unoptimized shader compilation tests...")
+ self.run_tests(60)
+ self.info("Tests done; parsing log...")
+ self.scrape_log()
+
+ if not self._errored:
+ self.info("Setting up SD card...")
+ self.setup_sdcard(TestMode.SHADER_TEST)
+ self.info("Running shader tests...")
+ self.run_tests(60 * 5)
+ self.info("Tests done; parsing log...")
+ self.scrape_log()
+
+ if not self._errored:
+ self.info("Setting up SD card...")
+ self.setup_sdcard(TestMode.REFTEST)
+ self.info("Running reftests...")
+ self.run_tests(60 * 30)
+ self.info("Tests done; parsing log...")
+ self.scrape_log()
+
+ self.logcat_stop()
+ self.info("All done!")
+
+ def check_errors(self):
+ if self._errored:
+ self.info("Errors encountered, terminating with error code...")
+ exit(EXIT_STATUS_DICT[TBPL_FAILURE])
+
+
+if __name__ == "__main__":
+ test = AndroidWrench()
+ test.do_test()
+ test.check_errors()
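
The loop in wait_until_process_done() above is a general wait-with-deadline polling pattern. A self-contained sketch of the same idea (names here are illustrative, not part of mozharness):

    import time
    from datetime import datetime, timedelta

    def wait_until_gone(still_running, timeout_s, poll_s=5):
        # Poll until still_running() returns False or the deadline passes.
        deadline = datetime.now() + timedelta(seconds=timeout_s)
        while still_running():
            if datetime.now() > deadline:
                return False  # caller decides how to force-stop
            time.sleep(poll_s)
        return True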
diff --git a/testing/mozharness/scripts/awsy_script.py b/testing/mozharness/scripts/awsy_script.py
new file mode 100644
index 0000000000..a349c3929b
--- /dev/null
+++ b/testing/mozharness/scripts/awsy_script.py
@@ -0,0 +1,322 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""
+run awsy tests in a virtualenv
+"""
+
+import copy
+import json
+import os
+import re
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+import mozharness
+import mozinfo
+from mozharness.base.log import ERROR, INFO
+from mozharness.base.script import PreScriptAction
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.tooltool import TooltoolMixin
+
+PY2 = sys.version_info.major == 2
+scripts_path = os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__)))
+external_tools_path = os.path.join(scripts_path, "external_tools")
+
+
+class AWSY(TestingMixin, MercurialScript, TooltoolMixin, CodeCoverageMixin):
+ config_options = (
+ [
+ [
+ ["--disable-e10s"],
+ {
+ "action": "store_false",
+ "dest": "e10s",
+ "default": True,
+ "help": "Run tests without multiple processes (e10s). (Desktop builds only)",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ [
+ ["--base"],
+ {
+ "action": "store_true",
+ "dest": "test_about_blank",
+ "default": False,
+ "help": "Runs the about:blank base case memory test.",
+ },
+ ],
+ [
+ ["--dmd"],
+ {
+ "action": "store_true",
+ "dest": "dmd",
+ "default": False,
+ "help": "Runs tests with DMD enabled.",
+ },
+ ],
+ [
+ ["--tp6"],
+ {
+ "action": "store_true",
+ "dest": "tp6",
+ "default": False,
+ "help": "Runs tests with the tp6 pageset.",
+ },
+ ],
+ ]
+ + testing_config_options
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
+ error_list = [
+ {"regex": re.compile(r"""(TEST-UNEXPECTED|PROCESS-CRASH)"""), "level": ERROR},
+ ]
+
+ def __init__(self, **kwargs):
+ kwargs.setdefault("config_options", self.config_options)
+ kwargs.setdefault(
+ "all_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault(
+ "default_actions",
+ [
+ "clobber",
+ "download-and-extract",
+ "populate-webroot",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ )
+ kwargs.setdefault("config", {})
+ super(AWSY, self).__init__(**kwargs)
+ self.installer_url = self.config.get("installer_url")
+ self.tests = None
+
+ self.testdir = self.query_abs_dirs()["abs_test_install_dir"]
+ self.awsy_path = os.path.join(self.testdir, "awsy")
+ self.awsy_libdir = os.path.join(self.awsy_path, "awsy")
+ self.webroot_dir = os.path.join(self.testdir, "html")
+ self.results_dir = os.path.join(self.testdir, "results")
+ self.binary_path = self.config.get("binary_path")
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(AWSY, self).query_abs_dirs()
+
+ dirs = {}
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ abs_dirs.update(dirs)
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def download_and_extract(self, extract_dirs=None, suite_categories=None):
+ ret = super(AWSY, self).download_and_extract(
+ suite_categories=["common", "awsy"]
+ )
+ return ret
+
+ @PreScriptAction("create-virtualenv")
+ def _pre_create_virtualenv(self, action):
+ requirements_files = [
+ os.path.join(self.testdir, "config", "marionette_requirements.txt")
+ ]
+
+ for requirements_file in requirements_files:
+ self.register_virtualenv_module(
+ requirements=[requirements_file], two_pass=True
+ )
+
+ self.register_virtualenv_module("awsy", self.awsy_path)
+
+ def populate_webroot(self):
+ """Populate the production test machines' webroots"""
+ self.info("Downloading pageset with tooltool...")
+ manifest_file = os.path.join(self.awsy_path, "tp5n-pageset.manifest")
+ page_load_test_dir = os.path.join(self.webroot_dir, "page_load_test")
+ if not os.path.isdir(page_load_test_dir):
+ self.mkdir_p(page_load_test_dir)
+ self.tooltool_fetch(
+ manifest_file,
+ output_dir=page_load_test_dir,
+ cache=self.config.get("tooltool_cache"),
+ )
+ archive = os.path.join(page_load_test_dir, "tp5n.zip")
+ unzip = self.query_exe("unzip")
+ unzip_cmd = [unzip, "-q", "-o", archive, "-d", page_load_test_dir]
+ self.run_command(unzip_cmd, halt_on_failure=False)
+ self.run_command("ls %s" % page_load_test_dir)
+
+ def run_tests(self, args=None, **kw):
+ """
+        Run the AWSY memory test suite under marionette.
+ """
+ dirs = self.abs_dirs
+ env = {}
+ error_summary_file = os.path.join(
+ dirs["abs_blob_upload_dir"], "marionette_errorsummary.log"
+ )
+
+ runtime_testvars = {
+ "webRootDir": self.webroot_dir,
+ "resultsDir": self.results_dir,
+ "bin": self.binary_path,
+ }
+
+ # Check if this is a DMD build and if so enable it.
+ dmd_enabled = False
+ dmd_py_lib_dir = os.path.dirname(self.binary_path)
+ if mozinfo.os == "mac":
+            # On Mac the binary is in MacOS and dmd.py is in Resources, i.e.:
+ # Name.app/Contents/MacOS/libdmd.dylib
+ # Name.app/Contents/Resources/dmd.py
+ dmd_py_lib_dir = os.path.join(dmd_py_lib_dir, "../Resources/")
+
+ dmd_path = os.path.join(dmd_py_lib_dir, "dmd.py")
+ if self.config["dmd"] and os.path.isfile(dmd_path):
+ dmd_enabled = True
+ runtime_testvars["dmd"] = True
+
+ # Allow the child process to import dmd.py
+ python_path = os.environ.get("PYTHONPATH")
+
+ if python_path:
+ os.environ["PYTHONPATH"] = "%s%s%s" % (
+ python_path,
+ os.pathsep,
+ dmd_py_lib_dir,
+ )
+ else:
+ os.environ["PYTHONPATH"] = dmd_py_lib_dir
+
+ env["DMD"] = "--mode=dark-matter --stacks=full"
+
+ runtime_testvars["tp6"] = self.config["tp6"]
+ if self.config["tp6"]:
+            # mitmproxy needs the paths to mozharness and tooltool when installing the cert
+ env["SCRIPTSPATH"] = scripts_path
+ env["EXTERNALTOOLSPATH"] = external_tools_path
+
+        runtime_testvars_path = os.path.join(self.awsy_path, "runtime-testvars.json")
+        with open(runtime_testvars_path, "wb" if PY2 else "w") as runtime_testvars_file:
+            runtime_testvars_file.write(json.dumps(runtime_testvars, indent=2))
+
+ cmd = ["marionette"]
+
+        if self.config["test_about_blank"]:
+            test_vars_file = "base-testvars.json"
+        elif self.config["tp6"]:
+            test_vars_file = "tp6-testvars.json"
+        else:
+            test_vars_file = "testvars.json"
+
+ cmd.append(
+ "--testvars=%s" % os.path.join(self.awsy_path, "conf", test_vars_file)
+ )
+ cmd.append("--testvars=%s" % runtime_testvars_path)
+ cmd.append("--log-raw=-")
+ cmd.append("--log-errorsummary=%s" % error_summary_file)
+ cmd.append("--binary=%s" % self.binary_path)
+ cmd.append("--profile=%s" % (os.path.join(dirs["abs_work_dir"], "profile")))
+ if not self.config["e10s"]:
+ cmd.append("--disable-e10s")
+ cmd.extend(["--setpref={}".format(p) for p in self.config["extra_prefs"]])
+ cmd.append(
+ "--gecko-log=%s" % os.path.join(dirs["abs_blob_upload_dir"], "gecko.log")
+ )
+ # TestingMixin._download_and_extract_symbols() should set
+ # self.symbols_path
+ cmd.append("--symbols-path=%s" % self.symbols_path)
+
+ if self.config["test_about_blank"]:
+ test_file = os.path.join(self.awsy_libdir, "test_base_memory_usage.py")
+ prefs_file = "base-prefs.json"
+ else:
+ test_file = os.path.join(self.awsy_libdir, "test_memory_usage.py")
+ if self.config["tp6"]:
+ prefs_file = "tp6-prefs.json"
+ else:
+ prefs_file = "prefs.json"
+
+ cmd.append(
+ "--preferences=%s" % os.path.join(self.awsy_path, "conf", prefs_file)
+ )
+ if dmd_enabled:
+ cmd.append("--setpref=security.sandbox.content.level=0")
+ cmd.append(test_file)
+
+ env["STYLO_THREADS"] = "4"
+
+ env["MOZ_UPLOAD_DIR"] = dirs["abs_blob_upload_dir"]
+ if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
+ self.mkdir_p(env["MOZ_UPLOAD_DIR"])
+ if self.query_minidump_stackwalk():
+ env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
+ env["MINIDUMP_SAVE_PATH"] = dirs["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "1"
+ env = self.query_env(partial_env=env)
+ parser = StructuredOutputParser(
+ config=self.config,
+ log_obj=self.log_obj,
+ error_list=self.error_list,
+ strict=False,
+ )
+ return_code = self.run_command(
+ command=cmd,
+ cwd=self.awsy_path,
+ output_timeout=self.config.get("cmd_timeout"),
+ env=env,
+ output_parser=parser,
+ )
+
+ level = INFO
+ tbpl_status, log_level, summary = parser.evaluate_parser(
+ return_code=return_code
+ )
+
+ self.log(
+ "AWSY exited with return code %s: %s" % (return_code, tbpl_status),
+ level=level,
+ )
+ self.record_status(tbpl_status)
+
+
+if __name__ == "__main__":
+ awsy_test = AWSY()
+ awsy_test.run_and_exit()
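
For orientation, the runtime-testvars.json file written by run_tests() above is a small JSON document passed to marionette alongside the static testvars file. With made-up paths, its contents would look roughly like this sketch:

    # Illustrative (made-up paths): the shape of the runtime-testvars.json
    # document generated above.
    runtime_testvars = {
        "webRootDir": "/builds/worker/workspace/build/tests/html",
        "resultsDir": "/builds/worker/workspace/build/tests/results",
        "bin": "/builds/worker/workspace/build/application/firefox/firefox",
        "tp6": False,
    }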
diff --git a/testing/mozharness/scripts/configtest.py b/testing/mozharness/scripts/configtest.py
new file mode 100755
index 0000000000..f846cba0d4
--- /dev/null
+++ b/testing/mozharness/scripts/configtest.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""configtest.py
+
+Verify the .json and .py files in the configs/ directory are well-formed.
+Further tests to verify validity would be desirable.
+
+This is also a good example script to look at to understand mozharness.
+"""
+
+import os
+import pprint
+import sys
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.script import BaseScript
+
+
+# ConfigTest {{{1
+class ConfigTest(BaseScript):
+ config_options = [
+ [
+ [
+ "--test-file",
+ ],
+ {
+ "action": "extend",
+ "dest": "test_files",
+ "help": "Specify which config files to test",
+ },
+ ]
+ ]
+
+ def __init__(self, require_config_file=False):
+ self.config_files = []
+ BaseScript.__init__(
+ self,
+ config_options=self.config_options,
+ all_actions=[
+ "list-config-files",
+ "test-json-configs",
+ "test-python-configs",
+ "summary",
+ ],
+ default_actions=[
+ "test-json-configs",
+ "test-python-configs",
+ "summary",
+ ],
+ require_config_file=require_config_file,
+ )
+
+ def query_config_files(self):
+ """This query method, much like others, caches its runtime
+ settings in self.VAR so we don't have to figure out config_files
+ multiple times.
+ """
+ if self.config_files:
+ return self.config_files
+ c = self.config
+ if "test_files" in c:
+ self.config_files = c["test_files"]
+ return self.config_files
+ self.debug(
+ "No --test-file(s) specified; defaulting to crawling the configs/ directory."
+ )
+ config_files = []
+ for root, dirs, files in os.walk(os.path.join(sys.path[0], "..", "configs")):
+ for name in files:
+ # Hardcode =P
+ if name.endswith(".json") or name.endswith(".py"):
+ if not name.startswith("test_malformed"):
+ config_files.append(os.path.join(root, name))
+ self.config_files = config_files
+ return self.config_files
+
+ def list_config_files(self):
+ """Non-default action that is mainly here to demonstrate how
+ non-default actions work in a mozharness script.
+ """
+ config_files = self.query_config_files()
+ for config_file in config_files:
+ self.info(config_file)
+
+ def test_json_configs(self):
+ """Currently only "is this well-formed json?" """
+ config_files = self.query_config_files()
+ filecount = [0, 0]
+ for config_file in config_files:
+ if config_file.endswith(".json"):
+ filecount[0] += 1
+ self.info("Testing %s." % config_file)
+ contents = self.read_from_file(config_file, verbose=False)
+ try:
+ json.loads(contents)
+ except ValueError:
+ self.add_summary("%s is invalid json." % config_file, level="error")
+ self.error(pprint.pformat(sys.exc_info()[1]))
+ else:
+ self.info("Good.")
+ filecount[1] += 1
+ if filecount[0]:
+ self.add_summary(
+ "%d of %d json config files were good." % (filecount[1], filecount[0])
+ )
+ else:
+ self.add_summary("No json config files to test.")
+
+ def test_python_configs(self):
+ """Currently only "will this give me a config dictionary?" """
+ config_files = self.query_config_files()
+ filecount = [0, 0]
+ for config_file in config_files:
+ if config_file.endswith(".py"):
+ filecount[0] += 1
+ self.info("Testing %s." % config_file)
+ global_dict = {}
+ local_dict = {}
+ try:
+ with open(config_file, "r") as f:
+ exec(f.read(), global_dict, local_dict)
+ except Exception:
+ self.add_summary(
+ "%s is invalid python." % config_file, level="error"
+ )
+ self.error(pprint.pformat(sys.exc_info()[1]))
+ else:
+ if "config" in local_dict and isinstance(
+ local_dict["config"], dict
+ ):
+ self.info("Good.")
+ filecount[1] += 1
+ else:
+ self.add_summary(
+ "%s is valid python, "
+ "but doesn't create a config dictionary." % config_file,
+ level="error",
+ )
+ if filecount[0]:
+ self.add_summary(
+ "%d of %d python config files were good." % (filecount[1], filecount[0])
+ )
+ else:
+ self.add_summary("No python config files to test.")
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ config_test = ConfigTest()
+ config_test.run_and_exit()
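
The core of test_python_configs() reduces to "exec the file and check that it defines a config dict". A stripped-down sketch of that check:

    def is_valid_python_config(path):
        # Returns True if the file execs cleanly and defines a dict named "config".
        local_dict = {}
        try:
            with open(path, "r") as f:
                exec(f.read(), {}, local_dict)
        except Exception:
            return False
        return isinstance(local_dict.get("config"), dict)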
diff --git a/testing/mozharness/scripts/desktop_l10n.py b/testing/mozharness/scripts/desktop_l10n.py
new file mode 100755
index 0000000000..6e401caa8b
--- /dev/null
+++ b/testing/mozharness/scripts/desktop_l10n.py
@@ -0,0 +1,481 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""desktop_l10n.py
+
+This script manages Desktop repacks for nightly builds.
+"""
+import glob
+import os
+import shlex
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0])) # noqa
+
+from mozharness.base.errors import MakefileErrorList
+from mozharness.base.script import BaseScript
+from mozharness.base.vcs.vcsbase import VCSMixin
+from mozharness.mozilla.automation import AutomationMixin
+from mozharness.mozilla.building.buildbase import (
+ MakeUploadOutputParser,
+ get_mozconfig_path,
+)
+from mozharness.mozilla.l10n.locales import LocalesMixin
+
+try:
+ import simplejson as json
+
+ assert json
+except ImportError:
+ import json
+
+
+# needed by _map
+SUCCESS = 0
+FAILURE = 1
+
+SUCCESS_STR = "Success"
+FAILURE_STR = "Failed"
+
+
+# DesktopSingleLocale {{{1
+class DesktopSingleLocale(LocalesMixin, AutomationMixin, VCSMixin, BaseScript):
+ """Manages desktop repacks"""
+
+ config_options = [
+ [
+ [
+ "--locale",
+ ],
+ {
+ "action": "extend",
+ "dest": "locales",
+ "type": "string",
+ "help": "Specify the locale(s) to sign and update. Optionally pass"
+ " revision separated by colon, en-GB:default.",
+ },
+ ],
+ [
+ [
+ "--tag-override",
+ ],
+ {
+ "action": "store",
+ "dest": "tag_override",
+ "type": "string",
+ "help": "Override the tags set for all repos",
+ },
+ ],
+ [
+ [
+ "--en-us-installer-url",
+ ],
+ {
+ "action": "store",
+ "dest": "en_us_installer_url",
+ "type": "string",
+ "help": "Specify the url of the en-us binary",
+ },
+ ],
+ ]
+
+ def __init__(self, require_config_file=True):
+ # fxbuild style:
+ buildscript_kwargs = {
+ "all_actions": [
+ "clone-locales",
+ "list-locales",
+ "setup",
+ "repack",
+ "summary",
+ ],
+ "config": {
+ "ignore_locales": ["en-US"],
+ "locales_dir": "browser/locales",
+ "log_name": "single_locale",
+ "hg_l10n_base": "https://hg.mozilla.org/l10n-central",
+ },
+ }
+
+ LocalesMixin.__init__(self)
+ BaseScript.__init__(
+ self,
+ config_options=self.config_options,
+ require_config_file=require_config_file,
+ **buildscript_kwargs
+ )
+
+ self.bootstrap_env = None
+ self.upload_env = None
+ self.upload_urls = {}
+ self.pushdate = None
+ # upload_files is a dictionary of files to upload, keyed by locale.
+ self.upload_files = {}
+
+ # Helper methods {{{2
+ def query_bootstrap_env(self):
+ """returns the env for repacks"""
+ if self.bootstrap_env:
+ return self.bootstrap_env
+ config = self.config
+ abs_dirs = self.query_abs_dirs()
+
+ bootstrap_env = self.query_env(
+ partial_env=config.get("bootstrap_env"), replace_dict=abs_dirs
+ )
+
+ bootstrap_env["L10NBASEDIR"] = abs_dirs["abs_l10n_dir"]
+ if self.query_is_nightly():
+ # we might set update_channel explicitly
+ if config.get("update_channel"):
+ update_channel = config["update_channel"]
+ else: # Let's just give the generic channel based on branch.
+ update_channel = "nightly-%s" % (config["branch"],)
+ if not isinstance(update_channel, bytes):
+ update_channel = update_channel.encode("utf-8")
+ bootstrap_env["MOZ_UPDATE_CHANNEL"] = update_channel
+ self.info(
+ "Update channel set to: {}".format(bootstrap_env["MOZ_UPDATE_CHANNEL"])
+ )
+ self.bootstrap_env = bootstrap_env
+ return self.bootstrap_env
+
+ def _query_upload_env(self):
+ """returns the environment used for the upload step"""
+ if self.upload_env:
+ return self.upload_env
+ config = self.config
+
+ upload_env = self.query_env(partial_env=config.get("upload_env"))
+        # check if there are any extra options from the platform configuration
+ # and append them to the env
+
+ if "upload_env_extra" in config:
+ for extra in config["upload_env_extra"]:
+ upload_env[extra] = config["upload_env_extra"][extra]
+
+ self.upload_env = upload_env
+ return self.upload_env
+
+ def query_l10n_env(self):
+ l10n_env = self._query_upload_env().copy()
+ l10n_env.update(self.query_bootstrap_env())
+ return l10n_env
+
+ def _query_make_variable(self, variable, make_args=None):
+ """returns the value of make echo-variable-<variable>
+        it accepts extra make arguments (make_args)
+ """
+ dirs = self.query_abs_dirs()
+ make_args = make_args or []
+ target = ["echo-variable-%s" % variable] + make_args
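+        # e.g. target = ["echo-variable-UPLOAD_FILES", "AB_CD=de"] runs
+        # `make echo-variable-UPLOAD_FILES AB_CD=de` and captures the value
+        # of $(UPLOAD_FILES) for that locale ("de" here is illustrative).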
+ cwd = dirs["abs_locales_dir"]
+ raw_output = self._get_output_from_make(
+ target, cwd=cwd, env=self.query_bootstrap_env()
+ )
+ # we want to log all the messages from make
+ output = []
+ for line in raw_output.split("\n"):
+ output.append(line.strip())
+ output = " ".join(output).strip()
+ self.info("echo-variable-%s: %s" % (variable, output))
+ return output
+
+ def _map(self, func, items):
+ """runs func for any item in items, calls the add_failure() for each
+ error. It assumes that function returns 0 when successful.
+ returns a two element tuple with (success_count, total_count)"""
+ success_count = 0
+ total_count = len(items)
+ name = func.__name__
+ for item in items:
+ result = func(item)
+ if result == SUCCESS:
+ # success!
+ success_count += 1
+ else:
+ # func failed...
+ message = "failure: %s(%s)" % (name, item)
+ self.add_failure(item, message)
+ return (success_count, total_count)
+
+ # Actions {{{2
+ def clone_locales(self):
+ self.pull_locale_source()
+
+ def setup(self):
+ """setup step"""
+ self._run_tooltool()
+ self._copy_mozconfig()
+ self._mach_configure()
+ self._run_make_in_config_dir()
+ self.make_wget_en_US()
+ self.make_unpack_en_US()
+
+ def _run_make_in_config_dir(self):
+ """this step creates nsinstall, needed my make_wget_en_US()"""
+ dirs = self.query_abs_dirs()
+ config_dir = os.path.join(dirs["abs_obj_dir"], "config")
+ env = self.query_bootstrap_env()
+ return self._make(target=["export"], cwd=config_dir, env=env)
+
+ def _copy_mozconfig(self):
+ """copies the mozconfig file into abs_src_dir/.mozconfig
+ and logs the content
+ """
+ config = self.config
+ dirs = self.query_abs_dirs()
+ src = get_mozconfig_path(self, config, dirs)
+ dst = os.path.join(dirs["abs_src_dir"], ".mozconfig")
+ self.copyfile(src, dst)
+ self.read_from_file(dst, verbose=True)
+
+ def _mach(self, target, env, halt_on_failure=True, output_parser=None):
+ dirs = self.query_abs_dirs()
+ mach = self._get_mach_executable()
+ return self.run_command(
+            mach + target,
+            halt_on_failure=halt_on_failure,
+            env=env,
+            cwd=dirs["abs_src_dir"],
+            output_parser=output_parser,
+        )
+
+ def _mach_configure(self):
+ """calls mach configure"""
+ env = self.query_bootstrap_env()
+ target = ["configure"]
+ return self._mach(target=target, env=env)
+
+ def _get_mach_executable(self):
+ return [sys.executable, "mach"]
+
+ def _make(
+ self,
+ target,
+ cwd,
+ env,
+ error_list=MakefileErrorList,
+ halt_on_failure=True,
+ output_parser=None,
+ ):
+ """Runs make. Returns the exit code"""
+ make = ["make"]
+ if target:
+ make = make + target
+ return self.run_command(
+ make,
+ cwd=cwd,
+ env=env,
+ error_list=error_list,
+ halt_on_failure=halt_on_failure,
+ output_parser=output_parser,
+ )
+
+ def _get_output_from_make(
+ self, target, cwd, env, halt_on_failure=True, ignore_errors=False
+ ):
+ """runs make and returns the output of the command"""
+ return self.get_output_from_command(
+ ["make"] + target,
+ cwd=cwd,
+ env=env,
+ silent=True,
+ halt_on_failure=halt_on_failure,
+ ignore_errors=ignore_errors,
+ )
+
+ def make_unpack_en_US(self):
+ """wrapper for make unpack"""
+ config = self.config
+ dirs = self.query_abs_dirs()
+ env = self.query_bootstrap_env()
+ cwd = os.path.join(dirs["abs_obj_dir"], config["locales_dir"])
+ return self._make(target=["unpack"], cwd=cwd, env=env)
+
+ def make_wget_en_US(self):
+ """wrapper for make wget-en-US"""
+ env = self.query_bootstrap_env()
+ dirs = self.query_abs_dirs()
+ cwd = dirs["abs_locales_dir"]
+ return self._make(target=["wget-en-US"], cwd=cwd, env=env)
+
+ def make_upload(self, locale):
+ """wrapper for make upload command"""
+ env = self.query_l10n_env()
+ dirs = self.query_abs_dirs()
+ target = ["upload", "AB_CD=%s" % (locale)]
+ cwd = dirs["abs_locales_dir"]
+ parser = MakeUploadOutputParser(config=self.config, log_obj=self.log_obj)
+ retval = self._make(
+ target=target, cwd=cwd, env=env, halt_on_failure=False, output_parser=parser
+ )
+ if retval == SUCCESS:
+ self.info("Upload successful (%s)" % locale)
+ ret = SUCCESS
+ else:
+ self.error("failed to upload %s" % locale)
+ ret = FAILURE
+
+ if ret == FAILURE:
+ # If we failed above, we shouldn't even attempt a SIMPLE_NAME move
+ # even if we are configured to do so
+ return ret
+
+ # XXX Move the files to a SIMPLE_NAME format until we can enable
+ # Simple names in the build system
+ if self.config.get("simple_name_move"):
+ # Assume an UPLOAD PATH
+ upload_target = self.config["upload_env"]["UPLOAD_PATH"]
+ target_path = os.path.join(upload_target, locale)
+ self.mkdir_p(target_path)
+ glob_name = "*.%s.*" % locale
+ matches = (
+ glob.glob(os.path.join(upload_target, glob_name))
+ + glob.glob(os.path.join(upload_target, "update", glob_name))
+ + glob.glob(os.path.join(upload_target, "*", "xpi", glob_name))
+ + glob.glob(os.path.join(upload_target, "install", "sea", glob_name))
+ + glob.glob(os.path.join(upload_target, "setup.exe"))
+ + glob.glob(os.path.join(upload_target, "setup-stub.exe"))
+ )
+ targets_exts = [
+ "tar.bz2",
+ "dmg",
+ "langpack.xpi",
+ "checksums",
+ "zip",
+ "installer.exe",
+ "installer-stub.exe",
+ ]
+ targets = [(".%s" % (ext,), "target.%s" % (ext,)) for ext in targets_exts]
+ targets.extend([(f, f) for f in ("setup.exe", "setup-stub.exe")])
+ for f in matches:
+ possible_targets = [
+ (tail, target_file)
+ for (tail, target_file) in targets
+ if f.endswith(tail)
+ ]
+ if len(possible_targets) == 1:
+ _, target_file = possible_targets[0]
+ # Remove from list of available options for this locale
+ targets.remove(possible_targets[0])
+ else:
+ # wasn't valid (or already matched)
+ raise RuntimeError(
+ "Unexpected matching file name encountered: %s" % f
+ )
+ self.move(os.path.join(f), os.path.join(target_path, target_file))
+ self.log("Converted uploads for %s to simple names" % locale)
+ return ret
+
+ def set_upload_files(self, locale):
+ # The tree doesn't have a good way of exporting the list of files
+ # created during locale generation, but we can grab them by echoing the
+ # UPLOAD_FILES variable for each locale.
+ env = self.query_l10n_env()
+ target = [
+ "echo-variable-UPLOAD_FILES",
+ "echo-variable-CHECKSUM_FILES",
+ "AB_CD=%s" % locale,
+ ]
+ dirs = self.query_abs_dirs()
+ cwd = dirs["abs_locales_dir"]
+ # Bug 1242771 - echo-variable-UPLOAD_FILES via mozharness fails when stderr is found
+ # we should ignore stderr as unfortunately it's expected when parsing for values
+ output = self._get_output_from_make(
+ target=target, cwd=cwd, env=env, ignore_errors=True
+ )
+ self.info('UPLOAD_FILES is "%s"' % output)
+ files = shlex.split(output)
+ if not files:
+ self.error("failed to get upload file list for locale %s" % locale)
+ return FAILURE
+
+ self.upload_files[locale] = [
+ os.path.abspath(os.path.join(cwd, f)) for f in files
+ ]
+ return SUCCESS
+
+ def make_installers(self, locale):
+ """wrapper for make installers-(locale)"""
+ env = self.query_l10n_env()
+ env["PYTHONIOENCODING"] = "utf-8"
+ self._copy_mozconfig()
+ dirs = self.query_abs_dirs()
+ cwd = os.path.join(dirs["abs_locales_dir"])
+ target = [
+ "installers-%s" % locale,
+ ]
+ return self._make(target=target, cwd=cwd, env=env, halt_on_failure=False)
+
+ def repack_locale(self, locale):
+ """wraps the logic for make installers and generating
+ complete updates."""
+
+ # run make installers
+ if self.make_installers(locale) != SUCCESS:
+ self.error("make installers-%s failed" % (locale))
+ return FAILURE
+
+ # now try to upload the artifacts
+ if self.make_upload(locale):
+ self.error("make upload for locale %s failed!" % (locale))
+ return FAILURE
+
+ # set_upload_files() should be called after make upload, to make sure
+ # we have all files in place (checksums, etc)
+ if self.set_upload_files(locale):
+ self.error("failed to get list of files to upload for locale %s" % locale)
+ return FAILURE
+
+ return SUCCESS
+
+ def repack(self):
+ """creates the repacks and udpates"""
+ self._map(self.repack_locale, self.query_locales())
+
+ def _run_tooltool(self):
+ env = self.query_bootstrap_env()
+ config = self.config
+ dirs = self.query_abs_dirs()
+ manifest_src = os.environ.get("TOOLTOOL_MANIFEST")
+ if not manifest_src:
+ manifest_src = config.get("tooltool_manifest_src")
+ if not manifest_src:
+ return
+ python = sys.executable
+
+ cmd = [
+ python,
+ "-u",
+ os.path.join(dirs["abs_src_dir"], "mach"),
+ "artifact",
+ "toolchain",
+ "-v",
+ "--retry",
+ "4",
+ "--artifact-manifest",
+ os.path.join(dirs["abs_src_dir"], "toolchains.json"),
+ ]
+ if manifest_src:
+ cmd.extend(
+ [
+ "--tooltool-manifest",
+ os.path.join(dirs["abs_src_dir"], manifest_src),
+ ]
+ )
+ cache = config["bootstrap_env"].get("TOOLTOOL_CACHE")
+ if cache:
+ cmd.extend(["--cache-dir", cache])
+ self.info(str(cmd))
+ self.run_command(cmd, cwd=dirs["abs_src_dir"], halt_on_failure=True, env=env)
+
+
+# main {{{
+if __name__ == "__main__":
+ single_locale = DesktopSingleLocale()
+ single_locale.run_and_exit()
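
The simple-name move in make_upload() above pairs each matched upload with exactly one (suffix, target name) entry, consuming the entry so a name is used at most once per locale. A reduced sketch with a made-up file name:

    targets_exts = ["tar.bz2", "dmg", "langpack.xpi"]
    targets = [(".%s" % ext, "target.%s" % ext) for ext in targets_exts]

    def simple_name(filename, targets):
        matches = [(tail, tgt) for (tail, tgt) in targets if filename.endswith(tail)]
        if len(matches) != 1:
            raise RuntimeError("Unexpected matching file name: %s" % filename)
        targets.remove(matches[0])  # each target name may only be used once
        return matches[0][1]

    print(simple_name("firefox-110.0.1.de.tar.bz2", targets))  # -> "target.tar.bz2"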
diff --git a/testing/mozharness/scripts/desktop_partner_repacks.py b/testing/mozharness/scripts/desktop_partner_repacks.py
new file mode 100755
index 0000000000..4f20663c73
--- /dev/null
+++ b/testing/mozharness/scripts/desktop_partner_repacks.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""desktop_partner_repacks.py
+
+This script manages Desktop partner repacks for beta/release builds.
+"""
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.log import FATAL
+from mozharness.base.python import VirtualenvMixin
+from mozharness.base.script import BaseScript
+from mozharness.mozilla.automation import AutomationMixin
+from mozharness.mozilla.secrets import SecretsMixin
+
+
+# DesktopPartnerRepacks {{{1
+class DesktopPartnerRepacks(AutomationMixin, BaseScript, VirtualenvMixin, SecretsMixin):
+ """Manages desktop partner repacks"""
+
+ actions = [
+ "get-secrets",
+ "setup",
+ "repack",
+ "summary",
+ ]
+ config_options = [
+ [
+ ["--version", "-v"],
+ {
+ "dest": "version",
+ "help": "Version of Firefox to repack",
+ },
+ ],
+ [
+ ["--build-number", "-n"],
+ {
+ "dest": "build_number",
+ "help": "Build number of Firefox to repack",
+ },
+ ],
+ [
+ ["--platform"],
+ {
+ "dest": "platform",
+ "help": "Platform to repack (e.g. linux64, macosx64, ...)",
+ },
+ ],
+ [
+ ["--partner", "-p"],
+ {
+ "dest": "partner",
+ "help": "Limit repackaging to partners matching this string",
+ },
+ ],
+ [
+ ["--taskid", "-t"],
+ {
+ "dest": "taskIds",
+ "action": "extend",
+ "help": "taskId(s) of upstream tasks for vanilla Firefox artifacts",
+ },
+ ],
+ [
+ ["--limit-locale", "-l"],
+ {
+ "dest": "limitLocales",
+ "action": "append",
+ },
+ ],
+ ]
+
+ def __init__(self):
+ # fxbuild style:
+ buildscript_kwargs = {
+ "all_actions": DesktopPartnerRepacks.actions,
+ "default_actions": DesktopPartnerRepacks.actions,
+ "config": {
+ "log_name": "partner-repacks",
+ "hashType": "sha512",
+ "workdir": "partner-repacks",
+ },
+ }
+
+ BaseScript.__init__(
+ self, config_options=self.config_options, **buildscript_kwargs
+ )
+
+ def _pre_config_lock(self, rw_config):
+ if os.getenv("REPACK_MANIFESTS_URL"):
+ self.info(
+ "Overriding repack_manifests_url to %s"
+ % os.getenv("REPACK_MANIFESTS_URL")
+ )
+ self.config["repack_manifests_url"] = os.getenv("REPACK_MANIFESTS_URL")
+ if os.getenv("UPSTREAM_TASKIDS"):
+ self.info("Overriding taskIds with %s" % os.getenv("UPSTREAM_TASKIDS"))
+ self.config["taskIds"] = os.getenv("UPSTREAM_TASKIDS").split()
+
+ if "version" not in self.config:
+ self.fatal("Version (-v) not supplied.")
+ if "build_number" not in self.config:
+ self.fatal("Build number (-n) not supplied.")
+ if "repo_file" not in self.config:
+ self.fatal("repo_file not supplied.")
+ if "repack_manifests_url" not in self.config:
+ self.fatal(
+ "repack_manifests_url not supplied in config or via REPACK_MANIFESTS_URL"
+ )
+ if "taskIds" not in self.config:
+ self.fatal("Need upstream taskIds from command line or in UPSTREAM_TASKIDS")
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(DesktopPartnerRepacks, self).query_abs_dirs()
+ dirs = {}
+ dirs["abs_repo_dir"] = os.path.join(abs_dirs["abs_work_dir"], ".repo")
+ dirs["abs_partners_dir"] = os.path.join(abs_dirs["abs_work_dir"], "partners")
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ # Actions {{{
+ def _repo_cleanup(self):
+ self.rmtree(self.query_abs_dirs()["abs_repo_dir"])
+ self.rmtree(self.query_abs_dirs()["abs_partners_dir"])
+
+ def _repo_init(self, repo):
+ partial_env = {
+ "GIT_SSH_COMMAND": "ssh -oIdentityFile={}".format(self.config["ssh_key"])
+ }
+ status = self.run_command(
+ [
+ repo,
+ "init",
+ "--no-repo-verify",
+ "-u",
+ self.config["repack_manifests_url"],
+ ],
+ cwd=self.query_abs_dirs()["abs_work_dir"],
+ partial_env=partial_env,
+ )
+ if status:
+ return status
+ return self.run_command(
+ [repo, "sync", "--current-branch", "--no-tags"],
+ cwd=self.query_abs_dirs()["abs_work_dir"],
+ partial_env=partial_env,
+ )
+
+ def setup(self):
+ """setup step"""
+ repo = self.download_file(
+ self.config["repo_file"],
+ file_name="repo",
+ parent_dir=self.query_abs_dirs()["abs_work_dir"],
+ error_level=FATAL,
+ )
+ if not os.path.exists(repo):
+ self.fatal("Unable to download repo tool.")
+ self.chmod(repo, 0o755)
+ self.retry(
+ self._repo_init,
+ args=(repo,),
+ error_level=FATAL,
+            cleanup=self._repo_cleanup,  # pass the callable, not its result
+ good_statuses=[0],
+ sleeptime=5,
+ )
+
+ def repack(self):
+ """creates the repacks"""
+ repack_cmd = [
+ "./mach",
+ "python",
+ "python/mozrelease/mozrelease/partner_repack.py",
+ "-v",
+ self.config["version"],
+ "-n",
+ str(self.config["build_number"]),
+ ]
+ if self.config.get("platform"):
+ repack_cmd.extend(["--platform", self.config["platform"]])
+ if self.config.get("partner"):
+ repack_cmd.extend(["--partner", self.config["partner"]])
+ if self.config.get("taskIds"):
+ for taskId in self.config["taskIds"]:
+ repack_cmd.extend(["--taskid", taskId])
+ if self.config.get("limitLocales"):
+ for locale in self.config["limitLocales"]:
+ repack_cmd.extend(["--limit-locale", locale])
+
+ self.run_command(repack_cmd, cwd=os.environ["GECKO_PATH"], halt_on_failure=True)
+
+
+# main {{{
+if __name__ == "__main__":
+ partner_repacks = DesktopPartnerRepacks()
+ partner_repacks.run_and_exit()
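
setup() above relies on BaseScript.retry() to re-run _repo_init with a cleanup between attempts. The general retry-with-cleanup shape, as a standalone sketch (illustrative; not the actual mozharness signature):

    import time

    def retry(action, attempts=5, cleanup=None, sleeptime=5, good_statuses=(0,)):
        # Re-run action until it returns a good status, cleaning up between tries.
        for _ in range(attempts):
            status = action()
            if status in good_statuses:
                return status
            if cleanup:
                cleanup()
            time.sleep(sleeptime)
        raise RuntimeError("action failed after %d attempts" % attempts)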
diff --git a/testing/mozharness/scripts/desktop_unittest.py b/testing/mozharness/scripts/desktop_unittest.py
new file mode 100755
index 0000000000..5af7506c6b
--- /dev/null
+++ b/testing/mozharness/scripts/desktop_unittest.py
@@ -0,0 +1,1316 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""desktop_unittest.py
+
+author: Jordan Lund
+"""
+
+import copy
+import glob
+import imp
+import json
+import multiprocessing
+import os
+import re
+import shutil
+import sys
+from datetime import datetime, timedelta
+
+# load modules from parent dir
+here = os.path.abspath(os.path.dirname(__file__))
+sys.path.insert(1, os.path.dirname(here))
+
+from mozharness.base.errors import BaseErrorList
+from mozharness.base.log import INFO, WARNING
+from mozharness.base.script import PreScriptAction
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.automation import TBPL_EXCEPTION, TBPL_RETRY
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.errors import HarnessErrorList
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.testing.unittest import DesktopUnittestOutputParser
+
+SUITE_CATEGORIES = [
+ "gtest",
+ "cppunittest",
+ "jittest",
+ "mochitest",
+ "reftest",
+ "xpcshell",
+]
+SUITE_DEFAULT_E10S = ["mochitest", "reftest"]
+SUITE_NO_E10S = ["xpcshell"]
+SUITE_REPEATABLE = ["mochitest", "reftest"]
+
+
+# DesktopUnittest {{{1
+class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, CodeCoverageMixin):
+ config_options = (
+ [
+ [
+ [
+ "--mochitest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_mochitest_suites",
+ "type": "string",
+ "help": "Specify which mochi suite to run. "
+ "Suites are defined in the config file.\n"
+ "Examples: 'all', 'plain1', 'plain5', 'chrome', or 'a11y'",
+ },
+ ],
+ [
+ [
+ "--reftest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_reftest_suites",
+ "type": "string",
+ "help": "Specify which reftest suite to run. "
+ "Suites are defined in the config file.\n"
+ "Examples: 'all', 'crashplan', or 'jsreftest'",
+ },
+ ],
+ [
+ [
+ "--xpcshell-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_xpcshell_suites",
+ "type": "string",
+ "help": "Specify which xpcshell suite to run. "
+ "Suites are defined in the config file\n."
+ "Examples: 'xpcshell'",
+ },
+ ],
+ [
+ [
+ "--cppunittest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_cppunittest_suites",
+ "type": "string",
+ "help": "Specify which cpp unittest suite to run. "
+ "Suites are defined in the config file\n."
+ "Examples: 'cppunittest'",
+ },
+ ],
+ [
+ [
+ "--gtest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_gtest_suites",
+ "type": "string",
+ "help": "Specify which gtest suite to run. "
+ "Suites are defined in the config file\n."
+ "Examples: 'gtest'",
+ },
+ ],
+ [
+ [
+ "--jittest-suite",
+ ],
+ {
+ "action": "extend",
+ "dest": "specified_jittest_suites",
+ "type": "string",
+ "help": "Specify which jit-test suite to run. "
+ "Suites are defined in the config file\n."
+ "Examples: 'jittest'",
+ },
+ ],
+ [
+ [
+ "--run-all-suites",
+ ],
+ {
+ "action": "store_true",
+ "dest": "run_all_suites",
+ "default": False,
+ "help": "This will run all suites that are specified "
+ "in the config file. You do not need to specify "
+ "any other suites.\nBeware, this may take a while ;)",
+ },
+ ],
+ [
+ [
+ "--disable-e10s",
+ ],
+ {
+ "action": "store_false",
+ "dest": "e10s",
+ "default": True,
+ "help": "Run tests without multiple processes (e10s).",
+ },
+ ],
+ [
+ [
+ "--headless",
+ ],
+ {
+ "action": "store_true",
+ "dest": "headless",
+ "default": False,
+ "help": "Run tests in headless mode.",
+ },
+ ],
+ [
+ [
+ "--no-random",
+ ],
+ {
+ "action": "store_true",
+ "dest": "no_random",
+ "default": False,
+ "help": "Run tests with no random intermittents and bisect in case of real failure.", # NOQA: E501
+ },
+ ],
+ [
+ ["--total-chunks"],
+ {
+ "action": "store",
+ "dest": "total_chunks",
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--this-chunk"],
+ {
+ "action": "store",
+ "dest": "this_chunk",
+ "help": "Number of this chunk",
+ },
+ ],
+ [
+ ["--allow-software-gl-layers"],
+ {
+ "action": "store_true",
+ "dest": "allow_software_gl_layers",
+ "default": False,
+ "help": "Permits a software GL implementation (such as LLVMPipe) to use "
+ "the GL compositor.",
+ },
+ ],
+ [
+ ["--threads"],
+ {
+ "action": "store",
+ "dest": "threads",
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--gpu-required"],
+ {
+ "action": "store_true",
+ "dest": "gpu_required",
+ "default": False,
+ "help": "Run additional verification on modified tests using gpu instances.",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Defines an extra user preference.",
+ },
+ ],
+ [
+ [
+ "--repeat",
+ ],
+ {
+ "action": "store",
+ "type": "int",
+ "dest": "repeat",
+ "default": 0,
+ "help": "Repeat the tests the given number of times. Supported "
+ "by mochitest, reftest, crashtest, ignored otherwise.",
+ },
+ ],
+ [
+ ["--enable-xorigin-tests"],
+ {
+ "action": "store_true",
+ "dest": "enable_xorigin_tests",
+ "default": False,
+ "help": "Run tests in a cross origin iframe.",
+ },
+ ],
+ [
+ ["--enable-a11y-checks"],
+ {
+ "action": "store_true",
+ "default": False,
+ "dest": "a11y_checks",
+ "help": "Run tests with accessibility checks disabled.",
+ },
+ ],
+ [
+ ["--run-failures"],
+ {
+ "action": "store",
+ "default": "",
+ "type": "string",
+ "dest": "run_failures",
+ "help": "Run only failures matching keyword. "
+ "Examples: 'apple_silicon'",
+ },
+ ],
+ [
+ ["--crash-as-pass"],
+ {
+ "action": "store_true",
+ "default": False,
+ "dest": "crash_as_pass",
+ "help": "treat harness level crash as a pass",
+ },
+ ],
+ [
+ ["--timeout-as-pass"],
+ {
+ "action": "store_true",
+ "default": False,
+ "dest": "timeout_as_pass",
+ "help": "treat harness level timeout as a pass",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_true",
+ "default": False,
+ "dest": "disable_fission",
+ "help": "do not run tests with fission enabled.",
+ },
+ ],
+ [
+ ["--conditioned-profile"],
+ {
+ "action": "store_true",
+ "default": False,
+ "dest": "conditioned_profile",
+ "help": "run tests with a conditioned profile",
+ },
+ ],
+ [
+ ["--tag"],
+ {
+ "action": "append",
+ "default": [],
+ "dest": "test_tags",
+ "help": "Filter out tests that don't have the given tag. Can be used multiple "
+ "times in which case the test must contain at least one of the given tags.",
+ },
+ ],
+ ]
+ + copy.deepcopy(testing_config_options)
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
+ def __init__(self, require_config_file=True):
+ # abs_dirs defined already in BaseScript but is here to make pylint happy
+ self.abs_dirs = None
+ super(DesktopUnittest, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "start-pulseaudio",
+ "install",
+ "stage-files",
+ "run-tests",
+ "uninstall",
+ ],
+ require_config_file=require_config_file,
+ config={"require_test_zip": True},
+ )
+
+ c = self.config
+ self.global_test_options = []
+ self.installer_url = c.get("installer_url")
+ self.test_url = c.get("test_url")
+ self.test_packages_url = c.get("test_packages_url")
+ self.symbols_url = c.get("symbols_url")
+ # this is so mozinstall in install() doesn't bug out if we don't run
+ # the download_and_extract action
+ self.installer_path = c.get("installer_path")
+ self.binary_path = c.get("binary_path")
+ self.abs_app_dir = None
+ self.abs_res_dir = None
+
+ # Construct an identifier to be used to identify Perfherder data
+ # for resource monitoring recording. This attempts to uniquely
+ # identify this test invocation configuration.
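+        # The result is a (parts, options) pair, e.g. (illustrative):
+        #   ("mochitest.browser-chrome.3", ["e10s"])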
+ perfherder_parts = []
+ perfherder_options = []
+ suites = (
+ ("specified_mochitest_suites", "mochitest"),
+ ("specified_reftest_suites", "reftest"),
+ ("specified_xpcshell_suites", "xpcshell"),
+ ("specified_cppunittest_suites", "cppunit"),
+ ("specified_gtest_suites", "gtest"),
+ ("specified_jittest_suites", "jittest"),
+ )
+ for s, prefix in suites:
+ if s in c:
+ perfherder_parts.append(prefix)
+ perfherder_parts.extend(c[s])
+
+ if "this_chunk" in c:
+ perfherder_parts.append(c["this_chunk"])
+
+ if c["e10s"]:
+ perfherder_options.append("e10s")
+
+ self.resource_monitor_perfherder_id = (
+ ".".join(perfherder_parts),
+ perfherder_options,
+ )
+
+ # helper methods {{{2
+ def _pre_config_lock(self, rw_config):
+ super(DesktopUnittest, self)._pre_config_lock(rw_config)
+ c = self.config
+ if not c.get("run_all_suites"):
+ return # configs are valid
+ for category in SUITE_CATEGORIES:
+ specific_suites = c.get("specified_%s_suites" % (category))
+ if specific_suites:
+ if specific_suites != "all":
+ self.fatal(
+ "Config options are not valid. Please ensure"
+ " that if the '--run-all-suites' flag was enabled,"
+ " then do not specify to run only specific suites "
+ "like:\n '--mochitest-suite browser-chrome'"
+ )
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(DesktopUnittest, self).query_abs_dirs()
+
+ c = self.config
+ dirs = {}
+ dirs["abs_work_dir"] = abs_dirs["abs_work_dir"]
+ dirs["abs_app_install_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "application"
+ )
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ dirs["abs_test_extensions_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "extensions"
+ )
+ dirs["abs_test_bin_dir"] = os.path.join(dirs["abs_test_install_dir"], "bin")
+ dirs["abs_test_bin_plugins_dir"] = os.path.join(
+ dirs["abs_test_bin_dir"], "plugins"
+ )
+ dirs["abs_test_bin_components_dir"] = os.path.join(
+ dirs["abs_test_bin_dir"], "components"
+ )
+ dirs["abs_mochitest_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "mochitest"
+ )
+ dirs["abs_reftest_dir"] = os.path.join(dirs["abs_test_install_dir"], "reftest")
+ dirs["abs_xpcshell_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "xpcshell"
+ )
+ dirs["abs_cppunittest_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "cppunittest"
+ )
+ dirs["abs_gtest_dir"] = os.path.join(dirs["abs_test_install_dir"], "gtest")
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ dirs["abs_jittest_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "jit-test", "jit-test"
+ )
+
+ if os.path.isabs(c["virtualenv_path"]):
+ dirs["abs_virtualenv_dir"] = c["virtualenv_path"]
+ else:
+ dirs["abs_virtualenv_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], c["virtualenv_path"]
+ )
+ abs_dirs.update(dirs)
+ self.abs_dirs = abs_dirs
+
+ return self.abs_dirs
+
+ def query_abs_app_dir(self):
+ """We can't set this in advance, because OSX install directories
+ change depending on branding and opt/debug.
+ """
+ if self.abs_app_dir:
+ return self.abs_app_dir
+ if not self.binary_path:
+ self.fatal("Can't determine abs_app_dir (binary_path not set!)")
+ self.abs_app_dir = os.path.dirname(self.binary_path)
+ return self.abs_app_dir
+
+ def query_abs_res_dir(self):
+ """The directory containing resources like plugins and extensions. On
+ OSX this is Contents/Resources, on all other platforms its the same as
+ the app dir.
+
+ As with the app dir, we can't set this in advance, because OSX install
+ directories change depending on branding and opt/debug.
+ """
+ if self.abs_res_dir:
+ return self.abs_res_dir
+
+ abs_app_dir = self.query_abs_app_dir()
+ if self._is_darwin():
+ res_subdir = self.config.get("mac_res_subdir", "Resources")
+ self.abs_res_dir = os.path.join(os.path.dirname(abs_app_dir), res_subdir)
+ else:
+ self.abs_res_dir = abs_app_dir
+ return self.abs_res_dir
+
+ @PreScriptAction("create-virtualenv")
+ def _pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+
+ self.register_virtualenv_module(name="mock")
+ self.register_virtualenv_module(name="simplejson")
+
+ requirements_files = [
+ os.path.join(
+ dirs["abs_test_install_dir"], "config", "marionette_requirements.txt"
+ )
+ ]
+
+ if self._query_specified_suites("mochitest", "mochitest-media") is not None:
+ # mochitest-media is the only thing that needs this
+ requirements_files.append(
+ os.path.join(
+ dirs["abs_mochitest_dir"],
+ "websocketprocessbridge",
+ "websocketprocessbridge_requirements_3.txt",
+ )
+ )
+
+ for requirements_file in requirements_files:
+ self.register_virtualenv_module(
+ requirements=[requirements_file], two_pass=True
+ )
+
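+        # On Windows, point multiprocessing at the configured python
+        # executable so that spawned children use the right interpreter.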
+ _python_interp = self.query_exe("python")
+ if "win" in self.platform_name() and os.path.exists(_python_interp):
+ multiprocessing.set_executable(_python_interp)
+
+ def _query_symbols_url(self):
+ """query the full symbols URL based upon binary URL"""
+ # may break with name convention changes but is one less 'input' for script
+ if self.symbols_url:
+ return self.symbols_url
+
+ # Use simple text substitution to determine the symbols_url from the
+ # installer_url. This will not always work: For signed builds, the
+ # installer_url is likely an artifact in a signing task, which may not
+ # have a symbols artifact. It might be better to use the test target
+ # preferentially, like query_prefixed_build_dir_url() does (for future
+ # consideration, if this code proves troublesome).
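+        # e.g. (illustrative) ".../firefox-110.0.en-US.linux-x86_64.tar.bz2"
+        # becomes ".../firefox-110.0.en-US.linux-x86_64.crashreporter-symbols.zip".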
+ symbols_url = None
+ self.info("finding symbols_url based upon self.installer_url")
+ if self.installer_url:
+ for ext in [".zip", ".dmg", ".tar.bz2"]:
+ if ext in self.installer_url:
+ symbols_url = self.installer_url.replace(
+ ext, ".crashreporter-symbols.zip"
+ )
+ if not symbols_url:
+ self.fatal(
+ "self.installer_url was found but symbols_url could \
+ not be determined"
+ )
+ else:
+ self.fatal("self.installer_url was not found in self.config")
+ self.info("setting symbols_url as %s" % (symbols_url))
+ self.symbols_url = symbols_url
+ return self.symbols_url
+
+ def _get_mozharness_test_paths(self, suite_category, suite):
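+        # MOZHARNESS_TEST_PATHS is a JSON object mapping suite names to lists
+        # of test paths, e.g. (illustrative):
+        #   {"mochitest-plain": ["dom/base/test"], "xpcshell": ["netwerk/test/unit"]}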
+ test_paths = json.loads(os.environ.get("MOZHARNESS_TEST_PATHS", '""'))
+
+ if "-coverage" in suite:
+ suite = suite[: suite.index("-coverage")]
+
+ if not test_paths or suite not in test_paths:
+ return None
+
+ suite_test_paths = test_paths[suite]
+
+ if suite_category == "reftest":
+ dirs = self.query_abs_dirs()
+ suite_test_paths = [
+ os.path.join(dirs["abs_reftest_dir"], "tests", p)
+ for p in suite_test_paths
+ ]
+
+ return suite_test_paths
+
+ def _query_abs_base_cmd(self, suite_category, suite):
+ if self.binary_path:
+ c = self.config
+ dirs = self.query_abs_dirs()
+ run_file = c["run_file_names"][suite_category]
+ base_cmd = [self.query_python_path("python"), "-u"]
+ base_cmd.append(os.path.join(dirs["abs_%s_dir" % suite_category], run_file))
+ abs_app_dir = self.query_abs_app_dir()
+ abs_res_dir = self.query_abs_res_dir()
+
+ raw_log_file, error_summary_file = self.get_indexed_logs(
+ dirs["abs_blob_upload_dir"], suite
+ )
+
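+            # These values fill the %(...)s placeholders in the per-suite
+            # option templates from the config, e.g. (illustrative):
+            #   "--symbols-path=%(symbols_path)s"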
+ str_format_values = {
+ "binary_path": self.binary_path,
+ "symbols_path": self._query_symbols_url(),
+ "abs_work_dir": dirs["abs_work_dir"],
+ "abs_app_dir": abs_app_dir,
+ "abs_res_dir": abs_res_dir,
+ "raw_log_file": raw_log_file,
+ "error_summary_file": error_summary_file,
+ "gtest_dir": os.path.join(dirs["abs_test_install_dir"], "gtest"),
+ }
+
+ # TestingMixin._download_and_extract_symbols() will set
+ # self.symbols_path when downloading/extracting.
+ if self.symbols_path:
+ str_format_values["symbols_path"] = self.symbols_path
+
+ if suite_category not in SUITE_NO_E10S:
+ if suite_category in SUITE_DEFAULT_E10S and not c["e10s"]:
+ base_cmd.append("--disable-e10s")
+ elif suite_category not in SUITE_DEFAULT_E10S and c["e10s"]:
+ base_cmd.append("--e10s")
+ if c.get("repeat"):
+ if suite_category in SUITE_REPEATABLE:
+ base_cmd.extend(["--repeat=%s" % c.get("repeat")])
+ else:
+ self.log(
+ "--repeat not supported in {}".format(suite_category),
+ level=WARNING,
+ )
+
+            # --disable-fission is not supported by gtest, cppunittest or jittest
+ if c["disable_fission"] and suite_category not in [
+ "gtest",
+ "cppunittest",
+ "jittest",
+ ]:
+ base_cmd.append("--disable-fission")
+
+ # Ignore chunking if we have user specified test paths
+ if not (self.verify_enabled or self.per_test_coverage):
+ test_paths = self._get_mozharness_test_paths(suite_category, suite)
+ if test_paths:
+ base_cmd.extend(test_paths)
+ elif c.get("total_chunks") and c.get("this_chunk"):
+ base_cmd.extend(
+ [
+ "--total-chunks",
+ c["total_chunks"],
+ "--this-chunk",
+ c["this_chunk"],
+ ]
+ )
+
+ if c["no_random"]:
+ if suite_category == "mochitest":
+ base_cmd.append("--bisect-chunk=default")
+ else:
+ self.warning(
+ "--no-random does not currently work with suites other than "
+ "mochitest."
+ )
+
+ if c["headless"]:
+ base_cmd.append("--headless")
+
+ if c.get("threads"):
+ base_cmd.extend(["--threads", c["threads"]])
+
+ if c["enable_xorigin_tests"]:
+ base_cmd.append("--enable-xorigin-tests")
+
+ if c["extra_prefs"]:
+ base_cmd.extend(["--setpref={}".format(p) for p in c["extra_prefs"]])
+
+ if c["a11y_checks"]:
+ base_cmd.append("--enable-a11y-checks")
+
+ if c["run_failures"]:
+ base_cmd.extend(["--run-failures={}".format(c["run_failures"])])
+
+ if c["timeout_as_pass"]:
+ base_cmd.append("--timeout-as-pass")
+
+ if c["crash_as_pass"]:
+ base_cmd.append("--crash-as-pass")
+
+ if c["conditioned_profile"]:
+ base_cmd.append("--conditioned-profile")
+
+ # Ensure the --tag flag and its params get passed along
+ if c["test_tags"]:
+ base_cmd.extend(["--tag={}".format(t) for t in c["test_tags"]])
+
+ if suite_category not in c["suite_definitions"]:
+ self.fatal("'%s' not defined in the config!")
+
+ if suite in (
+ "browser-chrome-coverage",
+ "xpcshell-coverage",
+ "mochitest-devtools-chrome-coverage",
+ "plain-coverage",
+ ):
+ base_cmd.append("--jscov-dir-prefix=%s" % dirs["abs_blob_upload_dir"])
+
+ options = c["suite_definitions"][suite_category]["options"]
+ if options:
+ for option in options:
+ option = option % str_format_values
+ if not option.endswith("None"):
+ base_cmd.append(option)
+ if self.structured_output(
+ suite_category, self._query_try_flavor(suite_category, suite)
+ ):
+ base_cmd.append("--log-raw=-")
+ return base_cmd
+ else:
+ self.warning(
+ "Suite options for %s could not be determined."
+ "\nIf you meant to have options for this suite, "
+ "please make sure they are specified in your "
+ "config under %s_options" % (suite_category, suite_category)
+ )
+
+ return base_cmd
+ else:
+ self.fatal(
+ "'binary_path' could not be determined.\n This should "
+ "be like '/path/build/application/firefox/firefox'"
+ "\nIf you are running this script without the 'install' "
+ "action (where binary_path is set), please ensure you are"
+ " either:\n(1) specifying it in the config file under "
+ "binary_path\n(2) specifying it on command line with the"
+ " '--binary-path' flag"
+ )
+
+ def _query_specified_suites(self, category, sub_category=None):
+ """Checks if the provided suite does indeed exist.
+
+ If at least one suite was given and if it does exist, return the suite
+ as legitimate and line it up for execution.
+
+ Otherwise, do not run any suites and return a fatal error.
+ """
+ c = self.config
+ all_suites = c.get("all_{}_suites".format(category), None)
+ specified_suites = c.get("specified_{}_suites".format(category), None)
+
+        # Bug 1603842 - disallow selection of more than 1 suite at a time
+ if specified_suites is None:
+ # Path taken by test-verify
+ return self.query_per_test_category_suites(category, all_suites)
+ if specified_suites and len(specified_suites) > 1:
+ self.fatal(
+ """Selection of multiple suites is not permitted. \
+ Please select at most 1 test suite."""
+ )
+ return
+
+ # Normal path taken by most test suites as only one suite is specified
+ suite = specified_suites[0]
+ if suite not in all_suites:
+ self.fatal("""Selected suite does not exist!""")
+
+ # allow for fine grain suite selection
+ ret_val = all_suites[suite]
+ if sub_category in all_suites:
+ if all_suites[sub_category] != ret_val:
+ return None
+
+ return {suite: ret_val}
+
+ def _query_try_flavor(self, category, suite):
+ flavors = {
+ "mochitest": [
+ ("plain.*", "mochitest"),
+ ("browser-chrome.*", "browser-chrome"),
+ ("mochitest-browser-a11y.*", "browser-a11y"),
+ ("mochitest-browser-media.*", "browser-media"),
+ ("mochitest-devtools-chrome.*", "devtools-chrome"),
+ ("chrome", "chrome"),
+ ],
+ "xpcshell": [("xpcshell", "xpcshell")],
+ "reftest": [("reftest", "reftest"), ("crashtest", "crashtest")],
+ }
+ for suite_pattern, flavor in flavors.get(category, []):
+ if re.compile(suite_pattern).match(suite):
+ return flavor
+
+ def structured_output(self, suite_category, flavor=None):
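+        # unstructured_flavors maps a suite category to the flavors that still
+        # emit unstructured logs, e.g. (illustrative): {"mochitest": ["chrome"]}.
+        # An empty list marks every flavor of that category as unstructured.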
+ unstructured_flavors = self.config.get("unstructured_flavors")
+ if not unstructured_flavors:
+ return True
+ if suite_category not in unstructured_flavors:
+ return True
+ if not unstructured_flavors.get(
+ suite_category
+ ) or flavor in unstructured_flavors.get(suite_category):
+ return False
+ return True
+
+ def get_test_output_parser(
+ self, suite_category, flavor=None, strict=False, **kwargs
+ ):
+ if not self.structured_output(suite_category, flavor):
+ return DesktopUnittestOutputParser(suite_category=suite_category, **kwargs)
+ self.info("Structured output parser in use for %s." % suite_category)
+ return StructuredOutputParser(
+ suite_category=suite_category, strict=strict, **kwargs
+ )
+
+ # Actions {{{2
+
+ # clobber defined in BaseScript, deletes mozharness/build if exists
+ # preflight_download_and_extract is in TestingMixin.
+ # create_virtualenv is in VirtualenvMixin.
+ # preflight_install is in TestingMixin.
+ # install is in TestingMixin.
+
+ @PreScriptAction("download-and-extract")
+ def _pre_download_and_extract(self, action):
+ """Abort if --artifact try syntax is used with compiled-code tests"""
+ dir = self.query_abs_dirs()["abs_blob_upload_dir"]
+ self.mkdir_p(dir)
+
+ if not self.try_message_has_flag("artifact"):
+ return
+ self.info("Artifact build requested in try syntax.")
+ rejected = []
+ compiled_code_suites = [
+ "cppunit",
+ "gtest",
+ "jittest",
+ ]
+ for category in SUITE_CATEGORIES:
+ suites = self._query_specified_suites(category) or []
+ for suite in suites:
+ if any([suite.startswith(c) for c in compiled_code_suites]):
+ rejected.append(suite)
+ break
+ if rejected:
+ self.record_status(TBPL_EXCEPTION)
+ self.fatal(
+ "There are specified suites that are incompatible with "
+ "--artifact try syntax flag: {}".format(", ".join(rejected)),
+ exit_code=self.return_code,
+ )
+
+ def download_and_extract(self):
+ """
+ download and extract test zip / download installer
+ optimizes which subfolders to extract from tests archive
+ """
+ c = self.config
+
+ extract_dirs = None
+
+ if c.get("run_all_suites"):
+ target_categories = SUITE_CATEGORIES
+ else:
+ target_categories = [
+ cat
+ for cat in SUITE_CATEGORIES
+ if self._query_specified_suites(cat) is not None
+ ]
+ super(DesktopUnittest, self).download_and_extract(
+ extract_dirs=extract_dirs, suite_categories=target_categories
+ )
+
+ def start_pulseaudio(self):
+ command = []
+        # NEED_PULSEAUDIO being set implies the underlying system is Linux.
+ if os.environ.get("NEED_PULSEAUDIO") == "true":
+ command.extend(
+ [
+ "pulseaudio",
+ "--daemonize",
+ "--log-level=4",
+ "--log-time=1",
+ "-vvvvv",
+ "--exit-idle-time=-1",
+ ]
+ )
+
+ # Only run the initialization for Debian.
+ # Ubuntu appears to have an alternate method of starting pulseaudio.
+ if self._is_debian():
+ self._kill_named_proc("pulseaudio")
+ self.run_command(command)
+
+ # All Linux systems need module-null-sink to be loaded, otherwise
+ # media tests fail.
+ self.run_command("pactl load-module module-null-sink")
+ self.run_command("pactl list modules short")
+
+ def stage_files(self):
+ for category in SUITE_CATEGORIES:
+ suites = self._query_specified_suites(category)
+ stage = getattr(self, "_stage_{}".format(category), None)
+ if suites and stage:
+ stage(suites)
+
+ def _stage_files(self, bin_name=None, fail_if_not_exists=True):
+ dirs = self.query_abs_dirs()
+ abs_app_dir = self.query_abs_app_dir()
+
+ # For mac these directories are in Contents/Resources, on other
+ # platforms abs_res_dir will point to abs_app_dir.
+ abs_res_dir = self.query_abs_res_dir()
+ abs_res_components_dir = os.path.join(abs_res_dir, "components")
+ abs_res_plugins_dir = os.path.join(abs_res_dir, "plugins")
+ abs_res_extensions_dir = os.path.join(abs_res_dir, "extensions")
+
+ if bin_name:
+ src = os.path.join(dirs["abs_test_bin_dir"], bin_name)
+ if os.path.exists(src):
+ self.info(
+ "copying %s to %s" % (src, os.path.join(abs_app_dir, bin_name))
+ )
+ shutil.copy2(src, os.path.join(abs_app_dir, bin_name))
+ elif fail_if_not_exists:
+ raise OSError("File %s not found" % src)
+ self.copytree(
+ dirs["abs_test_bin_components_dir"],
+ abs_res_components_dir,
+ overwrite="overwrite_if_exists",
+ )
+ self.mkdir_p(abs_res_plugins_dir)
+ self.copytree(
+ dirs["abs_test_bin_plugins_dir"],
+ abs_res_plugins_dir,
+ overwrite="overwrite_if_exists",
+ )
+ if os.path.isdir(dirs["abs_test_extensions_dir"]):
+ self.mkdir_p(abs_res_extensions_dir)
+ self.copytree(
+ dirs["abs_test_extensions_dir"],
+ abs_res_extensions_dir,
+ overwrite="overwrite_if_exists",
+ )
+
+ def _stage_xpcshell(self, suites):
+ if "WindowsApps" in self.binary_path:
+ self.log(
+ "Skipping stage xpcshell for MSIX tests because we cannot copy files into the installation directory."
+ )
+ return
+
+ self._stage_files(self.config["xpcshell_name"])
+ # http3server isn't built for Windows tests or Linux asan/tsan
+ # builds. Only stage if the `http3server_name` config is set and if
+ # the file actually exists.
+ if self.config.get("http3server_name"):
+ self._stage_files(self.config["http3server_name"], fail_if_not_exists=False)
+
+ def _stage_cppunittest(self, suites):
+ abs_res_dir = self.query_abs_res_dir()
+ dirs = self.query_abs_dirs()
+ abs_cppunittest_dir = dirs["abs_cppunittest_dir"]
+
+        # move manifest and js files to the resources dir, where tests expect them
+ files = glob.glob(os.path.join(abs_cppunittest_dir, "*.js"))
+ files.extend(glob.glob(os.path.join(abs_cppunittest_dir, "*.manifest")))
+ for f in files:
+ self.move(f, abs_res_dir)
+
+ def _stage_gtest(self, suites):
+ abs_res_dir = self.query_abs_res_dir()
+ abs_app_dir = self.query_abs_app_dir()
+ dirs = self.query_abs_dirs()
+ abs_gtest_dir = dirs["abs_gtest_dir"]
+ dirs["abs_test_bin_dir"] = os.path.join(dirs["abs_test_install_dir"], "bin")
+
+ files = glob.glob(os.path.join(dirs["abs_test_bin_plugins_dir"], "gmp-*"))
+ files.append(os.path.join(abs_gtest_dir, "dependentlibs.list.gtest"))
+ for f in files:
+ self.move(f, abs_res_dir)
+
+ self.copytree(
+ os.path.join(abs_gtest_dir, "gtest_bin"), os.path.join(abs_app_dir)
+ )
+
+ def _kill_proc_tree(self, pid):
+ # Kill a process tree (including grandchildren) with signal.SIGTERM
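+        # Returns a (gone, alive) pair of psutil process lists, or
+        # (None, None) when asked to kill our own pid.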
+ try:
+ import signal
+
+ import psutil
+
+ if pid == os.getpid():
+ return (None, None)
+
+ parent = psutil.Process(pid)
+ children = parent.children(recursive=True)
+ children.append(parent)
+
+ for p in children:
+ p.send_signal(signal.SIGTERM)
+
+ # allow for 60 seconds to kill procs
+ timeout = 60
+ gone, alive = psutil.wait_procs(children, timeout=timeout)
+ for p in gone:
+ self.info("psutil found pid %s dead" % p.pid)
+ for p in alive:
+ self.error("failed to kill pid %d after %d" % (p.pid, timeout))
+
+ return (gone, alive)
+ except Exception as e:
+ self.error("Exception while trying to kill process tree: %s" % str(e))
+
+ def _kill_named_proc(self, pname):
+ try:
+ import psutil
+ except Exception as e:
+ self.info(
+ "Error importing psutil, not killing process %s: %s" % pname, str(e)
+ )
+ return
+
+ for proc in psutil.process_iter():
+ try:
+ if proc.name() == pname:
+ procd = proc.as_dict(attrs=["pid", "ppid", "name", "username"])
+ self.info("in _kill_named_proc, killing %s" % procd)
+ self._kill_proc_tree(proc.pid)
+ except Exception as e:
+ self.info("Warning: Unable to kill process %s: %s" % (pname, str(e)))
+ # may not be able to access process info for all processes
+ continue
+
+ def _remove_xen_clipboard(self):
+ """
+        When running on a Windows 7 VM, XenDPriv.exe interferes with the
+        clipboard; terminate the process and remove the binary so that it
+        doesn't restart.
+ """
+ if not self._is_windows():
+ return
+
+ self._kill_named_proc("XenDPriv.exe")
+ xenpath = os.path.join(
+ os.environ["ProgramFiles"], "Citrix", "XenTools", "XenDPriv.exe"
+ )
+ try:
+ if os.path.isfile(xenpath):
+ os.remove(xenpath)
+ except Exception as e:
+ self.error("Error: Failure to remove file %s: %s" % (xenpath, str(e)))
+
+ def _report_system_info(self):
+ """
+ Create the system-info.log artifact file, containing a variety of
+ system information that might be useful in diagnosing test failures.
+ """
+ try:
+ import psutil
+
+ path = os.path.join(
+ self.query_abs_dirs()["abs_blob_upload_dir"], "system-info.log"
+ )
+ with open(path, "w") as f:
+ f.write("System info collected at %s\n\n" % datetime.now())
+ f.write("\nBoot time %s\n" % datetime.fromtimestamp(psutil.boot_time()))
+ f.write("\nVirtual memory: %s\n" % str(psutil.virtual_memory()))
+ f.write("\nDisk partitions: %s\n" % str(psutil.disk_partitions()))
+ f.write("\nDisk usage (/): %s\n" % str(psutil.disk_usage(os.path.sep)))
+ if not self._is_windows():
+ # bug 1417189: frequent errors querying users on Windows
+ f.write("\nUsers: %s\n" % str(psutil.users()))
+ f.write("\nNetwork connections:\n")
+ try:
+ for nc in psutil.net_connections():
+ f.write(" %s\n" % str(nc))
+ except Exception:
+ f.write("Exception getting network info: %s\n" % sys.exc_info()[0])
+ f.write("\nProcesses:\n")
+ try:
+ for p in psutil.process_iter():
+ ctime = str(datetime.fromtimestamp(p.create_time()))
+ f.write(
+ " PID %d %s %s created at %s\n"
+ % (p.pid, p.name(), str(p.cmdline()), ctime)
+ )
+ except Exception:
+ f.write("Exception getting process info: %s\n" % sys.exc_info()[0])
+ except Exception:
+ # psutil throws a variety of intermittent exceptions
+ self.info("Unable to complete system-info.log: %s" % sys.exc_info()[0])
+
+ # pull defined in VCSScript.
+ # preflight_run_tests defined in TestingMixin.
+
+ def run_tests(self):
+ self._remove_xen_clipboard()
+ self._report_system_info()
+ self.start_time = datetime.now()
+ for category in SUITE_CATEGORIES:
+ if not self._run_category_suites(category):
+ break
+
+ def get_timeout_for_category(self, suite_category):
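+        # Returns the number of seconds used as run_command's output_timeout.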
+ if suite_category == "cppunittest":
+ return 2500
+ return self.config["suite_definitions"][suite_category].get("run_timeout", 1000)
+
+ def _run_category_suites(self, suite_category):
+ """run suite(s) to a specific category"""
+ dirs = self.query_abs_dirs()
+ suites = self._query_specified_suites(suite_category)
+ abs_app_dir = self.query_abs_app_dir()
+ abs_res_dir = self.query_abs_res_dir()
+
+ max_per_test_time = timedelta(minutes=60)
+ max_per_test_tests = 10
+ if self.per_test_coverage:
+ max_per_test_tests = 30
+ executed_tests = 0
+ executed_too_many_tests = False
+ xpcshell_selftests = 0
+
+ if suites:
+ self.info("#### Running %s suites" % suite_category)
+ for suite in suites:
+ if executed_too_many_tests and not self.per_test_coverage:
+ return False
+
+ replace_dict = {
+ "abs_app_dir": abs_app_dir,
+ # Mac specific, but points to abs_app_dir on other
+ # platforms.
+ "abs_res_dir": abs_res_dir,
+ "binary_path": self.binary_path,
+ "install_dir": self.install_dir,
+ }
+ options_list = []
+ env = {"TEST_SUITE": suite}
+ if isinstance(suites[suite], dict):
+ options_list = suites[suite].get("options", [])
+ if (
+ self.verify_enabled
+ or self.per_test_coverage
+ or self._get_mozharness_test_paths(suite_category, suite)
+ ):
+ # Ignore tests list in modes where we are running specific tests.
+ tests_list = []
+ else:
+ tests_list = suites[suite].get("tests", [])
+ env = copy.deepcopy(suites[suite].get("env", {}))
+ else:
+ options_list = suites[suite]
+ tests_list = []
+
+ flavor = self._query_try_flavor(suite_category, suite)
+ try_options, try_tests = self.try_args(flavor)
+
+ suite_name = suite_category + "-" + suite
+ tbpl_status, log_level = None, None
+ error_list = BaseErrorList + HarnessErrorList
+ parser = self.get_test_output_parser(
+ suite_category,
+ flavor=flavor,
+ config=self.config,
+ error_list=error_list,
+ log_obj=self.log_obj,
+ )
+
+ if suite_category == "reftest":
+ ref_formatter = imp.load_source(
+ "ReftestFormatter",
+ os.path.abspath(
+ os.path.join(dirs["abs_reftest_dir"], "output.py")
+ ),
+ )
+ parser.formatter = ref_formatter.ReftestFormatter()
+
+ if self.query_minidump_stackwalk():
+ env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
+ if self.config["nodejs_path"]:
+ env["MOZ_NODE_PATH"] = self.config["nodejs_path"]
+ env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "full"
+ if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
+ self.mkdir_p(env["MOZ_UPLOAD_DIR"])
+
+ if self.config["allow_software_gl_layers"]:
+ env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"
+
+ env["STYLO_THREADS"] = "4"
+
+ env = self.query_env(partial_env=env, log_level=INFO)
+ cmd_timeout = self.get_timeout_for_category(suite_category)
+
+ summary = {}
+ for per_test_args in self.query_args(suite):
+ # Make sure baseline code coverage tests are never
+ # skipped and that having them run has no influence
+ # on the max number of actual tests that are to be run.
+ is_baseline_test = (
+ "baselinecoverage" in per_test_args[-1]
+ if self.per_test_coverage
+ else False
+ )
+ if executed_too_many_tests and not is_baseline_test:
+ continue
+
+ if not is_baseline_test:
+ if (datetime.now() - self.start_time) > max_per_test_time:
+ # Running tests has run out of time. That is okay! Stop running
+ # them so that a task timeout is not triggered, and so that
+ # (partial) results are made available in a timely manner.
+ self.info(
+ "TinderboxPrint: Running tests took too long: Not all tests "
+ "were executed.<br/>"
+ )
+ # Signal per-test time exceeded, to break out of suites and
+ # suite categories loops also.
+ return False
+ if executed_tests >= max_per_test_tests:
+ # When changesets are merged between trees or many tests are
+ # otherwise updated at once, there probably is not enough time
+ # to run all tests, and attempting to do so may cause other
+ # problems, such as generating too much log output.
+ self.info(
+ "TinderboxPrint: Too many modified tests: Not all tests "
+ "were executed.<br/>"
+ )
+ executed_too_many_tests = True
+
+ executed_tests = executed_tests + 1
+
+ abs_base_cmd = self._query_abs_base_cmd(suite_category, suite)
+ cmd = abs_base_cmd[:]
+ cmd.extend(
+ self.query_options(
+ options_list, try_options, str_format_values=replace_dict
+ )
+ )
+ cmd.extend(
+ self.query_tests_args(
+ tests_list, try_tests, str_format_values=replace_dict
+ )
+ )
+
+ final_cmd = copy.copy(cmd)
+ final_cmd.extend(per_test_args)
+
+ # Bug 1714406: In test-verify of xpcshell tests on Windows, repeated
+ # self-tests can trigger https://bugs.python.org/issue37380,
+ # for python < 3.7; avoid by running xpcshell self-tests only once
+ # per test-verify run.
+ if (
+ (self.verify_enabled or self.per_test_coverage)
+ and sys.platform.startswith("win")
+ and sys.version_info < (3, 7)
+ and "--self-test" in final_cmd
+ ):
+ xpcshell_selftests += 1
+ if xpcshell_selftests > 1:
+ final_cmd.remove("--self-test")
+
+ final_env = copy.copy(env)
+
+ if self.per_test_coverage:
+ self.set_coverage_env(final_env)
+
+ return_code = self.run_command(
+ final_cmd,
+ cwd=dirs["abs_work_dir"],
+ output_timeout=cmd_timeout,
+ output_parser=parser,
+ env=final_env,
+ )
+
+ if self.per_test_coverage:
+ self.add_per_test_coverage_report(
+ final_env, suite, per_test_args[-1]
+ )
+
+ # mochitest, reftest, and xpcshell suites do not return
+ # appropriate return codes. Therefore, we must parse the output
+ # to determine what the tbpl_status and worst_log_level must
+ # be. We do this by:
+ # 1) checking to see if our mozharness script ran into any
+ # errors itself with 'num_errors' <- OutputParser
+ # 2) if num_errors is 0 then we look in the subclassed 'parser'
+ # findings for harness/suite errors <- DesktopUnittestOutputParser
+ # 3) checking to see if the return code is in success_codes
+
+ success_codes = None
+ tbpl_status, log_level, summary = parser.evaluate_parser(
+ return_code, success_codes, summary
+ )
+ parser.append_tinderboxprint_line(suite_name)
+
+ self.record_status(tbpl_status, level=log_level)
+ if len(per_test_args) > 0:
+ self.log_per_test_status(
+ per_test_args[-1], tbpl_status, log_level
+ )
+ if tbpl_status == TBPL_RETRY:
+ self.info("Per-test run abandoned due to RETRY status")
+ return False
+ else:
+ self.log(
+ "The %s suite: %s ran with return status: %s"
+ % (suite_category, suite, tbpl_status),
+ level=log_level,
+ )
+
+ if executed_too_many_tests:
+ return False
+ else:
+ self.debug("There were no suites to run for %s" % suite_category)
+ return True
+
+ def uninstall(self):
+ # Technically, we might miss this step if earlier steps fail badly.
+ # If that becomes a big issue we should consider moving this to
+ # something that is more likely to execute, such as
+ # postflight_run_cmd_suites
+ if "WindowsApps" in self.binary_path:
+ self.uninstall_app(self.binary_path)
+ else:
+ self.log("Skipping uninstall for non-MSIX test")
+
+
+# main {{{1
+if __name__ == "__main__":
+ desktop_unittest = DesktopUnittest()
+ desktop_unittest.run_and_exit()
diff --git a/testing/mozharness/scripts/does_it_crash.py b/testing/mozharness/scripts/does_it_crash.py
new file mode 100755
index 0000000000..0c54b63131
--- /dev/null
+++ b/testing/mozharness/scripts/does_it_crash.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+""" does_it_crash.py
+
+ Runs a thing to see if it crashes within a set period.
+"""
+import os
+import sys
+
+import requests
+
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+import mozinstall
+from mozharness.base.script import BaseScript
+from mozprocess import ProcessHandler
+
+
+class DoesItCrash(BaseScript):
+ config_options = [
+ [
+ [
+ "--thing-url",
+ ],
+ {
+ "action": "store",
+ "dest": "thing_url",
+ "type": str,
+ "help": "An URL that points to a package containing the thing to run",
+ },
+ ],
+ [
+ [
+ "--thing-to-run",
+ ],
+ {
+ "action": "store",
+ "dest": "thing_to_run",
+ "type": str,
+ "help": "The thing to run. If --thing-url is a package, this should be "
+ "its location relative to the root of the package.",
+ },
+ ],
+ [
+ [
+ "--thing-arg",
+ ],
+ {
+ "action": "append",
+ "dest": "thing_args",
+ "type": str,
+ "default": [],
+ "help": "Args for the thing. May be passed multiple times",
+ },
+ ],
+ [
+ [
+ "--run-for",
+ ],
+ {
+ "action": "store",
+ "dest": "run_for",
+ "default": 30,
+ "type": int,
+ "help": "How long to run the thing for, in seconds",
+ },
+ ],
+ ]
+
+ def __init__(self):
+ super(DoesItCrash, self).__init__(
+ all_actions=[
+ "download",
+ "run-thing",
+ ],
+ default_actions=[
+ "download",
+ "run-thing",
+ ],
+ config_options=self.config_options,
+ )
+
+ def downloadFile(self, url, file_name):
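+        # Stream the download to a file in the current working directory in
+        # 1 KiB chunks and return the resulting local path.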
+ req = requests.get(url, stream=True, timeout=30)
+ file_path = os.path.join(os.getcwd(), file_name)
+
+ with open(file_path, "wb") as f:
+ for chunk in req.iter_content(chunk_size=1024):
+ if not chunk:
+ continue
+ f.write(chunk)
+ f.flush()
+ return file_path
+
+ def download(self):
+ url = self.config["thing_url"]
+ fn = "thing." + url.split(".")[-1]
+ self.downloadFile(url=url, file_name=fn)
+ if mozinstall.is_installer(fn):
+ self.install_dir = mozinstall.install(fn, "thing")
+ else:
+ self.install_dir = ""
+
+ def run_thing(self):
+ thing = os.path.abspath(
+ os.path.join(self.install_dir, self.config["thing_to_run"])
+ )
+ # thing_args is a LockedTuple, which mozprocess doesn't like
+ args = list(self.config["thing_args"])
+ timeout = self.config["run_for"]
+
+ self.log(f"Running {thing} with args {args}")
+ p = ProcessHandler(
+ thing,
+ args=args,
+ shell=False,
+ storeOutput=True,
+ kill_on_timeout=True,
+ stream=False,
+ )
+ p.run(timeout)
+ # Wait for the timeout + a grace period (to make sure we don't interrupt
+ # process tear down).
+        # Without the wait, this script could potentially hang.
+ p.wait(timeout + 10)
+ if not p.timedOut:
+ # It crashed, oh no!
+ self.critical(
+ f"TEST-UNEXPECTED-FAIL: {thing} did not run for {timeout} seconds"
+ )
+ self.critical("Output was:")
+            for line in p.output:
+                self.critical(line)
+ self.fatal("fail")
+ else:
+ self.info(f"PASS: {thing} ran successfully for {timeout} seconds")
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ crashit = DoesItCrash()
+ crashit.run_and_exit()
diff --git a/testing/mozharness/scripts/firefox_ui_tests.py b/testing/mozharness/scripts/firefox_ui_tests.py
new file mode 100644
index 0000000000..7b05d0ca5c
--- /dev/null
+++ b/testing/mozharness/scripts/firefox_ui_tests.py
@@ -0,0 +1,299 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+
+import copy
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.python import PreScriptAction
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.vcstools import VCSToolsScript
+
+# General command line arguments for Firefox ui tests
+firefox_ui_tests_config_options = (
+ [
+ [
+ ["--allow-software-gl-layers"],
+ {
+ "action": "store_true",
+ "dest": "allow_software_gl_layers",
+ "default": False,
+ "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL "
+ "compositor.",
+ },
+ ],
+ [
+ ["--dry-run"],
+ {
+ "dest": "dry_run",
+ "default": False,
+ "help": "Only show what was going to be tested.",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "dest": "e10s",
+ "action": "store_false",
+ "default": True,
+ "help": "Disable multi-process (e10s) mode when running tests.",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "dest": "disable_fission",
+ "action": "store_true",
+ "default": False,
+ "help": "Disable fission mode when running tests.",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "dest": "extra_prefs",
+ "action": "append",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ [
+ ["--symbols-path=SYMBOLS_PATH"],
+ {
+ "dest": "symbols_path",
+ "help": "absolute path to directory containing breakpad "
+ "symbols, or the url of a zip file containing symbols.",
+ },
+ ],
+ ]
+ + copy.deepcopy(testing_config_options)
+ + copy.deepcopy(code_coverage_config_options)
+)
+
+
+class FirefoxUIFunctionalTests(TestingMixin, VCSToolsScript, CodeCoverageMixin):
+ def __init__(
+ self,
+ config_options=None,
+ all_actions=None,
+ default_actions=None,
+ *args,
+ **kwargs
+ ):
+ config_options = config_options or firefox_ui_tests_config_options
+ actions = [
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ "uninstall",
+ ]
+
+ super(FirefoxUIFunctionalTests, self).__init__(
+ config_options=config_options,
+ all_actions=all_actions or actions,
+ default_actions=default_actions or actions,
+ *args,
+ **kwargs
+ )
+
+ # Code which runs in automation has to include the following properties
+ self.binary_path = self.config.get("binary_path")
+ self.installer_path = self.config.get("installer_path")
+ self.installer_url = self.config.get("installer_url")
+ self.test_packages_url = self.config.get("test_packages_url")
+ self.test_url = self.config.get("test_url")
+
+ if not self.test_url and not self.test_packages_url:
+ self.fatal("You must use --test-url, or --test-packages-url")
+
+ @PreScriptAction("create-virtualenv")
+ def _pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+
+ requirements = os.path.join(
+ dirs["abs_test_install_dir"], "config", "firefox_ui_requirements.txt"
+ )
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
+
+ def download_and_extract(self):
+ """Override method from TestingMixin for more specific behavior."""
+ extract_dirs = [
+ "config/*",
+ "firefox-ui/*",
+ "marionette/*",
+ "mozbase/*",
+ "tools/mozterm/*",
+ "tools/wptserve/*",
+ "tools/wpt_third_party/*",
+ "mozpack/*",
+ "mozbuild/*",
+ ]
+ super(FirefoxUIFunctionalTests, self).download_and_extract(
+ extract_dirs=extract_dirs
+ )
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+
+ abs_dirs = super(FirefoxUIFunctionalTests, self).query_abs_dirs()
+ abs_tests_install_dir = os.path.join(abs_dirs["abs_work_dir"], "tests")
+
+ dirs = {
+ "abs_blob_upload_dir": os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ ),
+ "abs_fxui_dir": os.path.join(abs_tests_install_dir, "firefox-ui"),
+ "abs_fxui_manifest_dir": os.path.join(
+ abs_tests_install_dir,
+ "firefox-ui",
+ "tests",
+ "testing",
+ "firefox-ui",
+ "tests",
+ ),
+ "abs_test_install_dir": abs_tests_install_dir,
+ }
+
+ for key in dirs:
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+
+ return self.abs_dirs
+
+ def query_harness_args(self, extra_harness_config_options=None):
+ """Collects specific test related command line arguments.
+
+ Sub classes should override this method for their own specific arguments.
+ """
+ config_options = extra_harness_config_options or []
+
+ args = []
+ for option in config_options:
+ dest = option[1]["dest"]
+ name = self.config.get(dest)
+
+ if name:
+ if type(name) is bool:
+ args.append(option[0][0])
+ else:
+ args.extend([option[0][0], self.config[dest]])
+
+ return args
+
+ def run_test(self, binary_path, env=None, marionette_port=2828):
+ """All required steps for running the tests against an installer."""
+ dirs = self.query_abs_dirs()
+
+ # Import the harness to retrieve the location of the cli scripts
+ import firefox_ui_harness
+
+ cmd = [
+ self.query_python_path(),
+ os.path.join(
+ os.path.dirname(firefox_ui_harness.__file__), "cli_functional.py"
+ ),
+ "--binary",
+ binary_path,
+ "--address",
+ "localhost:{}".format(marionette_port),
+ # Resource files to serve via local webserver
+ "--server-root",
+ os.path.join(dirs["abs_fxui_dir"], "resources"),
+ # Use the work dir to get temporary data stored
+ "--workspace",
+ dirs["abs_work_dir"],
+ # logging options
+ "--gecko-log=-", # output from the gecko process redirected to stdout
+ "--log-raw=-", # structured log for output parser redirected to stdout
+ # Enable tracing output to log transmission protocol
+ "-vv",
+ ]
+
+ # Collect all pass-through harness options to the script
+ cmd.extend(self.query_harness_args())
+
+ if not self.config.get("e10s"):
+ cmd.append("--disable-e10s")
+
+ if self.config.get("disable_fission"):
+ cmd.append("--disable-fission")
+
+ cmd.extend(["--setpref={}".format(p) for p in self.config.get("extra_prefs")])
+
+ if self.symbols_url:
+ cmd.extend(["--symbols-path", self.symbols_url])
+
+ parser = StructuredOutputParser(
+ config=self.config, log_obj=self.log_obj, strict=False
+ )
+
+ # Add the tests to run
+ cmd.append(
+ os.path.join(dirs["abs_fxui_manifest_dir"], "functional", "manifest.ini")
+ )
+
+ # Set further environment settings
+ env = env or self.query_env()
+ env.update({"MINIDUMP_SAVE_PATH": dirs["abs_blob_upload_dir"]})
+ if self.query_minidump_stackwalk():
+ env.update({"MINIDUMP_STACKWALK": self.minidump_stackwalk_path})
+ env["RUST_BACKTRACE"] = "full"
+
+ # If code coverage is enabled, set GCOV_PREFIX and JS_CODE_COVERAGE_OUTPUT_DIR
+ # env variables
+ if self.config.get("code_coverage"):
+ env["GCOV_PREFIX"] = self.gcov_dir
+ env["JS_CODE_COVERAGE_OUTPUT_DIR"] = self.jsvm_dir
+
+ if self.config["allow_software_gl_layers"]:
+ env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"
+
+ return_code = self.run_command(
+ cmd,
+ cwd=dirs["abs_fxui_dir"],
+ output_timeout=1000,
+ output_parser=parser,
+ env=env,
+ )
+
+ tbpl_status, log_level, summary = parser.evaluate_parser(return_code)
+ self.record_status(tbpl_status, level=log_level)
+
+ return return_code
+
+ @PreScriptAction("run-tests")
+ def _pre_run_tests(self, action):
+ if not self.installer_path and not self.installer_url:
+ self.critical(
+ "Please specify an installer via --installer-path or --installer-url."
+ )
+ sys.exit(1)
+
+ def run_tests(self):
+ """Run all the tests"""
+ return self.run_test(
+ binary_path=self.binary_path,
+ env=self.query_env(),
+ )
+
+
+if __name__ == "__main__":
+ myScript = FirefoxUIFunctionalTests()
+ myScript.run_and_exit()
diff --git a/testing/mozharness/scripts/fx_desktop_build.py b/testing/mozharness/scripts/fx_desktop_build.py
new file mode 100755
index 0000000000..93f46b34ca
--- /dev/null
+++ b/testing/mozharness/scripts/fx_desktop_build.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""fx_desktop_build.py.
+
+script harness to build nightly firefox within Mozilla's build environment
+and developer machines alike
+
+author: Jordan Lund
+
+"""
+
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+import mozharness.base.script as script
+from mozharness.mozilla.building.buildbase import (
+ BUILD_BASE_CONFIG_OPTIONS,
+ BuildingConfig,
+ BuildScript,
+)
+
+
+class FxDesktopBuild(BuildScript, object):
+ def __init__(self):
+ buildscript_kwargs = {
+ "config_options": BUILD_BASE_CONFIG_OPTIONS,
+ "all_actions": [
+ "get-secrets",
+ "clobber",
+ "build",
+ "static-analysis-autotest",
+ "valgrind-test",
+ "multi-l10n",
+ "package-source",
+ ],
+ "require_config_file": True,
+ # Default configuration
+ "config": {
+ "is_automation": True,
+ "debug_build": False,
+ # nightly stuff
+ "nightly_build": False,
+ # Seed all clones with mozilla-unified. This ensures subsequent
+ # jobs have a minimal `hg pull`.
+ "clone_upstream_url": "https://hg.mozilla.org/mozilla-unified",
+ "repo_base": "https://hg.mozilla.org",
+ "build_resources_path": "%(upload_path)s/build_resources.json",
+ "nightly_promotion_branches": ["mozilla-central", "mozilla-aurora"],
+ # try will overwrite these
+ "clone_with_purge": False,
+ "clone_by_revision": False,
+ "virtualenv_modules": [
+ "requests==2.8.1",
+ ],
+ "virtualenv_path": "venv",
+ },
+ "ConfigClass": BuildingConfig,
+ }
+ super(FxDesktopBuild, self).__init__(**buildscript_kwargs)
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(FxDesktopBuild, self).query_abs_dirs()
+
+ dirs = {
+ # BuildFactories in factory.py refer to a 'build' dir on the slave.
+ # This contains all the source code/objdir to compile. However,
+            # there is already a build dir in mozharness for every mh run, so
+            # the 'build' dir that factory refers to is named 'src' here to
+            # keep the two separate: rather than
+            # '{mozharness_repo}/build/build/', we have
+            # '{mozharness_repo}/build/src/'.
+ "abs_obj_dir": os.path.join(abs_dirs["abs_work_dir"], self._query_objdir()),
+ "upload_path": self.config["upload_env"]["UPLOAD_PATH"],
+ }
+ abs_dirs.update(dirs)
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ # Actions {{{2
+
+ @script.PreScriptRun
+ def suppress_windows_modal_dialogs(self, *args, **kwargs):
+ if self._is_windows():
+ # Suppress Windows modal dialogs to avoid hangs
+ import ctypes
+
+ ctypes.windll.kernel32.SetErrorMode(0x8001)
+
+
+if __name__ == "__main__":
+ fx_desktop_build = FxDesktopBuild()
+ fx_desktop_build.run_and_exit()
diff --git a/testing/mozharness/scripts/l10n_bumper.py b/testing/mozharness/scripts/l10n_bumper.py
new file mode 100755
index 0000000000..e597d5386d
--- /dev/null
+++ b/testing/mozharness/scripts/l10n_bumper.py
@@ -0,0 +1,380 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+""" l10n_bumper.py
+
+    Updates a gecko repo with up-to-date changesets from l10n.mozilla.org.
+
+ Specifically, it updates l10n-changesets.json which is used by mobile releases.
+
+ This is to allow for `mach taskgraph` to reference specific l10n revisions
+ without having to resort to task.extra or commandline base64 json hacks.
+"""
+import codecs
+import os
+import pprint
+import sys
+import time
+
+try:
+ import simplejson as json
+
+ assert json
+except ImportError:
+ import json
+
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.errors import HgErrorList
+from mozharness.base.log import FATAL
+from mozharness.base.vcs.vcsbase import VCSScript
+
+
+class L10nBumper(VCSScript):
+ config_options = [
+ [
+ [
+ "--ignore-closed-tree",
+ ],
+ {
+ "action": "store_true",
+ "dest": "ignore_closed_tree",
+ "default": False,
+ "help": "Bump l10n changesets on a closed tree.",
+ },
+ ],
+ [
+ [
+ "--build",
+ ],
+ {
+ "action": "store_false",
+ "dest": "dontbuild",
+ "default": True,
+ "help": "Trigger new builds on push.",
+ },
+ ],
+ ]
+
+ def __init__(self, require_config_file=True):
+ super(L10nBumper, self).__init__(
+ all_actions=[
+ "clobber",
+ "check-treestatus",
+ "checkout-gecko",
+ "bump-changesets",
+ "push",
+ "push-loop",
+ ],
+ default_actions=[
+ "push-loop",
+ ],
+ require_config_file=require_config_file,
+ config_options=self.config_options,
+ # Default config options
+ config={
+ "treestatus_base_url": "https://treestatus.mozilla-releng.net",
+ "log_max_rotate": 99,
+ },
+ )
+
+ # Helper methods {{{1
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+
+ abs_dirs = super(L10nBumper, self).query_abs_dirs()
+
+ abs_dirs.update(
+ {
+ "gecko_local_dir": os.path.join(
+ abs_dirs["abs_work_dir"],
+ self.config.get(
+ "gecko_local_dir",
+ os.path.basename(self.config["gecko_pull_url"]),
+ ),
+ ),
+ }
+ )
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def hg_commit(self, path, repo_path, message):
+ """
+ Commits changes in repo_path, with specified user and commit message
+ """
+ user = self.config["hg_user"]
+ hg = self.query_exe("hg", return_type="list")
+ env = self.query_env(partial_env={"LANG": "en_US.UTF-8"})
+ cmd = hg + ["add", path]
+ self.run_command(cmd, cwd=repo_path, env=env)
+ cmd = hg + ["commit", "-u", user, "-m", message]
+ self.run_command(cmd, cwd=repo_path, env=env)
+
+ def hg_push(self, repo_path):
+ hg = self.query_exe("hg", return_type="list")
+ command = hg + [
+ "push",
+ "-e",
+ "ssh -oIdentityFile=%s -l %s"
+ % (
+ self.config["ssh_key"],
+ self.config["ssh_user"],
+ ),
+ "-r",
+ ".",
+ self.config["gecko_push_url"],
+ ]
+ status = self.run_command(command, cwd=repo_path, error_list=HgErrorList)
+ if status != 0:
+ # We failed; get back to a known state so we can either retry
+ # or fail out and continue later.
+ self.run_command(
+ hg
+ + ["--config", "extensions.mq=", "strip", "--no-backup", "outgoing()"],
+ cwd=repo_path,
+ )
+ self.run_command(hg + ["up", "-C"], cwd=repo_path)
+ self.run_command(
+ hg + ["--config", "extensions.purge=", "purge", "--all"], cwd=repo_path
+ )
+ return False
+ return True
+
+ def _read_json(self, path):
+ contents = self.read_from_file(path)
+ try:
+ json_contents = json.loads(contents)
+ return json_contents
+ except ValueError:
+ self.error("%s is invalid json!" % path)
+
+ def _read_version(self, path):
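+        # e.g. a version file whose first line is "110.0.1" yields
+        # ["110", "0", "1"] (illustrative).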
+ contents = self.read_from_file(path).split("\n")[0]
+ return contents.split(".")
+
+ def _build_locale_map(self, old_contents, new_contents):
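+        # Maps each changed locale to what changed, e.g. (illustrative):
+        #   {"de": "abcdef123456", "fr": ["linux64", "win64"], "it": "removed"}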
+ locale_map = {}
+ for key in old_contents:
+ if key not in new_contents:
+ locale_map[key] = "removed"
+ for k, v in new_contents.items():
+ if old_contents.get(k, {}).get("revision") != v["revision"]:
+ locale_map[k] = v["revision"]
+ elif old_contents.get(k, {}).get("platforms") != v["platforms"]:
+ locale_map[k] = v["platforms"]
+ return locale_map
+
+ def _build_platform_dict(self, bump_config):
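+        # Builds {locale: {"platforms": [...]}} from each platform config's
+        # locales file, e.g. (illustrative): {"de": {"platforms": ["android"]}}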
+ dirs = self.query_abs_dirs()
+ repo_path = dirs["gecko_local_dir"]
+ platform_dict = {}
+ ignore_config = bump_config.get("ignore_config", {})
+ for platform_config in bump_config["platform_configs"]:
+ path = os.path.join(repo_path, platform_config["path"])
+ self.info(
+ "Reading %s for %s locales..." % (path, platform_config["platforms"])
+ )
+ contents = self.read_from_file(path)
+ for locale in contents.splitlines():
+ # locale is 1st word in line in shipped-locales
+ if platform_config.get("format") == "shipped-locales":
+ locale = locale.split(" ")[0]
+ existing_platforms = set(
+ platform_dict.get(locale, {}).get("platforms", [])
+ )
+ platforms = set(platform_config["platforms"])
+ ignore_platforms = set(ignore_config.get(locale, []))
+ platforms = (platforms | existing_platforms) - ignore_platforms
+ platform_dict[locale] = {"platforms": sorted(list(platforms))}
+ self.info("Built platform_dict:\n%s" % pprint.pformat(platform_dict))
+ return platform_dict
+
+ def _build_revision_dict(self, bump_config, version_list):
+ self.info("Building revision dict...")
+ platform_dict = self._build_platform_dict(bump_config)
+ revision_dict = {}
+ if bump_config.get("revision_url"):
+ repl_dict = {
+ "MAJOR_VERSION": version_list[0],
+ "COMBINED_MAJOR_VERSION": str(
+ int(version_list[0]) + int(version_list[1])
+ ),
+ }
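+            # e.g. (illustrative) version_list ["109", "1"] gives
+            # MAJOR_VERSION "109" and COMBINED_MAJOR_VERSION "110".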
+
+ url = bump_config["revision_url"] % repl_dict
+ path = self.download_file(url, error_level=FATAL)
+ revision_info = self.read_from_file(path)
+ self.info("Got %s" % revision_info)
+ for line in revision_info.splitlines():
+ locale, revision = line.split(" ")
+ if locale in platform_dict:
+ revision_dict[locale] = platform_dict[locale]
+ revision_dict[locale]["revision"] = revision
+ else:
+ for k, v in platform_dict.items():
+ v["revision"] = "default"
+ revision_dict[k] = v
+ self.info("revision_dict:\n%s" % pprint.pformat(revision_dict))
+ return revision_dict
+
+ def build_commit_message(self, name, locale_map):
+ comments = ""
+ approval_str = "r=release a=l10n-bump"
+ for locale, revision in sorted(locale_map.items()):
+ comments += "%s -> %s\n" % (locale, revision)
+ if self.config["dontbuild"]:
+ approval_str += " DONTBUILD"
+ if self.config["ignore_closed_tree"]:
+ approval_str += " CLOSED TREE"
+ message = "no bug - Bumping %s %s\n\n" % (name, approval_str)
+ message += comments
+ message = message.encode("utf-8")
+ return message
+
+ def query_treestatus(self):
+ "Return True if we can land based on treestatus"
+ c = self.config
+ dirs = self.query_abs_dirs()
+ tree = c.get(
+ "treestatus_tree", os.path.basename(c["gecko_pull_url"].rstrip("/"))
+ )
+ treestatus_url = "%s/trees/%s" % (c["treestatus_base_url"], tree)
+ treestatus_json = os.path.join(dirs["abs_work_dir"], "treestatus.json")
+ if not os.path.exists(dirs["abs_work_dir"]):
+ self.mkdir_p(dirs["abs_work_dir"])
+ self.rmtree(treestatus_json)
+
+ self.run_command(
+ ["curl", "--retry", "4", "-o", treestatus_json, treestatus_url],
+ throw_exception=True,
+ )
+
+ treestatus = self._read_json(treestatus_json)
+ if treestatus["result"]["status"] != "closed":
+ self.info(
+ "treestatus is %s - assuming we can land"
+ % repr(treestatus["result"]["status"])
+ )
+ return True
+
+ return False
+
+ # Actions {{{1
+ def check_treestatus(self):
+ if not self.config["ignore_closed_tree"] and not self.query_treestatus():
+ self.info("breaking early since treestatus is closed")
+ sys.exit(0)
+
+ def checkout_gecko(self):
+ c = self.config
+ dirs = self.query_abs_dirs()
+ dest = dirs["gecko_local_dir"]
+ repos = [
+ {
+ "repo": c["gecko_pull_url"],
+ "tag": c.get("gecko_tag", "default"),
+ "dest": dest,
+ "vcs": "hg",
+ }
+ ]
+ self.vcs_checkout_repos(repos)
+
+ def bump_changesets(self):
+ dirs = self.query_abs_dirs()
+ repo_path = dirs["gecko_local_dir"]
+ version_path = os.path.join(repo_path, self.config["version_path"])
+ changes = False
+ version_list = self._read_version(version_path)
+ for bump_config in self.config["bump_configs"]:
+ path = os.path.join(repo_path, bump_config["path"])
+ # For now, assume format == 'json'. When we add desktop support,
+ # we may need to add flatfile support
+ if os.path.exists(path):
+ old_contents = self._read_json(path)
+ else:
+ old_contents = {}
+
+ new_contents = self._build_revision_dict(bump_config, version_list)
+
+ if new_contents == old_contents:
+ continue
+ # super basic sanity check
+ if not isinstance(new_contents, dict) or len(new_contents) < 5:
+ self.error(
+ "Cowardly refusing to land a broken-seeming changesets file!"
+ )
+ continue
+
+ # Write to disk
+ content_string = json.dumps(
+ new_contents,
+ sort_keys=True,
+ indent=4,
+ separators=(",", ": "),
+ )
+            with codecs.open(path, encoding="utf-8", mode="w+") as fh:
+                fh.write(content_string + "\n")
+
+ locale_map = self._build_locale_map(old_contents, new_contents)
+
+ # Commit
+ message = self.build_commit_message(bump_config["name"], locale_map)
+ self.hg_commit(path, repo_path, message)
+ changes = True
+ return changes
+
+ def push(self):
+ dirs = self.query_abs_dirs()
+ repo_path = dirs["gecko_local_dir"]
+ return self.hg_push(repo_path)
+
+ def push_loop(self):
+ max_retries = 5
+ for _ in range(max_retries):
+ changed = False
+ if not self.config["ignore_closed_tree"] and not self.query_treestatus():
+ # Tree is closed; exit early to avoid a bunch of wasted time
+ self.info("breaking early since treestatus is closed")
+ break
+
+ self.checkout_gecko()
+ if self.bump_changesets():
+ changed = True
+
+ if not changed:
+ # Nothing changed, we're all done
+ self.info("No changes - all done")
+ break
+
+ if self.push():
+ # We did it! Hurray!
+ self.info("Great success!")
+ break
+            # If we're here, then the push failed. It also stripped any
+            # outgoing commits, so we should be in a pristine state again.
+            # Empty our local cache of manifests so they get reloaded the
+            # next time through this loop. This ensures we get fresh upstream
+            # manifests, and avoids problems like bug 979080.
+ self.device_manifests = {}
+
+ # Sleep before trying again
+            self.info("Sleeping 60 seconds before trying again")
+ time.sleep(60)
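+        # This else-clause belongs to the for-loop: it runs only when the
+        # loop exhausts max_retries without hitting a "break" above.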
+ else:
+ self.fatal("Didn't complete successfully (hit max_retries)")
+
+ # touch status file for nagios
+ dirs = self.query_abs_dirs()
+ status_path = os.path.join(dirs["base_work_dir"], self.config["status_path"])
+ self._touch_file(status_path)
+
+
+# __main__ {{{1
+if __name__ == "__main__":
+ bumper = L10nBumper()
+ bumper.run_and_exit()
diff --git a/testing/mozharness/scripts/marionette.py b/testing/mozharness/scripts/marionette.py
new file mode 100755
index 0000000000..8052927d2a
--- /dev/null
+++ b/testing/mozharness/scripts/marionette.py
@@ -0,0 +1,455 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import copy
+import json
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.base.errors import BaseErrorList, TarErrorList
+from mozharness.base.log import INFO
+from mozharness.base.script import PreScriptAction
+from mozharness.base.transfer import TransferMixin
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.errors import HarnessErrorList, LogcatErrorList
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.testing.unittest import TestSummaryOutputParserHelper
+
+
+class MarionetteTest(TestingMixin, MercurialScript, TransferMixin, CodeCoverageMixin):
+ config_options = (
+ [
+ [
+ ["--application"],
+ {
+ "action": "store",
+ "dest": "application",
+ "default": None,
+ "help": "application name of binary",
+ },
+ ],
+ [
+ ["--app-arg"],
+ {
+ "action": "store",
+ "dest": "app_arg",
+ "default": None,
+ "help": "Optional command-line argument to pass to the browser",
+ },
+ ],
+ [
+ ["--marionette-address"],
+ {
+ "action": "store",
+ "dest": "marionette_address",
+ "default": None,
+ "help": "The host:port of the Marionette server running inside Gecko. "
+ "Unused for emulator testing",
+ },
+ ],
+ [
+ ["--emulator"],
+ {
+ "action": "store",
+ "type": "choice",
+ "choices": ["arm", "x86"],
+ "dest": "emulator",
+ "default": None,
+ "help": "Use an emulator for testing",
+ },
+ ],
+ [
+ ["--test-manifest"],
+ {
+ "action": "store",
+ "dest": "test_manifest",
+ "default": "unit-tests.ini",
+ "help": "Path to test manifest to run relative to the Marionette "
+ "tests directory",
+ },
+ ],
+ [
+ ["--total-chunks"],
+ {
+ "action": "store",
+ "dest": "total_chunks",
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--this-chunk"],
+ {
+ "action": "store",
+ "dest": "this_chunk",
+ "help": "Number of this chunk",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ [
+ ["--headless"],
+ {
+ "action": "store_true",
+ "dest": "headless",
+ "default": False,
+ "help": "Run tests in headless mode.",
+ },
+ ],
+ [
+ ["--headless-width"],
+ {
+ "action": "store",
+ "dest": "headless_width",
+ "default": "1600",
+ "help": "Specify headless virtual screen width (default: 1600).",
+ },
+ ],
+ [
+ ["--headless-height"],
+ {
+ "action": "store",
+ "dest": "headless_height",
+ "default": "1200",
+ "help": "Specify headless virtual screen height (default: 1200).",
+ },
+ ],
+ [
+ ["--allow-software-gl-layers"],
+ {
+ "action": "store_true",
+ "dest": "allow_software_gl_layers",
+ "default": False,
+ "help": "Permits a software GL implementation (such as LLVMPipe) to use the GL compositor.", # NOQA: E501
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_true",
+ "dest": "disable_fission",
+ "default": False,
+ "help": "Run the browser without fission enabled",
+ },
+ ],
+ ]
+ + copy.deepcopy(testing_config_options)
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
+ repos = []
+
+ def __init__(self, require_config_file=False):
+ super(MarionetteTest, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "clobber",
+ "pull",
+ "download-and-extract",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ default_actions=[
+ "clobber",
+ "pull",
+ "download-and-extract",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ ],
+ require_config_file=require_config_file,
+ config={"require_test_zip": True},
+ )
+
+ # these are necessary since self.config is read only
+ c = self.config
+ self.installer_url = c.get("installer_url")
+ self.installer_path = c.get("installer_path")
+ self.binary_path = c.get("binary_path")
+ self.test_url = c.get("test_url")
+ self.test_packages_url = c.get("test_packages_url")
+
+ self.test_suite = self._get_test_suite(c.get("emulator"))
+ if self.test_suite not in self.config["suite_definitions"]:
+ self.fatal("{} is not defined in the config!".format(self.test_suite))
+
+ if c.get("structured_output"):
+ self.parser_class = StructuredOutputParser
+ else:
+ self.parser_class = TestSummaryOutputParserHelper
+
+ def _pre_config_lock(self, rw_config):
+ super(MarionetteTest, self)._pre_config_lock(rw_config)
+ if not self.config.get("emulator") and not self.config.get(
+ "marionette_address"
+ ):
+ self.fatal(
+ "You need to specify a --marionette-address for non-emulator tests! "
+                "(Try --marionette-address localhost:2828)"
+ )
+
+ def _query_tests_dir(self):
+ dirs = self.query_abs_dirs()
+ test_dir = self.config["suite_definitions"][self.test_suite]["testsdir"]
+
+ return os.path.join(dirs["abs_test_install_dir"], test_dir)
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(MarionetteTest, self).query_abs_dirs()
+ dirs = {}
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ dirs["abs_marionette_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "marionette", "harness", "marionette_harness"
+ )
+ dirs["abs_marionette_tests_dir"] = os.path.join(
+ dirs["abs_test_install_dir"],
+ "marionette",
+ "tests",
+ "testing",
+ "marionette",
+ "harness",
+ "marionette_harness",
+ "tests",
+ )
+ dirs["abs_gecko_dir"] = os.path.join(abs_dirs["abs_work_dir"], "gecko")
+ dirs["abs_emulator_dir"] = os.path.join(abs_dirs["abs_work_dir"], "emulator")
+
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ @PreScriptAction("create-virtualenv")
+ def _configure_marionette_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+ requirements = os.path.join(
+ dirs["abs_test_install_dir"], "config", "marionette_requirements.txt"
+ )
+ if not os.path.isfile(requirements):
+ self.fatal(
+ "Could not find marionette requirements file: {}".format(requirements)
+ )
+
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
+
+ def _get_test_suite(self, is_emulator):
+ """
+ Determine which in tree options group to use and return the
+ appropriate key.
+ """
+ platform = "emulator" if is_emulator else "desktop"
+        # Currently, running marionette on an emulator means webapi
+        # tests. This method will need to change if that assumption changes.
+ testsuite = "webapi" if is_emulator else "marionette"
+ return "{}_{}".format(testsuite, platform)
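+
+    # For illustration: _get_test_suite(None) returns "marionette_desktop",
+    # while _get_test_suite("x86") returns "webapi_emulator".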
+
+ def download_and_extract(self):
+ super(MarionetteTest, self).download_and_extract()
+
+ if self.config.get("emulator"):
+ dirs = self.query_abs_dirs()
+
+ self.mkdir_p(dirs["abs_emulator_dir"])
+ tar = self.query_exe("tar", return_type="list")
+ self.run_command(
+ tar + ["zxf", self.installer_path],
+ cwd=dirs["abs_emulator_dir"],
+ error_list=TarErrorList,
+ halt_on_failure=True,
+ fatal_exit_code=3,
+ )
+
+ def install(self):
+ if self.config.get("emulator"):
+            self.info("Emulator tests; skipping install.")
+ else:
+ super(MarionetteTest, self).install()
+
+ def run_tests(self):
+ """
+ Run the Marionette tests
+ """
+ dirs = self.query_abs_dirs()
+
+ raw_log_file = os.path.join(dirs["abs_blob_upload_dir"], "marionette_raw.log")
+ error_summary_file = os.path.join(
+ dirs["abs_blob_upload_dir"], "marionette_errorsummary.log"
+ )
+ html_report_file = os.path.join(dirs["abs_blob_upload_dir"], "report.html")
+
+ config_fmt_args = {
+ # emulator builds require a longer timeout
+ "timeout": 60000 if self.config.get("emulator") else 10000,
+ "profile": os.path.join(dirs["abs_work_dir"], "profile"),
+ "xml_output": os.path.join(dirs["abs_work_dir"], "output.xml"),
+ "html_output": os.path.join(dirs["abs_blob_upload_dir"], "output.html"),
+ "logcat_dir": dirs["abs_work_dir"],
+ "emulator": "arm",
+ "symbols_path": self.symbols_path,
+ "binary": self.binary_path,
+ "address": self.config.get("marionette_address"),
+ "raw_log_file": raw_log_file,
+ "error_summary_file": error_summary_file,
+ "html_report_file": html_report_file,
+ "gecko_log": dirs["abs_blob_upload_dir"],
+ "this_chunk": self.config.get("this_chunk", 1),
+ "total_chunks": self.config.get("total_chunks", 1),
+ }
+
+ self.info("The emulator type: %s" % config_fmt_args["emulator"])
+ # build the marionette command arguments
+ python = self.query_python_path("python")
+
+ cmd = [python, "-u", os.path.join(dirs["abs_marionette_dir"], "runtests.py")]
+
+ manifest = os.path.join(
+ dirs["abs_marionette_tests_dir"], self.config["test_manifest"]
+ )
+
+ if self.config.get("app_arg"):
+ config_fmt_args["app_arg"] = self.config["app_arg"]
+
+ cmd.extend(["--setpref={}".format(p) for p in self.config["extra_prefs"]])
+
+ cmd.append("--gecko-log=-")
+
+ if self.config.get("structured_output"):
+ cmd.append("--log-raw=-")
+
+ if self.config["disable_fission"]:
+ cmd.append("--disable-fission")
+ cmd.extend(["--setpref=fission.autostart=false"])
+
+ for arg in self.config["suite_definitions"][self.test_suite]["options"]:
+ cmd.append(arg % config_fmt_args)
+
+        # Make sure that the logging directory exists
+        if self.mkdir_p(dirs["abs_blob_upload_dir"]) == -1:
+            self.fatal("Could not create blobber upload directory")
+
+ test_paths = json.loads(os.environ.get("MOZHARNESS_TEST_PATHS", '""'))
+
+ if test_paths and "marionette" in test_paths:
+ paths = [
+ os.path.join(dirs["abs_test_install_dir"], "marionette", "tests", p)
+ for p in test_paths["marionette"]
+ ]
+ cmd.extend(paths)
+ else:
+ cmd.append(manifest)
+
+ try_options, try_tests = self.try_args("marionette")
+ cmd.extend(self.query_tests_args(try_tests, str_format_values=config_fmt_args))
+
+ env = {}
+ if self.query_minidump_stackwalk():
+ env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
+ env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
+ env["RUST_BACKTRACE"] = "full"
+
+ if self.config["allow_software_gl_layers"]:
+ env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"
+
+ if self.config["headless"]:
+ env["MOZ_HEADLESS"] = "1"
+ env["MOZ_HEADLESS_WIDTH"] = self.config["headless_width"]
+ env["MOZ_HEADLESS_HEIGHT"] = self.config["headless_height"]
+
+ if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
+ self.mkdir_p(env["MOZ_UPLOAD_DIR"])
+
+ # Causes Firefox to crash when using non-local connections.
+ env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
+
+ env = self.query_env(partial_env=env)
+
+ try:
+ cwd = self._query_tests_dir()
+ except Exception as e:
+ self.fatal(
+ "Don't know how to run --test-suite '{0}': {1}!".format(
+ self.test_suite, e
+ )
+ )
+
+ marionette_parser = self.parser_class(
+ config=self.config,
+ log_obj=self.log_obj,
+ error_list=BaseErrorList + HarnessErrorList,
+ strict=False,
+ )
+ return_code = self.run_command(
+ cmd, cwd=cwd, output_timeout=1000, output_parser=marionette_parser, env=env
+ )
+ level = INFO
+ tbpl_status, log_level, summary = marionette_parser.evaluate_parser(
+ return_code=return_code
+ )
+ marionette_parser.append_tinderboxprint_line("marionette")
+
+ qemu = os.path.join(dirs["abs_work_dir"], "qemu.log")
+ if os.path.isfile(qemu):
+ self.copyfile(qemu, os.path.join(dirs["abs_blob_upload_dir"], "qemu.log"))
+
+ # dump logcat output if there were failures
+ if self.config.get("emulator"):
+ if (
+ marionette_parser.failed != "0"
+ or "T-FAIL" in marionette_parser.tsummary
+ ):
+ logcat = os.path.join(dirs["abs_work_dir"], "emulator-5554.log")
+ if os.access(logcat, os.F_OK):
+ self.info("dumping logcat")
+ self.run_command(["cat", logcat], error_list=LogcatErrorList)
+ else:
+ self.info("no logcat file found")
+ else:
+ # .. or gecko.log if it exists
+ gecko_log = os.path.join(self.config["base_work_dir"], "gecko.log")
+ if os.access(gecko_log, os.F_OK):
+ self.info("dumping gecko.log")
+ self.run_command(["cat", gecko_log])
+ self.rmtree(gecko_log)
+ else:
+ self.info("gecko.log not found")
+
+ marionette_parser.print_summary("marionette")
+
+ self.log(
+ "Marionette exited with return code %s: %s" % (return_code, tbpl_status),
+ level=level,
+ )
+ self.record_status(tbpl_status)
+
+
+if __name__ == "__main__":
+ marionetteTest = MarionetteTest()
+ marionetteTest.run_and_exit()
diff --git a/testing/mozharness/scripts/multil10n.py b/testing/mozharness/scripts/multil10n.py
new file mode 100755
index 0000000000..ae5c013fc7
--- /dev/null
+++ b/testing/mozharness/scripts/multil10n.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""multil10n.py
+
+"""
+
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.mozilla.l10n.multi_locale_build import MultiLocaleBuild
+
+if __name__ == "__main__":
+ multi_locale_build = MultiLocaleBuild()
+ multi_locale_build.run_and_exit()
diff --git a/testing/mozharness/scripts/openh264_build.py b/testing/mozharness/scripts/openh264_build.py
new file mode 100755
index 0000000000..9905ca7b31
--- /dev/null
+++ b/testing/mozharness/scripts/openh264_build.py
@@ -0,0 +1,470 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+import glob
+import os
+import re
+import subprocess
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+# import the guts
+import mozharness
+from mozharness.base.log import DEBUG, ERROR, FATAL
+from mozharness.base.transfer import TransferMixin
+from mozharness.base.vcs.vcsbase import VCSScript
+from mozharness.mozilla.tooltool import TooltoolMixin
+
+external_tools_path = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
+ "external_tools",
+)
+
+
+class OpenH264Build(TransferMixin, VCSScript, TooltoolMixin):
+ all_actions = [
+ "clobber",
+ "get-tooltool",
+ "checkout-sources",
+ "build",
+ "test",
+ "package",
+ "dump-symbols",
+ ]
+
+ default_actions = [
+ "get-tooltool",
+ "checkout-sources",
+ "build",
+ "package",
+ "dump-symbols",
+ ]
+
+ config_options = [
+ [
+ ["--repo"],
+ {
+ "dest": "repo",
+ "help": "OpenH264 repository to use",
+ "default": "https://github.com/dminor/openh264.git",
+ },
+ ],
+ [
+ ["--rev"],
+ {"dest": "revision", "help": "revision to checkout", "default": "master"},
+ ],
+ [
+ ["--debug"],
+ {
+ "dest": "debug_build",
+ "action": "store_true",
+ "help": "Do a debug build",
+ },
+ ],
+ [
+ ["--arch"],
+ {
+ "dest": "arch",
+ "help": "Arch type to use (x64, x86, arm, or aarch64)",
+ },
+ ],
+ [
+ ["--os"],
+ {
+ "dest": "operating_system",
+ "help": "Specify the operating system to build for",
+ },
+ ],
+ [
+ ["--branch"],
+ {
+ "dest": "branch",
+ "help": "dummy option",
+ },
+ ],
+ [
+ ["--build-pool"],
+ {
+ "dest": "build_pool",
+ "help": "dummy option",
+ },
+ ],
+ ]
+
+ def __init__(
+ self,
+ require_config_file=False,
+ config={},
+ all_actions=all_actions,
+ default_actions=default_actions,
+ ):
+
+ # Default configuration
+ default_config = {
+ "debug_build": False,
+ "upload_ssh_key": "~/.ssh/ffxbld_rsa",
+ "upload_ssh_user": "ffxbld",
+ "upload_ssh_host": "upload.ffxbld.productdelivery.prod.mozaws.net",
+ "upload_path_base": "/tmp/openh264",
+ }
+ default_config.update(config)
+
+ VCSScript.__init__(
+ self,
+ config_options=self.config_options,
+ require_config_file=require_config_file,
+ config=default_config,
+ all_actions=all_actions,
+ default_actions=default_actions,
+ )
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ dirs = super(OpenH264Build, self).query_abs_dirs()
+ dirs["abs_upload_dir"] = os.path.join(dirs["abs_work_dir"], "upload")
+ self.abs_dirs = dirs
+ return self.abs_dirs
+
+ def get_tooltool(self):
+ c = self.config
+ if not c.get("tooltool_manifest_file"):
+            self.info(
+                "Skipping tooltool fetching since no tooltool manifest was specified"
+            )
+ return
+ dirs = self.query_abs_dirs()
+ self.mkdir_p(dirs["abs_work_dir"])
+ manifest = os.path.join(
+ dirs["abs_src_dir"],
+ "testing",
+ "mozharness",
+ "configs",
+ "openh264",
+ "tooltool-manifests",
+ c["tooltool_manifest_file"],
+ )
+ self.info("Getting tooltool files from manifest (%s)" % manifest)
+ try:
+ self.tooltool_fetch(
+ manifest=manifest,
+ output_dir=os.path.join(dirs["abs_work_dir"]),
+ cache=c.get("tooltool_cache"),
+ )
+ except KeyError:
+ self.error("missing a required key.")
+
+ def query_package_name(self):
+ if self.config["arch"] in ("x64", "aarch64"):
+ bits = "64"
+ else:
+ bits = "32"
+ version = self.config["revision"]
+
+ if sys.platform in ("linux2", "linux"):
+ if self.config.get("operating_system") == "android":
+ return "openh264-android-{arch}-{version}.zip".format(
+ version=version, arch=self.config["arch"]
+ )
+ elif self.config.get("operating_system") == "darwin":
+ suffix = ""
+ if self.config["arch"] != "x64":
+ suffix = "-" + self.config["arch"]
+ return "openh264-macosx{bits}{suffix}-{version}.zip".format(
+ version=version, bits=bits, suffix=suffix
+ )
+ else:
+ return "openh264-linux{bits}-{version}.zip".format(
+ version=version, bits=bits
+ )
+ elif sys.platform == "win32":
+ if self.config["arch"] == "aarch64":
+ return "openh264-win64-aarch64-{version}.zip".format(version=version)
+ else:
+ return "openh264-win{bits}-{version}.zip".format(
+ version=version, bits=bits
+ )
+ self.fatal("can't determine platform")
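+
+    # For instance (hypothetical values): on Linux, with arch="x64",
+    # revision="abcdef0" and no operating_system override, this returns
+    # "openh264-linux64-abcdef0.zip".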
+
+ def query_make_params(self):
+ retval = []
+ if self.config["debug_build"]:
+ retval.append("BUILDTYPE=Debug")
+
+ if self.config["arch"] in ("x64", "aarch64"):
+ retval.append("ENABLE64BIT=Yes")
+ else:
+ retval.append("ENABLE64BIT=No")
+
+ if self.config["arch"] == "x86":
+ retval.append("ARCH=x86")
+ elif self.config["arch"] == "x64":
+ retval.append("ARCH=x86_64")
+ elif self.config["arch"] == "aarch64":
+ retval.append("ARCH=arm64")
+ else:
+ self.fatal("Unknown arch: {}".format(self.config["arch"]))
+
+ if "operating_system" in self.config:
+ retval.append("OS=%s" % self.config["operating_system"])
+ if self.config["operating_system"] == "android":
+ retval.append("TARGET=invalid")
+ retval.append("NDKLEVEL=%s" % self.config["min_sdk"])
+ retval.append("NDKROOT=%s/android-ndk" % os.environ["MOZ_FETCHES_DIR"])
+ retval.append("NDK_TOOLCHAIN_VERSION=clang")
+ if self.config["operating_system"] == "darwin":
+ retval.append("OS=darwin")
+
+ if self._is_windows():
+ retval.append("OS=msvc")
+ retval.append("CC=clang-cl")
+ retval.append("CXX=clang-cl")
+ if self.config["arch"] == "aarch64":
+ retval.append("CXX_LINK_O=-nologo --target=aarch64-windows-msvc -Fe$@")
+ else:
+ retval.append("CC=clang")
+ retval.append("CXX=clang++")
+
+ return retval
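+
+    # A hedged illustration: for a hypothetical Linux config with
+    # debug_build=True and arch="x64", query_make_params returns
+    # ["BUILDTYPE=Debug", "ENABLE64BIT=Yes", "ARCH=x86_64",
+    #  "CC=clang", "CXX=clang++"].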
+
+ def query_upload_ssh_key(self):
+ return self.config["upload_ssh_key"]
+
+ def query_upload_ssh_host(self):
+ return self.config["upload_ssh_host"]
+
+ def query_upload_ssh_user(self):
+ return self.config["upload_ssh_user"]
+
+ def query_upload_ssh_path(self):
+ return "%s/%s" % (self.config["upload_path_base"], self.config["revision"])
+
+ def run_make(self, target, capture_output=False):
+ make = (
+ f"{os.environ['MOZ_FETCHES_DIR']}/mozmake/mozmake"
+ if sys.platform == "win32"
+ else "make"
+ )
+ cmd = [make, target] + self.query_make_params()
+ dirs = self.query_abs_dirs()
+ repo_dir = os.path.join(dirs["abs_work_dir"], "openh264")
+ env = None
+ if self.config.get("partial_env"):
+ env = self.query_env(self.config["partial_env"])
+ kwargs = dict(cwd=repo_dir, env=env)
+ if capture_output:
+ return self.get_output_from_command(cmd, **kwargs)
+ else:
+ return self.run_command(cmd, **kwargs)
+
+    def _git_checkout(self, repo, repo_dir, rev):
+        try:
+            # check=True makes failed git commands raise CalledProcessError,
+            # so the retry() wrapper in checkout_sources can catch and retry.
+            subprocess.run(
+                ["git", "clone", "-q", "--no-checkout", repo, repo_dir], check=True
+            )
+            subprocess.run(
+                ["git", "checkout", "-q", "-f", f"{rev}^0"], cwd=repo_dir, check=True
+            )
+        except Exception:
+            self.rmtree(repo_dir)
+            raise
+        return True
+
+ def checkout_sources(self):
+ repo = self.config["repo"]
+ rev = self.config["revision"]
+
+ dirs = self.query_abs_dirs()
+ repo_dir = os.path.join(dirs["abs_work_dir"], "openh264")
+
+ if self._is_windows():
+ # We don't have git on our windows builders, so download a zip
+ # package instead.
+ path = repo.replace(".git", "/archive/") + rev + ".zip"
+ self.download_file(path)
+ self.unzip(rev + ".zip", dirs["abs_work_dir"])
+ self.move(
+ os.path.join(dirs["abs_work_dir"], "openh264-" + rev),
+ os.path.join(dirs["abs_work_dir"], "openh264"),
+ )
+
+ # Retrieve in-tree version of gmp-api
+ self.copytree(
+ os.path.join(dirs["abs_src_dir"], "dom", "media", "gmp", "gmp-api"),
+ os.path.join(repo_dir, "gmp-api"),
+ )
+
+ # We need gas-preprocessor.pl for arm64 builds
+ if self.config["arch"] == "aarch64":
+ openh264_dir = os.path.join(dirs["abs_work_dir"], "openh264")
+ self.download_file(
+ (
+ "https://raw.githubusercontent.com/libav/"
+ "gas-preprocessor/c2bc63c96678d9739509e58"
+ "7aa30c94bdc0e636d/gas-preprocessor.pl"
+ ),
+ parent_dir=openh264_dir,
+ )
+ self.chmod(os.path.join(openh264_dir, "gas-preprocessor.pl"), 744)
+
+ # gas-preprocessor.pl expects cpp to exist
+ # os.symlink is not available on Windows until we switch to
+ # Python 3.
+ os.system(
+ "ln -s %s %s"
+ % (
+ os.path.join(
+ os.environ["MOZ_FETCHES_DIR"], "clang", "bin", "clang.exe"
+ ),
+ os.path.join(openh264_dir, "cpp"),
+ )
+ )
+ return 0
+
+ self.retry(
+ self._git_checkout,
+ error_level=FATAL,
+ error_message="Automation Error: couldn't clone repo",
+ args=(repo, repo_dir, rev),
+ )
+
+ # Checkout gmp-api
+ # TODO: Nothing here updates it yet, or enforces versions!
+ if not os.path.exists(os.path.join(repo_dir, "gmp-api")):
+ retval = self.run_make("gmp-bootstrap")
+ if retval != 0:
+ self.fatal("couldn't bootstrap gmp")
+ else:
+ self.info("skipping gmp bootstrap - we have it locally")
+
+ # Checkout gtest
+ # TODO: Requires svn!
+ if not os.path.exists(os.path.join(repo_dir, "gtest")):
+ retval = self.run_make("gtest-bootstrap")
+ if retval != 0:
+ self.fatal("couldn't bootstrap gtest")
+ else:
+ self.info("skipping gtest bootstrap - we have it locally")
+
+        # Any nonzero bootstrap result above is fatal, so reaching this point
+        # means success; returning a constant also avoids a NameError when
+        # both bootstraps were skipped.
+        return 0
+
+ def build(self):
+ retval = self.run_make("plugin")
+ if retval != 0:
+ self.fatal("couldn't build plugin")
+
+ def package(self):
+ dirs = self.query_abs_dirs()
+ srcdir = os.path.join(dirs["abs_work_dir"], "openh264")
+ package_name = self.query_package_name()
+ package_file = os.path.join(dirs["abs_work_dir"], package_name)
+ if os.path.exists(package_file):
+ os.unlink(package_file)
+ to_package = []
+ for f in glob.glob(os.path.join(srcdir, "*gmpopenh264*")):
+            if not re.search(
+                r"(?:lib)?gmpopenh264(?!\.\d)\.(?:dylib|so|dll|info)(?!\.\d)", f
+            ):
+ # Don't package unnecessary zip bloat
+ # Blocks things like libgmpopenh264.2.dylib and libgmpopenh264.so.1
+ self.log("Skipping packaging of {package}".format(package=f))
+ continue
+ to_package.append(os.path.basename(f))
+ self.log("Packaging files %s" % to_package)
+ cmd = ["zip", package_file] + to_package
+ retval = self.run_command(cmd, cwd=srcdir)
+ if retval != 0:
+ self.fatal("couldn't make package")
+ self.copy_to_upload_dir(
+ package_file, dest=os.path.join(srcdir, "artifacts", package_name)
+ )
+
+ # Taskcluster expects this path to exist, but we don't use it
+ # because our builds are private.
+ path = os.path.join(
+ self.query_abs_dirs()["abs_work_dir"], "..", "public", "build"
+ )
+ self.mkdir_p(path)
+
+ def dump_symbols(self):
+ dirs = self.query_abs_dirs()
+ c = self.config
+ srcdir = os.path.join(dirs["abs_work_dir"], "openh264")
+ package_name = self.run_make("echo-plugin-name", capture_output=True)
+ if not package_name:
+ self.fatal("failure running make")
+ zip_package_name = self.query_package_name()
+        if not zip_package_name.endswith(".zip"):
+ self.fatal("Unexpected zip_package_name")
+ symbol_package_name = "{base}.symbols.zip".format(base=zip_package_name[:-4])
+ symbol_zip_path = os.path.join(srcdir, "artifacts", symbol_package_name)
+ repo_dir = os.path.join(dirs["abs_work_dir"], "openh264")
+ env = None
+ if self.config.get("partial_env"):
+ env = self.query_env(self.config["partial_env"])
+ kwargs = dict(cwd=repo_dir, env=env)
+ dump_syms = os.path.join(dirs["abs_work_dir"], c["dump_syms_binary"])
+ self.chmod(dump_syms, 0o755)
+ python = self.query_exe("python3")
+ cmd = [
+ python,
+ os.path.join(external_tools_path, "packagesymbols.py"),
+ "--symbol-zip",
+ symbol_zip_path,
+ dump_syms,
+ os.path.join(srcdir, package_name),
+ ]
+ self.run_command(cmd, **kwargs)
+
+ def test(self):
+ retval = self.run_make("test")
+ if retval != 0:
+ self.fatal("test failures")
+
+ def copy_to_upload_dir(
+ self,
+ target,
+ dest=None,
+ log_level=DEBUG,
+ error_level=ERROR,
+ compress=False,
+ upload_dir=None,
+ ):
+ """Copy target file to upload_dir/dest.
+
+ Potentially update a manifest in the future if we go that route.
+
+ Currently only copies a single file; would be nice to allow for
+        recursive copying; that would probably be done by creating a helper
+ _copy_file_to_upload_dir().
+ """
+ dest_filename_given = dest is not None
+ if upload_dir is None:
+ upload_dir = self.query_abs_dirs()["abs_upload_dir"]
+ if dest is None:
+ dest = os.path.basename(target)
+ if dest.endswith("/"):
+ dest_file = os.path.basename(target)
+ dest_dir = os.path.join(upload_dir, dest)
+ dest_filename_given = False
+ else:
+ dest_file = os.path.basename(dest)
+ dest_dir = os.path.join(upload_dir, os.path.dirname(dest))
+ if compress and not dest_filename_given:
+ dest_file += ".gz"
+ dest = os.path.join(dest_dir, dest_file)
+ if not os.path.exists(target):
+ self.log("%s doesn't exist!" % target, level=error_level)
+ return None
+ self.mkdir_p(dest_dir)
+ self.copyfile(target, dest, log_level=log_level, compress=compress)
+ if os.path.exists(dest):
+ return dest
+ else:
+ self.log("%s doesn't exist after copy!" % dest, level=error_level)
+ return None
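+
+    # Sketch of typical use (hypothetical paths): copy_to_upload_dir(
+    # "/builds/openh264.zip", dest="artifacts/") copies the file to
+    # <abs_upload_dir>/artifacts/openh264.zip and returns that path.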
+
+
+# main {{{1
+if __name__ == "__main__":
+ myScript = OpenH264Build()
+ myScript.run_and_exit()
diff --git a/testing/mozharness/scripts/raptor_script.py b/testing/mozharness/scripts/raptor_script.py
new file mode 100644
index 0000000000..be2ed181e8
--- /dev/null
+++ b/testing/mozharness/scripts/raptor_script.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""raptor
+
+"""
+
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.mozilla.testing.raptor import Raptor
+
+if __name__ == "__main__":
+ raptor = Raptor()
+ raptor.run_and_exit()
diff --git a/testing/mozharness/scripts/release/bouncer_check.py b/testing/mozharness/scripts/release/bouncer_check.py
new file mode 100644
index 0000000000..7a7e39b274
--- /dev/null
+++ b/testing/mozharness/scripts/release/bouncer_check.py
@@ -0,0 +1,202 @@
+#!/usr/bin/env python
+# lint_ignore=E501
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+""" bouncer_check.py
+
+A script to check HTTP statuses of Bouncer products to be shipped.
+"""
+
+import os
+import sys
+
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.script import BaseScript
+from mozharness.mozilla.automation import EXIT_STATUS_DICT, TBPL_FAILURE
+
+BOUNCER_URL_PATTERN = "{bouncer_prefix}?product={product}&os={os}&lang={lang}"
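+# For example (hypothetical values), this pattern expands to:
+#   https://download.mozilla.org/?product=firefox-99.0&os=win64&lang=en-US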
+
+
+class BouncerCheck(BaseScript):
+ config_options = [
+ [
+ ["--version"],
+ {
+ "dest": "version",
+ "help": "Version of release, eg: 39.0b5",
+ },
+ ],
+ [
+ ["--product-field"],
+ {
+ "dest": "product_field",
+ "help": "Version field of release from product details, eg: LATEST_FIREFOX_VERSION", # NOQA: E501
+ },
+ ],
+ [
+ ["--products-url"],
+ {
+ "dest": "products_url",
+ "help": "The URL of the current Firefox product versions",
+ "type": str,
+ "default": "https://product-details.mozilla.org/1.0/firefox_versions.json",
+ },
+ ],
+ [
+ ["--previous-version"],
+ {
+ "dest": "prev_versions",
+ "action": "extend",
+ "help": "Previous version(s)",
+ },
+ ],
+ [
+ ["--locale"],
+ {
+ "dest": "locales",
+ # Intentionally limited for several reasons:
+ # 1) faster to check
+                # 2) no need to deal with the situation where a new locale is
+                #    introduced and we do not have partials for it yet
+ # 3) it mimics the old Sentry behaviour that worked for ages
+ # 4) no need to handle ja-JP-mac
+ "default": ["en-US", "de", "it", "zh-TW"],
+ "action": "append",
+ "help": "List of locales to check.",
+ },
+ ],
+ [
+ ["-j", "--parallelization"],
+ {
+ "dest": "parallelization",
+ "default": 20,
+ "type": int,
+ "help": "Number of HTTP sessions running in parallel",
+ },
+ ],
+ ]
+
+ def __init__(self, require_config_file=True):
+ super(BouncerCheck, self).__init__(
+ config_options=self.config_options,
+ require_config_file=require_config_file,
+ config={
+ "cdn_urls": [
+ "download-installer.cdn.mozilla.net",
+ "download.cdn.mozilla.net",
+ "download.mozilla.org",
+ "archive.mozilla.org",
+ ],
+ },
+ all_actions=[
+ "check-bouncer",
+ ],
+ default_actions=[
+ "check-bouncer",
+ ],
+ )
+
+ def _pre_config_lock(self, rw_config):
+ super(BouncerCheck, self)._pre_config_lock(rw_config)
+
+ if "product_field" not in self.config:
+ return
+
+ firefox_versions = self.load_json_url(self.config["products_url"])
+
+ if self.config["product_field"] not in firefox_versions:
+ self.fatal("Unknown Firefox label: {}".format(self.config["product_field"]))
+ self.config["version"] = firefox_versions[self.config["product_field"]]
+ self.log("Set Firefox version {}".format(self.config["version"]))
+
+ def check_url(self, session, url):
+ from redo import retry
+ from requests.exceptions import HTTPError
+
+ try:
+ from urllib.parse import urlparse
+ except ImportError:
+ # Python 2
+ from urlparse import urlparse
+
+ def do_check_url():
+ self.log("Checking {}".format(url))
+ r = session.head(url, verify=True, timeout=10, allow_redirects=True)
+ try:
+ r.raise_for_status()
+ except HTTPError:
+ self.error("FAIL: {}, status: {}".format(url, r.status_code))
+ raise
+
+ final_url = urlparse(r.url)
+ if final_url.scheme != "https":
+ self.error("FAIL: URL scheme is not https: {}".format(r.url))
+ self.return_code = EXIT_STATUS_DICT[TBPL_FAILURE]
+
+ if final_url.netloc not in self.config["cdn_urls"]:
+ self.error("FAIL: host not in allowed locations: {}".format(r.url))
+ self.return_code = EXIT_STATUS_DICT[TBPL_FAILURE]
+
+ try:
+ retry(do_check_url, sleeptime=3, max_sleeptime=10, attempts=3)
+ except HTTPError:
+ # The error was already logged above.
+ self.return_code = EXIT_STATUS_DICT[TBPL_FAILURE]
+ return
+
+ def get_urls(self):
+ for product in self.config["products"].values():
+ product_name = product["product-name"] % {"version": self.config["version"]}
+ for bouncer_platform in product["platforms"]:
+ for locale in self.config["locales"]:
+ url = BOUNCER_URL_PATTERN.format(
+ bouncer_prefix=self.config["bouncer_prefix"],
+ product=product_name,
+ os=bouncer_platform,
+ lang=locale,
+ )
+ yield url
+
+ for product in self.config.get("partials", {}).values():
+ for prev_version in self.config.get("prev_versions", []):
+ product_name = product["product-name"] % {
+ "version": self.config["version"],
+ "prev_version": prev_version,
+ }
+ for bouncer_platform in product["platforms"]:
+ for locale in self.config["locales"]:
+ url = BOUNCER_URL_PATTERN.format(
+ bouncer_prefix=self.config["bouncer_prefix"],
+ product=product_name,
+ os=bouncer_platform,
+ lang=locale,
+ )
+ yield url
+
+ def check_bouncer(self):
+ import concurrent.futures as futures
+
+ import requests
+
+ session = requests.Session()
+ http_adapter = requests.adapters.HTTPAdapter(
+ pool_connections=self.config["parallelization"],
+ pool_maxsize=self.config["parallelization"],
+ )
+ session.mount("https://", http_adapter)
+ session.mount("http://", http_adapter)
+
+ with futures.ThreadPoolExecutor(self.config["parallelization"]) as e:
+ fs = []
+ for url in self.get_urls():
+ fs.append(e.submit(self.check_url, session, url))
+ for f in futures.as_completed(fs):
+ f.result()
+
+
+if __name__ == "__main__":
+ BouncerCheck().run_and_exit()
diff --git a/testing/mozharness/scripts/release/generate-checksums.py b/testing/mozharness/scripts/release/generate-checksums.py
new file mode 100644
index 0000000000..6d01923e44
--- /dev/null
+++ b/testing/mozharness/scripts/release/generate-checksums.py
@@ -0,0 +1,263 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import binascii
+import hashlib
+import os
+import re
+import sys
+from multiprocessing.pool import ThreadPool
+
+import six
+
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.python import VirtualenvMixin, virtualenv_config_options
+from mozharness.base.script import BaseScript
+from mozharness.mozilla.checksums import parse_checksums_file
+from mozharness.mozilla.merkle import MerkleTree
+
+
+class ChecksumsGenerator(BaseScript, VirtualenvMixin):
+ config_options = [
+ [
+ ["--stage-product"],
+ {
+ "dest": "stage_product",
+ "help": "Name of product used in file server's directory structure, "
+ "e.g.: firefox, mobile",
+ },
+ ],
+ [
+ ["--version"],
+ {
+ "dest": "version",
+ "help": "Version of release, e.g.: 59.0b5",
+ },
+ ],
+ [
+ ["--build-number"],
+ {
+ "dest": "build_number",
+ "help": "Build number of release, e.g.: 2",
+ },
+ ],
+ [
+ ["--bucket-name"],
+ {
+ "dest": "bucket_name",
+ "help": "Full bucket name e.g.: net-mozaws-prod-delivery-{firefox,archive}.",
+ },
+ ],
+ [
+ ["-j", "--parallelization"],
+ {
+ "dest": "parallelization",
+ "default": 20,
+ "type": int,
+                "help": "Number of checksums files to download concurrently",
+ },
+ ],
+ [
+ ["--branch"],
+ {
+ "dest": "branch",
+ "help": "dummy option",
+ },
+ ],
+ [
+ ["--build-pool"],
+ {
+ "dest": "build_pool",
+ "help": "dummy option",
+ },
+ ],
+ ] + virtualenv_config_options
+
+ def __init__(self):
+ BaseScript.__init__(
+ self,
+ config_options=self.config_options,
+ require_config_file=False,
+ config={
+ "virtualenv_modules": [
+ "boto",
+ ],
+ "virtualenv_path": "venv",
+ },
+ all_actions=[
+ "create-virtualenv",
+ "collect-individual-checksums",
+ "create-big-checksums",
+ "create-summary",
+ ],
+ default_actions=[
+ "create-virtualenv",
+ "collect-individual-checksums",
+ "create-big-checksums",
+ "create-summary",
+ ],
+ )
+
+ self.checksums = {}
+ self.file_prefix = self._get_file_prefix()
+
+ def _pre_config_lock(self, rw_config):
+ super(ChecksumsGenerator, self)._pre_config_lock(rw_config)
+
+        # These defaults are set here rather than in the config because
+        # default lists cannot be completely overridden, only appended to.
+ if not self.config.get("formats"):
+ self.config["formats"] = ["sha512", "sha256"]
+
+ if not self.config.get("includes"):
+ self.config["includes"] = [
+ r"^.*\.tar\.bz2$",
+ r"^.*\.tar\.xz$",
+ r"^.*\.snap$",
+ r"^.*\.dmg$",
+ r"^.*\.pkg$",
+ r"^.*\.bundle$",
+ r"^.*\.mar$",
+ r"^.*Setup.*\.exe$",
+ r"^.*Installer\.exe$",
+ r"^.*\.msi$",
+ r"^.*\.xpi$",
+ r"^.*fennec.*\.apk$",
+ r"^.*/jsshell.*$",
+ ]
+
+ def _get_file_prefix(self):
+ return "pub/{}/candidates/{}-candidates/build{}/".format(
+ self.config["stage_product"],
+ self.config["version"],
+ self.config["build_number"],
+ )
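+
+    # E.g., with stage_product="firefox", version="99.0b5" and
+    # build_number="2" (hypothetical), this yields
+    # "pub/firefox/candidates/99.0b5-candidates/build2/".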
+
+ def _get_sums_filename(self, format_):
+ return "{}SUMS".format(format_.upper())
+
+ def _get_summary_filename(self, format_):
+ return "{}SUMMARY".format(format_.upper())
+
+ def _get_hash_function(self, format_):
+ if format_ in ("sha256", "sha384", "sha512"):
+ return getattr(hashlib, format_)
+ else:
+ self.fatal("Unsupported format {}".format(format_))
+
+ def _get_bucket(self):
+ self.activate_virtualenv()
+ from boto import connect_s3
+
+ self.info("Connecting to S3")
+ conn = connect_s3(anon=True)
+ self.info("Connecting to bucket {}".format(self.config["bucket_name"]))
+ self.bucket = conn.get_bucket(self.config["bucket_name"])
+ return self.bucket
+
+ def collect_individual_checksums(self):
+ """This step grabs all of the small checksums files for the release,
+ filters out any unwanted files from within them, and adds the remainder
+ to self.checksums for subsequent steps to use."""
+ bucket = self._get_bucket()
+ self.info("File prefix is: {}".format(self.file_prefix))
+
+ # temporary holding place for checksums
+ raw_checksums = []
+
+ def worker(item):
+ self.debug("Downloading {}".format(item))
+ sums = bucket.get_key(item).get_contents_as_string()
+ raw_checksums.append(sums)
+
+ def find_checksums_files():
+ self.info("Getting key names from bucket")
+ checksum_files = {"beets": [], "checksums": []}
+ for key in bucket.list(prefix=self.file_prefix):
+ if key.key.endswith(".checksums"):
+ self.debug("Found checksums file: {}".format(key.key))
+ checksum_files["checksums"].append(key.key)
+ elif key.key.endswith(".beet"):
+ self.debug("Found beet file: {}".format(key.key))
+ checksum_files["beets"].append(key.key)
+ else:
+ self.debug("Ignoring non-checksums file: {}".format(key.key))
+ if checksum_files["beets"]:
+ self.log("Using beet format")
+ return checksum_files["beets"]
+ else:
+ self.log("Using checksums format")
+ return checksum_files["checksums"]
+
+ pool = ThreadPool(self.config["parallelization"])
+ pool.map(worker, find_checksums_files())
+
+ for c in raw_checksums:
+ for f, info in six.iteritems(parse_checksums_file(c)):
+ for pattern in self.config["includes"]:
+ if re.search(pattern, f):
+ if f in self.checksums:
+ if info == self.checksums[f]:
+ self.debug(
+ "Duplicate checksum for file {}"
+ " but the data matches;"
+ " continuing...".format(f)
+ )
+ continue
+ self.fatal(
+ "Found duplicate checksum entry for {}, "
+ "don't know which one to pick.".format(f)
+ )
+ if not set(self.config["formats"]) <= set(info["hashes"]):
+ self.fatal("Missing necessary format for file {}".format(f))
+ self.debug("Adding checksums for file: {}".format(f))
+ self.checksums[f] = info
+ break
+ else:
+ self.debug("Ignoring checksums for file: {}".format(f))
+
+ def create_summary(self):
+ """
+ This step computes a Merkle tree over the checksums for each format
+ and writes a file containing the head of the tree and inclusion proofs
+ for each file.
+ """
+ for fmt in self.config["formats"]:
+ hash_fn = self._get_hash_function(fmt)
+            files = sorted(self.checksums)
+ data = [self.checksums[fn]["hashes"][fmt] for fn in files]
+
+ tree = MerkleTree(hash_fn, data)
+ head = binascii.hexlify(tree.head())
+ proofs = [
+ binascii.hexlify(tree.inclusion_proof(i).to_rfc6962_bis())
+ for i in range(len(files))
+ ]
+
+ summary = self._get_summary_filename(fmt)
+ self.info("Creating summary file: {}".format(summary))
+
+ content = "{} TREE_HEAD\n".format(head.decode("ascii"))
+ for i in range(len(files)):
+ content += "{} {}\n".format(proofs[i].decode("ascii"), files[i])
+
+ self.write_to_file(summary, content)
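+
+        # Sketch of a resulting SUMMARY file (hypothetical hex digests):
+        #   <tree-head-hex> TREE_HEAD
+        #   <inclusion-proof-hex> linux-x86_64/en-US/firefox-99.0.tar.xz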
+
+ def create_big_checksums(self):
+ for fmt in self.config["formats"]:
+ sums = self._get_sums_filename(fmt)
+ self.info("Creating big checksums file: {}".format(sums))
+ with open(sums, "w+") as output_file:
+ for fn in sorted(self.checksums):
+ output_file.write(
+ "{} {}\n".format(
+ self.checksums[fn]["hashes"][fmt].decode("ascii"), fn
+ )
+ )
+
+
+if __name__ == "__main__":
+ myScript = ChecksumsGenerator()
+ myScript.run_and_exit()
diff --git a/testing/mozharness/scripts/release/update-verify-config-creator.py b/testing/mozharness/scripts/release/update-verify-config-creator.py
new file mode 100644
index 0000000000..9de0175577
--- /dev/null
+++ b/testing/mozharness/scripts/release/update-verify-config-creator.py
@@ -0,0 +1,642 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import math
+import os
+import pprint
+import re
+import sys
+
+from looseversion import LooseVersion
+from mozilla_version.gecko import GeckoVersion
+from mozilla_version.version import VersionType
+from six.moves.urllib.parse import urljoin
+
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.log import DEBUG, FATAL, INFO, WARNING
+from mozharness.base.script import BaseScript
+
+
+# Ensure all versions are 3-part (e.g. 99.1.0), and that any text
+# (e.g. 'esr') ends up in the last part.
+class CompareVersion(LooseVersion):
+ version = ""
+
+ def __init__(self, versionMap):
+ parts = versionMap.split(".")
+ # assume version is 99.9.0, look for 99.0
+ if len(parts) == 2:
+ intre = re.compile("([0-9.]+)(.*)")
+ match = intre.match(parts[-1])
+ if match:
+ parts[-1] = match.group(1)
+ parts.append("0%s" % match.group(2))
+ else:
+ parts.append("0")
+ self.version = ".".join(parts)
+        # Parse the normalized 3-part version string so LooseVersion
+        # comparisons work component-wise instead of on the raw string.
+        LooseVersion.__init__(self, self.version)
+
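+# For example, CompareVersion("60.0esr") normalizes to "60.0.0esr" before
+# comparison, so "60.0esr" and "60.0.1esr" sort consistently.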
+
+def is_triangualar(x):
+ """Check if a number is triangular (0, 1, 3, 6, 10, 15, ...)
+ see: https://en.wikipedia.org/wiki/Triangular_number#Triangular_roots_and_tests_for_triangular_numbers # noqa
+
+ >>> is_triangualar(0)
+ True
+ >>> is_triangualar(1)
+ True
+ >>> is_triangualar(2)
+ False
+ >>> is_triangualar(3)
+ True
+ >>> is_triangualar(4)
+ False
+ >>> all(is_triangualar(x) for x in [0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105])
+ True
+ >>> all(not is_triangualar(x) for x in [4, 5, 8, 9, 11, 17, 25, 29, 39, 44, 59, 61, 72, 98, 112])
+ True
+ """
+ # pylint --py3k W1619
+ n = (math.sqrt(8 * x + 1) - 1) / 2
+ return n == int(n)
+
+
+class UpdateVerifyConfigCreator(BaseScript):
+ config_options = [
+ [
+ ["--product"],
+ {
+ "dest": "product",
+ "help": "Product being tested, as used in the update URL and filenames. Eg: firefox", # NOQA: E501
+ },
+ ],
+ [
+ ["--stage-product"],
+ {
+ "dest": "stage_product",
+                "help": "Product being tested, as used in stage directories and Ship It. "
+                "If not passed, this is assumed to be the same as product.",
+ },
+ ],
+ [
+ ["--app-name"],
+ {
+ "dest": "app_name",
+ "help": "App name being tested. Eg: browser",
+ },
+ ],
+ [
+ ["--branch-prefix"],
+ {
+ "dest": "branch_prefix",
+ "help": "Prefix of release branch names. Eg: mozilla, comm",
+ },
+ ],
+ [
+ ["--channel"],
+ {
+ "dest": "channel",
+ "help": "Channel to run update verify against",
+ },
+ ],
+ [
+ ["--aus-server"],
+ {
+ "dest": "aus_server",
+ "default": "https://aus5.mozilla.org",
+ "help": "AUS server to run update verify against",
+ },
+ ],
+ [
+ ["--to-version"],
+ {
+ "dest": "to_version",
+ "help": "The version of the release being updated to. Eg: 59.0b5",
+ },
+ ],
+ [
+ ["--to-app-version"],
+ {
+ "dest": "to_app_version",
+ "help": "The in-app version of the release being updated to. Eg: 59.0",
+ },
+ ],
+ [
+ ["--to-display-version"],
+ {
+ "dest": "to_display_version",
+ "help": "The human-readable version of the release being updated to. Eg: 59.0 Beta 9", # NOQA: E501
+ },
+ ],
+ [
+ ["--to-build-number"],
+ {
+ "dest": "to_build_number",
+ "help": "The build number of the release being updated to",
+ },
+ ],
+ [
+ ["--to-buildid"],
+ {
+ "dest": "to_buildid",
+ "help": "The buildid of the release being updated to",
+ },
+ ],
+ [
+ ["--to-revision"],
+ {
+ "dest": "to_revision",
+ "help": "The revision that the release being updated to was built against",
+ },
+ ],
+ [
+ ["--partial-version"],
+ {
+ "dest": "partial_versions",
+ "default": [],
+ "action": "append",
+ "help": "A previous release version that is expected to receive a partial update. "
+ "Eg: 59.0b4. May be specified multiple times.",
+ },
+ ],
+ [
+ ["--last-watershed"],
+ {
+ "dest": "last_watershed",
+ "help": "The earliest version to include in the update verify config. Eg: 57.0b10",
+ },
+ ],
+ [
+ ["--include-version"],
+ {
+ "dest": "include_versions",
+ "default": [],
+ "action": "append",
+ "help": "Only include versions that match one of these regexes. "
+ "May be passed multiple times",
+ },
+ ],
+ [
+ ["--mar-channel-id-override"],
+ {
+ "dest": "mar_channel_id_options",
+ "default": [],
+ "action": "append",
+                "help": "A version regex and channel id string to override those versions with. "
+                "Eg: ^\\d+\\.\\d+(\\.\\d+)?$,firefox-mozilla-beta,firefox-mozilla-release "
+                "will set accepted mar channel ids to 'firefox-mozilla-beta' and "
+                "'firefox-mozilla-release' for x.y and x.y.z versions. "
+ "May be passed multiple times",
+ },
+ ],
+ [
+ ["--override-certs"],
+ {
+ "dest": "override_certs",
+ "default": None,
+                "help": "Certs to override the updater with prior to running update verify. "
+                "If passed, should be one of: dep, nightly, release. "
+                "If not passed, no certificate overriding will be configured.",
+ },
+ ],
+ [
+ ["--platform"],
+ {
+ "dest": "platform",
+ "help": "The platform to generate the update verify config for, in FTP-style",
+ },
+ ],
+ [
+ ["--updater-platform"],
+ {
+ "dest": "updater_platform",
+                "help": "The platform to run the updater on, in FTP-style. "
+                "If not specified, this is assumed to be the same as platform.",
+ },
+ ],
+ [
+ ["--archive-prefix"],
+ {
+ "dest": "archive_prefix",
+ "help": "The server/path to pull the current release from. "
+ "Eg: https://archive.mozilla.org/pub",
+ },
+ ],
+ [
+ ["--previous-archive-prefix"],
+ {
+ "dest": "previous_archive_prefix",
+                "help": "The server/path to pull the previous releases from. "
+                "If not specified, this is assumed to be the same as --archive-prefix.",
+ },
+ ],
+ [
+ ["--repo-path"],
+ {
+ "dest": "repo_path",
+ "help": (
+ "The repository (relative to the hg server root) that the current "
+                    "release was built from. Eg: releases/mozilla-beta"
+ ),
+ },
+ ],
+ [
+ ["--output-file"],
+ {
+ "dest": "output_file",
+ "help": "Where to write the update verify config to",
+ },
+ ],
+ [
+ ["--product-details-server"],
+ {
+ "dest": "product_details_server",
+ "default": "https://product-details.mozilla.org",
+ "help": "Product Details server to pull previous release info from. "
+ "Using anything other than the production server is likely to "
+ "cause issues with update verify.",
+ },
+ ],
+ [
+ ["--hg-server"],
+ {
+ "dest": "hg_server",
+ "default": "https://hg.mozilla.org",
+ "help": "Mercurial server to pull various previous and current version info from",
+ },
+ ],
+ [
+ ["--full-check-locale"],
+ {
+ "dest": "full_check_locales",
+ "default": ["de", "en-US", "ru"],
+ "action": "append",
+ "help": "A list of locales to generate full update verify checks for",
+ },
+ ],
+ ]
+
+ def __init__(self):
+ BaseScript.__init__(
+ self,
+ config_options=self.config_options,
+ config={},
+ all_actions=[
+ "gather-info",
+ "create-config",
+ "write-config",
+ ],
+ default_actions=[
+ "gather-info",
+ "create-config",
+ "write-config",
+ ],
+ )
+
+ def _pre_config_lock(self, rw_config):
+ super(UpdateVerifyConfigCreator, self)._pre_config_lock(rw_config)
+
+ if "updater_platform" not in self.config:
+ self.config["updater_platform"] = self.config["platform"]
+ if "stage_product" not in self.config:
+ self.config["stage_product"] = self.config["product"]
+ if "previous_archive_prefix" not in self.config:
+ self.config["previous_archive_prefix"] = self.config["archive_prefix"]
+        # str.rstrip returns a new string, so assign the result back;
+        # otherwise stripping the trailing slash would be a no-op.
+        self.config["archive_prefix"] = self.config["archive_prefix"].rstrip("/")
+        self.config["previous_archive_prefix"] = self.config[
+            "previous_archive_prefix"
+        ].rstrip("/")
+ self.config["mar_channel_id_overrides"] = {}
+ for override in self.config["mar_channel_id_options"]:
+ pattern, override_str = override.split(",", 1)
+ self.config["mar_channel_id_overrides"][pattern] = override_str
+
+ def _get_branch_url(self, branch_prefix, version):
+ version = GeckoVersion.parse(version)
+ branch = None
+ if version.version_type == VersionType.BETA:
+ branch = "releases/{}-beta".format(branch_prefix)
+ elif version.version_type == VersionType.ESR:
+ branch = "releases/{}-esr{}".format(branch_prefix, version.major_number)
+ elif version.version_type == VersionType.RELEASE:
+ if branch_prefix == "comm":
+ # Thunderbird does not have ESR releases, regular releases
+ # go in an ESR branch
+ branch = "releases/{}-esr{}".format(branch_prefix, version.major_number)
+ else:
+ branch = "releases/{}-release".format(branch_prefix)
+ if not branch:
+ raise Exception("Cannot determine branch, cannot continue!")
+
+ return branch
+
+ def _get_update_paths(self):
+ from mozrelease.l10n import getPlatformLocales
+ from mozrelease.paths import getCandidatesDir
+ from mozrelease.platforms import ftp2infoFile
+ from mozrelease.versions import MozillaVersion
+
+ self.update_paths = {}
+
+ ret = self._retry_download(
+ "{}/1.0/{}.json".format(
+ self.config["product_details_server"],
+ self.config["stage_product"],
+ ),
+ "WARNING",
+ )
+ releases = json.load(ret)["releases"]
+ for release_name, release_info in reversed(
+ sorted(releases.items(), key=lambda x: MozillaVersion(x[1]["version"]))
+ ):
+            # We need to use release_name instead of release_info, since the
+            # esr string is included in the name; we rely on this later.
+ product, version = release_name.split("-", 1)
+ tag = "{}_{}_RELEASE".format(product.upper(), version.replace(".", "_"))
+
+ # Exclude any releases that don't match one of our include version
+ # regexes. This is generally to avoid including versions from other
+ # channels. Eg: including betas when testing releases
+ for v in self.config["include_versions"]:
+ if re.match(v, version):
+ break
+ else:
+ self.log(
+ "Skipping release whose version doesn't match any "
+ "include_version pattern: %s" % release_name,
+ level=INFO,
+ )
+ continue
+
+ # We also have to trim out previous releases that aren't in the same
+ # product line, too old, etc.
+ if self.config["stage_product"] != product:
+ self.log(
+ "Skipping release that doesn't match product name: %s"
+ % release_name,
+ level=INFO,
+ )
+ continue
+ if MozillaVersion(version) < MozillaVersion(self.config["last_watershed"]):
+ self.log(
+ "Skipping release that's behind the last watershed: %s"
+ % release_name,
+ level=INFO,
+ )
+ continue
+ if version == self.config["to_version"]:
+ self.log(
+ "Skipping release that is the same as to version: %s"
+ % release_name,
+ level=INFO,
+ )
+ continue
+ if MozillaVersion(version) > MozillaVersion(self.config["to_version"]):
+ self.log(
+ "Skipping release that's newer than to version: %s" % release_name,
+ level=INFO,
+ )
+ continue
+
+ if version in self.update_paths:
+                raise Exception("Found duplicate release for version: %s" % version)
+
+ # This is a crappy place to get buildids from, but we don't have a better one.
+ # This will start to fail if old info files are deleted.
+ info_file_url = "{}{}/{}_info.txt".format(
+ self.config["previous_archive_prefix"],
+ getCandidatesDir(
+ self.config["stage_product"],
+ version,
+ release_info["build_number"],
+ ),
+ ftp2infoFile(self.config["platform"]),
+ )
+ self.log(
+ "Retrieving buildid from info file: %s" % info_file_url, level=DEBUG
+ )
+ ret = self._retry_download(info_file_url, "WARNING")
+ buildID = ret.read().split(b"=")[1].strip().decode("utf-8")
+
+ branch = self._get_branch_url(self.config["branch_prefix"], version)
+
+ shipped_locales_url = urljoin(
+ self.config["hg_server"],
+ "{}/raw-file/{}/{}/locales/shipped-locales".format(
+ branch,
+ tag,
+ self.config["app_name"],
+ ),
+ )
+ ret = self._retry_download(shipped_locales_url, "WARNING")
+ shipped_locales = ret.read().strip().decode("utf-8")
+
+ app_version_url = urljoin(
+ self.config["hg_server"],
+ "{}/raw-file/{}/{}/config/version.txt".format(
+ branch,
+ tag,
+ self.config["app_name"],
+ ),
+ )
+ app_version = (
+ self._retry_download(app_version_url, "WARNING")
+ .read()
+ .strip()
+ .decode("utf-8")
+ )
+
+ self.log("Adding {} to update paths".format(version), level=INFO)
+ self.update_paths[version] = {
+ "appVersion": app_version,
+ "locales": getPlatformLocales(shipped_locales, self.config["platform"]),
+ "buildID": buildID,
+ }
+ for pattern, mar_channel_ids in self.config[
+ "mar_channel_id_overrides"
+ ].items():
+ if re.match(pattern, version):
+ self.update_paths[version]["marChannelIds"] = mar_channel_ids
+
+ def gather_info(self):
+ from mozilla_version.gecko import GeckoVersion
+
+ self._get_update_paths()
+ if self.update_paths:
+ self.log("Found update paths:", level=DEBUG)
+ self.log(pprint.pformat(self.update_paths), level=DEBUG)
+ elif GeckoVersion.parse(self.config["to_version"]) <= GeckoVersion.parse(
+ self.config["last_watershed"]
+ ):
+ self.log(
+ "Didn't find any update paths, but to_version {} is before the last_"
+ "watershed {}, generating empty config".format(
+ self.config["to_version"],
+ self.config["last_watershed"],
+ ),
+ level=WARNING,
+ )
+ else:
+ self.log("Didn't find any update paths, cannot continue", level=FATAL)
+
+ def create_config(self):
+ from mozrelease.l10n import getPlatformLocales
+ from mozrelease.paths import (
+ getCandidatesDir,
+ getReleaseInstallerPath,
+ getReleasesDir,
+ )
+ from mozrelease.platforms import ftp2updatePlatforms
+ from mozrelease.update_verify import UpdateVerifyConfig
+ from mozrelease.versions import getPrettyVersion
+
+ candidates_dir = getCandidatesDir(
+ self.config["stage_product"],
+ self.config["to_version"],
+ self.config["to_build_number"],
+ )
+ to_ = getReleaseInstallerPath(
+ self.config["product"],
+ self.config["product"].title(),
+ self.config["to_version"],
+ self.config["platform"],
+ locale="%locale%",
+ )
+ to_path = "{}/{}".format(candidates_dir, to_)
+
+ to_display_version = self.config.get("to_display_version")
+ if not to_display_version:
+ to_display_version = getPrettyVersion(self.config["to_version"])
+
+ self.update_verify_config = UpdateVerifyConfig(
+ product=self.config["product"].title(),
+ channel=self.config["channel"],
+ aus_server=self.config["aus_server"],
+ to=to_path,
+ to_build_id=self.config["to_buildid"],
+ to_app_version=self.config["to_app_version"],
+ to_display_version=to_display_version,
+ override_certs=self.config.get("override_certs"),
+ )
+
+ to_shipped_locales_url = urljoin(
+ self.config["hg_server"],
+ "{}/raw-file/{}/{}/locales/shipped-locales".format(
+ self.config["repo_path"],
+ self.config["to_revision"],
+ self.config["app_name"],
+ ),
+ )
+ to_shipped_locales = (
+ self._retry_download(to_shipped_locales_url, "WARNING")
+ .read()
+ .strip()
+ .decode("utf-8")
+ )
+ to_locales = set(
+ getPlatformLocales(to_shipped_locales, self.config["platform"])
+ )
+
+ completes_only_index = 0
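+        # Tracks how many releases only get complete-update (no partial)
+        # checks; used below to pick which of those also get a full check.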
+ for fromVersion in reversed(sorted(self.update_paths, key=CompareVersion)):
+ from_ = self.update_paths[fromVersion]
+ locales = sorted(list(set(from_["locales"]).intersection(to_locales)))
+ appVersion = from_["appVersion"]
+ build_id = from_["buildID"]
+ mar_channel_IDs = from_.get("marChannelIds")
+
+ # Use new build targets for Windows, but only on compatible
+ # versions (42+). See bug 1185456 for additional context.
+ if self.config["platform"] not in ("win32", "win64") or LooseVersion(
+ fromVersion
+ ) < LooseVersion("42.0"):
+ update_platform = ftp2updatePlatforms(self.config["platform"])[0]
+ else:
+ update_platform = ftp2updatePlatforms(self.config["platform"])[1]
+
+ release_dir = getReleasesDir(self.config["stage_product"], fromVersion)
+ path_ = getReleaseInstallerPath(
+ self.config["product"],
+ self.config["product"].title(),
+ fromVersion,
+ self.config["platform"],
+ locale="%locale%",
+ )
+ from_path = "{}/{}".format(release_dir, path_)
+
+ updater_package = "{}/{}".format(
+ release_dir,
+ getReleaseInstallerPath(
+ self.config["product"],
+ self.config["product"].title(),
+ fromVersion,
+ self.config["updater_platform"],
+ locale="%locale%",
+ ),
+ )
+
+            # Exclude the locales that get a full check
+ quick_check_locales = [
+ l for l in locales if l not in self.config["full_check_locales"]
+ ]
+ # Get the intersection of from and to full_check_locales
+ this_full_check_locales = [
+ l for l in self.config["full_check_locales"] if l in locales
+ ]
+
+ if fromVersion in self.config["partial_versions"]:
+ self.info(
+ "Generating configs for partial update checks for %s" % fromVersion
+ )
+ self.update_verify_config.addRelease(
+ release=appVersion,
+ build_id=build_id,
+ locales=locales,
+ patch_types=["complete", "partial"],
+ from_path=from_path,
+ ftp_server_from=self.config["previous_archive_prefix"],
+ ftp_server_to=self.config["archive_prefix"],
+ mar_channel_IDs=mar_channel_IDs,
+ platform=update_platform,
+ updater_package=updater_package,
+ )
+ else:
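+                # is_triangualar() thins out the expensive full checks: one is
+                # generated only when completes_only_index is a triangular
+                # number (0, 1, 3, 6, 10, ...).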
+ if this_full_check_locales and is_triangualar(completes_only_index):
+ self.info("Generating full check configs for %s" % fromVersion)
+ self.update_verify_config.addRelease(
+ release=appVersion,
+ build_id=build_id,
+ locales=this_full_check_locales,
+ from_path=from_path,
+ ftp_server_from=self.config["previous_archive_prefix"],
+ ftp_server_to=self.config["archive_prefix"],
+ mar_channel_IDs=mar_channel_IDs,
+ platform=update_platform,
+ updater_package=updater_package,
+ )
+ # Quick test for other locales, no download
+                if quick_check_locales:
+ self.info("Generating quick check configs for %s" % fromVersion)
+ if not is_triangualar(completes_only_index):
+                        # The full check was skipped above, so quick check all locales
+ _locales = locales
+ else:
+ # Excluding full check locales from the quick check
+ _locales = quick_check_locales
+ self.update_verify_config.addRelease(
+ release=appVersion,
+ build_id=build_id,
+ locales=_locales,
+ platform=update_platform,
+ )
+ completes_only_index += 1
+
+ def write_config(self):
+ # Needs to be opened in "bytes" mode because we perform relative seeks on it
+ with open(self.config["output_file"], "wb+") as fh:
+ self.update_verify_config.write(fh)
+
+
+if __name__ == "__main__":
+ UpdateVerifyConfigCreator().run_and_exit()
diff --git a/testing/mozharness/scripts/repackage.py b/testing/mozharness/scripts/repackage.py
new file mode 100644
index 0000000000..e26a32c1db
--- /dev/null
+++ b/testing/mozharness/scripts/repackage.py
@@ -0,0 +1,178 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import sys
+
+sys.path.insert(1, os.path.dirname(sys.path[0])) # noqa - don't warn about imports
+
+from mozharness.base.log import FATAL
+from mozharness.base.script import BaseScript
+
+
+class Repackage(BaseScript):
+ def __init__(self, require_config_file=False):
+ script_kwargs = {
+ "all_actions": [
+ "setup",
+ "repackage",
+ ],
+ }
+ BaseScript.__init__(
+ self, require_config_file=require_config_file, **script_kwargs
+ )
+
+ def setup(self):
+ dirs = self.query_abs_dirs()
+
+ self._run_tooltool()
+
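+        # The mar tool ships with the fetched input artifacts; make sure it
+        # is executable so later repackage steps can invoke it.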
+ mar_path = os.path.join(dirs["abs_input_dir"], "mar")
+ if self._is_windows():
+ mar_path += ".exe"
+        if os.path.exists(mar_path):
+ self.chmod(mar_path, 0o755)
+ if self.config.get("run_configure", True):
+ self._get_mozconfig()
+ self._run_configure()
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(Repackage, self).query_abs_dirs()
+ config = self.config
+
+ dirs = {}
+ dirs["abs_input_dir"] = os.path.join(abs_dirs["base_work_dir"], "fetches")
+ output_dir_suffix = []
+ if config.get("locale"):
+ output_dir_suffix.append(config["locale"])
+ if config.get("repack_id"):
+ output_dir_suffix.append(config["repack_id"])
+ dirs["abs_output_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "outputs", *output_dir_suffix
+ )
+ for key in dirs.keys():
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+ self.abs_dirs = abs_dirs
+ return self.abs_dirs
+
+ def repackage(self):
+ config = self.config
+ dirs = self.query_abs_dirs()
+
+ subst = {
+ "package-name": config["package-name"],
+ # sfx-stub is only defined for Windows targets
+ "sfx-stub": config.get("sfx-stub"),
+ "installer-tag": config["installer-tag"],
+ "stub-installer-tag": config["stub-installer-tag"],
+ "wsx-stub": config["wsx-stub"],
+ }
+ subst.update(dirs)
+ if config.get("fetch-dir"):
+ subst.update({"fetch-dir": os.path.abspath(config["fetch-dir"])})
+
+        # Make sure the output dir exists.
+ self.mkdir_p(dirs["abs_output_dir"])
+
+ for repack_config in config["repackage_config"]:
+ command = [sys.executable, "mach", "--log-no-times", "repackage"]
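+            # Each configured arg may contain str.format placeholders such as
+            # "{package-name}", filled in from the subst mapping above.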
+ command.extend([arg.format(**subst) for arg in repack_config["args"]])
+ for arg, filename in repack_config["inputs"].items():
+ command.extend(
+ [
+ "--{}".format(arg),
+ os.path.join(dirs["abs_input_dir"], filename),
+ ]
+ )
+ command.extend(
+ [
+ "--output",
+ os.path.join(dirs["abs_output_dir"], repack_config["output"]),
+ ]
+ )
+ self.run_command(
+ command=command,
+ cwd=dirs["abs_src_dir"],
+ halt_on_failure=True,
+ env=self.query_env(),
+ )
+
+ def _run_tooltool(self):
+ config = self.config
+ dirs = self.query_abs_dirs()
+ manifest_src = os.environ.get("TOOLTOOL_MANIFEST")
+ if not manifest_src:
+ manifest_src = config.get("tooltool_manifest_src")
+ if not manifest_src:
+ return
+
+ cmd = [
+ sys.executable,
+ "-u",
+ os.path.join(dirs["abs_src_dir"], "mach"),
+ "artifact",
+ "toolchain",
+ "-v",
+ "--retry",
+ "4",
+ "--artifact-manifest",
+ os.path.join(dirs["abs_src_dir"], "toolchains.json"),
+ ]
+        cmd.extend(
+            [
+                "--tooltool-manifest",
+                os.path.join(dirs["abs_src_dir"], manifest_src),
+            ]
+        )
+ cache = config.get("tooltool_cache")
+ if cache:
+ cmd.extend(["--cache-dir", cache])
+ self.info(str(cmd))
+ self.run_command(cmd, cwd=dirs["abs_src_dir"], halt_on_failure=True)
+
+ def _get_mozconfig(self):
+        """Assign the mozconfig."""
+ c = self.config
+ dirs = self.query_abs_dirs()
+ abs_mozconfig_path = ""
+
+ # first determine the mozconfig path
+ if c.get("src_mozconfig"):
+ self.info("Using in-tree mozconfig")
+ abs_mozconfig_path = os.path.join(dirs["abs_src_dir"], c["src_mozconfig"])
+ else:
+ self.fatal(
+ "'src_mozconfig' must be in the config "
+ "in order to determine the mozconfig."
+ )
+
+ # print its contents
+ self.read_from_file(abs_mozconfig_path, error_level=FATAL)
+
+        # finally, copy the mozconfig to the path where 'mach build' expects it
+ self.copyfile(
+ abs_mozconfig_path, os.path.join(dirs["abs_src_dir"], ".mozconfig")
+ )
+
+ def _run_configure(self):
+ dirs = self.query_abs_dirs()
+ command = [sys.executable, "mach", "--log-no-times", "configure"]
+ return self.run_command(
+ command=command,
+ cwd=dirs["abs_src_dir"],
+ output_timeout=60 * 3,
+ halt_on_failure=True,
+ )
+
+
+if __name__ == "__main__":
+ repack = Repackage()
+ repack.run_and_exit()
diff --git a/testing/mozharness/scripts/talos_script.py b/testing/mozharness/scripts/talos_script.py
new file mode 100755
index 0000000000..10e441070c
--- /dev/null
+++ b/testing/mozharness/scripts/talos_script.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+"""Mozharness script for running Talos performance tests."""
+
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+from mozharness.mozilla.testing.talos import Talos
+
+if __name__ == "__main__":
+ talos = Talos()
+ talos.run_and_exit()
diff --git a/testing/mozharness/scripts/telemetry/telemetry_client.py b/testing/mozharness/scripts/telemetry/telemetry_client.py
new file mode 100755
index 0000000000..a0c91ad1a1
--- /dev/null
+++ b/testing/mozharness/scripts/telemetry/telemetry_client.py
@@ -0,0 +1,278 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+
+import copy
+import os
+import sys
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
+
+from mozharness.base.python import PreScriptAction
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+from mozharness.mozilla.vcstools import VCSToolsScript
+
+# General command line arguments for the telemetry tests
+telemetry_tests_config_options = (
+ [
+ [
+ ["--allow-software-gl-layers"],
+ {
+ "action": "store_true",
+ "dest": "allow_software_gl_layers",
+ "default": False,
+ "help": "Permits a software GL implementation (such as LLVMPipe) "
+ "to use the GL compositor.",
+ },
+ ],
+ [
+ ["--dry-run"],
+ {
+                "action": "store_true",
+                "dest": "dry_run",
+                "default": False,
+                "help": "Only show what would be tested.",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "dest": "e10s",
+ "action": "store_false",
+ "default": True,
+ "help": "Disable multi-process (e10s) mode when running tests.",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "dest": "disable_fission",
+ "action": "store_true",
+ "default": False,
+ "help": "Disable fission mode when running tests.",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "dest": "extra_prefs",
+ "action": "append",
+ "default": [],
+ "help": "Extra user prefs.",
+ },
+ ],
+ [
+ ["--symbols-path=SYMBOLS_PATH"],
+ {
+ "dest": "symbols_path",
+                "help": "Absolute path to the directory containing Breakpad "
+                "symbols, or the URL of a zip file containing symbols.",
+ },
+ ],
+ [
+ ["--tag=TAG"],
+ {
+ "dest": "tag",
+ "help": "Subset of tests to run (local, remote).",
+ },
+ ],
+ ]
+ + copy.deepcopy(testing_config_options)
+ + copy.deepcopy(code_coverage_config_options)
+)
+
+
+class TelemetryTests(TestingMixin, VCSToolsScript, CodeCoverageMixin):
+ def __init__(
+ self,
+ config_options=None,
+ all_actions=None,
+ default_actions=None,
+ *args,
+ **kwargs
+ ):
+ config_options = config_options or telemetry_tests_config_options
+ actions = [
+ "clobber",
+ "download-and-extract",
+ "create-virtualenv",
+ "install",
+ "run-tests",
+ "uninstall",
+ ]
+
+ super(TelemetryTests, self).__init__(
+ config_options=config_options,
+ all_actions=all_actions or actions,
+ default_actions=default_actions or actions,
+ *args,
+ **kwargs
+ )
+
+ # Code which runs in automation has to include the following properties
+ self.binary_path = self.config.get("binary_path")
+ self.installer_path = self.config.get("installer_path")
+ self.installer_url = self.config.get("installer_url")
+ self.test_packages_url = self.config.get("test_packages_url")
+ self.test_url = self.config.get("test_url")
+ self.disable_fission = self.config.get("disable_fission")
+
+ if not self.test_url and not self.test_packages_url:
+            self.fatal("You must use --test-url or --test-packages-url")
+
+ @PreScriptAction("create-virtualenv")
+ def _pre_create_virtualenv(self, action):
+ abs_dirs = self.query_abs_dirs()
+
+ requirements = os.path.join(
+ abs_dirs["abs_test_install_dir"],
+ "config",
+ "telemetry_tests_requirements.txt",
+ )
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+
+ abs_dirs = super(TelemetryTests, self).query_abs_dirs()
+
+ abs_test_install_dir = os.path.join(abs_dirs["abs_work_dir"], "tests")
+
+ dirs = {
+ "abs_test_install_dir": abs_test_install_dir,
+ "abs_telemetry_dir": os.path.join(
+ abs_test_install_dir, "telemetry", "marionette"
+ ),
+ "abs_blob_upload_dir": os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ ),
+ }
+
+ for key in dirs:
+ if key not in abs_dirs:
+ abs_dirs[key] = dirs[key]
+
+ self.abs_dirs = abs_dirs
+
+ return self.abs_dirs
+
+ def run_test(self, binary_path, env=None, marionette_port=2828):
+ """All required steps for running the tests against an installer."""
+ dirs = self.query_abs_dirs()
+
+ # Import the harness to retrieve the location of the cli scripts
+ import telemetry_harness
+
+ cmd = [
+ self.query_python_path(),
+ os.path.join(os.path.dirname(telemetry_harness.__file__), self.cli_script),
+ "--binary",
+ binary_path,
+ "--address",
+ "localhost:{}".format(marionette_port),
+ # Resource files to serve via local webserver
+ "--server-root",
+ os.path.join(dirs["abs_telemetry_dir"], "harness", "www"),
+            # Use the work dir for temporary data
+ "--workspace",
+ dirs["abs_work_dir"],
+ # logging options
+ "--gecko-log=-", # output from the gecko process redirected to stdout
+ "--log-raw=-", # structured log for output parser redirected to stdout
+            # additional reports helpful for Jenkins and inspection via Treeherder
+ "--log-html",
+ os.path.join(dirs["abs_blob_upload_dir"], "report.html"),
+ "--log-xunit",
+ os.path.join(dirs["abs_blob_upload_dir"], "report.xml"),
+ # Enable tracing output to log transmission protocol
+ "-vv",
+ ]
+
+ # Symbols for crash reports
+ if self.symbols_path:
+ cmd.extend(["--symbols-path", self.symbols_path])
+
+ if self.disable_fission:
+ cmd.append("--disable-fission")
+ cmd.extend(["--setpref={}".format(p) for p in self.config["extra_prefs"]])
+
+ if not self.config["e10s"]:
+ cmd.append("--disable-e10s")
+
+ parser = StructuredOutputParser(
+ config=self.config, log_obj=self.log_obj, strict=False
+ )
+
+ # Add the default tests to run
+ tests = [
+ os.path.join(dirs["abs_telemetry_dir"], "tests", test)
+ for test in self.default_tests
+ ]
+ cmd.extend(tests)
+
+ # Set further environment settings
+ env = env or self.query_env()
+ env.update({"MINIDUMP_SAVE_PATH": dirs["abs_blob_upload_dir"]})
+ if self.query_minidump_stackwalk():
+ env.update({"MINIDUMP_STACKWALK": self.minidump_stackwalk_path})
+ env["RUST_BACKTRACE"] = "1"
+ env["MOZ_IGNORE_NSS_SHUTDOWN_LEAKS"] = "1"
+
+ # Causes Firefox to crash when using non-local connections.
+ env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
+
+ # If code coverage is enabled, set GCOV_PREFIX env variable
+ if self.config.get("code_coverage"):
+ env["GCOV_PREFIX"] = self.gcov_dir
+
+ return_code = self.run_command(
+ cmd,
+ cwd=dirs["abs_work_dir"],
+ output_timeout=1000,
+ output_parser=parser,
+ env=env,
+ )
+
+ tbpl_status, log_level, _ = parser.evaluate_parser(return_code)
+ self.record_status(tbpl_status, level=log_level)
+
+ return return_code
+
+ @PreScriptAction("run-tests")
+ def _pre_run_tests(self, action):
+ if not self.installer_path and not self.installer_url:
+ self.critical(
+ "Please specify an installer via --installer-path or --installer-url."
+ )
+ sys.exit(1)
+
+ def run_tests(self):
+ """Run all the tests"""
+ return self.run_test(
+ binary_path=self.binary_path,
+ env=self.query_env(),
+ )
+
+
+class TelemetryClientTests(TelemetryTests):
+ cli_script = "runtests.py"
+ default_tests = [
+ os.path.join("client", "manifest.ini"),
+ os.path.join("unit", "manifest.ini"),
+ ]
+
+
+if __name__ == "__main__":
+ myScript = TelemetryClientTests()
+ myScript.run_and_exit()
diff --git a/testing/mozharness/scripts/web_platform_tests.py b/testing/mozharness/scripts/web_platform_tests.py
new file mode 100755
index 0000000000..2473e153e9
--- /dev/null
+++ b/testing/mozharness/scripts/web_platform_tests.py
@@ -0,0 +1,662 @@
+#!/usr/bin/env python
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+import copy
+import gzip
+import json
+import os
+import sys
+from datetime import datetime, timedelta
+
+# load modules from parent dir
+sys.path.insert(1, os.path.dirname(sys.path[0]))
+
+import mozinfo
+from mozharness.base.errors import BaseErrorList
+from mozharness.base.log import INFO
+from mozharness.base.script import PreScriptAction
+from mozharness.base.vcs.vcsbase import MercurialScript
+from mozharness.mozilla.automation import TBPL_RETRY
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozharness.mozilla.testing.android import AndroidMixin
+from mozharness.mozilla.testing.codecoverage import (
+ CodeCoverageMixin,
+ code_coverage_config_options,
+)
+from mozharness.mozilla.testing.errors import WptHarnessErrorList
+from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
+
+
+class WebPlatformTest(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin):
+ config_options = (
+ [
+ [
+ ["--test-type"],
+ {
+ "action": "extend",
+ "dest": "test_type",
+ "help": "Specify the test types to run.",
+ },
+ ],
+ [
+ ["--disable-e10s"],
+ {
+ "action": "store_false",
+ "dest": "e10s",
+ "default": True,
+ "help": "Run without e10s enabled",
+ },
+ ],
+ [
+ ["--disable-fission"],
+ {
+ "action": "store_true",
+ "dest": "disable_fission",
+ "default": False,
+ "help": "Run without fission enabled",
+ },
+ ],
+ [
+ ["--total-chunks"],
+ {
+ "action": "store",
+ "dest": "total_chunks",
+ "help": "Number of total chunks",
+ },
+ ],
+ [
+ ["--this-chunk"],
+ {
+ "action": "store",
+ "dest": "this_chunk",
+ "help": "Number of this chunk",
+ },
+ ],
+ [
+ ["--allow-software-gl-layers"],
+ {
+ "action": "store_true",
+ "dest": "allow_software_gl_layers",
+ "default": False,
+ "help": "Permits a software GL implementation (such as LLVMPipe) "
+ "to use the GL compositor.",
+ },
+ ],
+ [
+ ["--headless"],
+ {
+ "action": "store_true",
+ "dest": "headless",
+ "default": False,
+ "help": "Run tests in headless mode.",
+ },
+ ],
+ [
+ ["--headless-width"],
+ {
+ "action": "store",
+ "dest": "headless_width",
+ "default": "1600",
+ "help": "Specify headless virtual screen width (default: 1600).",
+ },
+ ],
+ [
+ ["--headless-height"],
+ {
+ "action": "store",
+ "dest": "headless_height",
+ "default": "1200",
+ "help": "Specify headless virtual screen height (default: 1200).",
+ },
+ ],
+ [
+ ["--setpref"],
+ {
+ "action": "append",
+ "metavar": "PREF=VALUE",
+ "dest": "extra_prefs",
+ "default": [],
+ "help": "Defines an extra user preference.",
+ },
+ ],
+ [
+ ["--skip-implementation-status"],
+ {
+ "action": "extend",
+ "dest": "skip_implementation_status",
+ "default": [],
+                    "help": "Defines a way to skip tests with a specific "
+                    "implementation status (e.g. not implemented).",
+ },
+ ],
+ [
+ ["--backlog"],
+ {
+ "action": "store_true",
+ "dest": "backlog",
+ "default": False,
+                    "help": "Defines whether the test category is backlog.",
+ },
+ ],
+ [
+ ["--skip-timeout"],
+ {
+ "action": "store_true",
+ "dest": "skip_timeout",
+ "default": False,
+                    "help": "Ignore tests whose expected status is TIMEOUT.",
+ },
+ ],
+ [
+ ["--include"],
+ {
+ "action": "store",
+ "dest": "include",
+ "default": None,
+ "help": "URL prefix to include.",
+ },
+ ],
+ ]
+ + copy.deepcopy(testing_config_options)
+ + copy.deepcopy(code_coverage_config_options)
+ )
+
+ def __init__(self, require_config_file=True):
+ super(WebPlatformTest, self).__init__(
+ config_options=self.config_options,
+ all_actions=[
+ "clobber",
+ "download-and-extract",
+ "download-and-process-manifest",
+ "create-virtualenv",
+ "pull",
+ "start-emulator",
+ "verify-device",
+ "install",
+ "run-tests",
+ ],
+ require_config_file=require_config_file,
+ config={"require_test_zip": True},
+ )
+
+ # Surely this should be in the superclass
+ c = self.config
+ self.installer_url = c.get("installer_url")
+ self.test_url = c.get("test_url")
+ self.test_packages_url = c.get("test_packages_url")
+ self.installer_path = c.get("installer_path")
+ self.binary_path = c.get("binary_path")
+ self.abs_app_dir = None
+ self.xre_path = None
+ if self.is_emulator:
+ self.device_serial = "emulator-5554"
+
+ def query_abs_app_dir(self):
+ """We can't set this in advance, because OSX install directories
+ change depending on branding and opt/debug.
+ """
+ if self.abs_app_dir:
+ return self.abs_app_dir
+ if not self.binary_path:
+ self.fatal("Can't determine abs_app_dir (binary_path not set!)")
+ self.abs_app_dir = os.path.dirname(self.binary_path)
+ return self.abs_app_dir
+
+ def query_abs_dirs(self):
+ if self.abs_dirs:
+ return self.abs_dirs
+ abs_dirs = super(WebPlatformTest, self).query_abs_dirs()
+
+ dirs = {}
+ dirs["abs_app_install_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "application"
+ )
+ dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
+ dirs["abs_test_bin_dir"] = os.path.join(dirs["abs_test_install_dir"], "bin")
+ dirs["abs_wpttest_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "web-platform"
+ )
+ dirs["abs_blob_upload_dir"] = os.path.join(
+ abs_dirs["abs_work_dir"], "blobber_upload_dir"
+ )
+ dirs["abs_test_extensions_dir"] = os.path.join(
+ dirs["abs_test_install_dir"], "extensions"
+ )
+ if self.is_android:
+ dirs["abs_xre_dir"] = os.path.join(abs_dirs["abs_work_dir"], "hostutils")
+ if self.is_emulator:
+ work_dir = os.environ.get("MOZ_FETCHES_DIR") or abs_dirs["abs_work_dir"]
+ dirs["abs_sdk_dir"] = os.path.join(work_dir, "android-sdk-linux")
+ dirs["abs_avds_dir"] = os.path.join(work_dir, "android-device")
+ dirs["abs_bundletool_path"] = os.path.join(work_dir, "bundletool.jar")
+ # AndroidMixin uses this when launching the emulator. We only want
+ # GLES3 if we're running WebRender (default)
+ self.use_gles3 = True
+
+ abs_dirs.update(dirs)
+ self.abs_dirs = abs_dirs
+
+ return self.abs_dirs
+
+ @PreScriptAction("create-virtualenv")
+ def _pre_create_virtualenv(self, action):
+ dirs = self.query_abs_dirs()
+
+ requirements = os.path.join(
+ dirs["abs_test_install_dir"], "config", "marionette_requirements.txt"
+ )
+
+ self.register_virtualenv_module(requirements=[requirements], two_pass=True)
+
+ def _query_geckodriver(self):
+ path = None
+ c = self.config
+ dirs = self.query_abs_dirs()
+ repl_dict = {}
+ repl_dict.update(dirs)
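+        # The configured path may contain %-style placeholders (for example
+        # "%(abs_test_bin_dir)s") that are expanded from the dirs mapping.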
+ path = c.get("geckodriver", "geckodriver")
+ if path:
+ path = path % repl_dict
+ return path
+
+ def _query_cmd(self, test_types):
+ if not self.binary_path:
+ self.fatal("Binary path could not be determined")
+ # And exit
+
+ c = self.config
+ run_file_name = "runtests.py"
+
+ dirs = self.query_abs_dirs()
+ abs_app_dir = self.query_abs_app_dir()
+ str_format_values = {
+ "binary_path": self.binary_path,
+ "test_path": dirs["abs_wpttest_dir"],
+ "test_install_path": dirs["abs_test_install_dir"],
+ "abs_app_dir": abs_app_dir,
+ "abs_work_dir": dirs["abs_work_dir"],
+ "xre_path": self.xre_path,
+ }
+
+ cmd = [self.query_python_path("python"), "-u"]
+ cmd.append(os.path.join(dirs["abs_wpttest_dir"], run_file_name))
+
+ mozinfo.find_and_update_from_json(dirs["abs_test_install_dir"])
+
+ raw_log_file, error_summary_file = self.get_indexed_logs(
+ dirs["abs_blob_upload_dir"], "wpt"
+ )
+
+ cmd += [
+ "--log-raw=-",
+ "--log-raw=%s" % raw_log_file,
+ "--log-wptreport=%s"
+ % os.path.join(dirs["abs_blob_upload_dir"], "wptreport.json"),
+ "--log-errorsummary=%s" % error_summary_file,
+ "--symbols-path=%s" % self.symbols_path,
+ "--stackwalk-binary=%s" % self.query_minidump_stackwalk(),
+ "--stackfix-dir=%s" % os.path.join(dirs["abs_test_install_dir"], "bin"),
+ "--no-pause-after-test",
+ "--instrument-to-file=%s"
+ % os.path.join(dirs["abs_blob_upload_dir"], "wpt_instruments.txt"),
+ "--specialpowers-path=%s"
+ % os.path.join(
+ dirs["abs_test_extensions_dir"], "specialpowers@mozilla.org.xpi"
+ ),
+ ]
+
+ is_windows_7 = (
+ mozinfo.info["os"] == "win" and mozinfo.info["os_version"] == "6.1"
+ )
+
+ if (
+ self.is_android
+ or mozinfo.info["tsan"]
+ or "wdspec" in test_types
+ or not c["disable_fission"]
+ # Bug 1392106 - skia error 0x80070005: Access is denied.
+            or (is_windows_7 and mozinfo.info["debug"])
+ ):
+ processes = 1
+ else:
+ processes = 2
+ cmd.append("--processes=%s" % processes)
+
+ if self.is_android:
+ cmd += [
+ "--device-serial=%s" % self.device_serial,
+ "--package-name=%s" % self.query_package_name(),
+ ]
+ else:
+ cmd.append("--binary=%s" % self.binary_path)
+
+ if is_windows_7:
+ # On Windows 7 --install-fonts fails, so fall back to a Firefox-specific codepath
+ self._install_fonts()
+ else:
+ cmd += ["--install-fonts"]
+
+ for test_type in test_types:
+ cmd.append("--test-type=%s" % test_type)
+
+ if c["extra_prefs"]:
+ cmd.extend(["--setpref={}".format(p) for p in c["extra_prefs"]])
+
+ if c["disable_fission"]:
+ cmd.append("--disable-fission")
+
+ if not c["e10s"]:
+ cmd.append("--disable-e10s")
+
+ if c["skip_timeout"]:
+ cmd.append("--skip-timeout")
+
+ for implementation_status in c["skip_implementation_status"]:
+ cmd.append("--skip-implementation-status=%s" % implementation_status)
+
+ # Bug 1643177 - reduce timeout multiplier for web-platform-tests backlog
+ if c["backlog"]:
+ cmd.append("--timeout-multiplier=0.25")
+
+ test_paths = set()
+ if not (self.verify_enabled or self.per_test_coverage):
+ mozharness_test_paths = json.loads(
+ os.environ.get("MOZHARNESS_TEST_PATHS", '""')
+ )
+ if mozharness_test_paths:
+ path = os.path.join(dirs["abs_fetches_dir"], "wpt_tests_by_group.json")
+
+ if not os.path.exists(path):
+ self.critical("Unable to locate web-platform-test groups file.")
+
+ cmd.append("--test-groups={}".format(path))
+
+ for key in mozharness_test_paths.keys():
+ paths = mozharness_test_paths.get(key, [])
+ for path in paths:
+ if not path.startswith("/"):
+ # Assume this is a filesystem path rather than a test id
+ path = os.path.relpath(path, "testing/web-platform")
+ if ".." in path:
+ self.fatal("Invalid WPT path: {}".format(path))
+ path = os.path.join(dirs["abs_wpttest_dir"], path)
+ test_paths.add(path)
+ else:
+ # As per WPT harness, the --run-by-dir flag is incompatible with
+ # the --test-groups flag.
+ cmd.append("--run-by-dir=%i" % (3 if not mozinfo.info["asan"] else 0))
+ for opt in ["total_chunks", "this_chunk"]:
+ val = c.get(opt)
+ if val:
+ cmd.append("--%s=%s" % (opt.replace("_", "-"), val))
+
+ options = list(c.get("options", []))
+
+ if "wdspec" in test_types:
+ geckodriver_path = self._query_geckodriver()
+ if not geckodriver_path or not os.path.isfile(geckodriver_path):
+ self.fatal(
+ "Unable to find geckodriver binary "
+ "in common test package: %s" % str(geckodriver_path)
+ )
+ cmd.append("--webdriver-binary=%s" % geckodriver_path)
+ cmd.append("--webdriver-arg=-vv") # enable trace logs
+
+ test_type_suite = {
+ "testharness": "web-platform-tests",
+ "crashtest": "web-platform-tests-crashtest",
+ "print-reftest": "web-platform-tests-print-reftest",
+ "reftest": "web-platform-tests-reftest",
+ "wdspec": "web-platform-tests-wdspec",
+ }
+ for test_type in test_types:
+ try_options, try_tests = self.try_args(test_type_suite[test_type])
+
+ cmd.extend(
+ self.query_options(
+ options, try_options, str_format_values=str_format_values
+ )
+ )
+ cmd.extend(
+ self.query_tests_args(try_tests, str_format_values=str_format_values)
+ )
+ if "include" in c and c["include"]:
+ cmd.append("--include=%s" % c["include"])
+
+ cmd.extend(test_paths)
+
+ return cmd
+
+ def download_and_extract(self):
+ super(WebPlatformTest, self).download_and_extract(
+ extract_dirs=[
+ "mach",
+ "bin/*",
+ "config/*",
+ "extensions/*",
+ "mozbase/*",
+ "marionette/*",
+ "tools/*",
+ "web-platform/*",
+ "mozpack/*",
+ "mozbuild/*",
+ ],
+ suite_categories=["web-platform"],
+ )
+ dirs = self.query_abs_dirs()
+ if self.is_android:
+ self.xre_path = self.download_hostutils(dirs["abs_xre_dir"])
+ # Make sure that the logging directory exists
+ if self.mkdir_p(dirs["abs_blob_upload_dir"]) == -1:
+ self.fatal("Could not create blobber upload directory")
+ # Exit
+
+ def download_and_process_manifest(self):
+ """Downloads the tests-by-manifest JSON mapping generated by the decision task.
+
+        web-platform-tests are chunked in the decision task as of Bug 1608837,
+        which means tests are resolved by the TestResolver as part of this process.
+
+ The manifest file contains tests keyed by the groups generated in
+ TestResolver.get_wpt_group().
+
+        Upon successful call, a JSON file containing only the web-platform test
+        groups is saved in the fetch directory.
+
+        Bug: 1634554
+ """
+ dirs = self.query_abs_dirs()
+ url = os.environ.get("TESTS_BY_MANIFEST_URL", "")
+ if not url:
+ self.fatal("TESTS_BY_MANIFEST_URL not defined.")
+
+ artifact_name = url.split("/")[-1]
+
+ # Save file to the MOZ_FETCHES dir.
+ self.download_file(
+ url, file_name=artifact_name, parent_dir=dirs["abs_fetches_dir"]
+ )
+
+ with gzip.open(os.path.join(dirs["abs_fetches_dir"], artifact_name), "r") as f:
+ tests_by_manifest = json.loads(f.read())
+
+ # We need to filter out non-web-platform-tests without knowing what the
+ # groups are. Fortunately, all web-platform test 'manifests' begin with a
+ # forward slash.
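+        # (Illustratively, a group key looks like "/css/css-grid", whereas
+        # non-WPT manifest keys are tree-relative paths without a leading "/".)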
+ test_groups = {
+ key: tests_by_manifest[key]
+ for key in tests_by_manifest.keys()
+ if key.startswith("/")
+ }
+
+ outfile = os.path.join(dirs["abs_fetches_dir"], "wpt_tests_by_group.json")
+ with open(outfile, "w+") as f:
+ json.dump(test_groups, f, indent=2, sort_keys=True)
+
+ def install(self):
+ if self.is_android:
+ self.install_android_app(self.installer_path)
+ else:
+ super(WebPlatformTest, self).install()
+
+ def _install_fonts(self):
+ if self.is_android:
+ return
+ # Ensure the Ahem font is available
+ dirs = self.query_abs_dirs()
+
+ if not sys.platform.startswith("darwin"):
+ font_path = os.path.join(os.path.dirname(self.binary_path), "fonts")
+ else:
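+            # On macOS the binary lives in <app>/Contents/MacOS, so step up
+            # one level and into Contents/Resources/res/fonts.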
+ font_path = os.path.join(
+ os.path.dirname(self.binary_path),
+ os.pardir,
+ "Resources",
+ "res",
+ "fonts",
+ )
+ if not os.path.exists(font_path):
+ os.makedirs(font_path)
+ ahem_src = os.path.join(dirs["abs_wpttest_dir"], "tests", "fonts", "Ahem.ttf")
+ ahem_dest = os.path.join(font_path, "Ahem.ttf")
+ with open(ahem_src, "rb") as src, open(ahem_dest, "wb") as dest:
+ dest.write(src.read())
+
+ def run_tests(self):
+ dirs = self.query_abs_dirs()
+
+ parser = StructuredOutputParser(
+ config=self.config,
+ log_obj=self.log_obj,
+ log_compact=True,
+ error_list=BaseErrorList + WptHarnessErrorList,
+ allow_crashes=True,
+ )
+
+ env = {"MINIDUMP_SAVE_PATH": dirs["abs_blob_upload_dir"]}
+ env["RUST_BACKTRACE"] = "full"
+
+ if self.config["allow_software_gl_layers"]:
+ env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"
+ if self.config["headless"]:
+ env["MOZ_HEADLESS"] = "1"
+ env["MOZ_HEADLESS_WIDTH"] = self.config["headless_width"]
+ env["MOZ_HEADLESS_HEIGHT"] = self.config["headless_height"]
+
+ env["STYLO_THREADS"] = "4"
+
+ if self.is_android:
+ env["ADB_PATH"] = self.adb_path
+
+ env = self.query_env(partial_env=env, log_level=INFO)
+
+ start_time = datetime.now()
+ max_per_test_time = timedelta(minutes=60)
+ max_per_test_tests = 10
+ if self.per_test_coverage:
+ max_per_test_tests = 30
+ executed_tests = 0
+ executed_too_many_tests = False
+
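+        # Verify mode and per-test coverage run a bounded set of tests; the
+        # time and test-count caps above are enforced in the loop below.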
+ if self.per_test_coverage or self.verify_enabled:
+ suites = self.query_per_test_category_suites(None, None)
+ if "wdspec" in suites:
+ # geckodriver is required for wdspec, but not always available
+ geckodriver_path = self._query_geckodriver()
+ if not geckodriver_path or not os.path.isfile(geckodriver_path):
+ suites.remove("wdspec")
+ self.info("Skipping 'wdspec' tests - no geckodriver")
+ else:
+ test_types = self.config.get("test_type", [])
+ suites = [None]
+ for suite in suites:
+ if executed_too_many_tests and not self.per_test_coverage:
+ continue
+
+ if suite:
+ test_types = [suite]
+
+ summary = {}
+ for per_test_args in self.query_args(suite):
+ # Make sure baseline code coverage tests are never
+ # skipped and that having them run has no influence
+ # on the max number of actual tests that are to be run.
+ is_baseline_test = (
+ "baselinecoverage" in per_test_args[-1]
+ if self.per_test_coverage
+ else False
+ )
+ if executed_too_many_tests and not is_baseline_test:
+ continue
+
+ if not is_baseline_test:
+ if (datetime.now() - start_time) > max_per_test_time:
+                        # The test run is out of time. That is okay! Stop running
+ # them so that a task timeout is not triggered, and so that
+ # (partial) results are made available in a timely manner.
+ self.info(
+ "TinderboxPrint: Running tests took too long: Not all tests "
+ "were executed.<br/>"
+ )
+ return
+ if executed_tests >= max_per_test_tests:
+ # When changesets are merged between trees or many tests are
+ # otherwise updated at once, there probably is not enough time
+ # to run all tests, and attempting to do so may cause other
+ # problems, such as generating too much log output.
+ self.info(
+ "TinderboxPrint: Too many modified tests: Not all tests "
+ "were executed.<br/>"
+ )
+ executed_too_many_tests = True
+
+ executed_tests = executed_tests + 1
+
+ cmd = self._query_cmd(test_types)
+ cmd.extend(per_test_args)
+
+ final_env = copy.copy(env)
+
+ if self.per_test_coverage:
+ self.set_coverage_env(final_env, is_baseline_test)
+
+ return_code = self.run_command(
+ cmd,
+ cwd=dirs["abs_work_dir"],
+ output_timeout=1000,
+ output_parser=parser,
+ env=final_env,
+ )
+
+ if self.per_test_coverage:
+ self.add_per_test_coverage_report(
+ final_env, suite, per_test_args[-1]
+ )
+
+ tbpl_status, log_level, summary = parser.evaluate_parser(
+ return_code, previous_summary=summary
+ )
+ self.record_status(tbpl_status, level=log_level)
+
+ if len(per_test_args) > 0:
+ self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
+ if tbpl_status == TBPL_RETRY:
+ self.info("Per-test run abandoned due to RETRY status")
+ return
+
+
+# main {{{1
+if __name__ == "__main__":
+ web_platform_tests = WebPlatformTest()
+ web_platform_tests.run_and_exit()
diff --git a/testing/mozharness/setup.cfg b/testing/mozharness/setup.cfg
new file mode 100644
index 0000000000..d8057aec13
--- /dev/null
+++ b/testing/mozharness/setup.cfg
@@ -0,0 +1,2 @@
+[nosetests]
+exclude=TestingMixin
diff --git a/testing/mozharness/setup.py b/testing/mozharness/setup.py
new file mode 100644
index 0000000000..0c4a9e7186
--- /dev/null
+++ b/testing/mozharness/setup.py
@@ -0,0 +1,46 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+from setuptools import setup, find_packages
+
+try:
+ here = os.path.dirname(os.path.abspath(__file__))
+ description = open(os.path.join(here, "README.txt")).read()
+except IOError:
+ description = ""
+
+import mozharness
+
+version = mozharness.version_string
+
+dependencies = ["virtualenv", "mock", "coverage", "nose", "pylint", "pyflakes"]
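+# json is in the standard library since Python 2.6; the simplejson fallback
+# only matters on older interpreters.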
+try:
+ import json
+except ImportError:
+ dependencies.append("simplejson")
+
+setup(
+ name="mozharness",
+ version=version,
+    description="Mozharness is a configuration-driven script harness with full logging that allows production infrastructure and individual developers to use the same scripts.",
+ long_description=description,
+ classifiers=[
+ "Programming Language :: Python :: 2.7",
+ "Programming Language :: Python :: 2 :: Only",
+ ], # Get strings from http://www.python.org/pypi?%3Aaction=list_classifiers
+ author="Aki Sasaki",
+ author_email="aki@mozilla.com",
+ url="https://hg.mozilla.org/build/mozharness/",
+ license="MPL",
+ packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
+ include_package_data=True,
+ zip_safe=False,
+ install_requires=dependencies,
+ entry_points="""
+ # -*- Entry points: -*-
+ """,
+)
diff --git a/testing/mozharness/test/README b/testing/mozharness/test/README
new file mode 100644
index 0000000000..889c8a83d4
--- /dev/null
+++ b/testing/mozharness/test/README
@@ -0,0 +1,2 @@
+test/ : non-network-dependent unit tests
+test/networked/ : network-dependent unit tests.
diff --git a/testing/mozharness/test/helper_files/.noserc b/testing/mozharness/test/helper_files/.noserc
new file mode 100644
index 0000000000..e6f21cf31d
--- /dev/null
+++ b/testing/mozharness/test/helper_files/.noserc
@@ -0,0 +1,2 @@
+[nosetests]
+with-xunit=1
diff --git a/testing/mozharness/test/helper_files/archives/archive.tar b/testing/mozharness/test/helper_files/archives/archive.tar
new file mode 100644
index 0000000000..1dc094198f
--- /dev/null
+++ b/testing/mozharness/test/helper_files/archives/archive.tar
Binary files differ
diff --git a/testing/mozharness/test/helper_files/archives/archive.tar.bz2 b/testing/mozharness/test/helper_files/archives/archive.tar.bz2
new file mode 100644
index 0000000000..c393ea4b88
--- /dev/null
+++ b/testing/mozharness/test/helper_files/archives/archive.tar.bz2
Binary files differ
diff --git a/testing/mozharness/test/helper_files/archives/archive.tar.gz b/testing/mozharness/test/helper_files/archives/archive.tar.gz
new file mode 100644
index 0000000000..0fbfa39b1c
--- /dev/null
+++ b/testing/mozharness/test/helper_files/archives/archive.tar.gz
Binary files differ
diff --git a/testing/mozharness/test/helper_files/archives/archive.zip b/testing/mozharness/test/helper_files/archives/archive.zip
new file mode 100644
index 0000000000..aa2fb34c16
--- /dev/null
+++ b/testing/mozharness/test/helper_files/archives/archive.zip
Binary files differ
diff --git a/testing/mozharness/test/helper_files/archives/archive_invalid_filename.zip b/testing/mozharness/test/helper_files/archives/archive_invalid_filename.zip
new file mode 100644
index 0000000000..20bdc5acdf
--- /dev/null
+++ b/testing/mozharness/test/helper_files/archives/archive_invalid_filename.zip
Binary files differ
diff --git a/testing/mozharness/test/helper_files/archives/reference/bin/script.sh b/testing/mozharness/test/helper_files/archives/reference/bin/script.sh
new file mode 100755
index 0000000000..134f2933c9
--- /dev/null
+++ b/testing/mozharness/test/helper_files/archives/reference/bin/script.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo Hello world!
diff --git a/testing/mozharness/test/helper_files/archives/reference/lorem.txt b/testing/mozharness/test/helper_files/archives/reference/lorem.txt
new file mode 100644
index 0000000000..d2cf010d36
--- /dev/null
+++ b/testing/mozharness/test/helper_files/archives/reference/lorem.txt
@@ -0,0 +1 @@
+Lorem ipsum dolor sit amet.
diff --git a/testing/mozharness/test/helper_files/create_archives.sh b/testing/mozharness/test/helper_files/create_archives.sh
new file mode 100755
index 0000000000..314b55d276
--- /dev/null
+++ b/testing/mozharness/test/helper_files/create_archives.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# Script to auto-generate the different archive types under the archives directory.
+
+cd archives
+
+rm archive.*
+
+tar cf archive.tar -C reference .
+gzip -fk archive.tar
+bzip2 -fk archive.tar
+cd reference && zip ../archive.zip -r * && cd ..
diff --git a/testing/mozharness/test/helper_files/init_hgrepo.sh b/testing/mozharness/test/helper_files/init_hgrepo.sh
new file mode 100755
index 0000000000..0f4561695f
--- /dev/null
+++ b/testing/mozharness/test/helper_files/init_hgrepo.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Set up an hg repo for testing
+dest=$1
+if [ -z "$dest" ]; then
+ echo You must specify a destination directory 1>&2
+ exit 1
+fi
+
+rm -rf "$dest"
+hg init "$dest"
+cd "$dest" || exit 1
+
+echo "Hello world $RANDOM" > hello.txt
+hg add hello.txt
+hg commit -m "Adding hello"
+
+hg branch branch2 > /dev/null
+echo "So long, farewell" >> hello.txt
+hg commit -m "Changing hello on branch"
+
+hg checkout default
+echo "Is this thing on?" >> hello.txt
+hg commit -m "Last change on default"
diff --git a/testing/mozharness/test/helper_files/locales.json b/testing/mozharness/test/helper_files/locales.json
new file mode 100644
index 0000000000..c9056b1d15
--- /dev/null
+++ b/testing/mozharness/test/helper_files/locales.json
@@ -0,0 +1,18 @@
+{
+ "ar": {
+ "revision": "default",
+ "platforms": ["maemo"]
+ },
+ "be": {
+ "revision": "default",
+ "platforms": ["maemo"]
+ },
+ "de": {
+ "revision": "default",
+ "platforms": ["maemo", "maemo-multilocale", "android-multilocale"]
+ },
+ "es-ES": {
+ "revision": "default",
+ "platforms": ["maemo", "maemo-multilocale", "android-multilocale"]
+ }
+}
diff --git a/testing/mozharness/test/helper_files/locales.txt b/testing/mozharness/test/helper_files/locales.txt
new file mode 100644
index 0000000000..0b65ab76df
--- /dev/null
+++ b/testing/mozharness/test/helper_files/locales.txt
@@ -0,0 +1,4 @@
+ar
+be
+de
+es-ES
diff --git a/testing/mozharness/test/helper_files/mozconfig_manifest.json b/testing/mozharness/test/helper_files/mozconfig_manifest.json
new file mode 100644
index 0000000000..8cc8049002
--- /dev/null
+++ b/testing/mozharness/test/helper_files/mozconfig_manifest.json
@@ -0,0 +1,3 @@
+{
+ "gecko_path": "path/to/mozconfig"
+}
diff --git a/testing/mozharness/test/hgrc b/testing/mozharness/test/hgrc
new file mode 100644
index 0000000000..85e670518b
--- /dev/null
+++ b/testing/mozharness/test/hgrc
@@ -0,0 +1,9 @@
+[extensions]
+mq =
+purge =
+rebase =
+share =
+transplant =
+
+[ui]
+username = tester <tester@example.com>
diff --git a/testing/mozharness/test/pip-freeze.example.txt b/testing/mozharness/test/pip-freeze.example.txt
new file mode 100644
index 0000000000..56e06923fc
--- /dev/null
+++ b/testing/mozharness/test/pip-freeze.example.txt
@@ -0,0 +1,19 @@
+MakeItSo==0.2.6
+PyYAML==3.10
+Tempita==0.5.1
+WebOb==1.2b3
+-e hg+http://k0s.org/mozilla/hg/configuration@35416ad140982c11eba0a2d6b96d683f53429e94#egg=configuration-dev
+coverage==3.5.1
+-e hg+http://k0s.org/mozilla/hg/jetperf@4645ae34d2c41a353dcdbd856b486b6d3faabb99#egg=jetperf-dev
+logilab-astng==0.23.1
+logilab-common==0.57.1
+mozdevice==0.2
+-e hg+https://hg.mozilla.org/build/mozharness@df6b7f1e14d8c472125ef7a77b8a3b40c96ae181#egg=mozharness-jetperf
+mozhttpd==0.3
+mozinfo==0.3.3
+nose==1.1.2
+pyflakes==0.5.0
+pylint==0.25.1
+-e hg+https://hg.mozilla.org/build/talos@ee5c0b090d808e81a8fc5ba5f96b012797b3e785#egg=talos-dev
+virtualenv==1.7.1.2
+wsgiref==0.1.2
diff --git a/testing/mozharness/test/test_base_config.py b/testing/mozharness/test/test_base_config.py
new file mode 100644
index 0000000000..cafdbbca73
--- /dev/null
+++ b/testing/mozharness/test/test_base_config.py
@@ -0,0 +1,365 @@
+import os
+import unittest
+from copy import deepcopy
+
+JSON_TYPE = None
+try:
+ import simplejson as json
+
+ assert json
+except ImportError:
+ import json
+
+ JSON_TYPE = "json"
+else:
+ JSON_TYPE = "simplejson"
+
+import mozharness.base.config as config
+
+MH_DIR = os.path.dirname(os.path.dirname(__file__))
+
+
+class TestParseConfigFile(unittest.TestCase):
+ def _get_json_config(
+ self,
+ filename=os.path.join(MH_DIR, "configs", "test", "test.json"),
+ output="dict",
+ ):
+ fh = open(filename)
+ contents = json.load(fh)
+ fh.close()
+        if output == "dict":
+ return dict(contents)
+ else:
+ return contents
+
+ def _get_python_config(
+ self, filename=os.path.join(MH_DIR, "configs", "test", "test.py"), output="dict"
+ ):
+ global_dict = {}
+ local_dict = {}
+ exec(
+ compile(open(filename, "rb").read(), filename, "exec"),
+ global_dict,
+ local_dict,
+ )
+ return local_dict["config"]
+
+ def test_json_config(self):
+ c = config.BaseConfig(initial_config_file="test/test.json")
+ content_dict = self._get_json_config()
+ for key in content_dict.keys():
+ self.assertEqual(content_dict[key], c._config[key])
+
+ def test_python_config(self):
+ c = config.BaseConfig(initial_config_file="test/test.py")
+ config_dict = self._get_python_config()
+ for key in config_dict.keys():
+ self.assertEqual(config_dict[key], c._config[key])
+
+ def test_illegal_config(self):
+ self.assertRaises(
+ IOError,
+ config.parse_config_file,
+ "this_file_does_not_exist.py",
+ search_path="yadda",
+ )
+
+ def test_illegal_suffix(self):
+ self.assertRaises(
+ RuntimeError, config.parse_config_file, "test/test.illegal_suffix"
+ )
+
+ def test_malformed_json(self):
+ if JSON_TYPE == "simplejson":
+ self.assertRaises(
+ json.decoder.JSONDecodeError,
+ config.parse_config_file,
+ "test/test_malformed.json",
+ )
+ else:
+ self.assertRaises(
+ ValueError, config.parse_config_file, "test/test_malformed.json"
+ )
+
+ def test_malformed_python(self):
+ self.assertRaises(
+ SyntaxError, config.parse_config_file, "test/test_malformed.py"
+ )
+
+ def test_multiple_config_files_override_string(self):
+ c = config.BaseConfig(initial_config_file="test/test.py")
+ c.parse_args(["--cfg", "test/test_override.py,test/test_override2.py"])
+ self.assertEqual(c._config["override_string"], "yay")
+
+ def test_multiple_config_files_override_list(self):
+ c = config.BaseConfig(initial_config_file="test/test.py")
+ c.parse_args(["--cfg", "test/test_override.py,test/test_override2.py"])
+ self.assertEqual(c._config["override_list"], ["yay", "worked"])
+
+ def test_multiple_config_files_override_dict(self):
+ c = config.BaseConfig(initial_config_file="test/test.py")
+ c.parse_args(["--cfg", "test/test_override.py,test/test_override2.py"])
+ self.assertEqual(c._config["override_dict"], {"yay": "worked"})
+
+ def test_multiple_config_files_keep_string(self):
+ c = config.BaseConfig(initial_config_file="test/test.py")
+ c.parse_args(["--cfg", "test/test_override.py,test/test_override2.py"])
+ self.assertEqual(c._config["keep_string"], "don't change me")
+
+ def test_optional_config_files_override_value(self):
+ c = config.BaseConfig(initial_config_file="test/test.py")
+ c.parse_args(
+ [
+ "--cfg",
+ "test/test_override.py,test/test_override2.py",
+ "--opt-cfg",
+ "test/test_optional.py",
+ ]
+ )
+ self.assertEqual(c._config["opt_override"], "new stuff")
+
+ def test_optional_config_files_missing_config(self):
+ c = config.BaseConfig(initial_config_file="test/test.py")
+ c.parse_args(
+ [
+ "--cfg",
+ "test/test_override.py,test/test_override2.py",
+ "--opt-cfg",
+ "test/test_optional.py,does_not_exist.py",
+ ]
+ )
+ self.assertEqual(c._config["opt_override"], "new stuff")
+
+ def test_optional_config_files_keep_string(self):
+ c = config.BaseConfig(initial_config_file="test/test.py")
+ c.parse_args(
+ [
+ "--cfg",
+ "test/test_override.py,test/test_override2.py",
+ "--opt-cfg",
+ "test/test_optional.py",
+ ]
+ )
+ self.assertEqual(c._config["keep_string"], "don't change me")
+
+
+class TestReadOnlyDict(unittest.TestCase):
+ control_dict = {
+ "b": "2",
+ "c": {"d": "4"},
+ "h": ["f", "g"],
+ "e": ["f", "g", {"turtles": ["turtle1"]}],
+ "d": {"turtles": ["turtle1"]},
+ }
+
+ def get_unlocked_ROD(self):
+ r = config.ReadOnlyDict(self.control_dict)
+ return r
+
+ def get_locked_ROD(self):
+ r = config.ReadOnlyDict(self.control_dict)
+ r.lock()
+ return r
+
+ def test_create_ROD(self):
+ r = self.get_unlocked_ROD()
+ self.assertEqual(
+ r, self.control_dict, msg="can't transfer dict to ReadOnlyDict"
+ )
+
+ def test_pop_item(self):
+ r = self.get_unlocked_ROD()
+ r.popitem()
+ self.assertEqual(
+ len(r),
+ len(self.control_dict) - 1,
+ msg="can't popitem() ReadOnlyDict when unlocked",
+ )
+
+ def test_pop(self):
+ r = self.get_unlocked_ROD()
+ r.pop("e")
+ self.assertEqual(
+ len(r),
+ len(self.control_dict) - 1,
+ msg="can't pop() ReadOnlyDict when unlocked",
+ )
+
+ def test_set(self):
+ r = self.get_unlocked_ROD()
+ r["e"] = "yarrr"
+ self.assertEqual(
+ r["e"], "yarrr", msg="can't set var in ReadOnlyDict when unlocked"
+ )
+
+ def test_del(self):
+ r = self.get_unlocked_ROD()
+ del r["e"]
+ self.assertEqual(
+ len(r),
+ len(self.control_dict) - 1,
+ msg="can't del in ReadOnlyDict when unlocked",
+ )
+
+ def test_clear(self):
+ r = self.get_unlocked_ROD()
+ r.clear()
+ self.assertEqual(r, {}, msg="can't clear() ReadOnlyDict when unlocked")
+
+ def test_set_default(self):
+ r = self.get_unlocked_ROD()
+ for key in self.control_dict.keys():
+ r.setdefault(key, self.control_dict[key])
+ self.assertEqual(
+ r, self.control_dict, msg="can't setdefault() ReadOnlyDict when unlocked"
+ )
+
+    def test_locked_set(self):
+        r = self.get_locked_ROD()
+        with self.assertRaises(AssertionError):
+            r["e"] = 2
+
+    def test_locked_del(self):
+        r = self.get_locked_ROD()
+        with self.assertRaises(AssertionError):
+            del r["e"]
+
+ def test_locked_popitem(self):
+ r = self.get_locked_ROD()
+ self.assertRaises(AssertionError, r.popitem)
+
+ def test_locked_update(self):
+ r = self.get_locked_ROD()
+ self.assertRaises(AssertionError, r.update, {})
+
+ def test_locked_set_default(self):
+ r = self.get_locked_ROD()
+ self.assertRaises(AssertionError, r.setdefault, {})
+
+ def test_locked_pop(self):
+ r = self.get_locked_ROD()
+ self.assertRaises(AssertionError, r.pop)
+
+ def test_locked_clear(self):
+ r = self.get_locked_ROD()
+ self.assertRaises(AssertionError, r.clear)
+
+ def test_locked_second_level_dict_pop(self):
+ r = self.get_locked_ROD()
+ self.assertRaises(AssertionError, r["c"].update, {})
+
+ def test_locked_second_level_list_pop(self):
+ r = self.get_locked_ROD()
+ with self.assertRaises(AttributeError):
+ r["e"].pop()
+
+ def test_locked_third_level_mutate(self):
+ r = self.get_locked_ROD()
+ with self.assertRaises(AttributeError):
+ r["d"]["turtles"].append("turtle2")
+
+ def test_locked_object_in_tuple_mutate(self):
+ r = self.get_locked_ROD()
+ with self.assertRaises(AttributeError):
+ r["e"][2]["turtles"].append("turtle2")
+
+ def test_locked_second_level_dict_pop2(self):
+ r = self.get_locked_ROD()
+ self.assertRaises(AssertionError, r["c"].update, {})
+
+ def test_locked_second_level_list_pop2(self):
+ r = self.get_locked_ROD()
+ with self.assertRaises(AttributeError):
+ r["e"].pop()
+
+ def test_locked_third_level_mutate2(self):
+ r = self.get_locked_ROD()
+ with self.assertRaises(AttributeError):
+ r["d"]["turtles"].append("turtle2")
+
+ def test_locked_object_in_tuple_mutate2(self):
+ r = self.get_locked_ROD()
+ with self.assertRaises(AttributeError):
+ r["e"][2]["turtles"].append("turtle2")
+
+ def test_locked_deepcopy_set(self):
+ r = self.get_locked_ROD()
+ c = deepcopy(r)
+ c["e"] = "hey"
+ self.assertEqual(c["e"], "hey", "can't set var in ROD after deepcopy")
+
+
+class TestActions(unittest.TestCase):
+ all_actions = ["a", "b", "c", "d", "e"]
+ default_actions = ["b", "c", "d"]
+
+ def test_verify_actions(self):
+ c = config.BaseConfig(initial_config_file="test/test.json")
+ try:
+ c.verify_actions(["not_a_real_action"])
+ except SystemExit:
+ pass
+ else:
+ self.assertEqual(0, 1, msg="verify_actions() didn't die on invalid action")
+ c = config.BaseConfig(initial_config_file="test/test.json")
+ returned_actions = c.verify_actions(c.all_actions)
+ self.assertEqual(
+ c.all_actions,
+ returned_actions,
+ msg="returned actions from verify_actions() changed",
+ )
+
+ def test_default_actions(self):
+ c = config.BaseConfig(
+ default_actions=self.default_actions,
+ all_actions=self.all_actions,
+ initial_config_file="test/test.json",
+ )
+ self.assertEqual(
+ self.default_actions, c.get_actions(), msg="default_actions broken"
+ )
+
+ def test_no_action1(self):
+ c = config.BaseConfig(
+ default_actions=self.default_actions,
+ all_actions=self.all_actions,
+ initial_config_file="test/test.json",
+ )
+ c.parse_args(args=["foo", "--no-action", "a"])
+ self.assertEqual(
+ self.default_actions, c.get_actions(), msg="--no-ACTION broken"
+ )
+
+ def test_no_action2(self):
+ c = config.BaseConfig(
+ default_actions=self.default_actions,
+ all_actions=self.all_actions,
+ initial_config_file="test/test.json",
+ )
+ c.parse_args(args=["foo", "--no-c"])
+ self.assertEqual(["b", "d"], c.get_actions(), msg="--no-ACTION broken")
+
+ def test_add_action(self):
+ c = config.BaseConfig(
+ default_actions=self.default_actions,
+ all_actions=self.all_actions,
+ initial_config_file="test/test.json",
+ )
+ c.parse_args(args=["foo", "--add-action", "e"])
+ self.assertEqual(
+ ["b", "c", "d", "e"], c.get_actions(), msg="--add-action ACTION broken"
+ )
+
+ def test_only_action(self):
+ c = config.BaseConfig(
+ default_actions=self.default_actions,
+ all_actions=self.all_actions,
+ initial_config_file="test/test.json",
+ )
+ c.parse_args(args=["foo", "--a", "--e"])
+ self.assertEqual(["a", "e"], c.get_actions(), msg="--ACTION broken")
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/testing/mozharness/test/test_base_diskutils.py b/testing/mozharness/test/test_base_diskutils.py
new file mode 100644
index 0000000000..9c5ac59083
--- /dev/null
+++ b/testing/mozharness/test/test_base_diskutils.py
@@ -0,0 +1,89 @@
+import unittest
+from unittest import mock
+
+from mozharness.base.diskutils import DiskInfo, DiskSize, DiskutilsError, convert_to
+
+
+class TestDiskutils(unittest.TestCase):
+ def test_convert_to(self):
+        # 0 is 0 regardless of from_unit/to_unit
+ self.assertTrue(convert_to(size=0, from_unit="GB", to_unit="MB") == 0)
+ size = 524288 # 512 * 1024
+ # converting from/to same unit
+ self.assertTrue(convert_to(size=size, from_unit="MB", to_unit="MB") == size)
+
+ self.assertTrue(convert_to(size=size, from_unit="MB", to_unit="GB") == 512)
+
+ self.assertRaises(
+ DiskutilsError,
+ lambda: convert_to(size="a string", from_unit="MB", to_unit="MB"),
+ )
+ self.assertRaises(
+ DiskutilsError, lambda: convert_to(size=0, from_unit="foo", to_unit="MB")
+ )
+ self.assertRaises(
+ DiskutilsError, lambda: convert_to(size=0, from_unit="MB", to_unit="foo")
+ )
+
+
+class TestDiskInfo(unittest.TestCase):
+ def testDiskinfo_to(self):
+ di = DiskInfo()
+ self.assertTrue(di.unit == "bytes")
+ self.assertTrue(di.free == 0)
+ self.assertTrue(di.used == 0)
+ self.assertTrue(di.total == 0)
+ # convert to GB
+ di._to("GB")
+ self.assertTrue(di.unit == "GB")
+ self.assertTrue(di.free == 0)
+ self.assertTrue(di.used == 0)
+ self.assertTrue(di.total == 0)
+
+
+class MockStatvfs(object):
+ def __init__(self):
+ self.f_bsize = 0
+ self.f_frsize = 0
+ self.f_blocks = 0
+ self.f_bfree = 0
+ self.f_bavail = 0
+ self.f_files = 0
+ self.f_ffree = 0
+ self.f_favail = 0
+ self.f_flag = 0
+ self.f_namemax = 0
+
+
+class TestDiskSpace(unittest.TestCase):
+ @mock.patch("mozharness.base.diskutils.os")
+ def testDiskSpacePosix(self, mock_os):
+ ds = MockStatvfs()
+ mock_os.statvfs.return_value = ds
+ di = DiskSize()._posix_size("/")
+ self.assertTrue(di.unit == "bytes")
+ self.assertTrue(di.free == 0)
+ self.assertTrue(di.used == 0)
+ self.assertTrue(di.total == 0)
+
+ @mock.patch("mozharness.base.diskutils.ctypes")
+ def testDiskSpaceWindows(self, mock_ctypes):
+ mock_ctypes.windll.kernel32.GetDiskFreeSpaceExA.return_value = 0
+ mock_ctypes.windll.kernel32.GetDiskFreeSpaceExW.return_value = 0
+ di = DiskSize()._windows_size("/c/")
+ self.assertTrue(di.unit == "bytes")
+ self.assertTrue(di.free == 0)
+ self.assertTrue(di.used == 0)
+ self.assertTrue(di.total == 0)
+
+ @mock.patch("mozharness.base.diskutils.os")
+ @mock.patch("mozharness.base.diskutils.ctypes")
+ def testUnsupportedPlatform(self, mock_ctypes, mock_os):
+ mock_os.statvfs.side_effect = AttributeError("")
+ self.assertRaises(AttributeError, lambda: DiskSize()._posix_size("/"))
+ mock_ctypes.windll.kernel32.GetDiskFreeSpaceExW.side_effect = AttributeError("")
+ mock_ctypes.windll.kernel32.GetDiskFreeSpaceExA.side_effect = AttributeError("")
+ self.assertRaises(AttributeError, lambda: DiskSize()._windows_size("/"))
+ self.assertRaises(
+ DiskutilsError, lambda: DiskSize().get_size(path="/", unit="GB")
+ )
diff --git a/testing/mozharness/test/test_base_log.py b/testing/mozharness/test/test_base_log.py
new file mode 100644
index 0000000000..769cb1e390
--- /dev/null
+++ b/testing/mozharness/test/test_base_log.py
@@ -0,0 +1,43 @@
+import os
+import shutil
+import unittest
+
+import mozharness.base.log as log
+
+tmp_dir = "test_log_dir"
+log_name = "test"
+
+
+def clean_log_dir():
+ if os.path.exists(tmp_dir):
+ shutil.rmtree(tmp_dir)
+
+
+def get_log_file_path(level=None):
+ if level:
+ return os.path.join(tmp_dir, "%s_%s.log" % (log_name, level))
+ return os.path.join(tmp_dir, "%s.log" % log_name)
+
+
+class TestLog(unittest.TestCase):
+ def setUp(self):
+ clean_log_dir()
+
+ def tearDown(self):
+ clean_log_dir()
+
+ def test_log_dir(self):
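+ # Pre-create a plain *file* at the log-dir path; SimpleFileLogger is
+ # expected to clear it and create the log directory in its place.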
+ fh = open(tmp_dir, "w")
+ fh.write("foo")
+ fh.close()
+ l = log.SimpleFileLogger(
+ log_dir=tmp_dir, log_name=log_name, log_to_console=False
+ )
+ self.assertTrue(os.path.exists(tmp_dir))
+ l.log_message("blah")
+ self.assertTrue(os.path.exists(get_log_file_path()))
+ del l
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/testing/mozharness/test/test_base_parallel.py b/testing/mozharness/test/test_base_parallel.py
new file mode 100644
index 0000000000..272d84484d
--- /dev/null
+++ b/testing/mozharness/test/test_base_parallel.py
@@ -0,0 +1,28 @@
+import unittest
+
+from mozharness.base.parallel import ChunkingMixin
+
+
+class TestChunkingMixin(unittest.TestCase):
+ def setUp(self):
+ self.c = ChunkingMixin()
+
+ def test_one_chunk(self):
+ self.assertEqual(self.c.query_chunked_list([1, 3, 2], 1, 1), [1, 3, 2])
+
+ def test_sorted(self):
+ self.assertEqual(
+ self.c.query_chunked_list([1, 3, 2], 1, 1, sort=True), [1, 2, 3]
+ )
+
+ def test_first_chunk(self):
+ self.assertEqual(self.c.query_chunked_list([4, 5, 4, 3], 1, 2), [4, 5])
+
+ def test_last_chunk(self):
+ self.assertEqual(self.c.query_chunked_list([1, 4, 5, 7, 5, 6], 3, 3), [5, 6])
+
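+ # Chunks appear to be 1-indexed, and when the list doesn't divide evenly
+ # the earlier chunks receive the extra items (sizes below: 3, 2, 2).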
+ def test_not_evenly_divisible(self):
+ thing = [1, 3, 6, 4, 3, 2, 6]
+ self.assertEqual(self.c.query_chunked_list(thing, 1, 3), [1, 3, 6])
+ self.assertEqual(self.c.query_chunked_list(thing, 2, 3), [4, 3])
+ self.assertEqual(self.c.query_chunked_list(thing, 3, 3), [2, 6])
diff --git a/testing/mozharness/test/test_base_python.py b/testing/mozharness/test/test_base_python.py
new file mode 100644
index 0000000000..c05f704ef6
--- /dev/null
+++ b/testing/mozharness/test/test_base_python.py
@@ -0,0 +1,39 @@
+import os
+import unittest
+
+import mozharness.base.python as python
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+
+class TestVirtualenvMixin(unittest.TestCase):
+ def test_package_versions(self):
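+ # pip-freeze.example.txt holds canned "pip freeze" output; each
+ # "name==version" line should be parsed into a dict entry.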
+ example = os.path.join(here, "pip-freeze.example.txt")
+ output = open(example).read()
+ mixin = python.VirtualenvMixin()
+ packages = mixin.package_versions(output)
+
+ # from the file
+ expected = {
+ "MakeItSo": "0.2.6",
+ "PyYAML": "3.10",
+ "Tempita": "0.5.1",
+ "WebOb": "1.2b3",
+ "coverage": "3.5.1",
+ "logilab-astng": "0.23.1",
+ "logilab-common": "0.57.1",
+ "mozdevice": "0.2",
+ "mozhttpd": "0.3",
+ "mozinfo": "0.3.3",
+ "nose": "1.1.2",
+ "pyflakes": "0.5.0",
+ "pylint": "0.25.1",
+ "virtualenv": "1.7.1.2",
+ "wsgiref": "0.1.2",
+ }
+
+ self.assertEqual(packages, expected)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/testing/mozharness/test/test_base_script.py b/testing/mozharness/test/test_base_script.py
new file mode 100644
index 0000000000..75175dc794
--- /dev/null
+++ b/testing/mozharness/test/test_base_script.py
@@ -0,0 +1,960 @@
+import gc
+import os
+import re
+import shutil
+import tempfile
+import types
+import unittest
+from unittest import mock
+
+PYWIN32 = False
+if os.name == "nt":
+ try:
+ import win32file
+
+ PYWIN32 = True
+ except ImportError:
+ pass
+
+
+import mozharness.base.errors as errors
+import mozharness.base.log as log
+import mozharness.base.script as script
+from mozharness.base.config import parse_config_file
+from mozharness.base.log import CRITICAL, DEBUG, ERROR, FATAL, IGNORE, INFO, WARNING
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+test_string = """foo
+bar
+baz"""
+
+
+class CleanupObj(script.ScriptMixin, log.LogMixin):
+ def __init__(self):
+ super(CleanupObj, self).__init__()
+ self.log_obj = None
+ self.config = {"log_level": ERROR}
+
+
+def cleanup(files=None):
+ files = files or []
+ files.extend(("test_logs", "test_dir", "tmpfile_stdout", "tmpfile_stderr"))
+ gc.collect()
+ c = CleanupObj()
+ for f in files:
+ c.rmtree(f)
+
+
+def get_debug_script_obj():
+ s = script.BaseScript(
+ config={"log_type": "multi", "log_level": DEBUG},
+ initial_config_file="test/test.json",
+ )
+ return s
+
+
+def _post_fatal(self, **kwargs):
+ fh = open("tmpfile_stdout", "w")
+ print(test_string, file=fh)
+ fh.close()
+
+
+# TestScript {{{1
+class TestScript(unittest.TestCase):
+ def setUp(self):
+ cleanup()
+ self.s = None
+ self.tmpdir = tempfile.mkdtemp(suffix=".mozharness")
+
+ def tearDown(self):
+ # Close the logfile handles, or windows can't remove the logs
+ if hasattr(self, "s") and isinstance(self.s, object):
+ del self.s
+ cleanup([self.tmpdir])
+
+ # test _dump_config_hierarchy() when --dump-config-hierarchy is passed
+ def test_dump_config_hierarchy_valid_files_len(self):
+ try:
+ self.s = script.BaseScript(
+ initial_config_file="test/test.json",
+ option_args=["--cfg", "test/test_override.py,test/test_override2.py"],
+ config={"dump_config_hierarchy": True},
+ )
+ except SystemExit:
+ local_cfg_files = parse_config_file("test_logs/localconfigfiles.json")
+ # first let's see if the correct number of config files were
+ # realized
+ self.assertEqual(
+ len(local_cfg_files),
+ 4,
+ msg="--dump-config-hierarchy dumped wrong number of config files",
+ )
+
+ def test_dump_config_hierarchy_keys_unique_and_valid(self):
+ try:
+ self.s = script.BaseScript(
+ initial_config_file="test/test.json",
+ option_args=["--cfg", "test/test_override.py,test/test_override2.py"],
+ config={"dump_config_hierarchy": True},
+ )
+ except SystemExit:
+ local_cfg_files = parse_config_file("test_logs/localconfigfiles.json")
+ # now let's see if only unique items were added from each config
+ t_override = local_cfg_files.get("test/test_override.py", {})
+ self.assertTrue(
+ t_override.get("keep_string") == "don't change me"
+ and len(t_override.keys()) == 1,
+ msg="--dump-config-hierarchy dumped wrong keys/value for "
+ "`test/test_override.py`. There should only be one "
+ "item and it should be unique to all the other "
+ "items in test_log/localconfigfiles.json.",
+ )
+
+ def test_dump_config_hierarchy_matches_self_config(self):
+ try:
+ ######
+ # we need temp_cfg because self.s will have been garbage-collected
+ # (NoneType) by the time we reach the SystemExit exception.
+ # temp_cfg will differ from self.s.config because of
+ # 'dump_config_hierarchy'. we have to make a deepcopy because
+ # config is a locked dict
+ temp_s = script.BaseScript(
+ initial_config_file="test/test.json",
+ option_args=["--cfg", "test/test_override.py,test/test_override2.py"],
+ )
+ from copy import deepcopy
+
+ temp_cfg = deepcopy(temp_s.config)
+ temp_cfg.update({"dump_config_hierarchy": True})
+ ######
+ self.s = script.BaseScript(
+ initial_config_file="test/test.json",
+ option_args=["--cfg", "test/test_override.py,test/test_override2.py"],
+ config={"dump_config_hierarchy": True},
+ )
+ except SystemExit:
+ local_cfg_files = parse_config_file("test_logs/localconfigfiles.json")
+ # finally let's just make sure that all the items added up, equals
+ # what we started with: self.config
+ target_cfg = {}
+ for cfg_file in local_cfg_files:
+ target_cfg.update(local_cfg_files[cfg_file])
+ self.assertEqual(
+ target_cfg,
+ temp_cfg,
+ msg="all of the items (combined) in each cfg file dumped via "
+ "--dump-config-hierarchy does not equal self.config ",
+ )
+
+ # test _dump_config() when --dump-config is passed
+ def test_dump_config_equals_self_config(self):
+ try:
+ ######
+ # we need temp_cfg because self.s will have been garbage-collected
+ # (NoneType) by the time we reach the SystemExit exception.
+ # temp_cfg will differ from self.s.config because of 'dump_config'.
+ # we have to make a deepcopy because config is a locked dict
+ temp_s = script.BaseScript(
+ initial_config_file="test/test.json",
+ option_args=["--cfg", "test/test_override.py,test/test_override2.py"],
+ )
+ from copy import deepcopy
+
+ temp_cfg = deepcopy(temp_s.config)
+ temp_cfg.update({"dump_config": True})
+ ######
+ self.s = script.BaseScript(
+ initial_config_file="test/test.json",
+ option_args=["--cfg", "test/test_override.py,test/test_override2.py"],
+ config={"dump_config": True},
+ )
+ except SystemExit:
+ target_cfg = parse_config_file("test_logs/localconfig.json")
+ self.assertEqual(
+ target_cfg,
+ temp_cfg,
+ msg="all of the items (combined) in each cfg file dumped via "
+ "--dump-config does not equal self.config ",
+ )
+
+ def test_nonexistent_mkdir_p(self):
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ self.s.mkdir_p("test_dir/foo/bar/baz")
+ self.assertTrue(os.path.isdir("test_dir/foo/bar/baz"), msg="mkdir_p error")
+
+ def test_existing_mkdir_p(self):
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ os.makedirs("test_dir/foo/bar/baz")
+ self.s.mkdir_p("test_dir/foo/bar/baz")
+ self.assertTrue(
+ os.path.isdir("test_dir/foo/bar/baz"), msg="mkdir_p error when dir exists"
+ )
+
+ def test_chdir(self):
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ cwd = os.getcwd()
+ self.s.chdir("test_logs")
+ self.assertEqual(os.path.join(cwd, "test_logs"), os.getcwd(), msg="chdir error")
+ self.s.chdir(cwd)
+
+ def _test_log_helper(self, obj):
+ obj.debug("Testing DEBUG")
+ obj.warning("Testing WARNING")
+ obj.error("Testing ERROR")
+ obj.critical("Testing CRITICAL")
+ try:
+ obj.fatal("Testing FATAL")
+ except SystemExit:
+ pass
+ else:
+ self.fail("fatal() didn't SystemExit!")
+
+ def test_log(self):
+ self.s = get_debug_script_obj()
+ self.s.log_obj = None
+ self._test_log_helper(self.s)
+ del self.s
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ self._test_log_helper(self.s)
+
+ def test_run_nonexistent_command(self):
+ self.s = get_debug_script_obj()
+ self.s.run_command(
+ command="this_cmd_should_not_exist --help",
+ env={"GARBLE": "FARG"},
+ error_list=errors.PythonErrorList,
+ )
+ error_logsize = os.path.getsize("test_logs/test_error.log")
+ self.assertTrue(error_logsize > 0, msg="command not found error not hit")
+
+ def test_run_command_in_bad_dir(self):
+ self.s = get_debug_script_obj()
+ self.s.run_command(
+ command="ls",
+ cwd="/this_dir_should_not_exist",
+ error_list=errors.PythonErrorList,
+ )
+ error_logsize = os.path.getsize("test_logs/test_error.log")
+ self.assertTrue(error_logsize > 0, msg="bad dir error not hit")
+
+ def test_get_output_from_command_in_bad_dir(self):
+ self.s = get_debug_script_obj()
+ self.s.get_output_from_command(command="ls", cwd="/this_dir_should_not_exist")
+ error_logsize = os.path.getsize("test_logs/test_error.log")
+ self.assertTrue(error_logsize > 0, msg="bad dir error not hit")
+
+ def test_get_output_from_command_with_missing_file(self):
+ self.s = get_debug_script_obj()
+ self.s.get_output_from_command(command="ls /this_file_should_not_exist")
+ error_logsize = os.path.getsize("test_logs/test_error.log")
+ self.assertTrue(error_logsize > 0, msg="bad file error not hit")
+
+ def test_get_output_from_command_with_missing_file2(self):
+ self.s = get_debug_script_obj()
+ self.s.run_command(
+ command="cat mozharness/base/errors.py",
+ error_list=[
+ {"substr": "error", "level": ERROR},
+ {
+ "regex": re.compile(",$"),
+ "level": IGNORE,
+ },
+ {
+ "substr": "]$",
+ "level": WARNING,
+ },
+ ],
+ )
+ error_logsize = os.path.getsize("test_logs/test_error.log")
+ self.assertTrue(error_logsize > 0, msg="error list not working properly")
+
+ def test_download_unpack(self):
+ # NOTE: The action is called *download*; however, it also works for files on disk
+ self.s = get_debug_script_obj()
+
+ archives_path = os.path.join(here, "helper_files", "archives")
+
+ # Test basic decompression
+ for archive in (
+ "archive.tar",
+ "archive.tar.bz2",
+ "archive.tar.gz",
+ "archive.zip",
+ ):
+ self.s.download_unpack(
+ url=os.path.join(archives_path, archive), extract_to=self.tmpdir
+ )
+ self.assertIn("script.sh", os.listdir(os.path.join(self.tmpdir, "bin")))
+ self.assertIn("lorem.txt", os.listdir(self.tmpdir))
+ shutil.rmtree(self.tmpdir)
+
+ # Test permissions for extracted entries from zip archive
+ self.s.download_unpack(
+ url=os.path.join(archives_path, "archive.zip"),
+ extract_to=self.tmpdir,
+ )
+ file_stats = os.stat(os.path.join(self.tmpdir, "bin", "script.sh"))
+ orig_fstats = os.stat(
+ os.path.join(archives_path, "reference", "bin", "script.sh")
+ )
+ self.assertEqual(file_stats.st_mode, orig_fstats.st_mode)
+ shutil.rmtree(self.tmpdir)
+
+ # Test unzip specific dirs only
+ self.s.download_unpack(
+ url=os.path.join(archives_path, "archive.zip"),
+ extract_to=self.tmpdir,
+ extract_dirs=["bin/*"],
+ )
+ self.assertIn("bin", os.listdir(self.tmpdir))
+ self.assertNotIn("lorem.txt", os.listdir(self.tmpdir))
+ shutil.rmtree(self.tmpdir)
+
+ # Test for invalid filenames (Windows only)
+ if PYWIN32:
+ with self.assertRaises(IOError):
+ self.s.download_unpack(
+ url=os.path.join(archives_path, "archive_invalid_filename.zip"),
+ extract_to=self.tmpdir,
+ )
+
+ def test_unpack(self):
+ self.s = get_debug_script_obj()
+
+ archives_path = os.path.join(here, "helper_files", "archives")
+
+ # Test basic decompression
+ for archive in (
+ "archive.tar",
+ "archive.tar.bz2",
+ "archive.tar.gz",
+ "archive.zip",
+ ):
+ self.s.unpack(os.path.join(archives_path, archive), self.tmpdir)
+ self.assertIn("script.sh", os.listdir(os.path.join(self.tmpdir, "bin")))
+ self.assertIn("lorem.txt", os.listdir(self.tmpdir))
+ shutil.rmtree(self.tmpdir)
+
+ # Test permissions for extracted entries from zip archive
+ self.s.unpack(os.path.join(archives_path, "archive.zip"), self.tmpdir)
+ file_stats = os.stat(os.path.join(self.tmpdir, "bin", "script.sh"))
+ orig_fstats = os.stat(
+ os.path.join(archives_path, "reference", "bin", "script.sh")
+ )
+ self.assertEqual(file_stats.st_mode, orig_fstats.st_mode)
+ shutil.rmtree(self.tmpdir)
+
+ # Test extract specific dirs only
+ self.s.unpack(
+ os.path.join(archives_path, "archive.zip"),
+ self.tmpdir,
+ extract_dirs=["bin/*"],
+ )
+ self.assertIn("bin", os.listdir(self.tmpdir))
+ self.assertNotIn("lorem.txt", os.listdir(self.tmpdir))
+ shutil.rmtree(self.tmpdir)
+
+ # Test for invalid filenames (Windows only)
+ if PYWIN32:
+ with self.assertRaises(IOError):
+ self.s.unpack(
+ os.path.join(archives_path, "archive_invalid_filename.zip"),
+ self.tmpdir,
+ )
+
+
+# TestHelperFunctions {{{1
+class TestHelperFunctions(unittest.TestCase):
+ temp_file = "test_dir/mozilla"
+
+ def setUp(self):
+ cleanup()
+ self.s = None
+
+ def tearDown(self):
+ # Close the logfile handles, or windows can't remove the logs
+ if hasattr(self, "s") and isinstance(self.s, object):
+ del self.s
+ cleanup()
+
+ def _create_temp_file(self, contents=test_string):
+ os.mkdir("test_dir")
+ fh = open(self.temp_file, "w+")
+ fh.write(contents)
+ fh.close()
+
+ def test_mkdir_p(self):
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ self.s.mkdir_p("test_dir")
+ self.assertTrue(os.path.isdir("test_dir"), msg="mkdir_p error")
+
+ def test_get_output_from_command(self):
+ self._create_temp_file()
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ contents = self.s.get_output_from_command(
+ ["bash", "-c", "cat %s" % self.temp_file]
+ )
+ self.assertEqual(
+ test_string,
+ contents,
+ msg="get_output_from_command('cat file') differs from fh.write",
+ )
+
+ def test_run_command(self):
+ self._create_temp_file()
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ temp_file_name = os.path.basename(self.temp_file)
+ self.assertEqual(
+ self.s.run_command("cat %s" % temp_file_name, cwd="test_dir"),
+ 0,
+ msg="run_command('cat file') did not exit 0",
+ )
+
+ def test_move1(self):
+ self._create_temp_file()
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ temp_file2 = "%s2" % self.temp_file
+ self.s.move(self.temp_file, temp_file2)
+ self.assertFalse(
+ os.path.exists(self.temp_file),
+ msg="%s still exists after move()" % self.temp_file,
+ )
+
+ def test_move2(self):
+ self._create_temp_file()
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ temp_file2 = "%s2" % self.temp_file
+ self.s.move(self.temp_file, temp_file2)
+ self.assertTrue(
+ os.path.exists(temp_file2), msg="%s doesn't exist after move()" % temp_file2
+ )
+
+ def test_copyfile(self):
+ self._create_temp_file()
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ temp_file2 = "%s2" % self.temp_file
+ self.s.copyfile(self.temp_file, temp_file2)
+ self.assertEqual(
+ os.path.getsize(self.temp_file),
+ os.path.getsize(temp_file2),
+ msg="%s and %s are different sizes after copyfile()"
+ % (self.temp_file, temp_file2),
+ )
+
+ def test_existing_rmtree(self):
+ self._create_temp_file()
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ self.s.mkdir_p("test_dir/foo/bar/baz")
+ self.s.rmtree("test_dir")
+ self.assertFalse(os.path.exists("test_dir"), msg="rmtree unsuccessful")
+
+ def test_nonexistent_rmtree(self):
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ status = self.s.rmtree("test_dir")
+ self.assertFalse(status, msg="nonexistent rmtree error")
+
+ @unittest.skipUnless(PYWIN32, "PyWin32 specific")
+ def test_long_dir_rmtree(self):
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ # create a very long path that the command prompt cannot delete by
+ # using the \\?\ extended-length prefix (max path length ~32,767)
+ path = u"\\\\?\\%s\\test_dir" % os.getcwd()
+ win32file.CreateDirectoryExW(u".", path)
+
+ for x in range(0, 20):
+ print("path=%s" % path)
+ path = path + u"\\%sxxxxxxxxxxxxxxxxxxxx" % x
+ win32file.CreateDirectoryExW(u".", path)
+ self.s.rmtree("test_dir")
+ self.assertFalse(os.path.exists("test_dir"), msg="rmtree unsuccessful")
+
+ @unittest.skipUnless(PYWIN32, "PyWin32 specific")
+ def test_chmod_rmtree(self):
+ self._create_temp_file()
+ win32file.SetFileAttributesW(self.temp_file, win32file.FILE_ATTRIBUTE_READONLY)
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ self.s.rmtree("test_dir")
+ self.assertFalse(os.path.exists("test_dir"), msg="rmtree unsuccessful")
+
+ @unittest.skipIf(os.name == "nt", "Not for Windows")
+ def test_chmod(self):
+ self._create_temp_file()
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ self.s.chmod(self.temp_file, 0o100700)
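+ # os.stat()[0] is st_mode; 33216 == 0o100700, i.e. a regular file with
+ # mode 0700 (rwx for the owner only).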
+ self.assertEqual(os.stat(self.temp_file)[0], 33216, msg="chmod unsuccessful")
+
+ def test_env_normal(self):
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ script_env = self.s.query_env()
+ self.assertEqual(
+ script_env,
+ os.environ,
+ msg="query_env() != env\n%s\n%s" % (script_env, os.environ),
+ )
+
+ def test_env_normal2(self):
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ self.s.query_env()
+ script_env = self.s.query_env()
+ self.assertEqual(
+ script_env,
+ os.environ,
+ msg="Second query_env() != env\n%s\n%s" % (script_env, os.environ),
+ )
+
+ def test_env_partial(self):
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ script_env = self.s.query_env(partial_env={"foo": "bar"})
+ self.assertTrue("foo" in script_env and script_env["foo"] == "bar")
+
+ def test_env_path(self):
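+ # A "%(PATH)s" placeholder in a partial_env value should be expanded
+ # with the current value of PATH.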
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ partial_path = "yaddayadda:%(PATH)s"
+ full_path = partial_path % {"PATH": os.environ["PATH"]}
+ script_env = self.s.query_env(partial_env={"PATH": partial_path})
+ self.assertEqual(script_env["PATH"], full_path)
+
+ def test_query_exe(self):
+ self.s = script.BaseScript(
+ initial_config_file="test/test.json",
+ config={"exes": {"foo": "bar"}},
+ )
+ path = self.s.query_exe("foo")
+ self.assertEqual(path, "bar")
+
+ def test_query_exe_string_replacement(self):
+ self.s = script.BaseScript(
+ initial_config_file="test/test.json",
+ config={
+ "base_work_dir": "foo",
+ "work_dir": "bar",
+ "exes": {"foo": os.path.join("%(abs_work_dir)s", "baz")},
+ },
+ )
+ path = self.s.query_exe("foo")
+ self.assertEqual(path, os.path.join("foo", "bar", "baz"))
+
+ def test_read_from_file(self):
+ self._create_temp_file()
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ contents = self.s.read_from_file(self.temp_file)
+ self.assertEqual(contents, test_string)
+
+ def test_read_from_nonexistent_file(self):
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+ contents = self.s.read_from_file("nonexistent_file!!!")
+ self.assertEqual(contents, None)
+
+
+# TestScriptLogging {{{1
+class TestScriptLogging(unittest.TestCase):
+ # TODO: add a log watcher helper function, here and in test_log.
+ def setUp(self):
+ cleanup()
+ self.s = None
+
+ def tearDown(self):
+ # Close the logfile handles, or windows can't remove the logs
+ if hasattr(self, "s") and isinstance(self.s, object):
+ del self.s
+ cleanup()
+
+ def test_info_logsize(self):
+ self.s = script.BaseScript(
+ config={"log_type": "multi"}, initial_config_file="test/test.json"
+ )
+ info_logsize = os.path.getsize("test_logs/test_info.log")
+ self.assertTrue(info_logsize > 0, msg="initial info logfile missing/size 0")
+
+ def test_add_summary_info(self):
+ self.s = script.BaseScript(
+ config={"log_type": "multi"}, initial_config_file="test/test.json"
+ )
+ info_logsize = os.path.getsize("test_logs/test_info.log")
+ self.s.add_summary("one")
+ info_logsize2 = os.path.getsize("test_logs/test_info.log")
+ self.assertTrue(
+ info_logsize < info_logsize2, msg="add_summary() info not logged"
+ )
+
+ def test_add_summary_warning(self):
+ self.s = script.BaseScript(
+ config={"log_type": "multi"}, initial_config_file="test/test.json"
+ )
+ warning_logsize = os.path.getsize("test_logs/test_warning.log")
+ self.s.add_summary("two", level=WARNING)
+ warning_logsize2 = os.path.getsize("test_logs/test_warning.log")
+ self.assertTrue(
+ warning_logsize < warning_logsize2,
+ msg="add_summary(level=%s) not logged in warning log" % WARNING,
+ )
+
+ def test_summary(self):
+ self.s = script.BaseScript(
+ config={"log_type": "multi"}, initial_config_file="test/test.json"
+ )
+ self.s.add_summary("one")
+ self.s.add_summary("two", level=WARNING)
+ info_logsize = os.path.getsize("test_logs/test_info.log")
+ warning_logsize = os.path.getsize("test_logs/test_warning.log")
+ self.s.summary()
+ info_logsize2 = os.path.getsize("test_logs/test_info.log")
+ warning_logsize2 = os.path.getsize("test_logs/test_warning.log")
+ msg = ""
+ if info_logsize >= info_logsize2:
+ msg += "summary() didn't log to info!\n"
+ if warning_logsize >= warning_logsize2:
+ msg += "summary() didn't log to warning!\n"
+ self.assertEqual(msg, "", msg=msg)
+
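+ # Log a message at `log_level`, then check that each level named in
+ # `log_level_file_list` has a non-empty log file; in multi-log mode a
+ # message should land in every log from INFO up to its own severity.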
+ def _test_log_level(self, log_level, log_level_file_list):
+ self.s = script.BaseScript(
+ config={"log_type": "multi"}, initial_config_file="test/test.json"
+ )
+ if log_level != FATAL:
+ self.s.log("testing", level=log_level)
+ else:
+ self.s._post_fatal = types.MethodType(_post_fatal, self.s)
+ try:
+ self.s.fatal("testing")
+ except SystemExit:
+ contents = None
+ if os.path.exists("tmpfile_stdout"):
+ fh = open("tmpfile_stdout")
+ contents = fh.read()
+ fh.close()
+ self.assertEqual(contents.rstrip(), test_string, "_post_fatal failed!")
+ del self.s
+ msg = ""
+ for level in log_level_file_list:
+ log_path = "test_logs/test_%s.log" % level
+ if not os.path.exists(log_path):
+ msg += "%s doesn't exist!\n" % log_path
+ else:
+ filesize = os.path.getsize(log_path)
+ if not filesize > 0:
+ msg += "%s is size 0!\n" % log_path
+ self.assertEqual(msg, "", msg=msg)
+
+ def test_debug(self):
+ self._test_log_level(DEBUG, [])
+
+ def test_ignore(self):
+ self._test_log_level(IGNORE, [])
+
+ def test_info(self):
+ self._test_log_level(INFO, [INFO])
+
+ def test_warning(self):
+ self._test_log_level(WARNING, [INFO, WARNING])
+
+ def test_error(self):
+ self._test_log_level(ERROR, [INFO, WARNING, ERROR])
+
+ def test_critical(self):
+ self._test_log_level(CRITICAL, [INFO, WARNING, ERROR, CRITICAL])
+
+ def test_fatal(self):
+ self._test_log_level(FATAL, [INFO, WARNING, ERROR, CRITICAL, FATAL])
+
+
+# TestRetry {{{1
+class NewError(Exception):
+ pass
+
+
+class OtherError(Exception):
+ pass
+
+
+class TestRetry(unittest.TestCase):
+ def setUp(self):
+ self.ATTEMPT_N = 1
+ self.s = script.BaseScript(initial_config_file="test/test.json")
+
+ def tearDown(self):
+ # Close the logfile handles, or windows can't remove the logs
+ if hasattr(self, "s") and isinstance(self.s, object):
+ del self.s
+ cleanup()
+
+ def _succeedOnSecondAttempt(self, foo=None, exception=Exception):
+ if self.ATTEMPT_N == 2:
+ self.ATTEMPT_N += 1
+ return
+ self.ATTEMPT_N += 1
+ raise exception("Fail")
+
+ def _raiseCustomException(self):
+ return self._succeedOnSecondAttempt(exception=NewError)
+
+ def _alwaysPass(self):
+ self.ATTEMPT_N += 1
+ return True
+
+ def _mirrorArgs(self, *args, **kwargs):
+ return args, kwargs
+
+ def _alwaysFail(self):
+ raise Exception("Fail")
+
+ def testRetrySucceed(self):
+ # Will raise if anything goes wrong
+ self.s.retry(self._succeedOnSecondAttempt, attempts=2, sleeptime=0)
+
+ def testRetryFailWithoutCatching(self):
+ self.assertRaises(
+ Exception, self.s.retry, self._alwaysFail, sleeptime=0, exceptions=()
+ )
+
+ def testRetryFailEnsureRaisesLastException(self):
+ self.assertRaises(
+ SystemExit, self.s.retry, self._alwaysFail, sleeptime=0, error_level=FATAL
+ )
+
+ def testRetrySelectiveExceptionSucceed(self):
+ self.s.retry(
+ self._raiseCustomException,
+ attempts=2,
+ sleeptime=0,
+ retry_exceptions=(NewError,),
+ )
+
+ def testRetrySelectiveExceptionFail(self):
+ self.assertRaises(
+ NewError,
+ self.s.retry,
+ self._raiseCustomException,
+ attempts=2,
+ sleeptime=0,
+ retry_exceptions=(OtherError,),
+ )
+
+ # TODO: figure out a way to test that the sleep actually happened
+ def testRetryWithSleep(self):
+ self.s.retry(self._succeedOnSecondAttempt, attempts=2, sleeptime=1)
+
+ def testRetryOnlyRunOnce(self):
+ """Tests that retry() doesn't call the action again after success"""
+ self.s.retry(self._alwaysPass, attempts=3, sleeptime=0)
+ # self.ATTEMPT_N gets increased regardless of pass/fail
+ self.assertEqual(2, self.ATTEMPT_N)
+
+ def testRetryReturns(self):
+ ret = self.s.retry(self._alwaysPass, sleeptime=0)
+ self.assertEqual(ret, True)
+
+ def testRetryCleanupIsCalled(self):
+ cleanup = mock.Mock()
+ self.s.retry(self._succeedOnSecondAttempt, cleanup=cleanup, sleeptime=0)
+ self.assertEqual(cleanup.call_count, 1)
+
+ def testRetryArgsPassed(self):
+ args = (1, "two", 3)
+ kwargs = dict(foo="a", bar=7)
+ ret = self.s.retry(
+ self._mirrorArgs, args=args, kwargs=kwargs.copy(), sleeptime=0
+ )
+ print(ret)
+ self.assertEqual(ret[0], args)
+ self.assertEqual(ret[1], kwargs)
+
+
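+# The @PreScriptRun/@PostScriptRun and @PreScriptAction/@PostScriptAction
+# decorators register the wrapped methods as listeners (see the _listeners
+# assertions below); run-level hooks fire once per run, while action-level
+# hooks fire around every action unless restricted to a named action.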
+class BaseScriptWithDecorators(script.BaseScript):
+ def __init__(self, *args, **kwargs):
+ super(BaseScriptWithDecorators, self).__init__(*args, **kwargs)
+
+ self.pre_run_1_args = []
+ self.raise_during_pre_run_1 = False
+ self.pre_action_1_args = []
+ self.raise_during_pre_action_1 = False
+ self.pre_action_2_args = []
+ self.pre_action_3_args = []
+ self.post_action_1_args = []
+ self.raise_during_post_action_1 = False
+ self.post_action_2_args = []
+ self.post_action_3_args = []
+ self.post_run_1_args = []
+ self.raise_during_post_run_1 = False
+ self.post_run_2_args = []
+ self.raise_during_build = False
+
+ @script.PreScriptRun
+ def pre_run_1(self, *args, **kwargs):
+ self.pre_run_1_args.append((args, kwargs))
+
+ if self.raise_during_pre_run_1:
+ raise Exception(self.raise_during_pre_run_1)
+
+ @script.PreScriptAction
+ def pre_action_1(self, *args, **kwargs):
+ self.pre_action_1_args.append((args, kwargs))
+
+ if self.raise_during_pre_action_1:
+ raise Exception(self.raise_during_pre_action_1)
+
+ @script.PreScriptAction
+ def pre_action_2(self, *args, **kwargs):
+ self.pre_action_2_args.append((args, kwargs))
+
+ @script.PreScriptAction("clobber")
+ def pre_action_3(self, *args, **kwargs):
+ self.pre_action_3_args.append((args, kwargs))
+
+ @script.PostScriptAction
+ def post_action_1(self, *args, **kwargs):
+ self.post_action_1_args.append((args, kwargs))
+
+ if self.raise_during_post_action_1:
+ raise Exception(self.raise_during_post_action_1)
+
+ @script.PostScriptAction
+ def post_action_2(self, *args, **kwargs):
+ self.post_action_2_args.append((args, kwargs))
+
+ @script.PostScriptAction("build")
+ def post_action_3(self, *args, **kwargs):
+ self.post_action_3_args.append((args, kwargs))
+
+ @script.PostScriptRun
+ def post_run_1(self, *args, **kwargs):
+ self.post_run_1_args.append((args, kwargs))
+
+ if self.raise_during_post_run_1:
+ raise Exception(self.raise_during_post_run_1)
+
+ @script.PostScriptRun
+ def post_run_2(self, *args, **kwargs):
+ self.post_run_2_args.append((args, kwargs))
+
+ def build(self):
+ if self.raise_during_build:
+ raise Exception(self.raise_during_build)
+
+
+class TestScriptDecorators(unittest.TestCase):
+ def setUp(self):
+ cleanup()
+ self.s = None
+
+ def tearDown(self):
+ if hasattr(self, "s") and isinstance(self.s, object):
+ del self.s
+
+ cleanup()
+
+ def test_decorators_registered(self):
+ self.s = BaseScriptWithDecorators(initial_config_file="test/test.json")
+
+ self.assertEqual(len(self.s._listeners["pre_run"]), 1)
+ self.assertEqual(len(self.s._listeners["pre_action"]), 3)
+ self.assertEqual(len(self.s._listeners["post_action"]), 3)
+ self.assertEqual(len(self.s._listeners["post_run"]), 2)
+
+ def test_pre_post_fired(self):
+ self.s = BaseScriptWithDecorators(initial_config_file="test/test.json")
+ self.s.run()
+
+ self.assertEqual(len(self.s.pre_run_1_args), 1)
+ self.assertEqual(len(self.s.pre_action_1_args), 2)
+ self.assertEqual(len(self.s.pre_action_2_args), 2)
+ self.assertEqual(len(self.s.pre_action_3_args), 1)
+ self.assertEqual(len(self.s.post_action_1_args), 2)
+ self.assertEqual(len(self.s.post_action_2_args), 2)
+ self.assertEqual(len(self.s.post_action_3_args), 1)
+ self.assertEqual(len(self.s.post_run_1_args), 1)
+
+ self.assertEqual(self.s.pre_run_1_args[0], ((), {}))
+
+ self.assertEqual(self.s.pre_action_1_args[0], (("clobber",), {}))
+ self.assertEqual(self.s.pre_action_1_args[1], (("build",), {}))
+
+ # pre_action_3 should only get called for the action it is registered
+ # with.
+ self.assertEqual(self.s.pre_action_3_args[0], (("clobber",), {}))
+
+ self.assertEqual(self.s.post_action_1_args[0][0], ("clobber",))
+ self.assertEqual(self.s.post_action_1_args[0][1], dict(success=True))
+ self.assertEqual(self.s.post_action_1_args[1][0], ("build",))
+ self.assertEqual(self.s.post_action_1_args[1][1], dict(success=True))
+
+ # post_action_3 should only get called for the action it is registered
+ # with.
+ self.assertEqual(self.s.post_action_3_args[0], (("build",), dict(success=True)))
+
+ self.assertEqual(self.s.post_run_1_args[0], ((), {}))
+
+ def test_post_always_fired(self):
+ self.s = BaseScriptWithDecorators(initial_config_file="test/test.json")
+ self.s.raise_during_build = "Testing post always fired."
+
+ with self.assertRaises(SystemExit):
+ self.s.run()
+
+ self.assertEqual(len(self.s.pre_run_1_args), 1)
+ self.assertEqual(len(self.s.pre_action_1_args), 2)
+ self.assertEqual(len(self.s.post_action_1_args), 2)
+ self.assertEqual(len(self.s.post_action_2_args), 2)
+ self.assertEqual(len(self.s.post_run_1_args), 1)
+ self.assertEqual(len(self.s.post_run_2_args), 1)
+
+ self.assertEqual(self.s.post_action_1_args[0][1], dict(success=True))
+ self.assertEqual(self.s.post_action_1_args[1][1], dict(success=False))
+ self.assertEqual(self.s.post_action_2_args[1][1], dict(success=False))
+
+ def test_pre_run_exception(self):
+ self.s = BaseScriptWithDecorators(initial_config_file="test/test.json")
+ self.s.raise_during_pre_run_1 = "Error during pre run 1"
+
+ with self.assertRaises(SystemExit):
+ self.s.run()
+
+ self.assertEqual(len(self.s.pre_run_1_args), 1)
+ self.assertEqual(len(self.s.pre_action_1_args), 0)
+ self.assertEqual(len(self.s.post_run_1_args), 1)
+ self.assertEqual(len(self.s.post_run_2_args), 1)
+
+ def test_pre_action_exception(self):
+ self.s = BaseScriptWithDecorators(initial_config_file="test/test.json")
+ self.s.raise_during_pre_action_1 = "Error during pre 1"
+
+ with self.assertRaises(SystemExit):
+ self.s.run()
+
+ self.assertEqual(len(self.s.pre_run_1_args), 1)
+ self.assertEqual(len(self.s.pre_action_1_args), 1)
+ self.assertEqual(len(self.s.pre_action_2_args), 0)
+ self.assertEqual(len(self.s.post_action_1_args), 1)
+ self.assertEqual(len(self.s.post_action_2_args), 1)
+ self.assertEqual(len(self.s.post_run_1_args), 1)
+ self.assertEqual(len(self.s.post_run_2_args), 1)
+
+ def test_post_action_exception(self):
+ self.s = BaseScriptWithDecorators(initial_config_file="test/test.json")
+ self.s.raise_during_post_action_1 = "Error during post 1"
+
+ with self.assertRaises(SystemExit):
+ self.s.run()
+
+ self.assertEqual(len(self.s.pre_run_1_args), 1)
+ self.assertEqual(len(self.s.post_action_1_args), 1)
+ self.assertEqual(len(self.s.post_action_2_args), 1)
+ self.assertEqual(len(self.s.post_run_1_args), 1)
+ self.assertEqual(len(self.s.post_run_2_args), 1)
+
+ def test_post_run_exception(self):
+ self.s = BaseScriptWithDecorators(initial_config_file="test/test.json")
+ self.s.raise_during_post_run_1 = "Error during post run 1"
+
+ with self.assertRaises(SystemExit):
+ self.s.run()
+
+ self.assertEqual(len(self.s.post_run_1_args), 1)
+ self.assertEqual(len(self.s.post_run_2_args), 1)
+
+
+# main {{{1
+if __name__ == "__main__":
+ unittest.main()
diff --git a/testing/mozharness/test/test_base_vcs_mercurial.py b/testing/mozharness/test/test_base_vcs_mercurial.py
new file mode 100644
index 0000000000..f00e0b586c
--- /dev/null
+++ b/testing/mozharness/test/test_base_vcs_mercurial.py
@@ -0,0 +1,396 @@
+import os
+import platform
+import shutil
+import tempfile
+import unittest
+
+import mozharness.base.vcs.mercurial as mercurial
+
+test_string = """foo
+bar
+baz"""
+
+HG = ["hg"] + mercurial.HG_OPTIONS
+
+# Known default .hgrc
+os.environ["HGRCPATH"] = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), "helper_files", ".hgrc")
+)
+
+
+def cleanup():
+ if os.path.exists("test_logs"):
+ shutil.rmtree("test_logs")
+ if os.path.exists("test_dir"):
+ if os.path.isdir("test_dir"):
+ shutil.rmtree("test_dir")
+ else:
+ os.remove("test_dir")
+ for filename in ("localconfig.json", "localconfig.json.bak"):
+ if os.path.exists(filename):
+ os.remove(filename)
+
+
+def get_mercurial_vcs_obj():
+ m = mercurial.MercurialVCS()
+ m.config = {}
+ return m
+
+
+def get_revisions(dest):
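+ # `hg log` lists revisions newest-first, so retval[0] is the tip and
+ # retval[-1] is the first (root) commit.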
+ m = get_mercurial_vcs_obj()
+ retval = []
+ command = HG + ["log", "-R", dest, "--template", "{node}\n"]
+ for rev in m.get_output_from_command(command).split("\n"):
+ rev = rev.strip()
+ if not rev:
+ continue
+ retval.append(rev)
+ return retval
+
+
+class TestMakeAbsolute(unittest.TestCase):
+ # _make_absolute() doesn't play nicely with windows/msys paths.
+ # TODO: fix _make_absolute, write it out of the picture, or determine
+ # that it's not needed on windows.
+ if platform.system() not in ("Windows",):
+
+ def test_absolute_path(self):
+ m = get_mercurial_vcs_obj()
+ self.assertEqual(m._make_absolute("/foo/bar"), "/foo/bar")
+
+ def test_relative_path(self):
+ m = get_mercurial_vcs_obj()
+ self.assertEqual(m._make_absolute("foo/bar"), os.path.abspath("foo/bar"))
+
+ def test_HTTP_paths(self):
+ m = get_mercurial_vcs_obj()
+ self.assertEqual(m._make_absolute("http://foo/bar"), "http://foo/bar")
+
+ def test_absolute_file_path(self):
+ m = get_mercurial_vcs_obj()
+ self.assertEqual(m._make_absolute("file:///foo/bar"), "file:///foo/bar")
+
+ def test_relative_file_path(self):
+ m = get_mercurial_vcs_obj()
+ self.assertEqual(
+ m._make_absolute("file://foo/bar"), "file://%s/foo/bar" % os.getcwd()
+ )
+
+
+class TestHg(unittest.TestCase):
+ def _init_hg_repo(self, hg_obj, repodir):
+ hg_obj.run_command(
+ [
+ "bash",
+ os.path.join(
+ os.path.dirname(__file__), "helper_files", "init_hgrepo.sh"
+ ),
+ repodir,
+ ]
+ )
+
+ def setUp(self):
+ self.tmpdir = tempfile.mkdtemp()
+ self.repodir = os.path.join(self.tmpdir, "repo")
+ m = get_mercurial_vcs_obj()
+ self._init_hg_repo(m, self.repodir)
+ self.revisions = get_revisions(self.repodir)
+ self.wc = os.path.join(self.tmpdir, "wc")
+ self.pwd = os.getcwd()
+
+ def tearDown(self):
+ shutil.rmtree(self.tmpdir)
+ os.chdir(self.pwd)
+
+ def test_get_branch(self):
+ m = get_mercurial_vcs_obj()
+ m.clone(self.repodir, self.wc)
+ b = m.get_branch_from_path(self.wc)
+ self.assertEqual(b, "default")
+
+ def test_get_branches(self):
+ m = get_mercurial_vcs_obj()
+ m.clone(self.repodir, self.wc)
+ branches = m.get_branches_from_path(self.wc)
+ self.assertEqual(sorted(branches), sorted(["branch2", "default"]))
+
+ def test_clone(self):
+ m = get_mercurial_vcs_obj()
+ rev = m.clone(self.repodir, self.wc, update_dest=False)
+ self.assertEqual(rev, None)
+ self.assertEqual(self.revisions, get_revisions(self.wc))
+ self.assertEqual(sorted(os.listdir(self.wc)), [".hg"])
+
+ def test_clone_into_non_empty_dir(self):
+ m = get_mercurial_vcs_obj()
+ m.mkdir_p(self.wc)
+ open(os.path.join(self.wc, "test.txt"), "w").write("hello")
+ m.clone(self.repodir, self.wc, update_dest=False)
+ self.assertTrue(not os.path.exists(os.path.join(self.wc, "test.txt")))
+
+ def test_clone_update(self):
+ m = get_mercurial_vcs_obj()
+ rev = m.clone(self.repodir, self.wc, update_dest=True)
+ self.assertEqual(rev, self.revisions[0])
+
+ def test_clone_branch(self):
+ m = get_mercurial_vcs_obj()
+ m.clone(self.repodir, self.wc, branch="branch2", update_dest=False)
+ # On hg 1.6 and newer, we should only have a subset of the revisions
+ if m.hg_ver() >= (1, 6, 0):
+ self.assertEqual(self.revisions[1:], get_revisions(self.wc))
+ else:
+ self.assertEqual(self.revisions, get_revisions(self.wc))
+
+ def test_clone_update_branch(self):
+ m = get_mercurial_vcs_obj()
+ rev = m.clone(
+ self.repodir,
+ os.path.join(self.tmpdir, "wc"),
+ branch="branch2",
+ update_dest=True,
+ )
+ self.assertEqual(rev, self.revisions[1], self.revisions)
+
+ def test_clone_revision(self):
+ m = get_mercurial_vcs_obj()
+ m.clone(self.repodir, self.wc, revision=self.revisions[0], update_dest=False)
+ # We'll only get a subset of the revisions
+ self.assertEqual(
+ self.revisions[:1] + self.revisions[2:], get_revisions(self.wc)
+ )
+
+ def test_update_revision(self):
+ m = get_mercurial_vcs_obj()
+ rev = m.clone(self.repodir, self.wc, update_dest=False)
+ self.assertEqual(rev, None)
+
+ rev = m.update(self.wc, revision=self.revisions[1])
+ self.assertEqual(rev, self.revisions[1])
+
+ def test_pull(self):
+ m = get_mercurial_vcs_obj()
+ # Clone just the first rev
+ m.clone(self.repodir, self.wc, revision=self.revisions[-1], update_dest=False)
+ self.assertEqual(get_revisions(self.wc), self.revisions[-1:])
+
+ # Now pull in new changes
+ rev = m.pull(self.repodir, self.wc, update_dest=False)
+ self.assertEqual(rev, None)
+ self.assertEqual(get_revisions(self.wc), self.revisions)
+
+ def test_pull_revision(self):
+ m = get_mercurial_vcs_obj()
+ # Clone just the first rev
+ m.clone(self.repodir, self.wc, revision=self.revisions[-1], update_dest=False)
+ self.assertEqual(get_revisions(self.wc), self.revisions[-1:])
+
+ # Now pull in just the last revision
+ rev = m.pull(
+ self.repodir, self.wc, revision=self.revisions[0], update_dest=False
+ )
+ self.assertEqual(rev, None)
+
+ # We'll be missing the middle revision (on another branch)
+ self.assertEqual(
+ get_revisions(self.wc), self.revisions[:1] + self.revisions[2:]
+ )
+
+ def test_pull_branch(self):
+ m = get_mercurial_vcs_obj()
+ # Clone just the first rev
+ m.clone(self.repodir, self.wc, revision=self.revisions[-1], update_dest=False)
+ self.assertEqual(get_revisions(self.wc), self.revisions[-1:])
+
+ # Now pull in the other branch
+ rev = m.pull(self.repodir, self.wc, branch="branch2", update_dest=False)
+ self.assertEqual(rev, None)
+
+ # On hg 1.6 and newer, we'll be missing the last revision (on another branch)
+ if m.hg_ver() >= (1, 6, 0):
+ self.assertEqual(get_revisions(self.wc), self.revisions[1:])
+ else:
+ self.assertEqual(get_revisions(self.wc), self.revisions)
+
+ def test_pull_unrelated(self):
+ m = get_mercurial_vcs_obj()
+ # Create a new repo
+ repo2 = os.path.join(self.tmpdir, "repo2")
+ self._init_hg_repo(m, repo2)
+
+ self.assertNotEqual(self.revisions, get_revisions(repo2))
+
+ # Clone the original repo
+ m.clone(self.repodir, self.wc, update_dest=False)
+ # Hide the expected error
+ m.config = {"log_to_console": False}
+ # Try to pull in changes from the new repo
+ self.assertRaises(
+ mercurial.VCSException, m.pull, repo2, self.wc, update_dest=False
+ )
+
+ def test_push(self):
+ m = get_mercurial_vcs_obj()
+ m.clone(self.repodir, self.wc, revision=self.revisions[-2])
+ m.push(src=self.repodir, remote=self.wc)
+ self.assertEqual(get_revisions(self.wc), self.revisions)
+
+ def test_push_with_branch(self):
+ m = get_mercurial_vcs_obj()
+ if m.hg_ver() >= (1, 6, 0):
+ m.clone(self.repodir, self.wc, revision=self.revisions[-1])
+ m.push(src=self.repodir, remote=self.wc, branch="branch2")
+ m.push(src=self.repodir, remote=self.wc, branch="default")
+ self.assertEqual(get_revisions(self.wc), self.revisions)
+
+ def test_push_with_revision(self):
+ m = get_mercurial_vcs_obj()
+ m.clone(self.repodir, self.wc, revision=self.revisions[-2])
+ m.push(src=self.repodir, remote=self.wc, revision=self.revisions[-1])
+ self.assertEqual(get_revisions(self.wc), self.revisions[-2:])
+
+ def test_mercurial(self):
+ m = get_mercurial_vcs_obj()
+ m.vcs_config = {
+ "repo": self.repodir,
+ "dest": self.wc,
+ "vcs_share_base": os.path.join(self.tmpdir, "share"),
+ }
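+ # Call ensure_repo_and_revision() twice: the second call presumably
+ # reuses the shared clone and should still report the tip revision.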
+ m.ensure_repo_and_revision()
+ rev = m.ensure_repo_and_revision()
+ self.assertEqual(rev, self.revisions[0])
+
+ def test_push_new_branches_not_allowed(self):
+ m = get_mercurial_vcs_obj()
+ m.clone(self.repodir, self.wc, revision=self.revisions[0])
+ # Hide the expected error
+ m.config = {"log_to_console": False}
+ self.assertRaises(
+ Exception, m.push, self.repodir, self.wc, push_new_branches=False
+ )
+
+ def test_mercurial_relative_dir(self):
+ m = get_mercurial_vcs_obj()
+ repo = os.path.basename(self.repodir)
+ wc = os.path.basename(self.wc)
+ m.vcs_config = {
+ "repo": repo,
+ "dest": wc,
+ "revision": self.revisions[-1],
+ "vcs_share_base": os.path.join(self.tmpdir, "share"),
+ }
+ m.chdir(os.path.dirname(self.repodir))
+ try:
+ rev = m.ensure_repo_and_revision()
+ self.assertEqual(rev, self.revisions[-1])
+ m.info("Creating test.txt")
+ open(os.path.join(self.wc, "test.txt"), "w").write("hello!")
+
+ m = get_mercurial_vcs_obj()
+ m.vcs_config = {
+ "repo": repo,
+ "dest": wc,
+ "revision": self.revisions[0],
+ "vcs_share_base": os.path.join(self.tmpdir, "share"),
+ }
+ rev = m.ensure_repo_and_revision()
+ self.assertEqual(rev, self.revisions[0])
+ # Make sure our local file didn't go away
+ self.assertTrue(os.path.exists(os.path.join(self.wc, "test.txt")))
+ finally:
+ m.chdir(self.pwd)
+
+ def test_mercurial_update_tip(self):
+ m = get_mercurial_vcs_obj()
+ m.vcs_config = {
+ "repo": self.repodir,
+ "dest": self.wc,
+ "revision": self.revisions[-1],
+ "vcs_share_base": os.path.join(self.tmpdir, "share"),
+ }
+ rev = m.ensure_repo_and_revision()
+ self.assertEqual(rev, self.revisions[-1])
+ open(os.path.join(self.wc, "test.txt"), "w").write("hello!")
+
+ m = get_mercurial_vcs_obj()
+ m.vcs_config = {
+ "repo": self.repodir,
+ "dest": self.wc,
+ "vcs_share_base": os.path.join(self.tmpdir, "share"),
+ }
+ rev = m.ensure_repo_and_revision()
+ self.assertEqual(rev, self.revisions[0])
+ # Make sure our local file didn't go away
+ self.assertTrue(os.path.exists(os.path.join(self.wc, "test.txt")))
+
+ def test_mercurial_update_rev(self):
+ m = get_mercurial_vcs_obj()
+ m.vcs_config = {
+ "repo": self.repodir,
+ "dest": self.wc,
+ "revision": self.revisions[-1],
+ "vcs_share_base": os.path.join(self.tmpdir, "share"),
+ }
+ rev = m.ensure_repo_and_revision()
+ self.assertEqual(rev, self.revisions[-1])
+ open(os.path.join(self.wc, "test.txt"), "w").write("hello!")
+
+ m = get_mercurial_vcs_obj()
+ m.vcs_config = {
+ "repo": self.repodir,
+ "dest": self.wc,
+ "revision": self.revisions[0],
+ "vcs_share_base": os.path.join(self.tmpdir, "share"),
+ }
+ rev = m.ensure_repo_and_revision()
+ self.assertEqual(rev, self.revisions[0])
+ # Make sure our local file didn't go away
+ self.assertTrue(os.path.exists(os.path.join(self.wc, "test.txt")))
+
+ def test_make_hg_url(self):
+ # construct an hg url for a specific revision and filename, then check it against the expected raw-file url
+ file_url = mercurial.make_hg_url(
+ "hg.mozilla.org",
+ "//build/tools/",
+ revision="FIREFOX_3_6_12_RELEASE",
+ filename="/lib/python/util/hg.py",
+ protocol="https",
+ )
+ expected_url = (
+ "https://hg.mozilla.org/build/tools/raw-file/"
+ "FIREFOX_3_6_12_RELEASE/lib/python/util/hg.py"
+ )
+ self.assertEqual(file_url, expected_url)
+
+ def test_make_hg_url_no_filename(self):
+ file_url = mercurial.make_hg_url(
+ "hg.mozilla.org",
+ "/build/tools",
+ revision="default",
+ protocol="https",
+ )
+ expected_url = "https://hg.mozilla.org/build/tools/rev/default"
+ self.assertEqual(file_url, expected_url)
+
+ def test_make_hg_url_no_revision_no_filename(self):
+ repo_url = mercurial.make_hg_url(
+ "hg.mozilla.org",
+ "/build/tools",
+ protocol="https",
+ )
+ expected_url = "https://hg.mozilla.org/build/tools"
+ self.assertEqual(repo_url, expected_url)
+
+ def test_make_hg_url_different_protocol(self):
+ repo_url = mercurial.make_hg_url(
+ "hg.mozilla.org",
+ "/build/tools",
+ protocol="ssh",
+ )
+ expected_url = "ssh://hg.mozilla.org/build/tools"
+ self.assertEqual(repo_url, expected_url)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/testing/mozharness/test/test_l10n_locales.py b/testing/mozharness/test/test_l10n_locales.py
new file mode 100644
index 0000000000..678ab98004
--- /dev/null
+++ b/testing/mozharness/test/test_l10n_locales.py
@@ -0,0 +1,118 @@
+import os
+import shutil
+import unittest
+from unittest import mock
+
+import mozharness.base.script as script
+import mozharness.mozilla.l10n.locales as locales
+
+ALL_LOCALES = ["ar", "be", "de", "es-ES"]
+
+MH_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+
+def cleanup():
+ if os.path.exists("test_logs"):
+ shutil.rmtree("test_logs")
+
+
+class LocalesTest(locales.LocalesMixin, script.BaseScript):
+ def __init__(self, **kwargs):
+ if "config" not in kwargs:
+ kwargs["config"] = {"log_type": "simple", "log_level": "error"}
+ if "initial_config_file" not in kwargs:
+ kwargs["initial_config_file"] = "test/test.json"
+ super(LocalesTest, self).__init__(**kwargs)
+ self.config = {}
+ self.log_obj = None
+
+
+@mock.patch.dict("os.environ", GECKO_PATH="gecko_src")
+class TestLocalesMixin(unittest.TestCase):
+ BASE_ABS_DIRS = {
+ "abs_log_dir",
+ "abs_work_dir",
+ "base_work_dir",
+ "abs_src_dir",
+ "abs_locales_src_dir",
+ "abs_l10n_dir",
+ "abs_obj_dir",
+ "abs_locales_dir",
+ }
+
+ def setUp(self):
+ cleanup()
+
+ def tearDown(self):
+ cleanup()
+
+ def test_query_locales_locales(self):
+ l = LocalesTest()
+ l.locales = ["a", "b", "c"]
+ self.assertEqual(l.locales, l.query_locales())
+
+ def test_query_locales_ignore_locales(self):
+ l = LocalesTest()
+ l.config["locales"] = ["a", "b", "c"]
+ l.config["ignore_locales"] = ["a", "c"]
+ self.assertEqual(["b"], l.query_locales())
+
+ def test_query_locales_config(self):
+ l = LocalesTest()
+ l.config["locales"] = ["a", "b", "c"]
+ self.assertEqual(l.config["locales"], l.query_locales())
+
+ def test_query_locales_json(self):
+ l = LocalesTest()
+ l.config["locales_file"] = os.path.join(
+ MH_DIR, "test/helper_files/locales.json"
+ )
+ l.config["base_work_dir"] = "."
+ l.config["work_dir"] = "."
+ l.config["locales_dir"] = "locales_dir"
+ l.config["objdir"] = "objdir"
+ locales = l.query_locales()
+ locales.sort()
+ self.assertEqual(ALL_LOCALES, locales)
+
+ # Commented out until we can hide the FATAL output:
+ # def test_query_locales_no_file(self):
+ # l = LocalesTest()
+ # l.config['base_work_dir'] = '.'
+ # l.config['work_dir'] = '.'
+ # try:
+ # l.query_locales()
+ # except SystemExit:
+ # pass # Good
+ # else:
+ # self.assertTrue(False, "query_locales with no file doesn't fatal()!")
+
+ def test_parse_locales_file(self):
+ l = LocalesTest()
+ self.assertEqual(
+ ALL_LOCALES,
+ l.parse_locales_file(os.path.join(MH_DIR, "test/helper_files/locales.txt")),
+ )
+
+ def _get_query_abs_dirs_obj(self):
+ l = LocalesTest()
+ l.config["base_work_dir"] = "base_work_dir"
+ l.config["work_dir"] = "work_dir"
+ l.config["locales_dir"] = "locales_dir"
+ l.config["objdir"] = "objdir"
+ return l
+
+ def test_query_abs_dirs_base(self):
+ l = self._get_query_abs_dirs_obj()
+ dirs = set(l.query_abs_dirs().keys())
+ self.assertEqual(dirs, self.BASE_ABS_DIRS)
+
+ def test_query_abs_dirs_base2(self):
+ l = self._get_query_abs_dirs_obj()
+ l.query_abs_dirs().keys()
+ dirs = set(l.query_abs_dirs().keys())
+ self.assertEqual(dirs, self.BASE_ABS_DIRS)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/testing/mozharness/test/test_mozilla_automation.py b/testing/mozharness/test/test_mozilla_automation.py
new file mode 100644
index 0000000000..e76fec0249
--- /dev/null
+++ b/testing/mozharness/test/test_mozilla_automation.py
@@ -0,0 +1,45 @@
+import gc
+import unittest
+
+import mozharness.base.log as log
+import mozharness.base.script as script
+from mozharness.base.log import ERROR
+from mozharness.mozilla.automation import AutomationMixin
+
+
+class CleanupObj(script.ScriptMixin, log.LogMixin):
+ def __init__(self):
+ super(CleanupObj, self).__init__()
+ self.log_obj = None
+ self.config = {"log_level": ERROR}
+
+
+def cleanup():
+ gc.collect()
+ c = CleanupObj()
+ for f in ("test_logs", "test_dir", "tmpfile_stdout", "tmpfile_stderr"):
+ c.rmtree(f)
+
+
+class AutomationScript(AutomationMixin, script.BaseScript):
+ def __init__(self, **kwargs):
+ super(AutomationScript, self).__init__(**kwargs)
+
+
+# TestAutomationStatus {{{1
+class TestAutomationStatus(unittest.TestCase):
+ # TODO: add a log watcher helper function, here and in test_log.
+ def setUp(self):
+ cleanup()
+ self.s = None
+
+ def tearDown(self):
+ # Close the logfile handles, or windows can't remove the logs
+ if hasattr(self, "s") and isinstance(self.s, object):
+ del self.s
+ cleanup()
+
+
+# main {{{1
+if __name__ == "__main__":
+ unittest.main()
diff --git a/testing/mozharness/test/test_mozilla_building_buildbase.py b/testing/mozharness/test/test_mozilla_building_buildbase.py
new file mode 100644
index 0000000000..483196d094
--- /dev/null
+++ b/testing/mozharness/test/test_mozilla_building_buildbase.py
@@ -0,0 +1,146 @@
+import os
+import unittest
+
+from mozharness.base.log import LogMixin
+from mozharness.base.script import ScriptMixin
+from mozharness.mozilla.building.buildbase import MozconfigPathError, get_mozconfig_path
+
+
+class FakeLogger(object):
+ def log_message(self, *args, **kwargs):
+ pass
+
+
+class FakeScriptMixin(LogMixin, ScriptMixin, object):
+ def __init__(self):
+ self.script_obj = self
+ self.log_obj = FakeLogger()
+
+
+class TestMozconfigPath(unittest.TestCase):
+ """
+ Tests for :func:`get_mozconfig_path`.
+ """
+
+ def test_path(self):
+ """
+ Passing just ``src_mozconfig`` gives that file in ``abs_src_dir``.
+ """
+ script = FakeScriptMixin()
+
+ abs_src_path = get_mozconfig_path(
+ script,
+ config={"src_mozconfig": "path/to/mozconfig"},
+ dirs={"abs_src_dir": "/src"},
+ )
+ self.assertEqual(abs_src_path, "/src/path/to/mozconfig")
+
+ def test_composite(self):
+ """
+ Passing ``app_name``, ``mozconfig_platform``, and ``mozconfig_variant``
+ find the file in the ``config/mozconfigs`` subdirectory of that app
+ directory.
+ """
+ script = FakeScriptMixin()
+
+ config = {
+ "app_name": "the-app",
+ "mozconfig_variant": "variant",
+ "mozconfig_platform": "platform9000",
+ }
+ abs_src_path = get_mozconfig_path(
+ script,
+ config=config,
+ dirs={"abs_src_dir": "/src"},
+ )
+ self.assertEqual(
+ abs_src_path,
+ "/src/the-app/config/mozconfigs/platform9000/variant",
+ )
+
+ def test_manifest(self):
+ """
+ Passing just ``src_mozconfig_manifest`` looks in that file in
+ ``abs_work_dir``, and finds the mozconfig file specified there in
+ ``abs_src_dir``.
+ """
+ script = FakeScriptMixin()
+
+ test_dir = os.path.dirname(__file__)
+ config = {"src_mozconfig_manifest": "helper_files/mozconfig_manifest.json"}
+ abs_src_path = get_mozconfig_path(
+ script,
+ config=config,
+ dirs={
+ "abs_src_dir": "/src",
+ "abs_work_dir": test_dir,
+ },
+ )
+ self.assertEqual(abs_src_path, "/src/path/to/mozconfig")
+
+ def test_errors(self):
+ script = FakeScriptMixin()
+
+ configs = [
+ # Not specifying any parts of a mozconfig path
+ {},
+ # Specifying both src_mozconfig and src_mozconfig_manifest
+ {"src_mozconfig": "path", "src_mozconfig_manifest": "path"},
+ # Specifying src_mozconfig with some or all of a composite
+ # mozconfig path
+ {
+ "src_mozconfig": "path",
+ "app_name": "app",
+ "mozconfig_platform": "platform",
+ "mozconfig_variant": "variant",
+ },
+ {
+ "src_mozconfig": "path",
+ "mozconfig_platform": "platform",
+ "mozconfig_variant": "variant",
+ },
+ {
+ "src_mozconfig": "path",
+ "app_name": "app",
+ "mozconfig_variant": "variant",
+ },
+ {
+ "src_mozconfig": "path",
+ "app_name": "app",
+ "mozconfig_platform": "platform",
+ },
+ # Specifying src_mozconfig_manifest with some or all of a composite
+ # mozconfig path
+ {
+ "src_mozconfig_manifest": "path",
+ "app_name": "app",
+ "mozconfig_platform": "platform",
+ "mozconfig_variant": "variant",
+ },
+ {
+ "src_mozconfig_manifest": "path",
+ "mozconfig_platform": "platform",
+ "mozconfig_variant": "variant",
+ },
+ {
+ "src_mozconfig_manifest": "path",
+ "app_name": "app",
+ "mozconfig_variant": "variant",
+ },
+ {
+ "src_mozconfig_manifest": "path",
+ "app_name": "app",
+ "mozconfig_platform": "platform",
+ },
+ # Specifying only some parts of a composite mozconfig path
+ {"mozconfig_platform": "platform", "mozconfig_variant": "variant"},
+ {"app_name": "app", "mozconfig_variant": "variant"},
+ {"app_name": "app", "mozconfig_platform": "platform"},
+ {"app_name": "app"},
+ {"mozconfig_variant": "variant"},
+ {"mozconfig_platform": "platform"},
+ ]
+
+ for config in configs:
+ with self.assertRaises(MozconfigPathError):
+ get_mozconfig_path(script, config=config, dirs={})
diff --git a/testing/mozharness/test/test_mozilla_merkle.py b/testing/mozharness/test/test_mozilla_merkle.py
new file mode 100644
index 0000000000..226499142f
--- /dev/null
+++ b/testing/mozharness/test/test_mozilla_merkle.py
@@ -0,0 +1,134 @@
+import codecs
+import hashlib
+import random
+import unittest
+
+from mozharness.mozilla.merkle import InclusionProof, MerkleTree
+
+decode_hex = codecs.getdecoder("hex_codec")
+encode_hex = codecs.getencoder("hex_codec")
+
+# Pre-computed tree on 7 inputs
+#
+# ______F_____
+# / \
+# __D__ _E_
+# / \ / \
+# A B C |
+# / \ / \ / \ |
+# 0 1 2 3 4 5 6
+hash_fn = hashlib.sha256
+
+data = [
+ decode_hex("fbc459361fc111024c6d1fd83d23a9ff")[0],
+ decode_hex("ae3a44925afec860451cd8658b3cadde")[0],
+ decode_hex("418903fe6ef29fc8cab93d778a7b018b")[0],
+ decode_hex("3d1c53c00b2e137af8c4c23a06388c6b")[0],
+ decode_hex("e656ebd8e2758bc72599e5896be357be")[0],
+ decode_hex("81aae91cf90be172eedd1c75c349bf9e")[0],
+ decode_hex("00c262edf8b0bc345aca769e8733e25e")[0],
+]
+
+leaves = [
+ decode_hex("5cb551f87797381a24a5359a986e2cef25b1f2113b387197fe48e8babc9ad5c7")[0],
+ decode_hex("9899dc0be00306bda2a8e69cec32525ca6244f132479bcf840d8c1bc8bdfbff2")[0],
+ decode_hex("fdd27d0393e32637b474efb9b3efad29568c3ec9b091fdda40fd57ec9196f06d")[0],
+ decode_hex("c87292a6c8528c2a0679b6c1eefb47e4dbac7840d23645d5b7cb47cf1a8d365f")[0],
+ decode_hex("2ff3bdac9bec3580b82da8a357746f15919414d9cbe517e2dd96910c9814c30c")[0],
+ decode_hex("883e318240eccc0e2effafebdb0fd4fd26d0996da1b01439566cb9babef8725f")[0],
+ decode_hex("bb13dfb7b202a95f241ea1715c8549dc048d9936ec747028002f7c795de72fcf")[0],
+]
+
+nodeA = decode_hex("06447a7baa079cb0b4b6119d0f575bec508915403fdc30923eba982b63759805")[
+ 0
+]
+nodeB = decode_hex("3db98027c655ead4fe897bef3a4b361839a337941a9e624b475580c9d4e882ee")[
+ 0
+]
+nodeC = decode_hex("17524f8b0169b2745c67846925d55449ae80a8022ef8189dcf4cbb0ec7fcc470")[
+ 0
+]
+nodeD = decode_hex("380d0dc6fd7d4f37859a12dbfc7171b3cce29ab0688c6cffd2b15f3e0b21af49")[
+ 0
+]
+nodeE = decode_hex("3a9c2886a5179a6e1948876034f99d52a8f393f47a09887adee6d1b4a5c5fbd6")[
+ 0
+]
+nodeF = decode_hex("d1a0d3947db4ae8305f2ac32985957e02659b2ea3c10da52a48d2526e9af3bbc")[
+ 0
+]
+
+proofs = [
+ [leaves[1], nodeB, nodeE],
+ [leaves[0], nodeB, nodeE],
+ [leaves[3], nodeA, nodeE],
+ [leaves[2], nodeA, nodeE],
+ [leaves[5], leaves[6], nodeD],
+ [leaves[4], leaves[6], nodeD],
+ [nodeC, nodeD],
+]
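+# Each inclusion proof lists the sibling hashes on the path from a leaf
+# to the root, bottom-up: leaf 0's siblings are leaf 1, then node B, then
+# node E. Leaf 6 has no partner leaf, so its proof starts with node C
+# (its sibling under E), followed by node D.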
+
+known_proof5 = decode_hex(
+ "020000"
+ + "0000000000000007"
+ + "0000000000000005"
+ + "0063"
+ + "20"
+ + encode_hex(leaves[4])[0].decode()
+ + "20"
+ + encode_hex(leaves[6])[0].decode()
+ + "20"
+ + encode_hex(nodeD)[0].decode()
+)[0]
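+# Field by field, the known serialization appears to follow the
+# RFC 6962-bis InclusionProofDataV2 layout: "020000" is a length-prefixed
+# two-byte log ID of zeros, then tree_size (7) and leaf_index (5) as
+# 64-bit integers, then 0x0063 (= 3 * 33) bytes of inclusion path made of
+# length-prefixed ("20", i.e. 32-byte) hashes.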
+
+
+class TestMerkleTree(unittest.TestCase):
+ def testPreComputed(self):
+ tree = MerkleTree(hash_fn, data)
+ head = tree.head()
+ self.assertEqual(head, nodeF)
+
+ for i in range(len(data)):
+ proof = tree.inclusion_proof(i)
+
+ self.assertTrue(proof.verify(hash_fn, data[i], i, len(data), head))
+ self.assertEqual(proof.leaf_index, i)
+ self.assertEqual(proof.tree_size, tree.n)
+ self.assertEqual(proof.path_elements, proofs[i])
+
+ def testInclusionProofEncodeDecode(self):
+ tree = MerkleTree(hash_fn, data)
+
+ # Inclusion proof encode/decode round trip test
+ proof5 = tree.inclusion_proof(5)
+ serialized5 = proof5.to_rfc6962_bis()
+ deserialized5 = InclusionProof.from_rfc6962_bis(serialized5)
+ reserialized5 = deserialized5.to_rfc6962_bis()
+ self.assertEqual(serialized5, reserialized5)
+
+ # Inclusion proof encode known answer test
+ serialized5 = proof5.to_rfc6962_bis()
+ self.assertEqual(serialized5, known_proof5)
+
+ # Inclusion proof decode known answer test
+ known_deserialized5 = InclusionProof.from_rfc6962_bis(known_proof5)
+ self.assertEqual(proof5.leaf_index, known_deserialized5.leaf_index)
+ self.assertEqual(proof5.tree_size, known_deserialized5.tree_size)
+ self.assertEqual(proof5.path_elements, known_deserialized5.path_elements)
+
+ def testLargeTree(self):
+ TEST_SIZE = 5000
+ ELEM_SIZE_BYTES = 16
+ data = [
+ bytearray(random.getrandbits(8) for _ in range(ELEM_SIZE_BYTES))
+ for _ in range(TEST_SIZE)
+ ]
+ tree = MerkleTree(hash_fn, data)
+ head = tree.head()
+
+ for i in range(len(data)):
+ proof = tree.inclusion_proof(i)
+
+ self.assertTrue(proof.verify(hash_fn, data[i], i, len(data), head))
+ self.assertEqual(proof.leaf_index, i)
+ self.assertEqual(proof.tree_size, tree.n)
diff --git a/testing/mozharness/test/test_mozilla_structured.py b/testing/mozharness/test/test_mozilla_structured.py
new file mode 100644
index 0000000000..8bc30d5f8c
--- /dev/null
+++ b/testing/mozharness/test/test_mozilla_structured.py
@@ -0,0 +1,68 @@
+import unittest
+
+from mozharness.base.log import INFO, WARNING
+from mozharness.mozilla.automation import TBPL_SUCCESS, TBPL_WARNING
+from mozharness.mozilla.mozbase import MozbaseMixin
+from mozharness.mozilla.structuredlog import StructuredOutputParser
+from mozlog.handlers.statushandler import RunSummary
+
+success_summary = RunSummary(
+ unexpected_statuses={},
+ expected_statuses={"PASS": 3, "OK": 1, "FAIL": 1},
+ known_intermittent_statuses={"FAIL": 1},
+ log_level_counts={"info": 5},
+ action_counts={"test_status": 4, "test_end": 1, "suite_end": 1},
+)
+
+failure_summary = RunSummary(
+ unexpected_statuses={"FAIL": 2},
+ expected_statuses={"PASS": 2, "OK": 1},
+ known_intermittent_statuses={},
+ log_level_counts={"warning": 2, "info": 3},
+ action_counts={"test_status": 3, "test_end": 2, "suite_end": 1},
+)
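+
+# The success fixture's single FAIL is counted as a known intermittent,
+# so evaluate_parser should still report TBPL_SUCCESS/INFO, while the two
+# unexpected FAILs in the failure fixture should downgrade the result to
+# TBPL_WARNING/WARNING (see the assertions below).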
+
+
+class TestParser(MozbaseMixin, StructuredOutputParser):
+ def __init__(self, *args, **kwargs):
+ super(TestParser, self).__init__(*args, **kwargs)
+ self.config = {}
+
+
+class TestStructuredOutputParser(unittest.TestCase):
+ def setUp(self):
+ self.parser = TestParser()
+
+ def test_evaluate_parser_success(self):
+ self.parser.handler.expected_statuses = {"PASS": 3, "OK": 1, "FAIL": 1}
+ self.parser.handler.log_level_counts = {"info": 5}
+ self.parser.handler.action_counts = {
+ "test_status": 4,
+ "test_end": 1,
+ "suite_end": 1,
+ }
+ self.parser.handler.known_intermittent_statuses = {"FAIL": 1}
+ result = self.parser.evaluate_parser(
+ return_code=TBPL_SUCCESS, success_codes=[TBPL_SUCCESS]
+ )
+ tbpl_status, worst_log_level, joined_summary = result
+ self.assertEqual(tbpl_status, TBPL_SUCCESS)
+ self.assertEqual(worst_log_level, INFO)
+ self.assertEqual(joined_summary, success_summary)
+
+ def test_evaluate_parser_failure(self):
+ self.parser.handler.unexpected_statuses = {"FAIL": 2}
+ self.parser.handler.expected_statuses = {"PASS": 2, "OK": 1}
+ self.parser.handler.log_level_counts = {"warning": 2, "info": 3}
+ self.parser.handler.action_counts = {
+ "test_status": 3,
+ "test_end": 2,
+ "suite_end": 1,
+ }
+ result = self.parser.evaluate_parser(
+ return_code=TBPL_SUCCESS, success_codes=[TBPL_SUCCESS]
+ )
+ tbpl_status, worst_log_level, joined_summary = result
+ self.assertEqual(tbpl_status, TBPL_WARNING)
+ self.assertEqual(worst_log_level, WARNING)
+ self.assertEqual(joined_summary, failure_summary)
diff --git a/testing/mozharness/tox.ini b/testing/mozharness/tox.ini
new file mode 100644
index 0000000000..32e5963f91
--- /dev/null
+++ b/testing/mozharness/tox.ini
@@ -0,0 +1,26 @@
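+# Tox configuration for the mozharness unit tests. Running `tox` from
+# testing/mozharness (equivalently `tox -e py39-hg5.2`, the only listed
+# env) installs the [base] deps plus Mercurial 5.2.1 and runs nosetests
+# under coverage, per the [testenv] commands below.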
+[tox]
+envlist = py39-hg5.2
+
+[base]
+deps =
+ coverage
+ distro
+ nose
+ rednose
+ PyYAML==6.0
+ {toxinidir}/../mozbase/mozlog
+mozbase = {toxinidir}/../mozbase/
+
+
+[testenv]
+setenv =
+ HGRCPATH = {toxinidir}/test/hgrc
+ PYTHONPATH = $PYTHONPATH:{[base]mozbase}/manifestparser:{[base]mozbase}/mozfile:{[base]mozbase}/mozinfo:{[base]mozbase}/mozprocess
+
+commands =
+ coverage run --source configs,mozharness,scripts --branch {envbindir}/nosetests -v --with-xunit --rednose --force-color {posargs}
+
+[testenv:py39-hg5.2]
+deps =
+ {[base]deps}
+ mercurial==5.2.1
diff --git a/testing/mozharness/unit.sh b/testing/mozharness/unit.sh
new file mode 100755
index 0000000000..1b96f495f2
--- /dev/null
+++ b/testing/mozharness/unit.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+###########################################################################
+# This requires coverage and nosetests:
+#
+# pip install -r requirements.txt
+#
+# test_base_vcs_mercurial.py requires hg >= 1.6.0 with mq, rebase, share
+# extensions to fully test.
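+#
+# Typical usage, from testing/mozharness: ./unit.sh
+# On non-Windows hosts this leaves an HTML coverage report in ./coverage,
+# rotating any previous report out of the way.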
+###########################################################################
+
+COVERAGE_ARGS="--omit='/usr/*,/opt/*'"
+OS_TYPE='linux'
+uname -v | grep -q Darwin
+if [ $? -eq 0 ] ; then
+ OS_TYPE='osx'
+ COVERAGE_ARGS="--omit='/Library/*,/usr/*,/opt/*'"
+fi
+uname -s | egrep -q MINGW32 # Cygwin will presumably remain 'linux' in this case
+if [ $? -eq 0 ] ; then
+ OS_TYPE='windows'
+fi
+NOSETESTS=`env which nosetests`
+
+echo "### Finding mozharness/ .py files..."
+files=`find mozharness -name [a-z]\*.py`
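+# (`file` can misreport some .py files as "Assembler source" on MinGW;
+# presumably that is why they are filtered out on Windows below.)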
+if [ $OS_TYPE == 'windows' ] ; then
+ MOZHARNESS_PY_FILES=""
+ for f in $files; do
+ file $f | grep -q "Assembler source"
+ if [ $? -ne 0 ] ; then
+ MOZHARNESS_PY_FILES="$MOZHARNESS_PY_FILES $f"
+ fi
+ done
+else
+ MOZHARNESS_PY_FILES=$files
+fi
+echo "### Finding scripts/ .py files..."
+files=`find scripts -name [a-z]\*.py`
+if [ $OS_TYPE == 'windows' ] ; then
+ SCRIPTS_PY_FILES=""
+ for f in $files; do
+ file $f | grep -q "Assembler source"
+ if [ $? -ne 0 ] ; then
+ SCRIPTS_PY_FILES="$SCRIPTS_PY_FILES $f"
+ fi
+ done
+else
+ SCRIPTS_PY_FILES=$files
+fi
+export PYTHONPATH=`env pwd`:$PYTHONPATH
+
+echo "### Running pyflakes"
+pyflakes $MOZHARNESS_PY_FILES $SCRIPTS_PY_FILES | grep -v "local variable 'url' is assigned to" | grep -v "redefinition of unused 'json'"
+
+echo "### Running pylint"
+pylint -E -e F -f parseable $MOZHARNESS_PY_FILES $SCRIPTS_PY_FILES 2>&1 | egrep -v '(No config file found, using default configuration|Instance of .* has no .* member|Unable to import .devicemanager|Undefined variable .DMError|Module .hashlib. has no .sha512. member)'
+
+rm -rf build logs
+if [ $OS_TYPE != 'windows' ] ; then
+ echo "### Testing non-networked unit tests"
+ coverage run -a --branch $COVERAGE_ARGS $NOSETESTS test/test_*.py
+ echo "### Running *.py [--list-actions]"
+ for filename in $MOZHARNESS_PY_FILES; do
+ coverage run -a --branch $COVERAGE_ARGS $filename
+ done
+ for filename in $SCRIPTS_PY_FILES ; do
+ coverage run -a --branch $COVERAGE_ARGS $filename --list-actions > /dev/null
+ done
+ echo "### Running scripts/configtest.py --log-level warning"
+ coverage run -a --branch $COVERAGE_ARGS scripts/configtest.py --log-level warning
+
+ echo "### Creating coverage html"
+ coverage html $COVERAGE_ARGS -d coverage.new
+ if [ -e coverage ] ; then
+ mv coverage coverage.old
+ mv coverage.new coverage
+ rm -rf coverage.old
+ else
+ mv coverage.new coverage
+ fi
+else
+ echo "### Running nosetests..."
+ nosetests test/
+fi
+rm -rf build logs