author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 17:32:43 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 17:32:43 +0000
commit     6bf0a5cb5034a7e684dcc3500e841785237ce2dd (patch)
tree       a68f146d7fa01f0134297619fbe7e33db084e0aa /taskcluster/scripts
parent     Initial commit. (diff)
Adding upstream version 1:115.7.0. (upstream/1%115.7.0, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'taskcluster/scripts')
-rwxr-xr-x  taskcluster/scripts/builder/build-haz-linux.sh | 184
-rwxr-xr-x  taskcluster/scripts/builder/build-l10n.sh | 90
-rwxr-xr-x  taskcluster/scripts/builder/build-linux.sh | 125
-rwxr-xr-x  taskcluster/scripts/builder/build-sm-package.sh | 35
-rwxr-xr-x  taskcluster/scripts/builder/build-sm.sh | 64
-rwxr-xr-x  taskcluster/scripts/builder/repackage.sh | 95
-rwxr-xr-x  taskcluster/scripts/copy.sh | 9
-rw-r--r--  taskcluster/scripts/misc/afl-nyx.patch | 847
-rwxr-xr-x  taskcluster/scripts/misc/android-gradle-dependencies-lite.sh | 21
-rwxr-xr-x  taskcluster/scripts/misc/android-gradle-dependencies.sh | 21
-rwxr-xr-x  taskcluster/scripts/misc/android-gradle-dependencies/after.sh | 32
-rwxr-xr-x  taskcluster/scripts/misc/android-gradle-dependencies/before.sh | 30
-rw-r--r--  taskcluster/scripts/misc/android-gradle-dependencies/nexus.xml | 413
-rw-r--r--  taskcluster/scripts/misc/are-we-esmified-yet.py | 193
-rwxr-xr-x  taskcluster/scripts/misc/browsertime.sh | 19
-rwxr-xr-x  taskcluster/scripts/misc/build-afl.sh | 24
-rwxr-xr-x  taskcluster/scripts/misc/build-binutils-linux.sh | 14
-rwxr-xr-x  taskcluster/scripts/misc/build-breakpad-injector.sh | 30
-rwxr-xr-x  taskcluster/scripts/misc/build-cctools-port.sh | 100
-rwxr-xr-x  taskcluster/scripts/misc/build-clang-mingw.sh | 254
-rwxr-xr-x  taskcluster/scripts/misc/build-clang-tidy-external.sh | 11
-rwxr-xr-x  taskcluster/scripts/misc/build-clang.sh | 72
-rwxr-xr-x  taskcluster/scripts/misc/build-compiler-rt-wasi.sh | 32
-rwxr-xr-x  taskcluster/scripts/misc/build-compiler-rt.sh | 39
-rwxr-xr-x  taskcluster/scripts/misc/build-cpython.sh | 70
-rwxr-xr-x  taskcluster/scripts/misc/build-custom-car-linux.sh | 65
-rw-r--r--  taskcluster/scripts/misc/build-custom-car-win64.sh | 102
-rwxr-xr-x  taskcluster/scripts/misc/build-custom-v8.sh | 47
-rwxr-xr-x  taskcluster/scripts/misc/build-dist-toolchains.sh | 12
-rwxr-xr-x  taskcluster/scripts/misc/build-dmg-hfsplus.sh | 36
-rwxr-xr-x  taskcluster/scripts/misc/build-gcc-linux.sh | 28
-rwxr-xr-x  taskcluster/scripts/misc/build-gcc-sixgill-plugin-linux.sh | 80
-rwxr-xr-x  taskcluster/scripts/misc/build-geckodriver.sh | 60
-rwxr-xr-x  taskcluster/scripts/misc/build-gn-common.sh | 36
-rwxr-xr-x  taskcluster/scripts/misc/build-gn-linux.sh | 13
-rwxr-xr-x  taskcluster/scripts/misc/build-gn-macosx.sh | 23
-rwxr-xr-x  taskcluster/scripts/misc/build-gn-win64.sh | 16
-rwxr-xr-x  taskcluster/scripts/misc/build-hfsplus-linux.sh | 14
-rwxr-xr-x  taskcluster/scripts/misc/build-libunwind.sh | 23
-rwxr-xr-x  taskcluster/scripts/misc/build-llvm-common.sh | 206
-rwxr-xr-x  taskcluster/scripts/misc/build-llvm-symbolizer.sh | 5
-rwxr-xr-x  taskcluster/scripts/misc/build-mar-tools.sh | 28
-rwxr-xr-x  taskcluster/scripts/misc/build-mingw-fxc2-x86.sh | 25
-rwxr-xr-x  taskcluster/scripts/misc/build-mingw32-nsis.sh | 70
-rwxr-xr-x  taskcluster/scripts/misc/build-mkbom-linux.sh | 17
-rwxr-xr-x  taskcluster/scripts/misc/build-mozmake.sh | 66
-rwxr-xr-x  taskcluster/scripts/misc/build-msix-packaging.sh | 33
-rwxr-xr-x  taskcluster/scripts/misc/build-nasm.sh | 63
-rwxr-xr-x  taskcluster/scripts/misc/build-nsis.sh | 32
-rwxr-xr-x  taskcluster/scripts/misc/build-pkgconf.sh | 44
-rwxr-xr-x  taskcluster/scripts/misc/build-resourcemonitor.sh | 40
-rwxr-xr-x  taskcluster/scripts/misc/build-rust-based-toolchain.sh | 66
-rwxr-xr-x  taskcluster/scripts/misc/build-sysroot-wasi.sh | 46
-rwxr-xr-x  taskcluster/scripts/misc/build-sysroot.sh | 127
-rwxr-xr-x  taskcluster/scripts/misc/build-upx.sh | 26
-rwxr-xr-x  taskcluster/scripts/misc/build-winchecksec.sh | 54
-rwxr-xr-x  taskcluster/scripts/misc/build-wine.sh | 29
-rwxr-xr-x  taskcluster/scripts/misc/build-xar-linux.sh | 18
-rw-r--r--  taskcluster/scripts/misc/fetch-chromium.py | 235
-rwxr-xr-x  taskcluster/scripts/misc/fetch-content | 881
-rwxr-xr-x  taskcluster/scripts/misc/get_vs.py | 111
-rw-r--r--  taskcluster/scripts/misc/mingw-composition.patch | 50
-rw-r--r--  taskcluster/scripts/misc/mingw-dispatchqueue.patch | 157
-rw-r--r--  taskcluster/scripts/misc/mingw-dwrite_3.patch | 87
-rw-r--r--  taskcluster/scripts/misc/mingw-enum.patch | 25
-rw-r--r--  taskcluster/scripts/misc/mingw-ts_sd.patch | 33
-rw-r--r--  taskcluster/scripts/misc/mingw-unknown.patch | 46
-rw-r--r--  taskcluster/scripts/misc/mingw-widl.patch | 35
-rw-r--r--  taskcluster/scripts/misc/moz.build | 8
-rwxr-xr-x  taskcluster/scripts/misc/osx-cross-linker | 8
-rwxr-xr-x  taskcluster/scripts/misc/pack-cpython.sh | 36
-rwxr-xr-x  taskcluster/scripts/misc/pack.sh | 24
-rwxr-xr-x  taskcluster/scripts/misc/private_local_toolchain.sh | 14
-rwxr-xr-x  taskcluster/scripts/misc/repack-android-avd-linux.sh | 30
-rwxr-xr-x  taskcluster/scripts/misc/repack-android-emulator-linux.sh | 21
-rwxr-xr-x  taskcluster/scripts/misc/repack-android-ndk-linux.sh | 17
-rwxr-xr-x  taskcluster/scripts/misc/repack-android-sdk-linux.sh | 15
-rwxr-xr-x  taskcluster/scripts/misc/repack-android-system-images-linux.sh | 17
-rwxr-xr-x  taskcluster/scripts/misc/repack-clang.sh | 52
-rwxr-xr-x  taskcluster/scripts/misc/repack-jdk-linux.sh | 17
-rwxr-xr-x  taskcluster/scripts/misc/repack-node.sh | 14
-rwxr-xr-x  taskcluster/scripts/misc/repack_rust.py | 647
-rwxr-xr-x  taskcluster/scripts/misc/run-profileserver-macos.sh | 20
-rwxr-xr-x  taskcluster/scripts/misc/run-profileserver.sh | 42
-rwxr-xr-x  taskcluster/scripts/misc/source-test-clang-setup.sh | 27
-rwxr-xr-x  taskcluster/scripts/misc/source-test-common.sh | 16
-rwxr-xr-x  taskcluster/scripts/misc/source-test-infer-setup.sh | 18
-rw-r--r--  taskcluster/scripts/misc/summarize-tgdiff.py | 54
-rw-r--r--  taskcluster/scripts/misc/tooltool-download.sh | 21
-rwxr-xr-x  taskcluster/scripts/misc/unify.sh | 42
-rw-r--r--  taskcluster/scripts/misc/unpack-sdk.py | 87
-rw-r--r--  taskcluster/scripts/misc/verify-devtools-bundle.py | 85
-rw-r--r--  taskcluster/scripts/misc/vs-cleanup.sh | 13
-rw-r--r--  taskcluster/scripts/misc/vs-setup.sh | 42
-rwxr-xr-x  taskcluster/scripts/misc/wr-cargotest-macos-build.sh | 24
-rwxr-xr-x  taskcluster/scripts/misc/wr-macos-cross-build-setup.sh | 60
-rwxr-xr-x  taskcluster/scripts/misc/wrench-android-build.sh | 26
-rwxr-xr-x  taskcluster/scripts/misc/wrench-deps-vendoring.sh | 30
-rwxr-xr-x  taskcluster/scripts/misc/wrench-macos-build.sh | 58
-rw-r--r--  taskcluster/scripts/misc/wrench-windows-tests.sh | 28
-rwxr-xr-x  taskcluster/scripts/misc/zstdpy | 79
-rwxr-xr-x  taskcluster/scripts/run-task | 1005
-rwxr-xr-x  taskcluster/scripts/tester/run-wizard | 176
-rwxr-xr-x  taskcluster/scripts/tester/test-linux.sh | 287
104 files changed, 9194 insertions, 0 deletions
diff --git a/taskcluster/scripts/builder/build-haz-linux.sh b/taskcluster/scripts/builder/build-haz-linux.sh
new file mode 100755
index 0000000000..b8253037bf
--- /dev/null
+++ b/taskcluster/scripts/builder/build-haz-linux.sh
@@ -0,0 +1,184 @@
+#!/bin/bash -ex
+
+function usage() {
+ echo "Usage: $0 [--project <js|browser>] <workspace-dir> flags..."
+ echo "flags are treated the same way as a commit message would be"
+ echo "(as in, they are scanned for directives just like a try: ... line)"
+}
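+
+# Example invocation (illustrative values only):
+#   build-haz-linux.sh --project browser /builds/worker/workspace --upload-xdbs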
+
+PROJECT=js
+WORKSPACE=
+while [[ $# -gt 0 ]]; do
+ if [[ "$1" == "-h" ]] || [[ "$1" == "--help" ]]; then
+ usage
+ exit 0
+ elif [[ "$1" == "--project" ]]; then
+ shift
+ PROJECT="$1"
+ shift
+ elif [[ "$1" == "--no-tooltool" ]]; then
+ shift
+ elif [[ -z "$WORKSPACE" ]]; then
+ WORKSPACE=$( cd "$1" && pwd )
+ shift
+ break
+ fi
+done
+
+function check_commit_msg () {
+ ( set +e;
+ if [[ -n "$AUTOMATION" ]]; then
+ hg --cwd "$GECKO_PATH" log -r. --template '{desc}\n' | grep -F -q -- "$1"
+ else
+ echo -- "$SCRIPT_FLAGS" | grep -F -q -- "$1"
+ fi
+ )
+}
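+# e.g. `check_commit_msg "--upload-xdbs"` succeeds when that flag was passed
+# on the command line (or, under automation, appears in the top commit
+# description).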
+
+SCRIPT_FLAGS=$*
+
+if check_commit_msg "--dep"; then
+ HAZ_DEP=1
+fi
+
+ANALYSIS_DIR="$WORKSPACE/haz-$PROJECT"
+
+# Ensure all the scripts in this dir are on the path....
+DIRNAME=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+PATH=$DIRNAME:$PATH
+
+# Use GECKO_BASE_REPOSITORY as a signal for whether we are running in automation.
+export AUTOMATION=${GECKO_BASE_REPOSITORY:+1}
+
+: "${GECKO_PATH:="$DIRNAME"/../../..}"
+
+if ! [ -d "$GECKO_PATH" ]; then
+ echo "GECKO_PATH must be set to a directory containing a gecko source checkout" >&2
+ exit 1
+fi
+
+# Directory to hold the compiled JS shell that will run the analysis.
+HAZARD_SHELL_OBJDIR=$WORKSPACE/obj-haz-shell
+
+export NO_MERCURIAL_SETUP_CHECK=1
+
+if [[ "$PROJECT" = "browser" ]]; then (
+ cd "$WORKSPACE"
+ set "$WORKSPACE"
+ # Mozbuild config:
+ export MOZBUILD_STATE_PATH=$WORKSPACE/mozbuild/
+ # Create .mozbuild so mach doesn't complain about this
+ mkdir -p "$MOZBUILD_STATE_PATH"
+) fi
+
+# Build the shell
+export HAZARD_SHELL_OBJDIR # This will be picked up by mozconfig.haz_shell.
+$GECKO_PATH/mach hazards build-shell
+
+# Run a self-test
+$GECKO_PATH/mach hazards self-test --shell-objdir="$HAZARD_SHELL_OBJDIR"
+
+# Artifacts folder is outside of the cache.
+mkdir -p "$HOME"/artifacts/ || true
+
+function grab_artifacts () {
+ local artifacts
+ artifacts="$HOME/artifacts"
+
+ [ -d "$ANALYSIS_DIR" ] && (
+ cd "$ANALYSIS_DIR"
+ ls -lah
+
+ # Do not error out if no files found
+ shopt -s nullglob
+ set +e
+ local important
+ important=(refs.txt unnecessary.txt hazards.txt gcFunctions.txt allFunctions.txt heapWriteHazards.txt rootingHazards.json hazards.html)
+
+ # Bundle up the less important but still useful intermediate outputs,
+ # just to cut down on the clutter in treeherder's Job Details pane.
+ tar -acvf "${artifacts}/hazardIntermediates.tar.xz" --exclude-from <(IFS=$'\n'; echo "${important[*]}") *.txt *.lst build_xgill.log
+
+ # Upload the important outputs individually, so that they will be
+ # visible in Job Details and accessible to automated jobs.
+ for f in "${important[@]}"; do
+ gzip -9 -c "$f" > "${artifacts}/$f.gz"
+ done
+
+ # Check whether the user requested .xdb file upload in the top commit comment
+ if check_commit_msg "--upload-xdbs"; then
+ HAZ_UPLOAD_XDBS=1
+ fi
+
+ if [ -n "$HAZ_UPLOAD_XDBS" ]; then
+ for f in *.xdb; do
+ xz -c "$f" > "${artifacts}/$f.xz"
+ done
+ fi
+ )
+}
+
+function check_hazards () {
+ (
+ set +e
+ NUM_HAZARDS=$(grep -c 'Function.*has unrooted.*live across GC call' "$1"/hazards.txt)
+ NUM_UNSAFE=$(grep -c '^Function.*takes unsafe address of unrooted' "$1"/refs.txt)
+ NUM_UNNECESSARY=$(grep -c '^Function.* has unnecessary root' "$1"/unnecessary.txt)
+ NUM_DROPPED=$(grep -c '^Dropped CFG' "$1"/build_xgill.log)
+ NUM_WRITE_HAZARDS=$(perl -lne 'print $1 if m!found (\d+)/\d+ allowed errors!' "$1"/heapWriteHazards.txt)
+ NUM_MISSING=$(grep -c '^Function.*expected hazard.*but none were found' "$1"/hazards.txt)
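+
+ # Illustrative shape of a line matched above (from hazards.txt):
+ #   Function 'Foo' has unrooted 'obj' of type 'JSObject*' live across GC call at dom/xyz.cpp:42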
+
+ set +x
+ echo "TinderboxPrint: rooting hazards<br/>$NUM_HAZARDS"
+ echo "TinderboxPrint: (unsafe references to unrooted GC pointers)<br/>$NUM_UNSAFE"
+ echo "TinderboxPrint: (unnecessary roots)<br/>$NUM_UNNECESSARY"
+ echo "TinderboxPrint: missing expected hazards<br/>$NUM_MISSING"
+ echo "TinderboxPrint: heap write hazards<br/>$NUM_WRITE_HAZARDS"
+
+ # Display errors in a way that will get picked up by the taskcluster scraper.
+ perl -lne 'print "TEST-UNEXPECTED-FAIL | hazards | $1 $2" if /^Function.* has (unrooted .*live across GC call).* (at .*)$/' "$1"/hazards.txt
+
+ exit_status=0
+
+ if [ $NUM_HAZARDS -gt 0 ]; then
+ echo "TEST-UNEXPECTED-FAIL | hazards | $NUM_HAZARDS rooting hazards detected" >&2
+ echo "TinderboxPrint: documentation<br/><a href='https://wiki.mozilla.org/Javascript:Hazard_Builds#Diagnosing_a_rooting_hazards_failure'>static rooting hazard analysis failures</a>, visit \"Inspect Task\" link for hazard details"
+ exit_status=1
+ fi
+
+ if [ $NUM_MISSING -gt 0 ]; then
+ echo "TEST-UNEXPECTED-FAIL | hazards | $NUM_MISSING expected hazards went undetected" >&2
+ echo "TinderboxPrint: documentation<br/><a href='https://wiki.mozilla.org/Javascript:Hazard_Builds#Diagnosing_a_rooting_hazards_failure'>static rooting hazard analysis failures</a>, visit \"Inspect Task\" link for hazard details"
+ exit_status=1
+ fi
+
+ NUM_ALLOWED_WRITE_HAZARDS=0
+ if [ $NUM_WRITE_HAZARDS -gt $NUM_ALLOWED_WRITE_HAZARDS ]; then
+ echo "TEST-UNEXPECTED-FAIL | heap-write-hazards | $NUM_WRITE_HAZARDS heap write hazards detected out of $NUM_ALLOWED_WRITE_HAZARDS allowed" >&2
+ echo "TinderboxPrint: documentation<br/><a href='https://wiki.mozilla.org/Javascript:Hazard_Builds#Diagnosing_a_heap_write_hazard_failure'>heap write hazard analysis failures</a>, visit \"Inspect Task\" link for hazard details"
+ exit_status=1
+ fi
+
+ if [ $NUM_DROPPED -gt 0 ]; then
+ echo "TEST-UNEXPECTED-FAIL | hazards | $NUM_DROPPED CFGs dropped" >&2
+ echo "TinderboxPrint: sixgill unable to handle constructs<br/>$NUM_DROPPED"
+ exit_status=1
+ fi
+
+ if [ $exit_status -ne 0 ]; then
+ exit $exit_status
+ fi
+ )
+}
+
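+# Collect artifacts even if one of the steps below fails.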
+trap grab_artifacts EXIT
+
+# Gather the information from the source tree by compiling it.
+$GECKO_PATH/mach hazards gather --project=$PROJECT --work-dir="$ANALYSIS_DIR"
+
+# Analyze the collected information.
+$GECKO_PATH/mach hazards analyze --project=$PROJECT --shell-objdir="$HAZARD_SHELL_OBJDIR" --work-dir="$ANALYSIS_DIR"
+
+check_hazards "$ANALYSIS_DIR"
+
+################################### script end ###################################
diff --git a/taskcluster/scripts/builder/build-l10n.sh b/taskcluster/scripts/builder/build-l10n.sh
new file mode 100755
index 0000000000..0a324c7479
--- /dev/null
+++ b/taskcluster/scripts/builder/build-l10n.sh
@@ -0,0 +1,90 @@
+#! /bin/bash -vex
+
+set -x -e
+
+echo "running as" $(id)
+
+####
+# Taskcluster friendly wrapper for performing fx desktop l10n repacks via mozharness.
+# Based on ./build-linux.sh
+####
+
+# Inputs, with defaults
+
+: MOZHARNESS_SCRIPT ${MOZHARNESS_SCRIPT}
+: MOZHARNESS_CONFIG ${MOZHARNESS_CONFIG}
+: MOZHARNESS_CONFIG_PATHS ${MOZHARNESS_CONFIG_PATHS}
+: MOZHARNESS_ACTIONS ${MOZHARNESS_ACTIONS}
+: MOZHARNESS_OPTIONS ${MOZHARNESS_OPTIONS}
+
+: TOOLTOOL_CACHE ${TOOLTOOL_CACHE:=/builds/worker/tooltool-cache}
+
+: MOZ_SCM_LEVEL ${MOZ_SCM_LEVEL:=1}
+
+: WORKSPACE ${WORKSPACE:=/builds/worker/workspace}
+: MOZ_OBJDIR ${MOZ_OBJDIR:=$WORKSPACE/obj-build}
+
+set -v
+
+fail() {
+ echo # make sure error message is on a new line
+ echo "[build-l10n.sh:error]" "${@}"
+ exit 1
+}
+
+export MOZ_CRASHREPORTER_NO_REPORT=1
+export TINDERBOX_OUTPUT=1
+
+# test required parameters are supplied
+if [[ -z ${MOZHARNESS_SCRIPT} ]]; then fail "MOZHARNESS_SCRIPT is not set"; fi
+if [[ -z "${MOZHARNESS_CONFIG}" && -z "${EXTRA_MOZHARNESS_CONFIG}" ]]; then fail "MOZHARNESS_CONFIG or EXTRA_MOZHARNESS_CONFIG is not set"; fi
+
+# set up mozharness configuration, via command line, env, etc.
+
+# $TOOLTOOL_CACHE bypasses mozharness completely and is read by tooltool_wrapper.sh to set the
+# cache. However, only some mozharness scripts use tooltool_wrapper.sh, so this may not be
+# entirely effective.
+export TOOLTOOL_CACHE
+
+export MOZ_OBJDIR
+
+config_path_cmds=""
+for path in ${MOZHARNESS_CONFIG_PATHS}; do
+ config_path_cmds="${config_path_cmds} --extra-config-path ${GECKO_PATH}/${path}"
+done
+
+# support multiple, space delimited, config files
+config_cmds=""
+for cfg in $MOZHARNESS_CONFIG; do
+ config_cmds="${config_cmds} --config ${cfg}"
+done
+
+# if MOZHARNESS_ACTIONS is given, only run those actions (completely overriding default_actions
+# in the mozharness configuration)
+if [ -n "$MOZHARNESS_ACTIONS" ]; then
+ actions=""
+ for action in $MOZHARNESS_ACTIONS; do
+ actions="$actions --$action"
+ done
+fi
+
+# if MOZHARNESS_OPTIONS is given, append them to mozharness command line run
+if [ -n "$MOZHARNESS_OPTIONS" ]; then
+ options=""
+ for option in $MOZHARNESS_OPTIONS; do
+ options="$options --$option"
+ done
+fi
+
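+# Putting it together: with MOZHARNESS_CONFIG="a.py b.py" and
+# MOZHARNESS_ACTIONS="clone build" (illustrative placeholders), the command
+# below expands to roughly:
+#   mach python -- testing/$MOZHARNESS_SCRIPT --config a.py --config b.py \
+#     --clone --build --log-level=debug --work-dir=$WORKSPACE
+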
+cd /builds/worker
+
+$GECKO_PATH/mach python -- \
+ $GECKO_PATH/testing/${MOZHARNESS_SCRIPT} \
+ ${config_path_cmds} \
+ ${config_cmds} \
+ $actions \
+ $options \
+ --log-level=debug \
+ --work-dir=$WORKSPACE
diff --git a/taskcluster/scripts/builder/build-linux.sh b/taskcluster/scripts/builder/build-linux.sh
new file mode 100755
index 0000000000..35c54788b4
--- /dev/null
+++ b/taskcluster/scripts/builder/build-linux.sh
@@ -0,0 +1,125 @@
+#! /bin/bash -vex
+
+set -x -e
+
+echo "running as" $(id)
+
+####
+# Taskcluster friendly wrapper for performing fx desktop builds via mozharness.
+####
+
+# Inputs, with defaults
+
+: MOZHARNESS_SCRIPT ${MOZHARNESS_SCRIPT}
+: MOZHARNESS_CONFIG ${MOZHARNESS_CONFIG}
+: MOZHARNESS_CONFIG_PATHS ${MOZHARNESS_CONFIG_PATHS}
+: MOZHARNESS_ACTIONS ${MOZHARNESS_ACTIONS}
+: MOZHARNESS_OPTIONS ${MOZHARNESS_OPTIONS}
+
+: TOOLTOOL_CACHE ${TOOLTOOL_CACHE:=/builds/worker/tooltool-cache}
+
+: MOZ_SCM_LEVEL ${MOZ_SCM_LEVEL:=1}
+
+: NEED_XVFB ${NEED_XVFB:=false}
+
+: MH_CUSTOM_BUILD_VARIANT_CFG ${MH_CUSTOM_BUILD_VARIANT_CFG}
+: MH_BRANCH ${MH_BRANCH:=mozilla-central}
+: MH_BUILD_POOL ${MH_BUILD_POOL:=staging}
+
+: WORKSPACE ${WORKSPACE:=/builds/worker/workspace}
+: MOZ_OBJDIR ${MOZ_OBJDIR:=$WORKSPACE/obj-build}
+
+set -v
+
+fail() {
+ echo # make sure error message is on a new line
+ echo "[build-linux.sh:error]" "${@}"
+ exit 1
+}
+
+export MOZ_CRASHREPORTER_NO_REPORT=1
+export TINDERBOX_OUTPUT=1
+
+# use "simple" package names so that they can be hard-coded in the task's
+# extras.locations
+export MOZ_SIMPLE_PACKAGE_NAME=target
+
+# test required parameters are supplied
+if [[ -z ${MOZHARNESS_SCRIPT} ]]; then fail "MOZHARNESS_SCRIPT is not set"; fi
+if [[ -z "${MOZHARNESS_CONFIG}" && -z "${EXTRA_MOZHARNESS_CONFIG}" ]]; then fail "MOZHARNESS_CONFIG or EXTRA_MOZHARNESS_CONFIG is not set"; fi
+
+# run Xvfb in the background, if necessary
+if $NEED_XVFB; then
+ . /builds/worker/scripts/xvfb.sh
+
+ cleanup() {
+ local rv=$?
+ cleanup_xvfb
+ exit $rv
+ }
+ trap cleanup EXIT INT
+
+ start_xvfb '1024x768x24' 2
+fi
+
+# set up mozharness configuration, via command line, env, etc.
+
+debug_flag=""
+if [ 0$DEBUG -ne 0 ]; then
+ debug_flag='--debug'
+fi
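+# (e.g. DEBUG=1 in the task environment turns into --debug on the mozharness
+# command line below)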
+
+custom_build_variant_cfg_flag=""
+if [ -n "${MH_CUSTOM_BUILD_VARIANT_CFG}" ]; then
+ custom_build_variant_cfg_flag="--custom-build-variant-cfg=${MH_CUSTOM_BUILD_VARIANT_CFG}"
+fi
+
+# $TOOLTOOL_CACHE bypasses mozharness completely and is read by tooltool_wrapper.sh to set the
+# cache. However, only some mozharness scripts use tooltool_wrapper.sh, so this may not be
+# entirely effective.
+export TOOLTOOL_CACHE
+
+export MOZ_OBJDIR
+
+config_path_cmds=""
+for path in ${MOZHARNESS_CONFIG_PATHS}; do
+ config_path_cmds="${config_path_cmds} --extra-config-path ${GECKO_PATH}/${path}"
+done
+
+# support multiple, space delimited, config files
+config_cmds=""
+for cfg in $MOZHARNESS_CONFIG; do
+ config_cmds="${config_cmds} --config ${cfg}"
+done
+
+# if MOZHARNESS_ACTIONS is given, only run those actions (completely overriding default_actions
+# in the mozharness configuration)
+if [ -n "$MOZHARNESS_ACTIONS" ]; then
+ actions=""
+ for action in $MOZHARNESS_ACTIONS; do
+ actions="$actions --$action"
+ done
+fi
+
+# if MOZHARNESS_OPTIONS is given, append them to mozharness command line run
+if [ -n "$MOZHARNESS_OPTIONS" ]; then
+ options=""
+ for option in $MOZHARNESS_OPTIONS; do
+ options="$options --$option"
+ done
+fi
+
+cd /builds/worker
+
+$GECKO_PATH/mach python -- \
+ $GECKO_PATH/testing/${MOZHARNESS_SCRIPT} \
+ ${config_path_cmds} \
+ ${config_cmds} \
+ $debug_flag \
+ $custom_build_variant_cfg_flag \
+ $actions \
+ $options \
+ --log-level=debug \
+ --work-dir=$WORKSPACE \
+ --branch=${MH_BRANCH} \
+ --build-pool=${MH_BUILD_POOL}
diff --git a/taskcluster/scripts/builder/build-sm-package.sh b/taskcluster/scripts/builder/build-sm-package.sh
new file mode 100755
index 0000000000..816256ea9c
--- /dev/null
+++ b/taskcluster/scripts/builder/build-sm-package.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+set -xe
+
+# Default variables values.
+: ${WORK:=$HOME/workspace}
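+
+# UPLOAD_DIR, GECKO_PATH and SPIDERMONKEY_VARIANT are expected from the task
+# environment; an illustrative local invocation:
+#   UPLOAD_DIR=/tmp/upload GECKO_PATH=~/gecko SPIDERMONKEY_VARIANT=plain \
+#     ./build-sm-package.sh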
+
+mkdir -p $UPLOAD_DIR
+
+# Package up the sources into the release tarball.
+AUTOMATION=1 DIST=$UPLOAD_DIR $GECKO_PATH/js/src/make-source-package.py
+
+# Extract the tarball into a new directory in the workspace.
+
+PACKAGE_DIR=$WORK/sm-package
+
+# Do not use -p option because the package directory should not exist.
+mkdir $PACKAGE_DIR
+pushd $PACKAGE_DIR
+
+tar -xvf $UPLOAD_DIR/mozjs-*.tar.*z*
+
+: ${PYTHON3:=python3}
+
+status=0
+(
+ # Build the freshly extracted, packaged SpiderMonkey.
+ cd ./mozjs-*
+ AUTOMATION=1 $PYTHON3 js/src/devtools/automation/autospider.py --skip-tests=checks $SPIDERMONKEY_VARIANT
+) || status=$?
+
+# Copy artifacts for upload by TaskCluster
+cp -rL ./mozjs-*/obj-spider/dist/bin/{js,jsapi-tests,js-gdb.py,libmozjs*} $UPLOAD_DIR
+
+exit $status
diff --git a/taskcluster/scripts/builder/build-sm.sh b/taskcluster/scripts/builder/build-sm.sh
new file mode 100755
index 0000000000..7ee31bbcf4
--- /dev/null
+++ b/taskcluster/scripts/builder/build-sm.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+set -x
+
+# Default variables values.
+: ${SPIDERMONKEY_VARIANT:=plain}
+: ${WORK:=$HOME/workspace}
+: ${PYTHON3:=python3}
+
+# Ensure upload dir exists
+mkdir -p $UPLOAD_DIR
+
+# Run the script
+export MOZ_UPLOAD_DIR="$(cd "$UPLOAD_DIR"; pwd)"
+export OBJDIR=$WORK/obj-spider
+AUTOMATION=1 $PYTHON3 $GECKO_PATH/js/src/devtools/automation/autospider.py ${SPIDERMONKEY_PLATFORM:+--platform=$SPIDERMONKEY_PLATFORM} $SPIDERMONKEY_VARIANT
+BUILD_STATUS=$?
+
+# Copy artifacts for upload by TaskCluster.
+upload=${MOZ_JS_UPLOAD_BINARIES_DEFAULT-1}
+# User-provided override switch.
+if [ -n "$MOZ_JS_UPLOAD_BINARIES" ]; then
+ upload=1
+fi
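+# (${MOZ_JS_UPLOAD_BINARIES_DEFAULT-1} deliberately uses `-` rather than `:-`,
+# so a task can set the variable to an empty string or 0 to disable uploads.)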
+if [ "$upload" = "1" ]; then
+ (
+ cd "$OBJDIR/dist/bin"
+ zip "$UPLOAD_DIR/target.jsshell.zip" {js,jsapi-tests,js-gdb.py,libnspr4.so,libplds4.so,libplc4.so}
+ )
+ cp -L "$OBJDIR/mozinfo.json" "$UPLOAD_DIR/target.mozinfo.json"
+
+ # Fuzzing users want the correct version of llvm-symbolizer available in the
+ # same directory as the built output.
+ if [ -d "$MOZ_FETCHES_DIR/llvm-symbolizer" ]; then
+ for f in "$MOZ_FETCHES_DIR/llvm-symbolizer/bin/llvm-symbolizer"*; do
+ gzip -c "$f" > "$UPLOAD_DIR/llvm-symbolizer.gz" || echo "gzip $f failed" >&2
+ break
+ done
+ fi
+else # !upload
+# Provide a note for users on why we don't include artifacts for these builds
+# by default, and how they can get the artifacts if they really need them.
+ cat >"$UPLOAD_DIR"/README-artifacts.txt <<'EOF'
+Artifact upload has been disabled for this build due to infrequent usage of the
+generated artifacts. If you find yourself in a position where you need the
+shell or similar artifacts from this build, please redo your push with the
+environment variable MOZ_JS_UPLOAD_BINARIES set to 1. You can provide this as
+the option `--env MOZ_JS_UPLOAD_BINARIES=1` to `mach try fuzzy` or `mach try auto`.
+EOF
+fi
+
+# Fuzzing also uses a few fields in target.json file for automated downloads to
+# identify what was built.
+if [ -n "$MOZ_BUILD_DATE" ] && [ -n "$GECKO_HEAD_REV" ]; then
+ cat >"$UPLOAD_DIR"/target.json <<EOF
+{
+ "buildid": "$MOZ_BUILD_DATE",
+ "moz_source_stamp": "$GECKO_HEAD_REV"
+}
+EOF
+ cp "$GECKO_PATH"/mozconfig.autospider "$UPLOAD_DIR"
+fi
+
+exit $BUILD_STATUS
diff --git a/taskcluster/scripts/builder/repackage.sh b/taskcluster/scripts/builder/repackage.sh
new file mode 100755
index 0000000000..6ff67693af
--- /dev/null
+++ b/taskcluster/scripts/builder/repackage.sh
@@ -0,0 +1,95 @@
+#! /bin/bash -vex
+
+set -x -e
+
+echo "running as" $(id)
+
+####
+# Taskcluster friendly wrapper for performing fx desktop repackaging via mozharness.
+####
+
+# Inputs, with defaults
+
+: MOZHARNESS_SCRIPT ${MOZHARNESS_SCRIPT}
+: MOZHARNESS_CONFIG ${MOZHARNESS_CONFIG}
+: MOZHARNESS_CONFIG_PATHS ${MOZHARNESS_CONFIG_PATHS}
+: MOZHARNESS_ACTIONS ${MOZHARNESS_ACTIONS}
+: MOZHARNESS_OPTIONS ${MOZHARNESS_OPTIONS}
+
+: TOOLTOOL_CACHE ${TOOLTOOL_CACHE:=/builds/worker/tooltool-cache}
+
+: MOZ_SCM_LEVEL ${MOZ_SCM_LEVEL:=1}
+
+: WORKSPACE ${WORKSPACE:=/builds/worker/workspace}
+: MOZ_OBJDIR ${MOZ_OBJDIR:=$WORKSPACE/obj-build}
+
+set -v
+
+fail() {
+ echo # make sure error message is on a new line
+ echo "[build-linux.sh:error]" "${@}"
+ exit 1
+}
+
+export MOZ_CRASHREPORTER_NO_REPORT=1
+export TINDERBOX_OUTPUT=1
+
+# use "simple" package names so that they can be hard-coded in the task's
+# extras.locations
+export MOZ_SIMPLE_PACKAGE_NAME=target
+
+# test required parameters are supplied
+if [[ -z ${MOZHARNESS_SCRIPT} ]]; then fail "MOZHARNESS_SCRIPT is not set"; fi
+if [[ -z "${MOZHARNESS_CONFIG}" && -z "${EXTRA_MOZHARNESS_CONFIG}" ]]; then fail "MOZHARNESS_CONFIG or EXTRA_MOZHARNESS_CONFIG is not set"; fi
+
+# set up mozharness configuration, via command line, env, etc.
+
+debug_flag=""
+if [ 0$DEBUG -ne 0 ]; then
+ debug_flag='--debug'
+fi
+
+# $TOOLTOOL_CACHE bypasses mozharness completely and is read by tooltool_wrapper.sh to set the
+# cache. However, only some mozharness scripts use tooltool_wrapper.sh, so this may not be
+# entirely effective.
+export TOOLTOOL_CACHE
+
+export MOZ_OBJDIR
+
+config_path_cmds=""
+for path in ${MOZHARNESS_CONFIG_PATHS}; do
+ config_path_cmds="${config_path_cmds} --extra-config-path ${GECKO_PATH}/${path}"
+done
+
+# support multiple, space delimited, config files
+config_cmds=""
+for cfg in $MOZHARNESS_CONFIG; do
+ config_cmds="${config_cmds} --config ${cfg}"
+done
+
+# if MOZHARNESS_ACTIONS is given, only run those actions (completely overriding default_actions
+# in the mozharness configuration)
+if [ -n "$MOZHARNESS_ACTIONS" ]; then
+ actions=""
+ for action in $MOZHARNESS_ACTIONS; do
+ actions="$actions --$action"
+ done
+fi
+
+# if MOZHARNESS_OPTIONS is given, append them to mozharness command line run
+if [ -n "$MOZHARNESS_OPTIONS" ]; then
+ options=""
+ for option in $MOZHARNESS_OPTIONS; do
+ options="$options --$option"
+ done
+fi
+
+cd /builds/worker
+
+$GECKO_PATH/mach python $GECKO_PATH/testing/${MOZHARNESS_SCRIPT} \
+ ${config_path_cmds} \
+ ${config_cmds} \
+ $actions \
+ $options \
+ --log-level=debug \
+ --work-dir=$WORKSPACE
diff --git a/taskcluster/scripts/copy.sh b/taskcluster/scripts/copy.sh
new file mode 100755
index 0000000000..931145a3b6
--- /dev/null
+++ b/taskcluster/scripts/copy.sh
@@ -0,0 +1,9 @@
+#! /bin/bash -ex
+
+# This script copies the contents of the "scripts" folder into a docker
+# container using tar/untar. The container id must be passed as the first
+# argument, and the destination directory as the second.
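+#
+# Usage sketch (destination path is just an example):
+#   copy.sh <container-id> /builds/worker/scripts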
+
+DIRNAME=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+docker exec $1 mkdir -p $2
+cd $DIRNAME
+tar -cv * | docker exec -i $1 tar -x -C $2
diff --git a/taskcluster/scripts/misc/afl-nyx.patch b/taskcluster/scripts/misc/afl-nyx.patch
new file mode 100644
index 0000000000..1be1a5fb1b
--- /dev/null
+++ b/taskcluster/scripts/misc/afl-nyx.patch
@@ -0,0 +1,847 @@
+From 705d24fb3ad80af5544b43ade6927d24a9367a69 Mon Sep 17 00:00:00 2001
+From: "Christian Holler (:decoder)" <choller@mozilla.com>
+Date: Thu, 14 Oct 2021 20:59:27 +0200
+Subject: [PATCH 01/10] Initial commit for compiler, preload and userspace
+ tools
+
+---
+ config.h | 2 +
+ llvm_mode/Makefile | 20 +++------
+ llvm_mode/afl-llvm-pass.so.cc | 3 +-
+ llvm_mode/afl-llvm-rt.o.c | 83 ++++++++++++++++++++++++++++++++---
+ 4 files changed, 87 insertions(+), 21 deletions(-)
+
+diff --git a/config.h b/config.h
+index ea6aac4..b21298d 100644
+--- a/config.h
++++ b/config.h
+@@ -328,6 +328,8 @@
+ #define MAP_SIZE_POW2 16
+ #define MAP_SIZE (1 << MAP_SIZE_POW2)
+
++#define STATE_STR_LEN 12
++
+ /* Maximum allocator request size (keep well under INT_MAX): */
+
+ #define MAX_ALLOC 0x40000000
+diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile
+index 7617f91..823e959 100644
+--- a/llvm_mode/Makefile
++++ b/llvm_mode/Makefile
+@@ -23,6 +23,7 @@ BIN_PATH = $(PREFIX)/bin
+ VERSION = $(shell grep '^\#define VERSION ' ../config.h | cut -d '"' -f2)
+
+ LLVM_CONFIG ?= llvm-config
++LLVM_BINPATH = $(shell $(LLVM_CONFIG) --bindir)
+
+ CFLAGS ?= -O3 -funroll-loops
+ CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign \
+@@ -51,8 +52,8 @@ endif
+ # probably better.
+
+ ifeq "$(origin CC)" "default"
+- CC = clang
+- CXX = clang++
++ CC = $(LLVM_BINPATH)/clang
++ CXX = $(LLVM_BINPATH)/clang++
+ endif
+
+ ifndef AFL_TRACE_PC
+@@ -61,7 +62,7 @@ else
+ PROGS = ../afl-clang-fast ../afl-llvm-rt.o ../afl-llvm-rt-32.o ../afl-llvm-rt-64.o
+ endif
+
+-all: test_deps $(PROGS) test_build all_done
++all: test_deps $(PROGS) all_done
+
+ test_deps:
+ ifndef AFL_TRACE_PC
+@@ -94,18 +95,7 @@ endif
+ @printf "[*] Building 64-bit variant of the runtime (-m64)... "
+ @$(CC) $(CFLAGS) -m64 -fPIC -c $< -o $@ 2>/dev/null; if [ "$$?" = "0" ]; then echo "success!"; else echo "failed (that's fine)"; fi
+
+-test_build: $(PROGS)
+- @echo "[*] Testing the CC wrapper and instrumentation output..."
+- unset AFL_USE_ASAN AFL_USE_MSAN AFL_INST_RATIO; AFL_QUIET=1 AFL_PATH=. AFL_CC=$(CC) ../afl-clang-fast $(CFLAGS) ../test-instr.c -o test-instr $(LDFLAGS)
+-# Use /dev/null to avoid problems with optimization messing up expected
+-# branches. See https://github.com/google/AFL/issues/30.
+- ../afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null
+- echo 1 | ../afl-showmap -m none -q -o .test-instr1 ./test-instr
+- @rm -f test-instr
+- @cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping <lcamtuf@google.com> to troubleshoot the issue."; echo; exit 1; fi
+- @echo "[+] All right, the instrumentation seems to be working!"
+-
+-all_done: test_build
++all_done:
+ @echo "[+] All done! You can now use '../afl-clang-fast' to compile programs."
+
+ .NOTPARALLEL: clean
+diff --git a/llvm_mode/afl-llvm-pass.so.cc b/llvm_mode/afl-llvm-pass.so.cc
+index 154a5db..0bfbfdf 100644
+--- a/llvm_mode/afl-llvm-pass.so.cc
++++ b/llvm_mode/afl-llvm-pass.so.cc
+@@ -105,7 +105,8 @@ bool AFLCoverage::runOnModule(Module &M) {
+
+ GlobalVariable *AFLMapPtr =
+ new GlobalVariable(M, PointerType::get(Int8Ty, 0), false,
+- GlobalValue::ExternalLinkage, 0, "__afl_area_ptr");
++ GlobalVariable::ExternalLinkage, 0, "__afl_area_ptr",
++ 0, GlobalVariable::GeneralDynamicTLSModel, 0, false);
+
+ GlobalVariable *AFLPrevLoc = new GlobalVariable(
+ M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc",
+diff --git a/llvm_mode/afl-llvm-rt.o.c b/llvm_mode/afl-llvm-rt.o.c
+index 60475c9..536adb9 100644
+--- a/llvm_mode/afl-llvm-rt.o.c
++++ b/llvm_mode/afl-llvm-rt.o.c
+@@ -41,6 +41,10 @@
+ #include <sys/shm.h>
+ #include <sys/wait.h>
+ #include <sys/types.h>
++#include <syscall.h>
++
++#define gettid() ((pid_t)syscall(SYS_gettid))
++
+
+ /* This is a somewhat ugly hack for the experimental 'trace-pc-guard' mode.
+ Basically, we need to make sure that the forkserver is initialized after
+@@ -53,12 +57,23 @@
+ #endif /* ^USE_TRACE_PC */
+
+
++void enable_afl_tracing(void);
++void disable_afl_tracing(void);
++void init_afl_tracing(void);
++
++
+ /* Globals needed by the injected instrumentation. The __afl_area_initial region
+ is used for instrumentation output before __afl_map_shm() has a chance to run.
+ It will end up as .comm, so it shouldn't be too wasteful. */
+
++#define FIREFOX_CONTROL_AREA_ADDR 0x100000
++
++u8*** __firefox_afl_control_areas = NULL;
++
+ u8 __afl_area_initial[MAP_SIZE];
+-u8* __afl_area_ptr = __afl_area_initial;
++__thread u8* __afl_area_ptr = __afl_area_initial;
++
++u8* __afl_area_ptr_pre = __afl_area_initial;
+
+ __thread u32 __afl_prev_loc;
+
+@@ -82,17 +97,15 @@ static void __afl_map_shm(void) {
+
+ u32 shm_id = atoi(id_str);
+
+- __afl_area_ptr = shmat(shm_id, NULL, 0);
++ __afl_area_ptr_pre = shmat(shm_id, NULL, 0);
+
+ /* Whooooops. */
+
+- if (__afl_area_ptr == (void *)-1) _exit(1);
++ if (__afl_area_ptr_pre == (void *)-1) _exit(1);
+
+ /* Write something into the bitmap so that even with low AFL_INST_RATIO,
+ our parent doesn't give up on us. */
+
+- __afl_area_ptr[0] = 1;
+-
+ }
+
+ }
+@@ -256,6 +269,16 @@ void __afl_manual_init(void) {
+
+ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) {
+
++ __firefox_afl_control_areas = mmap((void*)FIREFOX_CONTROL_AREA_ADDR, 0x1000, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED , 0, 0);
++ if(__firefox_afl_control_areas == (void*)-1){
++ exit(1);
++ }
++
++ __firefox_afl_control_areas[0] = (u8**) enable_afl_tracing;
++ __firefox_afl_control_areas[1] = (u8**) disable_afl_tracing;
++ __firefox_afl_control_areas[2] = (u8**) init_afl_tracing;
++ __firefox_afl_control_areas[3] = (u8**) 1337;
++
+ is_persistent = !!getenv(PERSIST_ENV_VAR);
+
+ if (getenv(DEFER_ENV_VAR)) return;
+@@ -310,5 +333,55 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t* start, uint32_t* stop) {
+ start++;
+
+ }
++}
++
++void enable_afl_tracing(void){
++ if(__afl_area_ptr == __afl_area_initial && __afl_area_ptr_pre != __afl_area_initial){
++ __afl_area_ptr = __afl_area_ptr_pre;
++ }
++}
++
++void disable_afl_tracing(void){
++ if(__afl_area_ptr != __afl_area_initial){
++ __afl_area_ptr = __afl_area_initial;
++ }
++}
++
++void init_afl_tracing(){
++ __afl_area_ptr_pre[0] = 1;
++}
++
++void print_afl_bitmap(void){
++ if(__afl_area_ptr_pre == __afl_area_initial){
++ return;
++ }
++ void* data = __afl_area_ptr_pre;
++ int size = 2 << 15;
++ char ascii[17];
++ size_t i, j;
++ ascii[16] = '\0';
++ for (i = 0; i < size; ++i) {
++ printf("%02X ", ((unsigned char*)data)[i]);
++ if (((unsigned char*)data)[i] >= ' ' && ((unsigned char*)data)[i] <= '~') {
++ ascii[i % 16] = ((unsigned char*)data)[i];
++ } else {
++ ascii[i % 16] = '.';
++ }
++ if ((i+1) % 8 == 0 || i+1 == size) {
++ printf(" ");
++ if ((i+1) % 16 == 0) {
++ printf("| %s \n", ascii);
++ } else if (i+1 == size) {
++ ascii[(i+1) % 16] = '\0';
++ if ((i+1) % 16 <= 8) {
++ printf(" ");
++ }
++ for (j = (i+1) % 16; j < 16; ++j) {
++ printf(" ");
++ }
++ printf("| %s \n", ascii);
++ }
++ }
++ }
+
+ }
+--
+2.37.1
+
+From 003221dd9fec462177445040c7fa57c09397c684 Mon Sep 17 00:00:00 2001
+From: "Christian Holler (:decoder)" <choller@mozilla.com>
+Date: Fri, 15 Oct 2021 11:55:02 +0200
+Subject: [PATCH 02/10] [compiler] Add selective instrumentation through
+ AFL_INST_FILTER
+
+---
+ llvm_mode/afl-clang-fast.c | 57 +++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 56 insertions(+), 1 deletion(-)
+
+diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c
+index c154e01..b5aa521 100644
+--- a/llvm_mode/afl-clang-fast.c
++++ b/llvm_mode/afl-clang-fast.c
+@@ -29,6 +29,9 @@
+ */
+
+ #define AFL_MAIN
++#ifndef _GNU_SOURCE
++#define _GNU_SOURCE
++#endif
+
+ #include "../config.h"
+ #include "../types.h"
+@@ -39,6 +42,7 @@
+ #include <unistd.h>
+ #include <stdlib.h>
+ #include <string.h>
++#include <linux/limits.h>
+
+ static u8* obj_path; /* Path to runtime libraries */
+ static u8** cc_params; /* Parameters passed to the real CC */
+@@ -119,13 +123,63 @@ static void edit_params(u32 argc, char** argv) {
+ cc_params[0] = alt_cc ? alt_cc : (u8*)"clang";
+ }
+
++#define CPP_SUFF ".cpp"
++#define CPP_SLEN (sizeof(CPP_SUFF)-1)
++#define C_SUFF ".c"
++#define C_SLEN (sizeof(C_SUFF)-1)
++ u8 should_instrument = 1;
++
++ u8* instfilter = getenv("AFL_INST_FILTER");
++
++ if (instfilter) {
++
++ should_instrument = 0;
++
++ char cwd [PATH_MAX];
++ getcwd(cwd, sizeof(cwd));
++
++ for (u32 argi = 0; argi < argc; ++argi) {
++ u8 is_source = 0;
++ u32 arglen = strlen(argv[argi]);
++ //SAYF("Checking: %s\n", argv[argi]);
++ if (arglen > CPP_SLEN) {
++ if (!memcmp(argv[argi] + arglen - CPP_SLEN, CPP_SUFF, CPP_SLEN)) {
++ is_source = 1;
++ }
++ }
++
++ if (!is_source && arglen > C_SLEN) {
++ if (!memcmp(argv[argi] + arglen - C_SLEN, C_SUFF, C_SLEN)) {
++ is_source = 1;
++ }
++ }
++
++ if (is_source) {
++ //SAYF("This is a source file: %s\n", argv[argi]);
++ char relpath [PATH_MAX];
++ strcat(relpath, cwd);
++ strcpy(relpath, cwd);
++ strcat(relpath, argv[argi]);
++ char abspath [PATH_MAX];
++ if (realpath(relpath, abspath)) {
++ if (strcasestr(abspath, instfilter)) {
++ should_instrument = 1;
++ SAYF("Instrumenting file %s\n", argv[argi]);
++ break;
++ }
++ }
++ }
++ }
++
++ }
++
+ /* There are two ways to compile afl-clang-fast. In the traditional mode, we
+ use afl-llvm-pass.so to inject instrumentation. In the experimental
+ 'trace-pc-guard' mode, we use native LLVM instrumentation callbacks
+ instead. The latter is a very recent addition - see:
+
+ http://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards */
+-
++if (should_instrument) {
+ #ifdef USE_TRACE_PC
+ cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard";
+ cc_params[cc_par_cnt++] = "-mllvm";
+@@ -136,6 +190,7 @@ static void edit_params(u32 argc, char** argv) {
+ cc_params[cc_par_cnt++] = "-Xclang";
+ cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-pass.so", obj_path);
+ #endif /* ^USE_TRACE_PC */
++}
+
+ cc_params[cc_par_cnt++] = "-Qunused-arguments";
+
+--
+2.37.1
+
+From 3e126e0f9bf21c32cb650d49f5f088b213538854 Mon Sep 17 00:00:00 2001
+From: "Christian Holler (:decoder)" <choller@mozilla.com>
+Date: Tue, 22 Feb 2022 16:44:27 +0100
+Subject: [PATCH 03/10] Fix AFL compiler to ignore wasm-compiled code
+
+---
+ llvm_mode/afl-clang-fast.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c
+index 226ee36..6d4171c 100644
+--- a/llvm_mode/afl-clang-fast.c
++++ b/llvm_mode/afl-clang-fast.c
+@@ -213,6 +213,7 @@ if (should_instrument) {
+ if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1;
+
+ if (!strcmp(cur, "-shared")) maybe_linking = 0;
++ if (!strcmp(cur, "--target=wasm32-wasi")) maybe_linking = 0;
+
+ if (!strcmp(cur, "-Wl,-z,defs") ||
+ !strcmp(cur, "-Wl,--no-undefined")) continue;
+--
+2.37.1
+
+From e2e269e9d00b47cc6a139045688f32b26d30fc85 Mon Sep 17 00:00:00 2001
+From: "Christian Holler (:decoder)" <choller@mozilla.com>
+Date: Thu, 9 Jun 2022 10:20:34 +0200
+Subject: [PATCH 04/10] Update IRBuilder calls to LLVM 14 API
+
+---
+ llvm_mode/afl-llvm-pass.so.cc | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/llvm_mode/afl-llvm-pass.so.cc b/llvm_mode/afl-llvm-pass.so.cc
+index 0bfbfdf..203cffa 100644
+--- a/llvm_mode/afl-llvm-pass.so.cc
++++ b/llvm_mode/afl-llvm-pass.so.cc
+@@ -38,12 +38,14 @@
+ #include <stdlib.h>
+ #include <unistd.h>
+
++#include "llvm/Pass.h"
+ #include "llvm/ADT/Statistic.h"
+ #include "llvm/IR/IRBuilder.h"
+ #include "llvm/IR/LegacyPassManager.h"
+ #include "llvm/IR/Module.h"
+ #include "llvm/Support/Debug.h"
+ #include "llvm/Transforms/IPO/PassManagerBuilder.h"
++#include "llvm/Passes/OptimizationLevel.h"
+
+ using namespace llvm;
+
+@@ -132,20 +134,20 @@ bool AFLCoverage::runOnModule(Module &M) {
+
+ /* Load prev_loc */
+
+- LoadInst *PrevLoc = IRB.CreateLoad(AFLPrevLoc);
++ LoadInst *PrevLoc = IRB.CreateLoad(IRB.getInt32Ty(), AFLPrevLoc);
+ PrevLoc->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
+ Value *PrevLocCasted = IRB.CreateZExt(PrevLoc, IRB.getInt32Ty());
+
+ /* Load SHM pointer */
+
+- LoadInst *MapPtr = IRB.CreateLoad(AFLMapPtr);
++ LoadInst *MapPtr = IRB.CreateLoad(PointerType::get(Int8Ty, 0), AFLMapPtr);
+ MapPtr->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
+ Value *MapPtrIdx =
+- IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, CurLoc));
++ IRB.CreateGEP(Int8Ty, MapPtr, IRB.CreateXor(PrevLocCasted, CurLoc));
+
+ /* Update bitmap */
+
+- LoadInst *Counter = IRB.CreateLoad(MapPtrIdx);
++ LoadInst *Counter = IRB.CreateLoad(IRB.getInt8Ty(), MapPtrIdx);
+ Counter->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
+ Value *Incr = IRB.CreateAdd(Counter, ConstantInt::get(Int8Ty, 1));
+ IRB.CreateStore(Incr, MapPtrIdx)
+--
+2.37.1
+
+From be3f79c5b472e5a8a06266d7a74ebb162b3d8cba Mon Sep 17 00:00:00 2001
+From: "Christian Holler (:decoder)" <choller@mozilla.com>
+Date: Thu, 9 Jun 2022 11:37:44 +0200
+Subject: [PATCH 05/10] Switch AFLCoverage pass to new pass manager
+
+---
+ llvm_mode/afl-clang-fast.c | 7 ++---
+ llvm_mode/afl-llvm-pass.so.cc | 58 +++++++++++++++++------------------
+ 2 files changed, 31 insertions(+), 34 deletions(-)
+
+diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c
+index 6d4171c..5e00286 100644
+--- a/llvm_mode/afl-clang-fast.c
++++ b/llvm_mode/afl-clang-fast.c
+@@ -178,14 +178,12 @@ static void edit_params(u32 argc, char** argv) {
+ http://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards */
+ if (should_instrument) {
+ #ifdef USE_TRACE_PC
++ #error "unsupported"
+ cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard";
+ cc_params[cc_par_cnt++] = "-mllvm";
+ cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0";
+ #else
+- cc_params[cc_par_cnt++] = "-Xclang";
+- cc_params[cc_par_cnt++] = "-load";
+- cc_params[cc_par_cnt++] = "-Xclang";
+- cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-pass.so", obj_path);
++ cc_params[cc_par_cnt++] = alloc_printf("-fpass-plugin=%s/afl-llvm-pass.so", obj_path);
+ #endif /* ^USE_TRACE_PC */
+ }
+
+diff --git a/llvm_mode/afl-llvm-pass.so.cc b/llvm_mode/afl-llvm-pass.so.cc
+index 203cffa..1483943 100644
+--- a/llvm_mode/afl-llvm-pass.so.cc
++++ b/llvm_mode/afl-llvm-pass.so.cc
+@@ -41,44 +41,57 @@
+ #include "llvm/Pass.h"
+ #include "llvm/ADT/Statistic.h"
+ #include "llvm/IR/IRBuilder.h"
+-#include "llvm/IR/LegacyPassManager.h"
+ #include "llvm/IR/Module.h"
+-#include "llvm/Support/Debug.h"
+-#include "llvm/Transforms/IPO/PassManagerBuilder.h"
++#include "llvm/IR/PassManager.h"
+ #include "llvm/Passes/OptimizationLevel.h"
++#include "llvm/Passes/PassPlugin.h"
++#include "llvm/Passes/PassBuilder.h"
++#include "llvm/Support/Debug.h"
+
+ using namespace llvm;
+
+ namespace {
+
+- class AFLCoverage : public ModulePass {
++ class AFLCoverage : public PassInfoMixin<AFLCoverage> {
+
+ public:
+
+- static char ID;
+- AFLCoverage() : ModulePass(ID) { }
+-
+- bool runOnModule(Module &M) override;
+-
+- // StringRef getPassName() const override {
+- // return "American Fuzzy Lop Instrumentation";
+- // }
++ AFLCoverage() { }
+
++ PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
+ };
+
+ }
+
++extern "C" ::llvm::PassPluginLibraryInfo LLVM_ATTRIBUTE_WEAK
++llvmGetPassPluginInfo() {
++
++ return {LLVM_PLUGIN_API_VERSION, "AFLCoverage", "v0.1",
++ /* lambda to insert our pass into the pass pipeline. */
++ [](PassBuilder &PB) {
+
+-char AFLCoverage::ID = 0;
++ #if LLVM_VERSION_MAJOR <= 13
++ using OptimizationLevel = typename PassBuilder::OptimizationLevel;
++ #endif
++ PB.registerOptimizerLastEPCallback(
++ [](ModulePassManager &MPM, OptimizationLevel OL) {
+
++ MPM.addPass(AFLCoverage());
+
+-bool AFLCoverage::runOnModule(Module &M) {
++ });
++ }};
++
++}
++
++PreservedAnalyses AFLCoverage::run(Module &M, ModuleAnalysisManager &MAM) {
+
+ LLVMContext &C = M.getContext();
+
+ IntegerType *Int8Ty = IntegerType::getInt8Ty(C);
+ IntegerType *Int32Ty = IntegerType::getInt32Ty(C);
+
++ auto PA = PreservedAnalyses::all();
++
+ /* Show a banner */
+
+ char be_quiet = 0;
+@@ -175,21 +188,6 @@ bool AFLCoverage::runOnModule(Module &M) {
+
+ }
+
+- return true;
++ return PA;
+
+ }
+-
+-
+-static void registerAFLPass(const PassManagerBuilder &,
+- legacy::PassManagerBase &PM) {
+-
+- PM.add(new AFLCoverage());
+-
+-}
+-
+-
+-static RegisterStandardPasses RegisterAFLPass(
+- PassManagerBuilder::EP_ModuleOptimizerEarly, registerAFLPass);
+-
+-static RegisterStandardPasses RegisterAFLPass0(
+- PassManagerBuilder::EP_EnabledOnOptLevel0, registerAFLPass);
+--
+2.37.1
+
+From bd47b9066e616fdfdad1808ec0365992a4962ff2 Mon Sep 17 00:00:00 2001
+From: Jesse Schwartzentruber <truber@mozilla.com>
+Date: Tue, 9 Aug 2022 17:18:15 -0400
+Subject: [PATCH 06/10] Add install step for afl-clang-fast only
+
+---
+ llvm_mode/Makefile | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile
+index 823e959..b155eb5 100644
+--- a/llvm_mode/Makefile
++++ b/llvm_mode/Makefile
+@@ -103,3 +103,13 @@ all_done:
+ clean:
+ rm -f *.o *.so *~ a.out core core.[1-9][0-9]* test-instr .test-instr0 .test-instr1
+ rm -f $(PROGS) ../afl-clang-fast++
++
++install: all
++ mkdir -p -m 755 $${DESTDIR}$(BIN_PATH) $${DESTDIR}$(HELPER_PATH)
++ifndef AFL_TRACE_PC
++ if [ -f ../afl-clang-fast -a -f ../afl-llvm-pass.so -a -f ../afl-llvm-rt.o ]; then set -e; install -m 755 ../afl-clang-fast $${DESTDIR}$(BIN_PATH); ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang-fast++; install -m 755 ../afl-llvm-pass.so ../afl-llvm-rt.o $${DESTDIR}$(HELPER_PATH); fi
++else
++ if [ -f ../afl-clang-fast -a -f ../afl-llvm-rt.o ]; then set -e; install -m 755 ../afl-clang-fast $${DESTDIR}$(BIN_PATH); ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang-fast++; install -m 755 ../afl-llvm-rt.o $${DESTDIR}$(HELPER_PATH); fi
++endif
++ if [ -f ../afl-llvm-rt-32.o ]; then set -e; install -m 755 ../afl-llvm-rt-32.o $${DESTDIR}$(HELPER_PATH); fi
++ if [ -f ../afl-llvm-rt-64.o ]; then set -e; install -m 755 ../afl-llvm-rt-64.o $${DESTDIR}$(HELPER_PATH); fi
+--
+2.37.1
+
+From 11f8b04786239bc8daa2c7a207b5e19f5c19ec6e Mon Sep 17 00:00:00 2001
+From: Jesse Schwartzentruber <truber@mozilla.com>
+Date: Thu, 11 Aug 2022 11:39:37 -0400
+Subject: [PATCH 07/10] Reenable instrumentation tests
+
+---
+ config.h | 4 ++++
+ llvm_mode/Makefile | 15 +++++++++++++--
+ llvm_mode/afl-llvm-rt.o.c | 1 +
+ 3 files changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/config.h b/config.h
+index b21298d..c035af2 100644
+--- a/config.h
++++ b/config.h
+@@ -285,6 +285,10 @@
+ #define PERSIST_ENV_VAR "__AFL_PERSISTENT"
+ #define DEFER_ENV_VAR "__AFL_DEFER_FORKSRV"
+
++/* Enable tracing by default at startup */
++
++#define TRACE_ENV_VAR "__AFL_ENABLE_TRACE"
++
+ /* In-code signatures for deferred and persistent mode. */
+
+ #define PERSIST_SIG "##SIG_AFL_PERSISTENT##"
+diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile
+index b155eb5..4f460ff 100644
+--- a/llvm_mode/Makefile
++++ b/llvm_mode/Makefile
+@@ -62,7 +62,7 @@ else
+ PROGS = ../afl-clang-fast ../afl-llvm-rt.o ../afl-llvm-rt-32.o ../afl-llvm-rt-64.o
+ endif
+
+-all: test_deps $(PROGS) all_done
++all: test_deps $(PROGS) test_build all_done
+
+ test_deps:
+ ifndef AFL_TRACE_PC
+@@ -95,7 +95,18 @@ endif
+ @printf "[*] Building 64-bit variant of the runtime (-m64)... "
+ @$(CC) $(CFLAGS) -m64 -fPIC -c $< -o $@ 2>/dev/null; if [ "$$?" = "0" ]; then echo "success!"; else echo "failed (that's fine)"; fi
+
+-all_done:
++test_build: $(PROGS)
++ @echo "[*] Testing the CC wrapper and instrumentation output..."
++ unset AFL_USE_ASAN AFL_USE_MSAN AFL_INST_RATIO; AFL_QUIET=1 AFL_PATH=. AFL_CC=$(CC) ../afl-clang-fast $(CFLAGS) ../test-instr.c -o test-instr $(LDFLAGS)
++# Use /dev/null to avoid problems with optimization messing up expected
++# branches. See https://github.com/google/AFL/issues/30.
++ __AFL_ENABLE_TRACE=1 ../afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null
++ echo 1 | __AFL_ENABLE_TRACE=1 ../afl-showmap -m none -q -o .test-instr1 ./test-instr
++ @rm -f test-instr
++ @cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping <lcamtuf@google.com> to troubleshoot the issue."; echo; exit 1; fi
++ @echo "[+] All right, the instrumentation seems to be working!"
++
++all_done: test_build
+ @echo "[+] All done! You can now use '../afl-clang-fast' to compile programs."
+
+ .NOTPARALLEL: clean
+diff --git a/llvm_mode/afl-llvm-rt.o.c b/llvm_mode/afl-llvm-rt.o.c
+index 536adb9..c3b710f 100644
+--- a/llvm_mode/afl-llvm-rt.o.c
++++ b/llvm_mode/afl-llvm-rt.o.c
+@@ -285,6 +285,7 @@ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) {
+
+ __afl_manual_init();
+
++ if (getenv(TRACE_ENV_VAR)) enable_afl_tracing();
+ }
+
+
+--
+2.37.1
+
+From dd1050393281f2ea4c9b6521f5e48bec365b0a8a Mon Sep 17 00:00:00 2001
+From: Jesse Schwartzentruber <truber@mozilla.com>
+Date: Thu, 11 Aug 2022 13:17:34 -0400
+Subject: [PATCH 08/10] Add search in HELPER_PATH for libraries.
+
+---
+ llvm_mode/afl-clang-fast.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c
+index 5e00286..70b6af2 100644
+--- a/llvm_mode/afl-clang-fast.c
++++ b/llvm_mode/afl-clang-fast.c
+@@ -85,6 +85,16 @@ static void find_obj(u8* argv0) {
+ return;
+ }
+
++ ck_free(tmp);
++ tmp = alloc_printf("%s/../lib/afl/afl-llvm-rt.o", dir);
++
++ if (!access(tmp, R_OK)) {
++ ck_free(tmp);
++ obj_path = alloc_printf("%s/../lib/afl", dir);
++ ck_free(dir);
++ return;
++ }
++
+ ck_free(tmp);
+ ck_free(dir);
+
+--
+2.37.1
+
+From 9eb9eaf26d473bb8479df380f918a1bf83250029 Mon Sep 17 00:00:00 2001
+From: Jesse Schwartzentruber <truber@mozilla.com>
+Date: Thu, 11 Aug 2022 19:16:36 -0400
+Subject: [PATCH 09/10] Don't instrument at all for wasm
+
+---
+ llvm_mode/afl-clang-fast.c | 41 ++++++++++++++++++++------------------
+ 1 file changed, 22 insertions(+), 19 deletions(-)
+
+diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c
+index 70b6af2..0d1e76b 100644
+--- a/llvm_mode/afl-clang-fast.c
++++ b/llvm_mode/afl-clang-fast.c
+@@ -180,23 +180,6 @@ static void edit_params(u32 argc, char** argv) {
+
+ }
+
+- /* There are two ways to compile afl-clang-fast. In the traditional mode, we
+- use afl-llvm-pass.so to inject instrumentation. In the experimental
+- 'trace-pc-guard' mode, we use native LLVM instrumentation callbacks
+- instead. The latter is a very recent addition - see:
+-
+- http://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards */
+-if (should_instrument) {
+-#ifdef USE_TRACE_PC
+- #error "unsupported"
+- cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard";
+- cc_params[cc_par_cnt++] = "-mllvm";
+- cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0";
+-#else
+- cc_params[cc_par_cnt++] = alloc_printf("-fpass-plugin=%s/afl-llvm-pass.so", obj_path);
+-#endif /* ^USE_TRACE_PC */
+-}
+-
+ cc_params[cc_par_cnt++] = "-Qunused-arguments";
+
+ /* Detect stray -v calls from ./configure scripts. */
+@@ -222,7 +204,10 @@ if (should_instrument) {
+ if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1;
+
+ if (!strcmp(cur, "-shared")) maybe_linking = 0;
+- if (!strcmp(cur, "--target=wasm32-wasi")) maybe_linking = 0;
++ if (!strcmp(cur, "--target=wasm32-wasi")) {
++ maybe_linking = 0;
++ should_instrument = 0;
++ }
+
+ if (!strcmp(cur, "-Wl,-z,defs") ||
+ !strcmp(cur, "-Wl,--no-undefined")) continue;
+@@ -231,6 +216,23 @@ if (should_instrument) {
+
+ }
+
++ /* There are two ways to compile afl-clang-fast. In the traditional mode, we
++ use afl-llvm-pass.so to inject instrumentation. In the experimental
++ 'trace-pc-guard' mode, we use native LLVM instrumentation callbacks
++ instead. The latter is a very recent addition - see:
++
++ http://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards */
++ if (should_instrument) {
++#ifdef USE_TRACE_PC
++ #error "unsupported"
++ cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard";
++ cc_params[cc_par_cnt++] = "-mllvm";
++ cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0";
++#else
++ cc_params[cc_par_cnt++] = alloc_printf("-fpass-plugin=%s/afl-llvm-pass.so", obj_path);
++#endif /* ^USE_TRACE_PC */
++ }
++
+ if (getenv("AFL_HARDEN")) {
+
+ cc_params[cc_par_cnt++] = "-fstack-protector-all";
+--
+2.37.1
+
+From 6ea1771e95d6f4c19453047996b0fc4ffa3fdeda Mon Sep 17 00:00:00 2001
+From: Jesse Schwartzentruber <truber@mozilla.com>
+Date: Wed, 20 Apr 2022 15:39:28 -0400
+Subject: [PATCH 10/10] fix instrumentation for
+ -Werror,-Wunused-but-set-variable
+
+`used` is so it isn't optimized out. `unused` is to avoid the warning.
+---
+ llvm_mode/afl-clang-fast.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c
+index 0d1e76b..3bc0daa 100644
+--- a/llvm_mode/afl-clang-fast.c
++++ b/llvm_mode/afl-clang-fast.c
+@@ -321,7 +321,7 @@ static void edit_params(u32 argc, char** argv) {
+ */
+
+ cc_params[cc_par_cnt++] = "-D__AFL_LOOP(_A)="
+- "({ static volatile char *_B __attribute__((used)); "
++ "({ static volatile char *_B __attribute__((used,unused)); "
+ " _B = (char*)\"" PERSIST_SIG "\"; "
+ #ifdef __APPLE__
+ "__attribute__((visibility(\"default\"))) "
+@@ -333,7 +333,7 @@ static void edit_params(u32 argc, char** argv) {
+ "_L(_A); })";
+
+ cc_params[cc_par_cnt++] = "-D__AFL_INIT()="
+- "do { static volatile char *_A __attribute__((used)); "
++ "do { static volatile char *_A __attribute__((used,unused)); "
+ " _A = (char*)\"" DEFER_SIG "\"; "
+ #ifdef __APPLE__
+ "__attribute__((visibility(\"default\"))) "
+--
+2.37.1
+
+From 0884906de0cdd007b28b15aae35cee484d1bc31d Mon Sep 17 00:00:00 2001
+From: Mike Hommey <mh@glandium.org>
+Date: Tue, 6 Sep 2022 11:08:55 +0900
+Subject: [PATCH] Fix build failures with clang 15
+
+---
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index 5e800db..c875f2d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -50,7 +50,7 @@ ifndef AFL_NO_X86
+
+ test_x86:
+ @echo "[*] Checking for the ability to compile x86 code..."
+- @echo 'main() { __asm__("xorb %al, %al"); }' | $(CC) -w -x c - -o .test || ( echo; echo "Oops, looks like your compiler can't generate x86 code."; echo; echo "Don't panic! You can use the LLVM or QEMU mode, but see docs/INSTALL first."; echo "(To ignore this error, set AFL_NO_X86=1 and try again.)"; echo; exit 1 )
++ @echo 'int main() { __asm__("xorb %al, %al"); }' | $(CC) -w -x c - -o .test || ( echo; echo "Oops, looks like your compiler can't generate x86 code."; echo; echo "Don't panic! You can use the LLVM or QEMU mode, but see docs/INSTALL first."; echo "(To ignore this error, set AFL_NO_X86=1 and try again.)"; echo; exit 1 )
+ @rm -f .test
+ @echo "[+] Everything seems to be working, ready to compile."
+
+--
+2.37.1.1.g659da70093
+
+From 0544d02715a26a032f109984d5f70360b80f3875 Mon Sep 17 00:00:00 2001
+From: Mike Hommey <mh@glandium.org>
+Date: Wed, 14 Dec 2022 16:25:53 +0900
+Subject: [PATCH] Add missing include
+
+---
+ llvm_mode/afl-llvm-pass.so.cc | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/llvm_mode/afl-llvm-pass.so.cc b/llvm_mode/afl-llvm-pass.so.cc
+index 1483943..0a7c37a 100644
+--- a/llvm_mode/afl-llvm-pass.so.cc
++++ b/llvm_mode/afl-llvm-pass.so.cc
+@@ -39,6 +39,7 @@
+ #include <unistd.h>
+
+ #include "llvm/Pass.h"
++#include "llvm/ADT/None.h"
+ #include "llvm/ADT/Statistic.h"
+ #include "llvm/IR/IRBuilder.h"
+ #include "llvm/IR/Module.h"
+--
+2.38.1.1.g6d9df9d320
+
diff --git a/taskcluster/scripts/misc/android-gradle-dependencies-lite.sh b/taskcluster/scripts/misc/android-gradle-dependencies-lite.sh
new file mode 100755
index 0000000000..bf91d64709
--- /dev/null
+++ b/taskcluster/scripts/misc/android-gradle-dependencies-lite.sh
@@ -0,0 +1,21 @@
+#!/bin/bash -vex
+
+set -x -e
+
+echo "running as" $(id)
+
+set -v
+
+cd $GECKO_PATH
+
+# Needed for Nexus
+export PATH=$MOZ_FETCHES_DIR/jdk-8/bin:$PATH
+
+. taskcluster/scripts/misc/android-gradle-dependencies/before.sh
+
+export MOZCONFIG=mobile/android/config/mozconfigs/android-arm-gradle-dependencies/nightly-lite
+./mach build
+./mach gradle downloadDependencies
+./mach android gradle-dependencies
+
+. taskcluster/scripts/misc/android-gradle-dependencies/after.sh
diff --git a/taskcluster/scripts/misc/android-gradle-dependencies.sh b/taskcluster/scripts/misc/android-gradle-dependencies.sh
new file mode 100755
index 0000000000..2624dc961a
--- /dev/null
+++ b/taskcluster/scripts/misc/android-gradle-dependencies.sh
@@ -0,0 +1,21 @@
+#!/bin/bash -vex
+
+set -x -e
+
+echo "running as" $(id)
+
+set -v
+
+cd $GECKO_PATH
+
+# Nexus needs Java 8
+export PATH=$MOZ_FETCHES_DIR/jdk-8/bin:$PATH
+
+. taskcluster/scripts/misc/android-gradle-dependencies/before.sh
+
+export MOZCONFIG=mobile/android/config/mozconfigs/android-arm-gradle-dependencies/nightly
+./mach build
+./mach gradle downloadDependencies
+./mach android gradle-dependencies
+
+. taskcluster/scripts/misc/android-gradle-dependencies/after.sh
diff --git a/taskcluster/scripts/misc/android-gradle-dependencies/after.sh b/taskcluster/scripts/misc/android-gradle-dependencies/after.sh
new file mode 100755
index 0000000000..446f40db12
--- /dev/null
+++ b/taskcluster/scripts/misc/android-gradle-dependencies/after.sh
@@ -0,0 +1,32 @@
+#!/bin/bash -vex
+
+set -x -e
+
+echo "running as" $(id)
+
+: WORKSPACE ${WORKSPACE:=/builds/worker/workspace}
+
+set -v
+
+# Package everything up.
+pushd $WORKSPACE
+mkdir -p android-gradle-dependencies /builds/worker/artifacts
+
+# NEXUS_WORK is exported by `before.sh`.
+cp -R ${NEXUS_WORK}/storage/mozilla android-gradle-dependencies
+cp -R ${NEXUS_WORK}/storage/central android-gradle-dependencies
+cp -R ${NEXUS_WORK}/storage/google android-gradle-dependencies
+cp -R ${NEXUS_WORK}/storage/gradle-plugins android-gradle-dependencies
+
+# The Gradle wrapper will have downloaded and verified the hash of exactly one
+# Gradle distribution. It will be located in $GRADLE_USER_HOME, like
+# ~/.gradle/wrapper/dists/gradle-2.7-all/$PROJECT_HASH/gradle-2.7-all.zip. We
+# want to remove the version from the internal directory name so it can be
+# referenced via tooltool in a mozconfig.
+cp ${GRADLE_USER_HOME}/wrapper/dists/gradle-*-*/*/gradle-*-*.zip gradle.zip
+unzip -q gradle.zip
+mv gradle-* android-gradle-dependencies/gradle-dist
+
+tar cavf /builds/worker/artifacts/android-gradle-dependencies.tar.zst android-gradle-dependencies
+
+popd
diff --git a/taskcluster/scripts/misc/android-gradle-dependencies/before.sh b/taskcluster/scripts/misc/android-gradle-dependencies/before.sh
new file mode 100755
index 0000000000..7150731d73
--- /dev/null
+++ b/taskcluster/scripts/misc/android-gradle-dependencies/before.sh
@@ -0,0 +1,30 @@
+#!/bin/bash -vex
+
+set -x -e
+
+echo "running as" $(id)
+
+: WORKSPACE ${WORKSPACE:=/builds/worker/workspace}
+
+set -v
+
+# Export NEXUS_WORK so that `after.sh` can use it.
+export NEXUS_WORK=/builds/worker/workspace/sonatype-nexus-work
+mkdir -p ${NEXUS_WORK}/conf
+cp /builds/worker/workspace/build/src/taskcluster/scripts/misc/android-gradle-dependencies/nexus.xml ${NEXUS_WORK}/conf/nexus.xml
+
+RUN_AS_USER=worker $MOZ_FETCHES_DIR/sonatype-nexus/bin/nexus restart
+
+# Wait "a while" for Nexus to actually start. Don't fail if this fails.
+wget --quiet --retry-connrefused --waitretry=2 --tries=100 \
+ http://localhost:8081/nexus/service/local/status || true
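+# wget saved the response body to a local file named `status`; clean it up.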
+rm -rf status
+
+# It's helpful when debugging to see the "latest state".
+curl http://localhost:8081/nexus/service/local/status || true
+
+# Verify Nexus has actually started. Fail if this fails.
+curl --fail --silent --location http://localhost:8081/nexus/service/local/status | grep '<state>STARTED</state>'
+
+# It's helpful when debugging to see the repository configurations.
+curl http://localhost:8081/nexus/service/local/repositories || true
diff --git a/taskcluster/scripts/misc/android-gradle-dependencies/nexus.xml b/taskcluster/scripts/misc/android-gradle-dependencies/nexus.xml
new file mode 100644
index 0000000000..e3e37373d8
--- /dev/null
+++ b/taskcluster/scripts/misc/android-gradle-dependencies/nexus.xml
@@ -0,0 +1,413 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- This Source Code Form is subject to the terms of the Mozilla Public
+ - License, v. 2.0. If a copy of the MPL was not distributed with this
+ - file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
+
+<nexusConfiguration>
+ <version>2.8.0</version>
+ <nexusVersion>2.12.0-01</nexusVersion>
+ <globalConnectionSettings>
+ <connectionTimeout>20000</connectionTimeout>
+ <retrievalRetryCount>3</retrievalRetryCount>
+ <queryString></queryString>
+ </globalConnectionSettings>
+ <restApi>
+ <uiTimeout>60000</uiTimeout>
+ </restApi>
+ <httpProxy>
+ <enabled>true</enabled>
+ <port>8082</port>
+ <proxyPolicy>strict</proxyPolicy>
+ </httpProxy>
+ <routing>
+ <resolveLinks>true</resolveLinks>
+ </routing>
+ <repositories>
+ <repository>
+ <id>mozilla</id>
+ <name>Mozilla Maven</name>
+ <providerRole>org.sonatype.nexus.proxy.repository.Repository</providerRole>
+ <providerHint>maven2</providerHint>
+ <localStatus>IN_SERVICE</localStatus>
+ <notFoundCacheActive>true</notFoundCacheActive>
+ <notFoundCacheTTL>1440</notFoundCacheTTL>
+ <userManaged>true</userManaged>
+ <exposed>true</exposed>
+ <browseable>true</browseable>
+ <writePolicy>READ_ONLY</writePolicy>
+ <indexable>true</indexable>
+ <searchable>true</searchable>
+ <localStorage>
+ <provider>file</provider>
+ </localStorage>
+ <remoteStorage>
+ <url>https://maven.mozilla.org/maven2/</url>
+ </remoteStorage>
+ <externalConfiguration>
+ <repositoryPolicy>RELEASE</repositoryPolicy>
+ <checksumPolicy>STRICT</checksumPolicy>
+ <fileTypeValidation>true</fileTypeValidation>
+ <downloadRemoteIndex>false</downloadRemoteIndex>
+ <artifactMaxAge>-1</artifactMaxAge>
+ <metadataMaxAge>1440</metadataMaxAge>
+ <itemMaxAge>1440</itemMaxAge>
+ <autoBlockActive>true</autoBlockActive>
+ </externalConfiguration>
+ </repository>
+ <repository>
+ <id>gradle-plugins</id>
+ <name>Gradle Plugins</name>
+ <providerRole>org.sonatype.nexus.proxy.repository.Repository</providerRole>
+ <providerHint>maven2</providerHint>
+ <localStatus>IN_SERVICE</localStatus>
+ <notFoundCacheActive>true</notFoundCacheActive>
+ <notFoundCacheTTL>1440</notFoundCacheTTL>
+ <userManaged>true</userManaged>
+ <exposed>true</exposed>
+ <browseable>true</browseable>
+ <writePolicy>READ_ONLY</writePolicy>
+ <indexable>true</indexable>
+ <searchable>true</searchable>
+ <localStorage>
+ <provider>file</provider>
+ </localStorage>
+ <remoteStorage>
+ <url>https://plugins.gradle.org/m2/</url>
+ </remoteStorage>
+ <externalConfiguration>
+ <repositoryPolicy>RELEASE</repositoryPolicy>
+ <checksumPolicy>STRICT</checksumPolicy>
+ <fileTypeValidation>true</fileTypeValidation>
+ <downloadRemoteIndex>false</downloadRemoteIndex>
+ <artifactMaxAge>-1</artifactMaxAge>
+ <metadataMaxAge>1440</metadataMaxAge>
+ <itemMaxAge>1440</itemMaxAge>
+ <autoBlockActive>true</autoBlockActive>
+ </externalConfiguration>
+ </repository>
+ <repository>
+ <id>google</id>
+ <name>google</name>
+ <providerRole>org.sonatype.nexus.proxy.repository.Repository</providerRole>
+ <providerHint>maven2</providerHint>
+ <localStatus>IN_SERVICE</localStatus>
+ <notFoundCacheActive>true</notFoundCacheActive>
+ <notFoundCacheTTL>1440</notFoundCacheTTL>
+ <userManaged>true</userManaged>
+ <exposed>true</exposed>
+ <browseable>true</browseable>
+ <writePolicy>READ_ONLY</writePolicy>
+ <indexable>true</indexable>
+ <searchable>true</searchable>
+ <localStorage>
+ <provider>file</provider>
+ </localStorage>
+ <remoteStorage>
+ <url>https://maven.google.com/</url>
+ </remoteStorage>
+ <externalConfiguration>
+ <repositoryPolicy>RELEASE</repositoryPolicy>
+ <!-- Google doesn't publish checksums. Why, Google, why? -->
+ <checksumPolicy>STRICT_IF_EXISTS</checksumPolicy>
+ <fileTypeValidation>true</fileTypeValidation>
+ <downloadRemoteIndex>false</downloadRemoteIndex>
+ <artifactMaxAge>-1</artifactMaxAge>
+ <metadataMaxAge>1440</metadataMaxAge>
+ <itemMaxAge>1440</itemMaxAge>
+ <autoBlockActive>true</autoBlockActive>
+ </externalConfiguration>
+ </repository>
+ <repository>
+ <id>central</id>
+ <name>Central</name>
+ <providerRole>org.sonatype.nexus.proxy.repository.Repository</providerRole>
+ <providerHint>maven2</providerHint>
+ <localStatus>IN_SERVICE</localStatus>
+ <notFoundCacheActive>true</notFoundCacheActive>
+ <notFoundCacheTTL>1440</notFoundCacheTTL>
+ <userManaged>true</userManaged>
+ <exposed>true</exposed>
+ <browseable>true</browseable>
+ <writePolicy>READ_ONLY</writePolicy>
+ <indexable>true</indexable>
+ <searchable>true</searchable>
+ <localStorage>
+ <provider>file</provider>
+ </localStorage>
+ <remoteStorage>
+ <url>https://repo1.maven.org/maven2/</url>
+ </remoteStorage>
+ <externalConfiguration>
+ <proxyMode>ALLOW</proxyMode>
+ <artifactMaxAge>-1</artifactMaxAge>
+ <itemMaxAge>1440</itemMaxAge>
+ <cleanseRepositoryMetadata>false</cleanseRepositoryMetadata>
+ <downloadRemoteIndex>false</downloadRemoteIndex>
+ <checksumPolicy>WARN</checksumPolicy>
+ <repositoryPolicy>RELEASE</repositoryPolicy>
+ </externalConfiguration>
+ </repository>
+ <repository>
+ <id>apache-snapshots</id>
+ <name>Apache Snapshots</name>
+ <providerRole>org.sonatype.nexus.proxy.repository.Repository</providerRole>
+ <providerHint>maven2</providerHint>
+ <localStatus>IN_SERVICE</localStatus>
+ <notFoundCacheActive>true</notFoundCacheActive>
+ <notFoundCacheTTL>1440</notFoundCacheTTL>
+ <userManaged>true</userManaged>
+ <exposed>true</exposed>
+ <browseable>true</browseable>
+ <writePolicy>READ_ONLY</writePolicy>
+ <indexable>true</indexable>
+ <searchable>true</searchable>
+ <localStorage>
+ <provider>file</provider>
+ </localStorage>
+ <remoteStorage>
+ <url>https://repository.apache.org/snapshots/</url>
+ </remoteStorage>
+ <externalConfiguration>
+ <proxyMode>ALLOW</proxyMode>
+ <artifactMaxAge>1440</artifactMaxAge>
+ <itemMaxAge>1440</itemMaxAge>
+ <cleanseRepositoryMetadata>false</cleanseRepositoryMetadata>
+ <downloadRemoteIndex>false</downloadRemoteIndex>
+ <checksumPolicy>WARN</checksumPolicy>
+ <repositoryPolicy>SNAPSHOT</repositoryPolicy>
+ </externalConfiguration>
+ </repository>
+ <repository>
+ <id>releases</id>
+ <name>Releases</name>
+ <providerRole>org.sonatype.nexus.proxy.repository.Repository</providerRole>
+ <providerHint>maven2</providerHint>
+ <localStatus>IN_SERVICE</localStatus>
+ <notFoundCacheTTL>1440</notFoundCacheTTL>
+ <userManaged>true</userManaged>
+ <exposed>true</exposed>
+ <browseable>true</browseable>
+ <writePolicy>ALLOW_WRITE_ONCE</writePolicy>
+ <indexable>true</indexable>
+ <searchable>true</searchable>
+ <localStorage>
+ <provider>file</provider>
+ </localStorage>
+ <externalConfiguration>
+ <proxyMode>ALLOW</proxyMode>
+ <artifactMaxAge>-1</artifactMaxAge>
+ <itemMaxAge>1440</itemMaxAge>
+ <cleanseRepositoryMetadata>false</cleanseRepositoryMetadata>
+ <downloadRemoteIndex>false</downloadRemoteIndex>
+ <checksumPolicy>WARN</checksumPolicy>
+ <repositoryPolicy>RELEASE</repositoryPolicy>
+ </externalConfiguration>
+ </repository>
+ <repository>
+ <id>snapshots</id>
+ <name>Snapshots</name>
+ <providerRole>org.sonatype.nexus.proxy.repository.Repository</providerRole>
+ <providerHint>maven2</providerHint>
+ <localStatus>IN_SERVICE</localStatus>
+ <notFoundCacheTTL>1440</notFoundCacheTTL>
+ <userManaged>true</userManaged>
+ <exposed>true</exposed>
+ <browseable>true</browseable>
+ <writePolicy>ALLOW_WRITE</writePolicy>
+ <indexable>true</indexable>
+ <searchable>true</searchable>
+ <localStorage>
+ <provider>file</provider>
+ </localStorage>
+ <externalConfiguration>
+ <proxyMode>ALLOW</proxyMode>
+ <artifactMaxAge>1440</artifactMaxAge>
+ <itemMaxAge>1440</itemMaxAge>
+ <cleanseRepositoryMetadata>false</cleanseRepositoryMetadata>
+ <downloadRemoteIndex>false</downloadRemoteIndex>
+ <checksumPolicy>WARN</checksumPolicy>
+ <repositoryPolicy>SNAPSHOT</repositoryPolicy>
+ </externalConfiguration>
+ </repository>
+ <repository>
+ <id>thirdparty</id>
+ <name>3rd party</name>
+ <providerRole>org.sonatype.nexus.proxy.repository.Repository</providerRole>
+ <providerHint>maven2</providerHint>
+ <localStatus>IN_SERVICE</localStatus>
+ <notFoundCacheTTL>1440</notFoundCacheTTL>
+ <userManaged>true</userManaged>
+ <exposed>true</exposed>
+ <browseable>true</browseable>
+ <writePolicy>ALLOW_WRITE_ONCE</writePolicy>
+ <indexable>true</indexable>
+ <searchable>true</searchable>
+ <localStorage>
+ <provider>file</provider>
+ </localStorage>
+ <externalConfiguration>
+ <proxyMode>ALLOW</proxyMode>
+ <artifactMaxAge>-1</artifactMaxAge>
+ <itemMaxAge>1440</itemMaxAge>
+ <cleanseRepositoryMetadata>false</cleanseRepositoryMetadata>
+ <downloadRemoteIndex>false</downloadRemoteIndex>
+ <checksumPolicy>WARN</checksumPolicy>
+ <repositoryPolicy>RELEASE</repositoryPolicy>
+ </externalConfiguration>
+ </repository>
+ <repository>
+ <id>central-m1</id>
+ <name>Central M1 shadow</name>
+ <providerRole>org.sonatype.nexus.proxy.repository.ShadowRepository</providerRole>
+ <providerHint>m2-m1-shadow</providerHint>
+ <localStatus>IN_SERVICE</localStatus>
+ <notFoundCacheTTL>15</notFoundCacheTTL>
+ <userManaged>true</userManaged>
+ <exposed>true</exposed>
+ <browseable>true</browseable>
+ <writePolicy>READ_ONLY</writePolicy>
+ <localStorage>
+ <provider>file</provider>
+ </localStorage>
+ <externalConfiguration>
+ <masterRepositoryId>central</masterRepositoryId>
+ <syncAtStartup>false</syncAtStartup>
+ </externalConfiguration>
+ </repository>
+ <repository>
+ <id>public</id>
+ <name>Public Repositories</name>
+ <providerRole>org.sonatype.nexus.proxy.repository.GroupRepository</providerRole>
+ <providerHint>maven2</providerHint>
+ <localStatus>IN_SERVICE</localStatus>
+ <notFoundCacheTTL>15</notFoundCacheTTL>
+ <userManaged>true</userManaged>
+ <exposed>true</exposed>
+ <browseable>true</browseable>
+ <writePolicy>READ_ONLY</writePolicy>
+ <indexable>true</indexable>
+ <localStorage>
+ <provider>file</provider>
+ </localStorage>
+ <externalConfiguration>
+ <mergeMetadata>true</mergeMetadata>
+ <memberRepositories>
+ <memberRepository>releases</memberRepository>
+ <memberRepository>snapshots</memberRepository>
+ <memberRepository>thirdparty</memberRepository>
+ <memberRepository>central</memberRepository>
+ </memberRepositories>
+ </externalConfiguration>
+ </repository>
+ </repositories>
+ <repositoryGrouping>
+ <pathMappings>
+ <pathMapping>
+ <id>inhouse-stuff</id>
+ <groupId>*</groupId>
+ <routeType>inclusive</routeType>
+ <routePatterns>
+ <routePattern>^/(com|org)/somecompany/.*</routePattern>
+ </routePatterns>
+ <repositories>
+ <repository>snapshots</repository>
+ <repository>releases</repository>
+ </repositories>
+ </pathMapping>
+ <pathMapping>
+ <id>apache-stuff</id>
+ <groupId>*</groupId>
+ <routeType>exclusive</routeType>
+ <routePatterns>
+ <routePattern>^/org/some-oss/.*</routePattern>
+ </routePatterns>
+ <repositories>
+ <repository>releases</repository>
+ <repository>snapshots</repository>
+ </repositories>
+ </pathMapping>
+ </pathMappings>
+ </repositoryGrouping>
+ <repositoryTargets>
+ <repositoryTarget>
+ <id>1</id>
+ <name>All (Maven2)</name>
+ <contentClass>maven2</contentClass>
+ <patterns>
+ <pattern>.*</pattern>
+ </patterns>
+ </repositoryTarget>
+ <repositoryTarget>
+ <id>2</id>
+ <name>All (Maven1)</name>
+ <contentClass>maven1</contentClass>
+ <patterns>
+ <pattern>.*</pattern>
+ </patterns>
+ </repositoryTarget>
+ <repositoryTarget>
+ <id>3</id>
+ <name>All but sources (Maven2)</name>
+ <contentClass>maven2</contentClass>
+ <patterns>
+ <pattern>(?!.*-sources.*).*</pattern>
+ </patterns>
+ </repositoryTarget>
+ <repositoryTarget>
+ <id>4</id>
+ <name>All Metadata (Maven2)</name>
+ <contentClass>maven2</contentClass>
+ <patterns>
+ <pattern>.*maven-metadata\.xml.*</pattern>
+ </patterns>
+ </repositoryTarget>
+ <repositoryTarget>
+ <id>any</id>
+ <name>All (Any Repository)</name>
+ <contentClass>any</contentClass>
+ <patterns>
+ <pattern>.*</pattern>
+ </patterns>
+ </repositoryTarget>
+ <repositoryTarget>
+ <id>site</id>
+ <name>All (site)</name>
+ <contentClass>site</contentClass>
+ <patterns>
+ <pattern>.*</pattern>
+ </patterns>
+ </repositoryTarget>
+ <repositoryTarget>
+ <id>npm</id>
+ <name>All (npm)</name>
+ <contentClass>npm</contentClass>
+ <patterns>
+ <pattern>.*</pattern>
+ </patterns>
+ </repositoryTarget>
+ <repositoryTarget>
+ <id>nuget</id>
+ <name>All (nuget)</name>
+ <contentClass>nuget</contentClass>
+ <patterns>
+ <pattern>.*</pattern>
+ </patterns>
+ </repositoryTarget>
+ <repositoryTarget>
+ <id>rubygems</id>
+ <name>All (rubygems)</name>
+ <contentClass>rubygems</contentClass>
+ <patterns>
+ <pattern>.*</pattern>
+ </patterns>
+ </repositoryTarget>
+ </repositoryTargets>
+ <smtpConfiguration>
+ <hostname>smtp-host</hostname>
+ <port>25</port>
+ <username>smtp-username</username>
+ <password>{jyU2gDFaNz8HQ4ybBAIdtJ6KL+YB08GXQs7vLPnia3o=}</password>
+ <systemEmailAddress>system@nexus.org</systemEmailAddress>
+ </smtpConfiguration>
+ <notification />
+</nexusConfiguration>
diff --git a/taskcluster/scripts/misc/are-we-esmified-yet.py b/taskcluster/scripts/misc/are-we-esmified-yet.py
new file mode 100644
index 0000000000..c16658baee
--- /dev/null
+++ b/taskcluster/scripts/misc/are-we-esmified-yet.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python3
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import pathlib
+import re
+import subprocess
+import sys
+
+TBPL_FAILURE = 2
+
+excluded_files = [
+ # Testcase for loader.
+ "js/xpconnect/tests/chrome/file_expandosharing.jsm",
+ "js/xpconnect/tests/unit/environment_script.js",
+ "js/xpconnect/tests/unit/bogus_element_type.jsm",
+ "js/xpconnect/tests/unit/bogus_exports_type.jsm",
+ "js/xpconnect/tests/unit/envChain.jsm",
+ "js/xpconnect/tests/unit/envChain_subscript.jsm",
+ "js/xpconnect/tests/unit/environment_checkscript.jsm",
+ "js/xpconnect/tests/unit/environment_loadscript.jsm",
+ "js/xpconnect/tests/unit/import_stack.jsm",
+ "js/xpconnect/tests/unit/importer.jsm",
+ "js/xpconnect/tests/unit/jsm_loaded-1.jsm",
+ "js/xpconnect/tests/unit/jsm_loaded-2.jsm",
+ "js/xpconnect/tests/unit/jsm_loaded-3.jsm",
+ "js/xpconnect/tests/unit/not-esmified-not-exported.jsm",
+ "js/xpconnect/tests/unit/recursive_importA.jsm",
+ "js/xpconnect/tests/unit/recursive_importB.jsm",
+ "js/xpconnect/tests/unit/ReturnCodeChild.jsm",
+ "js/xpconnect/tests/unit/syntax_error.jsm",
+ "js/xpconnect/tests/unit/TestBlob.jsm",
+ "js/xpconnect/tests/unit/TestFile.jsm",
+ "js/xpconnect/tests/unit/uninitialized_lexical.jsm",
+ "dom/url/tests/file_url.jsm",
+ "dom/url/tests/file_worker_url.jsm",
+ "dom/url/tests/test_bug883784.jsm",
+ "dom/workers/test/WorkerTest.jsm",
+ "dom/encoding/test/file_stringencoding.jsm",
+ "remote/shared/messagehandler/test/browser/resources/modules/root/invalid.jsm",
+ "toolkit/actors/TestProcessActorChild.jsm",
+ "toolkit/actors/TestProcessActorParent.jsm",
+ "toolkit/actors/TestWindowChild.jsm",
+ "toolkit/actors/TestWindowParent.jsm",
+ # Testcase for build system.
+ "python/mozbuild/mozbuild/test/backend/data/build/bar.jsm",
+ "python/mozbuild/mozbuild/test/backend/data/build/baz.jsm",
+ "python/mozbuild/mozbuild/test/backend/data/build/foo.jsm",
+ "python/mozbuild/mozbuild/test/backend/data/build/qux.jsm",
+ # Testcase for test harness.
+ "testing/mochitest/tests/Harness_sanity/ImportTesting.jsm",
+ # EXPORTED_SYMBOLS inside testcase.
+ "tools/lint/eslint/eslint-plugin-mozilla/tests/mark-exported-symbols-as-used.js",
+]
+
+if pathlib.Path(".hg").exists():
+ mode = "hg"
+elif pathlib.Path(".git").exists():
+ mode = "git"
+else:
+ print(
+ "Error: This script needs to be run inside mozilla-central checkout "
+ "of either mercurial or git.",
+ file=sys.stderr,
+ )
+ sys.exit(TBPL_FAILURE)
+
+
+def new_files_struct():
+ return {
+ "jsm": [],
+ "esm": [],
+ "subdir": {},
+ }
+
+
+def put_file(files, kind, path):
+ """Put a path into files tree structure."""
+
+ if str(path) in excluded_files:
+ return
+
+ name = path.name
+
+ current_files = files
+ for part in path.parent.parts:
+ if part not in current_files["subdir"]:
+ current_files["subdir"][part] = new_files_struct()
+ current_files = current_files["subdir"][part]
+
+ current_files[kind].append(name)
+
+
+def run(cmd):
+ """Run command and return output as lines, excluding empty line."""
+ lines = subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode()
+ return filter(lambda x: x != "", lines.split("\n"))
+
+
+def collect_jsm(files):
+ """Collect JSM files."""
+ kind = "jsm"
+
+ # jsm files
+ if mode == "hg":
+ cmd = ["hg", "files", "set:glob:**/*.jsm"]
+ else:
+ cmd = ["git", "ls-files", "*.jsm"]
+ for line in run(cmd):
+ put_file(files, kind, pathlib.Path(line))
+
+ # js files with EXPORTED_SYMBOLS
+ if mode == "hg":
+ cmd = ["hg", "files", "set:grep('EXPORTED_SYMBOLS = \[') and glob:**/*.js"]
+ for line in run(cmd):
+ put_file(files, kind, pathlib.Path(line))
+ else:
+ handled = {}
+ cmd = ["git", "grep", "EXPORTED_SYMBOLS = \[", "*.js"]
+ for line in run(cmd):
+ m = re.search("^([^:]+):", line)
+ if not m:
+ continue
+ path = m.group(1)
+ if path in handled:
+ continue
+ handled[path] = True
+ put_file(files, kind, pathlib.Path(path))
+
+
+def collect_esm(files):
+ """Collect system ESM files."""
+ kind = "esm"
+
+ # sys.mjs files
+ if mode == "hg":
+ cmd = ["hg", "files", "set:glob:**/*.sys.mjs"]
+ else:
+ cmd = ["git", "ls-files", "*.sys.mjs"]
+ for line in run(cmd):
+ put_file(files, kind, pathlib.Path(line))
+
+
+def to_stat(files):
+ """Convert files tree into status tree."""
+ jsm = len(files["jsm"])
+ esm = len(files["esm"])
+ subdir = {}
+
+ for key, sub_files in files["subdir"].items():
+ sub_stat = to_stat(sub_files)
+
+ subdir[key] = sub_stat
+ jsm += sub_stat["jsm"]
+ esm += sub_stat["esm"]
+
+ stat = {
+ "jsm": jsm,
+ "esm": esm,
+ }
+ if len(subdir):
+ stat["subdir"] = subdir
+
+ return stat
+
+
+if mode == "hg":
+ cmd = ["hg", "parent", "--template", "{node}"]
+ commit_hash = list(run(cmd))[0]
+
+ cmd = ["hg", "parent", "--template", "{date|shortdate}"]
+ date = list(run(cmd))[0]
+else:
+ cmd = ["git", "log", "-1", "--pretty=%H"]
+ git_hash = list(run(cmd))[0]
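+    # git-cinnabar maps the git commit back to the corresponding Mercurial
+    # changeset so the reported hash matches the hg case above.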
+ cmd = ["git", "cinnabar", "git2hg", git_hash]
+ commit_hash = list(run(cmd))[0]
+
+ cmd = ["git", "log", "-1", "--pretty=%cs"]
+ date = list(run(cmd))[0]
+
+files = new_files_struct()
+collect_jsm(files)
+collect_esm(files)
+
+stat = to_stat(files)
+stat["hash"] = commit_hash
+stat["date"] = date
+
+print(json.dumps(stat, indent=2))
diff --git a/taskcluster/scripts/misc/browsertime.sh b/taskcluster/scripts/misc/browsertime.sh
new file mode 100755
index 0000000000..27ef83c095
--- /dev/null
+++ b/taskcluster/scripts/misc/browsertime.sh
@@ -0,0 +1,19 @@
+#!/bin/bash -vex
+
+set -x -e
+
+echo "running as" $(id)
+
+set -v
+
+cd $GECKO_PATH
+
+export PATH=$PATH:$MOZ_FETCHES_DIR/node/bin
+
+./mach browsertime --setup
+
+# We have tools/browsertime/{package.json,node_modules,...} and want
+# browsertime/{package.json,node_modules}.
+mkdir -p /builds/worker/artifacts
+cd tools
+tar caf /builds/worker/artifacts/browsertime.tar.zst browsertime
diff --git a/taskcluster/scripts/misc/build-afl.sh b/taskcluster/scripts/misc/build-afl.sh
new file mode 100755
index 0000000000..371000aa2a
--- /dev/null
+++ b/taskcluster/scripts/misc/build-afl.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+set -e -x
+
+artifact=$(basename "$TOOLCHAIN_ARTIFACT")
+dir="${artifact%.tar.*}"
+scripts="$(realpath "${0%/*}")"
+
+cd "$MOZ_FETCHES_DIR/AFL"
+patch -p1 -i "$scripts/afl-nyx.patch"
+make afl-showmap \
+ CC="$MOZ_FETCHES_DIR/clang/bin/clang"
+# -O3 -funroll-loops as per llvm_mode/Makefile
+CFLAGS="-O3 -funroll-loops --sysroot $MOZ_FETCHES_DIR/sysroot" \
+CXXFLAGS="-O3 -funroll-loops --sysroot $MOZ_FETCHES_DIR/sysroot" \
+make -C llvm_mode install \
+ DESTDIR="../$dir" \
+ PREFIX=/ \
+ LLVM_CONFIG="$MOZ_FETCHES_DIR/clang/bin/llvm-config"
+
+tar caf "$artifact" "$dir"
+
+mkdir -p "$UPLOAD_DIR"
+mv "$artifact" "$UPLOAD_DIR"
diff --git a/taskcluster/scripts/misc/build-binutils-linux.sh b/taskcluster/scripts/misc/build-binutils-linux.sh
new file mode 100755
index 0000000000..b4e1d8bf6d
--- /dev/null
+++ b/taskcluster/scripts/misc/build-binutils-linux.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building binutils for Linux.
+
+cd $GECKO_PATH
+
+PATH=$MOZ_FETCHES_DIR/gcc/bin:$PATH
+
+build/unix/build-binutils/build-binutils.sh $MOZ_FETCHES_DIR
+
+# Put a tarball in the artifacts dir
+mkdir -p $UPLOAD_DIR
+cp $MOZ_FETCHES_DIR/binutils.tar.* $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-breakpad-injector.sh b/taskcluster/scripts/misc/build-breakpad-injector.sh
new file mode 100755
index 0000000000..ecd53e13d1
--- /dev/null
+++ b/taskcluster/scripts/misc/build-breakpad-injector.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building libbreakpadinjector.so, currently for linux only
+
+COMPRESS_EXT=zst
+
+cd $GECKO_PATH
+
+export MOZ_OBJDIR=obj-injector
+
+echo ac_add_options --enable-project=tools/crashreporter/injector > .mozconfig
+echo ac_add_options --enable-linker=lld >> .mozconfig
+
+INJECTOR=libbreakpadinjector.so
+
+TOOLCHAINS="rustc clang"
+
+for t in $TOOLCHAINS; do
+ PATH="$MOZ_FETCHES_DIR/$t/bin:$PATH"
+done
+
+./mach build -v
+
+mkdir injector
+cp $MOZ_OBJDIR/dist/bin/$INJECTOR injector/
+
+tar -acf injector.tar.$COMPRESS_EXT injector/
+mkdir -p $UPLOAD_DIR
+cp injector.tar.$COMPRESS_EXT $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-cctools-port.sh b/taskcluster/scripts/misc/build-cctools-port.sh
new file mode 100755
index 0000000000..d0ea6b78f4
--- /dev/null
+++ b/taskcluster/scripts/misc/build-cctools-port.sh
@@ -0,0 +1,100 @@
+#!/bin/bash
+
+# cctools sometimes needs to be rebuilt when clang is modified.
+# Until bug 1471905 is addressed, increase the following number
+# when a forced rebuild of cctools is necessary: 1
+
+set -x -e -v
+
+# This script is for building cctools (Apple's binutils) for Linux using
+# cctools-port (https://github.com/tpoechtrager/cctools-port).
+WORKSPACE=$HOME/workspace
+
+# Set some crosstools-port and libtapi directories
+CROSSTOOLS_SOURCE_DIR=$MOZ_FETCHES_DIR/cctools-port
+CROSSTOOLS_CCTOOLS_DIR=$CROSSTOOLS_SOURCE_DIR/cctools
+CROSSTOOLS_BUILD_DIR=$WORKSPACE/cctools
+LIBTAPI_SOURCE_DIR=$MOZ_FETCHES_DIR/apple-libtapi
+LIBTAPI_BUILD_DIR=$WORKSPACE/libtapi-build
+LDID_SOURCE_DIR=$MOZ_FETCHES_DIR/ldid
+CLANG_DIR=$MOZ_FETCHES_DIR/clang
+
+# Create our directories
+mkdir -p $CROSSTOOLS_BUILD_DIR $LIBTAPI_BUILD_DIR
+
+cd $GECKO_PATH
+
+# Common setup for libtapi and cctools
+export CC=$CLANG_DIR/bin/clang
+export CXX=$CLANG_DIR/bin/clang++
+# We also need this LD_LIBRARY_PATH at build time, since tapi builds bits of
+# clang build tools, and then executes those tools.
+export LD_LIBRARY_PATH=$CLANG_DIR/lib
+
+# Build libtapi; the included build.sh is not sufficient for our purposes.
+cd $LIBTAPI_BUILD_DIR
+
+# Values taken from build.sh
+TAPI_REPOSITORY=tapi-1000.10.8
+TAPI_VERSION=10.0.0
+
+INCLUDE_FIX="-I $LIBTAPI_SOURCE_DIR/src/llvm/projects/clang/include -I $PWD/projects/clang/include"
+
+cmake $LIBTAPI_SOURCE_DIR/src/llvm \
+ -GNinja \
+ -DCMAKE_CXX_FLAGS="$INCLUDE_FIX" \
+ -DLLVM_INCLUDE_TESTS=OFF \
+ -DCMAKE_BUILD_TYPE=RELEASE \
+ -DCMAKE_INSTALL_PREFIX=$CROSSTOOLS_BUILD_DIR \
+ -DCMAKE_SYSROOT=$MOZ_FETCHES_DIR/sysroot \
+ -DCMAKE_EXE_LINKER_FLAGS=-fuse-ld=lld \
+ -DCMAKE_SHARED_LINKER_FLAGS=-fuse-ld=lld \
+ -DLLVM_TARGETS_TO_BUILD="X86;ARM;AArch64" \
+ -DTAPI_REPOSITORY_STRING=$TAPI_REPOSITORY \
+ -DTAPI_FULL_VERSION=$TAPI_VERSION
+
+ninja clangBasic -v
+ninja libtapi install-libtapi install-tapi-headers -v
+
+# Setup LDFLAGS late so run-at-build-time tools in the basic clang build don't
+# pick up the possibly-incompatible libstdc++ from clang.
+# Also set it up such that loading libtapi doesn't require a LD_LIBRARY_PATH.
+# (this requires two dollars and extra backslashing because it's used verbatim
+# via a Makefile)
+export LDFLAGS="-fuse-ld=lld -lpthread -Wl,-rpath-link,$MOZ_FETCHES_DIR/sysroot/lib/x86_64-linux-gnu -Wl,-rpath-link,$MOZ_FETCHES_DIR/sysroot/usr/lib/x86_64-linux-gnu -Wl,-rpath,\\\$\$ORIGIN/../lib,-rpath,\\\$\$ORIGIN/../../clang/lib"
+
+export CC="$CC --sysroot=$MOZ_FETCHES_DIR/sysroot"
+export CXX="$CXX --sysroot=$MOZ_FETCHES_DIR/sysroot"
+
+# Configure crosstools-port
+cd $CROSSTOOLS_CCTOOLS_DIR
+# Force re-libtoolization to overwrite files with the new libtool bits.
+perl -pi -e 's/(LIBTOOLIZE -c)/\1 -f/' autogen.sh
+./autogen.sh
+./configure \
+ --prefix=$CROSSTOOLS_BUILD_DIR \
+ --target=x86_64-apple-darwin \
+ --with-llvm-config=$CLANG_DIR/bin/llvm-config \
+ --enable-lto-support \
+ --enable-tapi-support \
+ --with-libtapi=$CROSSTOOLS_BUILD_DIR
+
+# Build cctools
+make -j `nproc --all` install
+
+# Build ldid
+cd $LDID_SOURCE_DIR
+# The crypto library in the sysroot cannot be linked into a PIE executable, so we use -no-pie
+make -j `nproc --all` install INSTALLPREFIX=$CROSSTOOLS_BUILD_DIR LDFLAGS="-no-pie -Wl,-Bstatic -lcrypto -Wl,-Bdynamic -ldl -pthread"
+
+strip $CROSSTOOLS_BUILD_DIR/bin/*
+# various build scripts based on cmake want to find `lipo` without a prefix
+cp $CROSSTOOLS_BUILD_DIR/bin/x86_64-apple-darwin-lipo $CROSSTOOLS_BUILD_DIR/bin/lipo
+
+(cd $CROSSTOOLS_BUILD_DIR/bin/; for i in x86_64-apple-darwin-*; do
+ ln $i aarch64${i#x86_64}
+done)
+
+# Put a tarball in the artifacts dir
+mkdir -p $UPLOAD_DIR
+tar caf $UPLOAD_DIR/cctools.tar.zst -C $CROSSTOOLS_BUILD_DIR/.. `basename $CROSSTOOLS_BUILD_DIR`
diff --git a/taskcluster/scripts/misc/build-clang-mingw.sh b/taskcluster/scripts/misc/build-clang-mingw.sh
new file mode 100755
index 0000000000..2e8543b38e
--- /dev/null
+++ b/taskcluster/scripts/misc/build-clang-mingw.sh
@@ -0,0 +1,254 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building a mingw-clang toolchain for use on Linux.
+
+if [[ $# -eq 0 ]]; then
+ echo "Provide either x86 or x64 to specify a toolchain."
+ exit 1;
+elif [ "$1" == "x86" ]; then
+ machine="i686"
+ compiler_rt_machine="i386"
+ crt_flags="--enable-lib32 --disable-lib64"
+ WRAPPER_FLAGS=""
+elif [ "$1" == "x64" ]; then
+ machine="x86_64"
+ compiler_rt_machine="x86_64"
+ crt_flags="--disable-lib32 --enable-lib64"
+ WRAPPER_FLAGS=""
+else
+ echo "Provide either x86 or x64 to specify a toolchain."
+ exit 1;
+fi
+
+TOOLCHAIN_DIR=$MOZ_FETCHES_DIR/llvm-project
+INSTALL_DIR=$MOZ_FETCHES_DIR/clang
+CROSS_PREFIX_DIR=$INSTALL_DIR/$machine-w64-mingw32
+
+make_flags="-j$(nproc)"
+
+if [ -d "$MOZ_FETCHES_DIR/binutils/bin" ]; then
+ export PATH="$MOZ_FETCHES_DIR/binutils/bin:$PATH"
+fi
+
+# This is the default value of _WIN32_WINNT. Gecko's configure script sets it explicitly,
+# so it is not used to build Gecko itself. We default to 0x601, which is Windows 7.
+default_win32_winnt=0x601
+
+cd $GECKO_PATH
+
+patch_file2="$(pwd)/taskcluster/scripts/misc/mingw-dwrite_3.patch"
+patch_file3="$(pwd)/taskcluster/scripts/misc/mingw-unknown.patch"
+patch_file4="$(pwd)/taskcluster/scripts/misc/mingw-enum.patch"
+patch_file5="$(pwd)/taskcluster/scripts/misc/mingw-widl.patch"
+patch_file6="$(pwd)/taskcluster/scripts/misc/mingw-dispatchqueue.patch"
+patch_file10="$(pwd)/taskcluster/scripts/misc/mingw-ts_sd.patch"
+patch_file11="$(pwd)/taskcluster/scripts/misc/mingw-composition.patch"
+
+prepare() {
+ pushd $MOZ_FETCHES_DIR/mingw-w64
+ patch -p1 <$patch_file2
+ patch -p1 <$patch_file3
+ patch -p1 <$patch_file4
+ patch -p1 <$patch_file5
+ patch -p1 <$patch_file6
+ patch -p1 <$patch_file10
+ patch -p1 <$patch_file11
+ popd
+}
+
+install_wrappers() {
+ pushd $INSTALL_DIR/bin
+
+ compiler_flags="--sysroot \$DIR/../$machine-w64-mingw32 -rtlib=compiler-rt -stdlib=libc++ -fuse-ld=lld $WRAPPER_FLAGS -fuse-cxa-atexit -Qunused-arguments"
+
+ cat <<EOF >$machine-w64-mingw32-clang
+#!/bin/sh
+DIR="\$(cd "\$(dirname "\$0")" && pwd)"
+\$DIR/clang -target $machine-w64-mingw32 $compiler_flags "\$@"
+EOF
+ chmod +x $machine-w64-mingw32-clang
+
+ cat <<EOF >$machine-w64-mingw32-clang++
+#!/bin/sh
+DIR="\$(cd "\$(dirname "\$0")" && pwd)"
+\$DIR/clang -target $machine-w64-mingw32 --driver-mode=g++ $compiler_flags "\$@"
+EOF
+ chmod +x $machine-w64-mingw32-clang++
+
+ CC="$machine-w64-mingw32-clang"
+ CXX="$machine-w64-mingw32-clang++"
+
+ popd
+}
+
+build_mingw() {
+ mkdir mingw-w64-headers
+ pushd mingw-w64-headers
+ $MOZ_FETCHES_DIR/mingw-w64/mingw-w64-headers/configure \
+ --host=$machine-w64-mingw32 \
+ --enable-sdk=all \
+ --enable-idl \
+ --with-default-msvcrt=ucrt \
+ --with-default-win32-winnt=$default_win32_winnt \
+ --prefix=$CROSS_PREFIX_DIR
+ make $make_flags install
+ popd
+
+ mkdir mingw-w64-crt
+ pushd mingw-w64-crt
+ $MOZ_FETCHES_DIR/mingw-w64/mingw-w64-crt/configure \
+ --host=$machine-w64-mingw32 \
+ $crt_flags \
+ --with-default-msvcrt=ucrt \
+ CC="$CC" \
+ AR=llvm-ar \
+ RANLIB=llvm-ranlib \
+ DLLTOOL=llvm-dlltool \
+ --prefix=$CROSS_PREFIX_DIR
+ make $make_flags
+ make $make_flags install
+ popd
+
+ mkdir widl
+ pushd widl
+ $MOZ_FETCHES_DIR/mingw-w64/mingw-w64-tools/widl/configure --target=$machine-w64-mingw32 --prefix=$INSTALL_DIR
+ make $make_flags
+ make $make_flags install
+ popd
+}
+
+build_compiler_rt() {
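+  # `$CC --print-libgcc-file-name -rtlib=compiler-rt` prints a path like
+  # .../lib/clang/<version>/lib/<os>/libclang_rt.builtins-<arch>.a, so three
+  # dirname calls plus basename extract <version>.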
+ CLANG_VERSION=$(basename $(dirname $(dirname $(dirname $($CC --print-libgcc-file-name -rtlib=compiler-rt)))))
+ mkdir compiler-rt
+ pushd compiler-rt
+ cmake \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_C_COMPILER=$CC \
+ -DCMAKE_SYSTEM_NAME=Windows \
+ -DCMAKE_AR=$INSTALL_DIR/bin/llvm-ar \
+ -DCMAKE_RANLIB=$INSTALL_DIR/bin/llvm-ranlib \
+ -DCMAKE_C_COMPILER_WORKS=1 \
+ -DCMAKE_C_COMPILER_TARGET=$compiler_rt_machine-windows-gnu \
+ -DCOMPILER_RT_DEFAULT_TARGET_ONLY=TRUE \
+ $TOOLCHAIN_DIR/compiler-rt/lib/builtins
+ make $make_flags
+ mkdir -p $INSTALL_DIR/lib/clang/$CLANG_VERSION/lib/windows
+ cp lib/windows/libclang_rt.builtins-$compiler_rt_machine.a $INSTALL_DIR/lib/clang/$CLANG_VERSION/lib/windows/
+ popd
+}
+
+build_runtimes() {
+ # Below, we specify -g -gcodeview to build static libraries with debug information.
+ # Because we're not distributing these builds, this is fine. If one were to distribute
+  # the builds, perhaps one would want to make those flags conditional or
+  # investigate other options.
+ DEBUG_FLAGS="-g -gcodeview"
+
+ # First configure libcxx
+ mkdir runtimes
+ pushd runtimes
+ cmake \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_INSTALL_PREFIX=$CROSS_PREFIX_DIR \
+ -DCMAKE_C_COMPILER=$CC \
+ -DCMAKE_CXX_COMPILER=$CXX \
+ -DCMAKE_CROSSCOMPILING=TRUE \
+ -DCMAKE_SYSTEM_NAME=Windows \
+ -DCMAKE_C_COMPILER_WORKS=TRUE \
+ -DCMAKE_CXX_COMPILER_WORKS=TRUE \
+ -DLLVM_COMPILER_CHECKED=TRUE \
+ -DCMAKE_AR=$INSTALL_DIR/bin/llvm-ar \
+ -DCMAKE_RANLIB=$INSTALL_DIR/bin/llvm-ranlib \
+ -DCMAKE_CXX_FLAGS="${DEBUG_FLAGS} -D_LIBCXXABI_DISABLE_VISIBILITY_ANNOTATIONS" \
+ -DLIBCXX_USE_COMPILER_RT=ON \
+ -DLIBCXX_INSTALL_HEADERS=ON \
+ -DLIBCXX_ENABLE_EXCEPTIONS=ON \
+ -DLIBCXX_ENABLE_THREADS=ON \
+ -DLIBCXX_HAS_WIN32_THREAD_API=ON \
+ -DLIBCXX_ENABLE_MONOTONIC_CLOCK=ON \
+ -DLIBCXX_ENABLE_SHARED=OFF \
+ -DLIBCXX_SUPPORTS_STD_EQ_CXX11_FLAG=TRUE \
+ -DLIBCXX_HAVE_CXX_ATOMICS_WITHOUT_LIB=TRUE \
+ -DLIBCXX_ENABLE_EXPERIMENTAL_LIBRARY=OFF \
+ -DLIBCXX_ENABLE_FILESYSTEM=OFF \
+ -DLIBCXX_ENABLE_STATIC_ABI_LIBRARY=TRUE \
+ -DLIBCXX_CXX_ABI=libcxxabi \
+ -DLIBCXXABI_USE_LLVM_UNWINDER=TRUE \
+ -DLIBCXXABI_ENABLE_STATIC_UNWINDER=TRUE \
+ -DLLVM_NO_OLD_LIBSTDCXX=TRUE \
+ -DLIBUNWIND_USE_COMPILER_RT=TRUE \
+ -DLIBUNWIND_ENABLE_THREADS=TRUE \
+ -DLIBUNWIND_ENABLE_SHARED=FALSE \
+ -DLIBUNWIND_ENABLE_CROSS_UNWINDING=FALSE \
+ -DLIBUNWIND_CXX_FLAGS="${DEBUG_FLAGS} -Wno-dll-attribute-on-redeclaration -nostdinc++ -DPSAPI_VERSION=2" \
+ -DLIBUNWIND_C_FLAGS="-Wno-dll-attribute-on-redeclaration" \
+ -DLIBCXXABI_USE_COMPILER_RT=ON \
+ -DLIBCXXABI_ENABLE_EXCEPTIONS=ON \
+ -DLIBCXXABI_ENABLE_THREADS=ON \
+ -DLIBCXXABI_TARGET_TRIPLE=$machine-w64-mingw32 \
+ -DLIBCXXABI_ENABLE_SHARED=OFF \
+ -DLIBCXXABI_CXX_FLAGS="${DEBUG_FLAGS} -D_LIBCPP_HAS_THREAD_API_WIN32" \
+ -DLLVM_ENABLE_RUNTIMES="libcxxabi;libcxx;libunwind" \
+ $TOOLCHAIN_DIR/runtimes
+
+ make $make_flags VERBOSE=1
+ make $make_flags install
+
+ popd
+}
+
+build_libssp() {
+ pushd $MOZ_FETCHES_DIR/gcc-source/
+
+ # Massage the environment for the build-libssp.sh script
+ mkdir -p ./$machine-w64-mingw32/lib
+ cp $MOZ_FETCHES_DIR/llvm-mingw/libssp-Makefile .
+ sed -i 's/set -e/set -x -e -v/' $MOZ_FETCHES_DIR/llvm-mingw/build-libssp.sh
+ sed -i 's/(CROSS)gcc/(CROSS)clang/' libssp-Makefile
+ sed -i 's/\$(CROSS)ar/llvm-ar/' libssp-Makefile
+ OLDPATH=$PATH
+ PATH=$INSTALL_DIR/bin:$PATH
+
+ # Run the script
+ TOOLCHAIN_ARCHS=$machine $MOZ_FETCHES_DIR/llvm-mingw/build-libssp.sh .
+
+ # Grab the artifacts, cleanup
+ cp $MOZ_FETCHES_DIR/gcc-source/$machine-w64-mingw32/lib/{libssp.a,libssp_nonshared.a} $INSTALL_DIR/$machine-w64-mingw32/lib/
+ unset TOOLCHAIN_ARCHS
+ PATH=$OLDPATH
+ popd
+}
+
+build_utils() {
+ pushd $INSTALL_DIR/bin/
+ for prog in ar nm objcopy ranlib readobj strip; do
+ ln -s llvm-$prog $machine-w64-mingw32-$prog
+ done
+ ./clang $MOZ_FETCHES_DIR/llvm-mingw/wrappers/windres-wrapper.c -O2 -Wl,-s -o $machine-w64-mingw32-windres
+ popd
+}
+
+export PATH=$INSTALL_DIR/bin:$PATH
+
+prepare
+
+mkdir $TOOLCHAIN_DIR/build
+pushd $TOOLCHAIN_DIR/build
+
+install_wrappers
+build_mingw
+build_compiler_rt
+build_runtimes
+build_libssp
+build_utils
+
+popd
+
+# Put a tarball in the artifacts dir
+mkdir -p $UPLOAD_DIR
+
+pushd $(dirname $INSTALL_DIR)
+tar caf clangmingw.tar.zst clang
+mv clangmingw.tar.zst $UPLOAD_DIR
+popd
diff --git a/taskcluster/scripts/misc/build-clang-tidy-external.sh b/taskcluster/scripts/misc/build-clang-tidy-external.sh
new file mode 100755
index 0000000000..8647432e7c
--- /dev/null
+++ b/taskcluster/scripts/misc/build-clang-tidy-external.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+set -x
+
+# Delete the external directory
+rm -rf $GECKO_PATH/build/clang-plugin/external/*
+
+# Move external repository into its place
+cp -r $MOZ_FETCHES_DIR/civet.git/* $GECKO_PATH/build/clang-plugin/external
+
+# Call build-clang.sh with this script's first argument (our JSON config)
+$GECKO_PATH/taskcluster/scripts/misc/build-clang.sh $1
diff --git a/taskcluster/scripts/misc/build-clang.sh b/taskcluster/scripts/misc/build-clang.sh
new file mode 100755
index 0000000000..9307774062
--- /dev/null
+++ b/taskcluster/scripts/misc/build-clang.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building clang.
+
+ORIGPWD="$PWD"
+CONFIGS=$(for c; do echo -n " -c $GECKO_PATH/$c"; done)
+
+cd $GECKO_PATH
+
+if [ -d "$MOZ_FETCHES_DIR/binutils/bin" ]; then
+ export PATH="$MOZ_FETCHES_DIR/binutils/bin:$PATH"
+fi
+
+# Make the installed compiler-rt(s) available to clang.
+UPLOAD_DIR= taskcluster/scripts/misc/repack-clang.sh
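+# (UPLOAD_DIR is emptied for this invocation only, presumably so that
+# repack-clang.sh doesn't publish its own tarball from this task.)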
+
+case "$CONFIGS" in
+*macosx64*)
+ # cmake makes decisions based on the output of the mac-only sw_vers, which is
+ # obviously missing when cross-compiling, so create a fake one. The exact
+ # version doesn't really matter: as of writing, cmake checks at most for 10.5.
+ mkdir -p $ORIGPWD/bin
+ echo "#!/bin/sh" > $ORIGPWD/bin/sw_vers
+ echo echo 10.12 >> $ORIGPWD/bin/sw_vers
+ chmod +x $ORIGPWD/bin/sw_vers
+ # these variables are used in build-clang.py
+ export CROSS_SYSROOT=$(ls -d $MOZ_FETCHES_DIR/MacOSX1*.sdk)
+ export PATH=$PATH:$ORIGPWD/bin
+ ;;
+*win64*)
+ case "$(uname -s)" in
+ MINGW*|MSYS*)
+ export UPLOAD_DIR=$ORIGPWD/public/build
+ # Set up all the Visual Studio paths.
+ . taskcluster/scripts/misc/vs-setup.sh
+
+ # LLVM_ENABLE_DIA_SDK is set if the directory "$ENV{VSINSTALLDIR}DIA SDK"
+ # exists.
+ export VSINSTALLDIR="${VSPATH}/"
+
+ export PATH="$(cd $MOZ_FETCHES_DIR/cmake && pwd)/bin:${PATH}"
+ export PATH="$(cd $MOZ_FETCHES_DIR/ninja && pwd)/bin:${PATH}"
+ ;;
+ *)
+ export VSINSTALLDIR="$MOZ_FETCHES_DIR/vs"
+ ;;
+ esac
+ ;;
+*linux64*|*android*)
+ ;;
+*)
+ echo Cannot figure out build configuration for $CONFIGS
+ exit 1
+ ;;
+esac
+
+# gets a bit too verbose here
+set +x
+
+cd $MOZ_FETCHES_DIR/llvm-project
+python3 $GECKO_PATH/build/build-clang/build-clang.py $CONFIGS
+
+set -x
+
+if [ -f clang*.tar.zst ]; then
+ # Put a tarball in the artifacts dir
+ mkdir -p $UPLOAD_DIR
+ cp clang*.tar.zst $UPLOAD_DIR
+fi
+
+. $GECKO_PATH/taskcluster/scripts/misc/vs-cleanup.sh
diff --git a/taskcluster/scripts/misc/build-compiler-rt-wasi.sh b/taskcluster/scripts/misc/build-compiler-rt-wasi.sh
new file mode 100755
index 0000000000..d23e4b1e5a
--- /dev/null
+++ b/taskcluster/scripts/misc/build-compiler-rt-wasi.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+set -x -e -v
+
+artifact=$(basename $TOOLCHAIN_ARTIFACT)
+dir=${artifact%.tar.*}
+
+cd $MOZ_FETCHES_DIR/wasi-sdk
+LLVM_PROJ_DIR=$MOZ_FETCHES_DIR/llvm-project
+
+mkdir -p build/install/wasi
+# The wasi-sdk build system wants to build clang itself. We trick it into
+# thinking it did, and put our own clang where it would have built its own.
+ln -s $MOZ_FETCHES_DIR/clang build/llvm
+touch build/llvm.BUILT
+
+# The wasi-sdk build system wants a clang and an ar binary in
+# build/install/$PREFIX/bin
+ln -s $MOZ_FETCHES_DIR/clang/bin build/install/wasi/bin
+ln -s llvm-ar build/install/wasi/bin/ar
+
+# Build compiler-rt
+make \
+ LLVM_PROJ_DIR=$LLVM_PROJ_DIR \
+ PREFIX=/wasi \
+ build/compiler-rt.BUILT \
+ -j$(nproc)
+
+mkdir -p $dir/lib
+mv build/install/wasi/lib/clang/*/lib/wasi $dir/lib
+tar --zstd -cf $artifact $dir
+mkdir -p $UPLOAD_DIR
+mv $artifact $UPLOAD_DIR/
diff --git a/taskcluster/scripts/misc/build-compiler-rt.sh b/taskcluster/scripts/misc/build-compiler-rt.sh
new file mode 100755
index 0000000000..b5665381d1
--- /dev/null
+++ b/taskcluster/scripts/misc/build-compiler-rt.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+set -e -x
+
+artifact=$(basename $TOOLCHAIN_ARTIFACT)
+dir=${artifact%.tar.*}
+target=${dir#compiler-rt-}
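+# e.g. an artifact named compiler-rt-x86_64-linux-android.tar.zst (name
+# illustrative) yields dir=compiler-rt-x86_64-linux-android and
+# target=x86_64-linux-android.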
+
+case "$target" in
+*-linux-android)
+ EXTRA_CMAKE_FLAGS="
+ -DCOMPILER_RT_BUILD_LIBFUZZER=OFF
+ -DCOMPILER_RT_BUILD_ORC=OFF
+ -DCOMPILER_RT_BUILTINS_HIDE_SYMBOLS=OFF
+ "
+ ;;
+*-apple-darwin)
+ EXTRA_CMAKE_FLAGS="
+ -DCOMPILER_RT_ENABLE_IOS=OFF
+ -DCOMPILER_RT_ENABLE_WATCHOS=OFF
+ -DCOMPILER_RT_ENABLE_TVOS=OFF
+ "
+ ;;
+*-windows-msvc)
+ EXTRA_CMAKE_FLAGS="
+ -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded
+ "
+ ;;
+esac
+
+EXTRA_CMAKE_FLAGS="
+ $EXTRA_CMAKE_FLAGS
+ -DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON
+ -DLLVM_ENABLE_PER_TARGET_RUNTIME_DIR=OFF
+"
+
+export EXTRA_CMAKE_FLAGS
+
+$(dirname $0)/build-llvm-common.sh compiler-rt install $target "$@"
diff --git a/taskcluster/scripts/misc/build-cpython.sh b/taskcluster/scripts/misc/build-cpython.sh
new file mode 100755
index 0000000000..f6a683cc7d
--- /dev/null
+++ b/taskcluster/scripts/misc/build-cpython.sh
@@ -0,0 +1,70 @@
+#!/bin/sh
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#
+# This script builds the official interpreter for the python language,
+# while also packing in a few default extra packages.
+
+set -e
+set -x
+
+# Required fetch artifact
+clang_bindir=${MOZ_FETCHES_DIR}/clang/bin
+clang_libdir=${MOZ_FETCHES_DIR}/clang/lib
+python_src=${MOZ_FETCHES_DIR}/cpython-source
+
+# Make the compiler-rt available to clang.
+env UPLOAD_DIR= $GECKO_PATH/taskcluster/scripts/misc/repack-clang.sh
+
+# Setup environment
+export PATH=${clang_bindir}:${PATH}
+export CC=clang
+export CXX=clang++
+export LDFLAGS=-fuse-ld=lld
+
+# Extra setup for OSX
+case `uname -s` in
+ Darwin)
+ case `uname -m` in
+ aarch64)
+ macosx_version_min=11.0
+ ;;
+ *)
+ macosx_version_min=10.12
+ ;;
+ esac
+ macosx_sdk=13.3
+ # NOTE: both CFLAGS and CPPFLAGS need to be set here, otherwise
+    # the configure step fails.
+ sysroot_flags="-isysroot ${MOZ_FETCHES_DIR}/MacOSX${macosx_sdk}.sdk -mmacosx-version-min=${macosx_version_min}"
+ export CPPFLAGS=${sysroot_flags}
+ export CFLAGS=${sysroot_flags}
+ export LDFLAGS="${LDFLAGS} ${sysroot_flags}"
+ configure_flags_extra=--with-openssl=/usr/local/opt/openssl
+
+ # see https://bugs.python.org/issue44065
+ sed -i -e 's,$CC --print-multiarch,:,' ${python_src}/configure
+ ;;
+esac
+
+# Patch Python to honor MOZPYTHONHOME instead of PYTHONHOME. That way we have a
+# relocatable python for free, while not interfering with the system Python that
+# already honors PYTHONHOME.
+find ${python_src} -type f -print0 | xargs -0 perl -i -pe "s,PYTHONHOME,MOZPYTHONHOME,g"
+
+# Actual build
+work_dir=`pwd`
+tardir=python
+
+cd `mktemp -d`
+${python_src}/configure --prefix=/${tardir} --enable-optimizations ${configure_flags_extra} || { exit_status=$? && cat config.log && exit $exit_status ; }
+export MAKEFLAGS=-j`nproc`
+make
+make DESTDIR=${work_dir} install
+cd ${work_dir}
+
+${work_dir}/python/bin/python3 -m pip install --upgrade pip==23.0
+${work_dir}/python/bin/python3 -m pip install -r ${GECKO_PATH}/build/psutil_requirements.txt -r ${GECKO_PATH}/build/zstandard_requirements.txt
+
+$(dirname $0)/pack.sh ${tardir}
diff --git a/taskcluster/scripts/misc/build-custom-car-linux.sh b/taskcluster/scripts/misc/build-custom-car-linux.sh
new file mode 100755
index 0000000000..c92b696e28
--- /dev/null
+++ b/taskcluster/scripts/misc/build-custom-car-linux.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building a custom version of chromium-as-release on Linux
+
+# First argument must be the artifact name
+ARTIFACT_NAME=$(basename $TOOLCHAIN_ARTIFACT)
+shift
+
+# Use the rest of the arguments as the build config
+CONFIG=$(echo $* | tr -d "'")
+
+
+mkdir custom_car
+cd custom_car
+CUSTOM_CAR_DIR=$PWD
+
+# Setup depot_tools
+git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
+export PATH=$PATH:$CUSTOM_CAR_DIR/depot_tools
+
+
+# Get chromium source code and dependencies
+mkdir chromium
+cd chromium
+fetch --no-history --nohooks chromium
+
+# Set up the .gclient file to ensure PGO profiles are downloaded.
+# For some reason we need to set the --name flag even though it already exists;
+# currently the gclient.py file does NOT recognize --custom-var as its own argument.
+gclient config --name src "https://chromium.googlesource.com/chromium/src.git" --custom-var="checkout_pgo_profiles=True" --unmanaged
+
+cd src
+
+# now we can run hooks and fetch PGO + everything else
+gclient runhooks
+
+# PGO data should be in src/chrome/build/pgo_profiles/
+# with a name like "chrome-{OS}-<some unique identifier>"
+export PGO_DATA_DIR="$CUSTOM_CAR_DIR/chromium/src/chrome/build/pgo_profiles"
+for entry in "$PGO_DATA_DIR"/*
+do
+ if [ -f "$entry" ];then
+ export PGO_DATA_PATH="$entry"
+ fi
+done
+CONFIG=$(echo $CONFIG pgo_data_path='"'$PGO_DATA_PATH'"')
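+# The '"'...'"' quoting keeps literal double quotes around the value, so the
+# appended arg reads pgo_data_path="/abs/path/to/profile" (path illustrative),
+# the form gn expects for string arguments.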
+
+
+# set up then build chrome
+gn gen out/Default --args="$CONFIG"
+autoninja -C out/Default chrome # skips test binaries
+
+
+# Gather binary and related files into a zip, and upload it
+cd ..
+mkdir chromium
+
+mv src/out/Default chromium
+chmod -R +x chromium
+
+tar caf $ARTIFACT_NAME chromium
+
+mkdir -p $UPLOAD_DIR
+mv $ARTIFACT_NAME $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-custom-car-win64.sh b/taskcluster/scripts/misc/build-custom-car-win64.sh
new file mode 100644
index 0000000000..512a0a072f
--- /dev/null
+++ b/taskcluster/scripts/misc/build-custom-car-win64.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building a custom version of chromium-as-release on Windows
+
+# First argument must be the artifact name
+ARTIFACT_NAME=$(basename $TOOLCHAIN_ARTIFACT)
+shift
+
+# Use the rest of the arguments as the build config
+CONFIG=$(echo $* | tr -d "'")
+
+mkdir custom_car
+cd custom_car
+CUSTOM_CAR_DIR=$PWD
+
+# setup VS 2022
+. $GECKO_PATH/taskcluster/scripts/misc/vs-setup.sh
+
+# setup depot_tools
+git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
+export PATH="$(cd ./depot_tools && pwd):$PATH"
+
+# setup some environment variables for chromium build scripts
+export DEPOT_TOOLS_WIN_TOOLCHAIN=0
+export GYP_MSVS_OVERRIDE_PATH="$MOZ_FETCHES_DIR/VS"
+export GYP_MSVS_VERSION=2022
+export vs2022_install="$MOZ_FETCHES_DIR/VS"
+export WINDOWSSDKDIR="$MOZ_FETCHES_DIR/VS/Windows Kits/10"
+export DEPOT_TOOLS_UPDATE=1
+export GCLIENT_PY3=1
+# Fool GYP
+touch "$MOZ_FETCHES_DIR/VS/VC/vcvarsall.bat"
+
+
+# construct some of our own dirs and move VS dlls + other files
+# to a path that chromium build files are expecting
+mkdir chrome_dll
+cd chrome_dll
+mkdir system32
+cd ../
+pushd "$WINDOWSSDKDIR"
+mkdir -p Debuggers/x64/
+popd
+mv $MOZ_FETCHES_DIR/VS/VC/Redist/MSVC/14.34.31931/x64/Microsoft.VC143.CRT/* chrome_dll/system32/
+mv "$WINDOWSSDKDIR/App Certification Kit/"* "$WINDOWSSDKDIR"/Debuggers/x64/
+export WINDIR="$PWD/chrome_dll"
+
+# Run gclient once first to get some Windows deps.
+gclient
+
+# fetch chromium src code
+mkdir chromium
+cd chromium
+fetch --no-history --nohooks chromium
+
+# Set up the .gclient file to ensure PGO profiles are downloaded.
+# For some reason we need to set the --name flag even though it already exists;
+# currently the gclient.py file does NOT recognize --custom-var as its own argument.
+gclient config --name src https://chromium.googlesource.com/chromium/src.git --custom-var=checkout_pgo_profiles=True --unmanaged
+
+cd src
+
+# With fast (shallow) fetches it seems we will be missing some generated
+# files on Windows. We can create a dummy LASTCHANGE this way to satisfy the
+# rest of the build sequence; this is OK because we are not doing any
+# development here and don't need history.
+python3 build/util/lastchange.py -o build/util/LASTCHANGE
+
+# now we can run hooks and fetch PGO + everything else
+gclient runhooks
+
+# PGO data should be in src/chrome/build/pgo_profiles/
+# with a name like "chrome-{OS}-<some unique identifier>"
+export PGO_DATA_DIR="$CUSTOM_CAR_DIR/chromium/src/chrome/build/pgo_profiles"
+for entry in "$PGO_DATA_DIR"/*
+do
+ if [ -f "$entry" ];then
+ export PGO_DATA_PATH="$entry"
+ fi
+done
+# Compute the relative path that the build scripts look for.
+# This odd pathing seems to only happen on Windows.
+PGO_FILE=${PGO_DATA_PATH#*/*/*/*/*/*/*/*/*/}
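+# (The ${VAR#*/*/.../} expansion strips the first nine path components,
+# leaving a path relative to the layout the build config expects.)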
+mv $PGO_DATA_PATH build/config/compiler/pgo/
+CONFIG=$(echo $CONFIG pgo_data_path='"'$PGO_FILE'"')
+
+# set up then build chrome
+gn gen out/Default --args="$CONFIG"
+autoninja -C out/Default chrome # skips test binaries
+
+# Gather binary and related files into a zip, and upload it
+cd ..
+mkdir chromium
+
+mv src/out/Default chromium
+chmod -R +x chromium
+
+tar caf $ARTIFACT_NAME chromium
+
+mkdir -p $UPLOAD_DIR
+mv $ARTIFACT_NAME $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-custom-v8.sh b/taskcluster/scripts/misc/build-custom-v8.sh
new file mode 100755
index 0000000000..5c8ea673ad
--- /dev/null
+++ b/taskcluster/scripts/misc/build-custom-v8.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building a custom version of V8
+ARTIFACT_NAME='d8.tar.zst'
+CONFIG='is_debug=false target_cpu="x64"'
+if [[ $# -eq 0 ]]; then
+ echo "Using default configuration for v8 build."
+ CONFIG=$(echo $CONFIG | tr -d "'")
+else
+ # First argument must be the artifact name
+ ARTIFACT_NAME="$1"
+ shift
+
+ # Use the rest of the arguments as the build config
+ CONFIG=$(echo $* | tr -d "'")
+fi
+
+echo "Config: $CONFIG"
+echo "Artifact name: $ARTIFACT_NAME"
+
+cd $GECKO_PATH
+
+# Setup depot_tools
+git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
+export PATH=$PATH:$GECKO_PATH/depot_tools
+
+# Get v8 source code and dependencies
+fetch --force v8
+cd v8
+
+# Build v8
+gn gen out/release --args="$CONFIG"
+ninja -C out/release d8
+
+# Gather binary and related files into a zip, and upload it
+cd ..
+mkdir d8
+
+cp -R v8/out/release d8
+cp -R v8/include d8
+chmod -R +x d8
+
+tar caf $ARTIFACT_NAME d8
+
+mkdir -p $UPLOAD_DIR
+cp $ARTIFACT_NAME $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-dist-toolchains.sh b/taskcluster/scripts/misc/build-dist-toolchains.sh
new file mode 100755
index 0000000000..1ad1871775
--- /dev/null
+++ b/taskcluster/scripts/misc/build-dist-toolchains.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for packaging toolchains suitable for use by distributed sccache.
+TL_NAME="$1"
+
+mkdir -p $HOME/artifacts
+mkdir -p $HOME/toolchains
+
+mv $MOZ_FETCHES_DIR/$TL_NAME $HOME/toolchains/$TL_NAME
+
+$MOZ_FETCHES_DIR/sccache/sccache --package-toolchain $HOME/toolchains/$TL_NAME/bin/$TL_NAME $HOME/artifacts/$TL_NAME-dist-toolchain.tar.xz
diff --git a/taskcluster/scripts/misc/build-dmg-hfsplus.sh b/taskcluster/scripts/misc/build-dmg-hfsplus.sh
new file mode 100755
index 0000000000..b0039432aa
--- /dev/null
+++ b/taskcluster/scripts/misc/build-dmg-hfsplus.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building libdmg-hfsplus to get the `dmg` and `hfsplus`
+# tools for producing DMG archives on Linux.
+
+WORKSPACE=$HOME/workspace
+STAGE=$WORKSPACE/dmg
+
+mkdir -p $UPLOAD_DIR $STAGE
+
+cd $MOZ_FETCHES_DIR/libdmg-hfsplus
+
+# The openssl libraries in the sysroot cannot be linked into a PIE executable, so we use -no-pie
+cmake \
+ -DCMAKE_C_COMPILER=$MOZ_FETCHES_DIR/clang/bin/clang \
+ -DCMAKE_CXX_COMPILER=$MOZ_FETCHES_DIR/clang/bin/clang++ \
+ -DCMAKE_SYSROOT=$MOZ_FETCHES_DIR/sysroot \
+ -DOPENSSL_USE_STATIC_LIBS=1 \
+ -DCMAKE_EXE_LINKER_FLAGS=-no-pie \
+ .
+
+make VERBOSE=1 -j$(nproc)
+
+# We only need the dmg and hfsplus tools.
+strip dmg/dmg hfs/hfsplus
+cp dmg/dmg hfs/hfsplus $STAGE
+
+# Duplicate the functionality of taskcluster-lib-urls, but in bash.
+queue_base="$TASKCLUSTER_ROOT_URL/api/queue/v1"
+
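+# MOZ_FETCHES is a JSON list of {task, artifact, ...} records; the inline
+# Python below picks the libdmg-hfsplus source fetch and builds its artifact
+# URL.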
+cat >$STAGE/README<<EOF
+Source is available as a taskcluster artifact:
+$queue_base/task/$(python3 -c 'import json, os; print("{task}/artifacts/{artifact}".format(**next(f for f in json.loads(os.environ["MOZ_FETCHES"]) if "dmg-hfsplus" in f["artifact"])))')
+EOF
+tar caf $UPLOAD_DIR/dmg.tar.zst -C $WORKSPACE `basename $STAGE`
diff --git a/taskcluster/scripts/misc/build-gcc-linux.sh b/taskcluster/scripts/misc/build-gcc-linux.sh
new file mode 100755
index 0000000000..2e8acaabfc
--- /dev/null
+++ b/taskcluster/scripts/misc/build-gcc-linux.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+set -e
+
+# This script is for building GCC for Linux.
+
+root_dir=$MOZ_FETCHES_DIR
+data_dir=$GECKO_PATH/build/unix/build-gcc
+
+PATH=$MOZ_FETCHES_DIR/gcc/bin:$PATH
+
+. $data_dir/build-gcc.sh
+
+pushd $root_dir/gcc-source
+ln -sf ../gmp-source gmp
+ln -sf ../isl-source isl
+ln -sf ../mpc-source mpc
+ln -sf ../mpfr-source mpfr
+popd
+
+for patch in "$@"; do
+ apply_patch $GECKO_PATH/$patch
+done
+
+build_gcc
+
+# Put a tarball in the artifacts dir
+mkdir -p $UPLOAD_DIR
+cp $MOZ_FETCHES_DIR/gcc.tar.* $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-gcc-sixgill-plugin-linux.sh b/taskcluster/scripts/misc/build-gcc-sixgill-plugin-linux.sh
new file mode 100755
index 0000000000..1ee9dc626c
--- /dev/null
+++ b/taskcluster/scripts/misc/build-gcc-sixgill-plugin-linux.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+set -e
+set -x
+
+# This script is for building the sixgill GCC plugin for Linux. It relies on
+# the gcc checkout because it needs to recompile gmp and the gcc build script
+# determines the version of gmp to download.
+
+root_dir=$MOZ_FETCHES_DIR
+build_dir=$GECKO_PATH/build
+data_dir=$GECKO_PATH/build/unix/build-gcc
+
+sixgill_rev=a642a811d6ee
+sixgill_repo=https://hg.mozilla.org/users/sfink_mozilla.com/sixgill
+
+. $data_dir/build-gcc.sh
+
+mkdir $root_dir/gcc-source || true
+pushd $root_dir/gcc-source
+ln -sf ../gmp-source gmp
+ln -sf ../isl-source isl
+ln -sf ../mpc-source mpc
+ln -sf ../mpfr-source mpfr
+popd
+
+export TMPDIR=${TMPDIR:-/tmp/}
+export gcc_bindir=$MOZ_FETCHES_DIR/gcc/bin
+export gmp_prefix=/tools/gmp
+export gmp_dir=$root_dir$gmp_prefix
+
+prepare_sixgill() {(
+ cd $root_dir
+ hg clone -r $sixgill_rev $sixgill_repo || ( cd sixgill && hg update -r $sixgill_rev )
+)}
+
+build_gmp() {
+ if ! [ -x $gcc_bindir/gcc ]; then
+ echo "GCC not found in $gcc_bindir/gcc" >&2
+ exit 1
+ fi
+
+ # The sixgill plugin uses some gmp symbols, including some not exported by
+ # cc1/cc1plus. So link the plugin statically to libgmp. Except that the
+ # default static build does not have -fPIC, and will result in a relocation
+ # error, so build our own. This requires the gcc and related source to be
+ # in $root_dir/gcc-source.
+
+ mkdir $root_dir/gmp-objdir || true
+ (
+ cd $root_dir/gmp-objdir
+ $root_dir/gcc-source/gmp/configure --disable-shared --with-pic --prefix=$gmp_prefix
+ make -j8
+ make install DESTDIR=$root_dir
+ )
+}
+
+build_sixgill() {(
+ cd $root_dir/sixgill
+ export CC=$gcc_bindir/gcc
+ export CXX=$gcc_bindir/g++
+ export PATH="$gcc_bindir:$PATH"
+ export LD_LIBRARY_PATH="${gcc_bindir%/bin}/lib64"
+ export TARGET_CC=$CC
+ export CPPFLAGS=-I$gmp_dir/include
+ export EXTRA_LDFLAGS=-L$gmp_dir/lib
+ export HOST_CFLAGS=$CPPFLAGS
+
+ ./release.sh --build-and-package --with-gmp=$gmp_dir
+ tarball=$(ls -td *-sixgill | head -1)/sixgill.tar.xz
+ cp $tarball $root_dir/sixgill.tar.xz
+)}
+
+prepare_sixgill
+build_gmp
+build_sixgill
+
+# Put a tarball in the artifacts dir
+mkdir -p $UPLOAD_DIR
+cp $MOZ_FETCHES_DIR/sixgill.tar.* $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-geckodriver.sh b/taskcluster/scripts/misc/build-geckodriver.sh
new file mode 100755
index 0000000000..7434ee2ef8
--- /dev/null
+++ b/taskcluster/scripts/misc/build-geckodriver.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+set -x -e -v
+
+# Needed by osx-cross-linker.
+export TARGET="$1"
+
+cd $GECKO_PATH
+
+EXE=
+COMPRESS_EXT=gz
+
+case "$TARGET" in
+*windows-msvc)
+ EXE=.exe
+ COMPRESS_EXT=zip
+ . $GECKO_PATH/taskcluster/scripts/misc/vs-setup.sh
+ # Bug 1584530: don't require the Microsoft MSVC runtime to be installed.
+ export RUSTFLAGS="-Ctarget-feature=+crt-static -C linker=$MOZ_FETCHES_DIR/clang/bin/lld-link"
+ export TARGET_CFLAGS="-Xclang -ivfsoverlay -Xclang $MOZ_FETCHES_DIR/vs/overlay.yaml"
+ export TARGET_CXXFLAGS="-Xclang -ivfsoverlay -Xclang $MOZ_FETCHES_DIR/vs/overlay.yaml"
+ ;;
+# OSX cross builds are a bit harder
+*-apple-darwin)
+ export PATH="$MOZ_FETCHES_DIR/clang/bin:$PATH"
+ export RUSTFLAGS="-C linker=$GECKO_PATH/taskcluster/scripts/misc/osx-cross-linker"
+ if test "$TARGET" = "aarch64-apple-darwin"; then
+ export MACOSX_DEPLOYMENT_TARGET=11.0
+ else
+ export MACOSX_DEPLOYMENT_TARGET=10.12
+ fi
+ ;;
+aarch64-unknown-linux-musl)
+ export RUSTFLAGS="-C linker=$MOZ_FETCHES_DIR/clang/bin/clang -C link-arg=--target=$TARGET -C link-arg=-fuse-ld=lld"
+ ;;
+esac
+
+export PATH="$MOZ_FETCHES_DIR/rustc/bin:$PATH"
+
+cd $GECKO_PATH/testing/geckodriver
+
+cp $GECKO_PATH/.cargo/config.in $GECKO_PATH/.cargo/config
+
+cargo build --frozen --verbose --release --target "$TARGET"
+
+cd $GECKO_PATH
+mkdir -p $UPLOAD_DIR
+
+cp target/$TARGET/release/geckodriver$EXE .
+if [ "$COMPRESS_EXT" = "zip" ]; then
+ zip geckodriver.zip geckodriver$EXE
+ cp geckodriver.zip $UPLOAD_DIR
+else
+ tar -acf geckodriver.tar.$COMPRESS_EXT geckodriver$EXE
+ cp geckodriver.tar.$COMPRESS_EXT $UPLOAD_DIR
+fi
+
+. $GECKO_PATH/taskcluster/scripts/misc/vs-cleanup.sh
diff --git a/taskcluster/scripts/misc/build-gn-common.sh b/taskcluster/scripts/misc/build-gn-common.sh
new file mode 100755
index 0000000000..b72d51df09
--- /dev/null
+++ b/taskcluster/scripts/misc/build-gn-common.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+set -e -v
+
+# This is shared code for building GN.
+cd $MOZ_FETCHES_DIR/gn
+
+if test -n "$MAC_CROSS"; then
+ python3 build/gen.py --platform darwin --no-last-commit-position
+else
+ python3 build/gen.py --no-last-commit-position
+fi
+
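+# gen.py was invoked with --no-last-commit-position, so provide a stub
+# last_commit_position.h with placeholder values (presumably because the
+# fetched source has no git history to derive the real one from).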
+cat > out/last_commit_position.h <<EOF
+#ifndef OUT_LAST_COMMIT_POSITION_H_
+#define OUT_LAST_COMMIT_POSITION_H_
+
+#define LAST_COMMIT_POSITION_NUM 0
+#define LAST_COMMIT_POSITION "unknown"
+
+#endif // OUT_LAST_COMMIT_POSITION_H_
+EOF
+
+ninja -C out -v
+
+STAGE=gn
+mkdir -p $UPLOAD_DIR $STAGE
+
+# At this point, the resulting binary is at:
+# out/gn
+if test "$MAC_CROSS" = "" -a "$(uname)" = "Linux"; then
+ strip out/gn
+fi
+cp out/gn $STAGE
+
+tar -c $STAGE | python3 $GECKO_PATH/taskcluster/scripts/misc/zstdpy > gn.tar.zst
+cp gn.tar.zst $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-gn-linux.sh b/taskcluster/scripts/misc/build-gn-linux.sh
new file mode 100755
index 0000000000..f6fd9fd507
--- /dev/null
+++ b/taskcluster/scripts/misc/build-gn-linux.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+set -e -v
+
+# This script is for building GN on Linux.
+
+WORKSPACE=$HOME/workspace
+export CC=gcc
+export CXX=g++
+export LDFLAGS=-lrt
+
+cd $GECKO_PATH
+
+. taskcluster/scripts/misc/build-gn-common.sh
diff --git a/taskcluster/scripts/misc/build-gn-macosx.sh b/taskcluster/scripts/misc/build-gn-macosx.sh
new file mode 100755
index 0000000000..0daafea940
--- /dev/null
+++ b/taskcluster/scripts/misc/build-gn-macosx.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+set -e -v
+
+# This script is for building GN.
+
+WORKSPACE=$HOME/workspace
+
+CROSS_SYSROOT=$MOZ_FETCHES_DIR/MacOSX13.3.sdk
+export MACOSX_DEPLOYMENT_TARGET=10.12
+
+export CC=$MOZ_FETCHES_DIR/clang/bin/clang
+export CXX=$MOZ_FETCHES_DIR/clang/bin/clang++
+export AR=$MOZ_FETCHES_DIR/clang/bin/llvm-ar
+export CFLAGS="-target x86_64-apple-darwin -isysroot ${CROSS_SYSROOT} -I${CROSS_SYSROOT}/usr/include -iframework ${CROSS_SYSROOT}/System/Library/Frameworks"
+export CXXFLAGS="-stdlib=libc++ ${CFLAGS}"
+export LDFLAGS="-fuse-ld=lld ${CXXFLAGS} -Wl,-syslibroot,${CROSS_SYSROOT} -Wl,-dead_strip"
+
+# We patch tools/gn/bootstrap/bootstrap.py to detect this.
+export MAC_CROSS=1
+
+cd $GECKO_PATH
+
+. taskcluster/scripts/misc/build-gn-common.sh
diff --git a/taskcluster/scripts/misc/build-gn-win64.sh b/taskcluster/scripts/misc/build-gn-win64.sh
new file mode 100755
index 0000000000..3ecd71fc74
--- /dev/null
+++ b/taskcluster/scripts/misc/build-gn-win64.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+set -e -v -x
+
+# This script is for building GN on Windows.
+
+UPLOAD_DIR=$PWD/public/build
+
+cd $GECKO_PATH
+
+export PATH="$(cd $MOZ_FETCHES_DIR && pwd)/ninja/bin:$PATH"
+export PATH="$(cd $MOZ_FETCHES_DIR && pwd)/mingw64/bin:$PATH"
+
+. taskcluster/scripts/misc/vs-setup.sh
+. taskcluster/scripts/misc/build-gn-common.sh
+
+. $GECKO_PATH/taskcluster/scripts/misc/vs-cleanup.sh
diff --git a/taskcluster/scripts/misc/build-hfsplus-linux.sh b/taskcluster/scripts/misc/build-hfsplus-linux.sh
new file mode 100755
index 0000000000..91f9901ebc
--- /dev/null
+++ b/taskcluster/scripts/misc/build-hfsplus-linux.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building hfsplus for Linux.
+
+cd $GECKO_PATH
+
+export PATH=$PATH:$MOZ_FETCHES_DIR/clang/bin
+
+build/unix/build-hfsplus/build-hfsplus.sh $MOZ_FETCHES_DIR
+
+# Put a tarball in the artifacts dir
+mkdir -p $UPLOAD_DIR
+cp $MOZ_FETCHES_DIR/hfsplus.tar.* $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-libunwind.sh b/taskcluster/scripts/misc/build-libunwind.sh
new file mode 100755
index 0000000000..1be5168d0a
--- /dev/null
+++ b/taskcluster/scripts/misc/build-libunwind.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+set -e -x
+
+artifact=$(basename $TOOLCHAIN_ARTIFACT)
+dir=${artifact%.tar.*}
+target=${dir#libunwind-}
+
+# Make the android compiler-rt available to clang.
+env UPLOAD_DIR= $GECKO_PATH/taskcluster/scripts/misc/repack-clang.sh
+
+EXTRA_CMAKE_FLAGS="
+ $EXTRA_CMAKE_FLAGS
+ -DLLVM_ENABLE_PER_TARGET_RUNTIME_DIR=ON
+ -DCMAKE_C_COMPILER_WORKS=1
+ -DCMAKE_CXX_COMPILER_WORKS=1
+ -DLLVM_ENABLE_RUNTIMES=libunwind
+ -DLIBUNWIND_ENABLE_SHARED=OFF
+"
+
+export EXTRA_CMAKE_FLAGS
+
+$(dirname $0)/build-llvm-common.sh runtimes install $target "$@"
diff --git a/taskcluster/scripts/misc/build-llvm-common.sh b/taskcluster/scripts/misc/build-llvm-common.sh
new file mode 100755
index 0000000000..9ee9147bc0
--- /dev/null
+++ b/taskcluster/scripts/misc/build-llvm-common.sh
@@ -0,0 +1,206 @@
+#!/bin/sh
+
+set -e -x
+
+artifact=$(basename $TOOLCHAIN_ARTIFACT)
+dir=${artifact%.tar.*}
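+
+# Positional arguments: the llvm-project subdirectory to build (e.g. llvm or
+# runtimes), the ninja install target, and the target triple; any remaining
+# arguments are patch files or .json patch manifests applied before building.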
+what=$1
+shift
+install=$1
+shift
+target=$1
+shift
+
+clang=$MOZ_FETCHES_DIR/clang/bin/clang
+
+case "$target" in
+aarch64-apple-darwin)
+ arch=arm64
+ export MACOSX_DEPLOYMENT_TARGET=11.0
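+ # Generate tiny wrapper scripts so that every clang/clang++ invocation
+ # picks up -mcpu=apple-m1 without having to thread the flag through each
+ # build system.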
+ compiler_wrapper() {
+ echo exec \$MOZ_FETCHES_DIR/clang/bin/$1 -mcpu=apple-m1 \"\$@\" > $1
+ chmod +x $1
+ }
+ compiler_wrapper clang
+ compiler_wrapper clang++
+ clang=$PWD/clang
+ ;;
+x86_64-apple-darwin)
+ arch=x86_64
+ export MACOSX_DEPLOYMENT_TARGET=10.12
+ ;;
+armv7-linux-android|i686-linux-android)
+ api_level=16
+ ;;
+aarch64-linux-android|x86_64-linux-android)
+ api_level=21
+ ;;
+esac
+
+case "$target" in
+*-apple-darwin)
+ EXTRA_CMAKE_FLAGS="
+ $EXTRA_CMAKE_FLAGS
+ -DCMAKE_LINKER=$MOZ_FETCHES_DIR/clang/bin/ld64.lld
+ -DCMAKE_LIPO=$MOZ_FETCHES_DIR/clang/bin/llvm-lipo
+ -DCMAKE_SYSTEM_NAME=Darwin
+ -DCMAKE_SYSTEM_VERSION=$MACOSX_DEPLOYMENT_TARGET
+ -DCMAKE_OSX_SYSROOT=$MOZ_FETCHES_DIR/MacOSX13.3.sdk
+ -DCMAKE_EXE_LINKER_FLAGS=-fuse-ld=lld
+ -DCMAKE_SHARED_LINKER_FLAGS=-fuse-ld=lld
+ -DDARWIN_osx_ARCHS=$arch
+ -DDARWIN_osx_SYSROOT=$MOZ_FETCHES_DIR/MacOSX13.3.sdk
+ -DDARWIN_macosx_OVERRIDE_SDK_VERSION=11.0
+ -DDARWIN_osx_BUILTIN_ARCHS=$arch
+ -DLLVM_DEFAULT_TARGET_TRIPLE=$target
+ "
+ # compiler-rt build script expects to find `codesign` in $PATH.
+ # Give it a fake one.
+ echo "#!/bin/sh" > codesign
+ chmod +x codesign
+ # cmake makes decisions based on the output of the mac-only sw_vers, which is
+ # obviously missing when cross-compiling, so create a fake one. The exact
+ # version doesn't really matter: as of writing, cmake checks at most for 10.5.
+ echo "#!/bin/sh" > sw_vers
+ echo echo 10.12 >> sw_vers
+ chmod +x sw_vers
+ PATH="$PATH:$PWD"
+ ;;
+*-linux-android)
+ case "$target" in
+ armv7-linux-android)
+ arch=arm
+ ;;
+ *-linux-android)
+ arch=${target%-linux-android}
+ ;;
+ esac
+ target=$target$api_level
+ # These flags are only necessary to pass the cmake tests. They don't end up
+ # actually using libgcc, so use an empty library instead of trying to find
+ # where it is in the NDK.
+ if [ "$what" = "compiler-rt" ]; then
+ exe_linker_flags="--rtlib=libgcc -L$PWD"
+ touch libgcc.a
+ fi
+ EXTRA_CMAKE_FLAGS="
+ $EXTRA_CMAKE_FLAGS
+ -DCMAKE_SYSROOT=$MOZ_FETCHES_DIR/android-ndk/toolchains/llvm/prebuilt/linux-x86_64/sysroot
+ -DCMAKE_LINKER=$MOZ_FETCHES_DIR/clang/bin/ld.lld
+ -DCMAKE_EXE_LINKER_FLAGS='-fuse-ld=lld $exe_linker_flags'
+ -DCMAKE_SHARED_LINKER_FLAGS=-fuse-ld=lld
+ -DANDROID=1
+ -DANDROID_NATIVE_API_LEVEL=$api_level
+ -DSANITIZER_ALLOW_CXXABI=OFF
+ -DLLVM_DEFAULT_TARGET_TRIPLE=$arch-unknown-linux-android
+ "
+ ;;
+*-unknown-linux-gnu)
+ if [ -d "$MOZ_FETCHES_DIR/sysroot" ]; then
+ sysroot=$MOZ_FETCHES_DIR/sysroot
+ else
+ sysroot=$MOZ_FETCHES_DIR/sysroot-${target%-unknown-linux-gnu}-linux-gnu
+ fi
+ if [ "${target%-unknown-linux-gnu}" = i686 ]; then
+ EXTRA_CMAKE_FLAGS="
+ $EXTRA_CMAKE_FLAGS
+ -DLLVM_TABLEGEN=$MOZ_FETCHES_DIR/clang/bin/llvm-tblgen
+ "
+ fi
+ EXTRA_CMAKE_FLAGS="
+ $EXTRA_CMAKE_FLAGS
+ -DCMAKE_SYSROOT=$sysroot
+ -DCMAKE_LINKER=$MOZ_FETCHES_DIR/clang/bin/ld.lld
+ -DCMAKE_EXE_LINKER_FLAGS=-fuse-ld=lld
+ -DCMAKE_SHARED_LINKER_FLAGS=-fuse-ld=lld
+ -DLLVM_ENABLE_TERMINFO=OFF
+ "
+ ;;
+*-pc-windows-msvc)
+ EXTRA_CMAKE_FLAGS="
+ $EXTRA_CMAKE_FLAGS
+ -DCMAKE_TOOLCHAIN_FILE=$MOZ_FETCHES_DIR/llvm-project/llvm/cmake/platforms/WinMsvc.cmake
+ -DLLVM_NATIVE_TOOLCHAIN=$MOZ_FETCHES_DIR/clang
+ -DHOST_ARCH=${target%-pc-windows-msvc}
+ -DLLVM_DISABLE_ASSEMBLY_FILES=ON
+ "
+ # LLVM 15+ uses different input variables.
+ if grep -q LLVM_WINSYSROOT $MOZ_FETCHES_DIR/llvm-project/llvm/cmake/platforms/WinMsvc.cmake; then
+ EXTRA_CMAKE_FLAGS="
+ $EXTRA_CMAKE_FLAGS
+ -DLLVM_WINSYSROOT=$MOZ_FETCHES_DIR/vs
+ "
+ else
+ # WinMsvc.cmake before LLVM 15 doesn't support spaces in WINSDK_BASE.
+ ln -s "windows kits/10" $MOZ_FETCHES_DIR/vs/sdk
+ EXTRA_CMAKE_FLAGS="
+ $EXTRA_CMAKE_FLAGS
+ -DMSVC_BASE=$MOZ_FETCHES_DIR/vs/vc/tools/msvc/14.29.30133
+ -DWINSDK_BASE=$MOZ_FETCHES_DIR/vs/sdk
+ -DWINSDK_VER=10.0.19041.0
+ "
+ fi
+ ;;
+*)
+ echo "$target is not supported yet" >&2
+ exit 1
+ ;;
+esac
+
+case "$target" in
+*-pc-windows-msvc)
+ ;;
+*)
+ EXTRA_CMAKE_FLAGS="
+ $EXTRA_CMAKE_FLAGS
+ -DCMAKE_C_COMPILER=$clang
+ -DCMAKE_CXX_COMPILER=$clang++
+ -DCMAKE_AR=$MOZ_FETCHES_DIR/clang/bin/llvm-ar
+ -DCMAKE_RANLIB=$MOZ_FETCHES_DIR/clang/bin/llvm-ranlib
+ "
+ ;;
+esac
+
+mkdir build
+cd build
+
+for patchfile in "$@"; do
+ case $patchfile in
+ *.json)
+ jq -r '.patches[]' $GECKO_PATH/$patchfile | while read p; do
+ patch -d $MOZ_FETCHES_DIR/llvm-project -p1 < $GECKO_PATH/$(dirname $patchfile)/$p
+ done
+ ;;
+ *)
+ patch -d $MOZ_FETCHES_DIR/llvm-project -p1 < $GECKO_PATH/$patchfile
+ ;;
+ esac
+done
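+# Note: a .json patch argument above is a manifest whose "patches" array lists
+# patch files relative to the manifest's own directory, e.g. (hypothetical):
+#   { "patches": ["0001-fix-build.patch"] }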
+
+eval cmake \
+ $MOZ_FETCHES_DIR/llvm-project/$what \
+ -GNinja \
+ -DCMAKE_C_COMPILER_TARGET=$target \
+ -DCMAKE_CXX_COMPILER_TARGET=$target \
+ -DCMAKE_ASM_COMPILER_TARGET=$target \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_INSTALL_PREFIX=${PWD}/${dir} \
+ -DLLVM_ENABLE_ASSERTIONS=OFF \
+ -DLLVM_CONFIG_PATH=$MOZ_FETCHES_DIR/clang/bin/llvm-config \
+ $EXTRA_CMAKE_FLAGS
+
+ninja -v $install
+
+if [ "$what" = "compiler-rt" ]; then
+ # ninja install doesn't copy the PDBs
+ case "$target" in
+ *-pc-windows-msvc)
+ cp lib/windows/*pdb $dir/lib/windows/
+ ;;
+ esac
+fi
+
+tar caf "$artifact" "$dir"
+
+mkdir -p "$UPLOAD_DIR"
+mv "$artifact" "$UPLOAD_DIR"
diff --git a/taskcluster/scripts/misc/build-llvm-symbolizer.sh b/taskcluster/scripts/misc/build-llvm-symbolizer.sh
new file mode 100755
index 0000000000..f0b3657376
--- /dev/null
+++ b/taskcluster/scripts/misc/build-llvm-symbolizer.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+set -e -x
+
+$(dirname $0)/build-llvm-common.sh llvm install-llvm-symbolizer "$@"
diff --git a/taskcluster/scripts/misc/build-mar-tools.sh b/taskcluster/scripts/misc/build-mar-tools.sh
new file mode 100755
index 0000000000..1a813dd772
--- /dev/null
+++ b/taskcluster/scripts/misc/build-mar-tools.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building mar and mbsdiff
+
+COMPRESS_EXT=zst
+
+cd $GECKO_PATH
+
+export MOZ_OBJDIR=obj-mar
+
+echo ac_add_options --enable-project=tools/update-packaging > .mozconfig
+echo ac_add_options --enable-linker=lld >> .mozconfig
+
+TOOLCHAINS="clang"
+
+for t in $TOOLCHAINS; do
+ PATH="$MOZ_FETCHES_DIR/$t/bin:$PATH"
+done
+
+./mach build -v
+
+mkdir mar-tools
+cp $MOZ_OBJDIR/dist/host/bin/{mar,mbsdiff} mar-tools/
+
+tar -acf mar-tools.tar.$COMPRESS_EXT mar-tools/
+mkdir -p $UPLOAD_DIR
+cp mar-tools.tar.$COMPRESS_EXT $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-mingw-fxc2-x86.sh b/taskcluster/scripts/misc/build-mingw-fxc2-x86.sh
new file mode 100755
index 0000000000..da93bbf1bc
--- /dev/null
+++ b/taskcluster/scripts/misc/build-mingw-fxc2-x86.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -x -e -v
+
+WORKSPACE=$HOME/workspace
+INSTALL_DIR=$WORKSPACE/fxc2
+
+mkdir -p $INSTALL_DIR/bin
+
+export PATH="$MOZ_FETCHES_DIR/clang/bin:$PATH"
+
+# --------------
+
+cd $MOZ_FETCHES_DIR/fxc2
+make -j$(nproc) x86
+
+cp fxc2.exe $INSTALL_DIR/bin/
+cp dll/d3dcompiler_47_32.dll $INSTALL_DIR/bin/d3dcompiler_47.dll
+
+# --------------
+
+cd $WORKSPACE
+tar caf fxc2.tar.zst fxc2
+
+mkdir -p $UPLOAD_DIR
+cp fxc2.tar.* $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-mingw32-nsis.sh b/taskcluster/scripts/misc/build-mingw32-nsis.sh
new file mode 100755
index 0000000000..806f9c8608
--- /dev/null
+++ b/taskcluster/scripts/misc/build-mingw32-nsis.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+set -x -e -v
+
+INSTALL_DIR=$MOZ_FETCHES_DIR/nsis
+
+mkdir -p $INSTALL_DIR
+
+cd $MOZ_FETCHES_DIR
+
+export PATH="$MOZ_FETCHES_DIR/binutils/bin:$MOZ_FETCHES_DIR/clang/bin:$PATH"
+
+# Call.S, included from CallCPP.S, contains directives that clang's integrated
+# assembler doesn't understand.
+cat <<'EOF' >$MOZ_FETCHES_DIR/clang/bin/i686-w64-mingw32-gcc
+#!/bin/sh
+case "$@" in
+*/CallCPP.S)
+ $(dirname $0)/i686-w64-mingw32-clang -fno-integrated-as "$@"
+ ;;
+*)
+ $(dirname $0)/i686-w64-mingw32-clang "$@"
+ ;;
+esac
+EOF
+
+chmod +x $MOZ_FETCHES_DIR/clang/bin/i686-w64-mingw32-gcc
+ln -s i686-w64-mingw32-clang++ $MOZ_FETCHES_DIR/clang/bin/i686-w64-mingw32-g++
+
+# --------------
+
+cd zlib-1.2.13
+make -f win32/Makefile.gcc PREFIX=i686-w64-mingw32-
+
+cd ../nsis-3.07-src
+patch -p1 < $GECKO_PATH/build/win32/nsis-no-insert-timestamp.patch
+patch -p1 < $GECKO_PATH/build/win32/nsis-no-underscore.patch
+# --exclude-libs is not supported by lld, but it is not required anyway.
+# /fixed is passed by the build system when building with MSVC but not
+# when building with GCC/binutils. The build system doesn't really support
+# clang/lld, and binutils and lld apparently have different defaults in this
+# respect. /fixed is necessary for the stubs to be handled properly by the
+# resource editor in NSIS, which doesn't handle relocations, so we pass the
+# equivalent flag to lld-link via clang and lld.
+sed -i 's/-Wl,--exclude-libs,msvcrt.a/-Wl,-Xlink=-fixed/' SCons/Config/gnu
+# memcpy.c and memset.c are built with a C++ compiler so we need to
+# avoid their symbols being mangled.
+sed -i '2i extern "C"' SCons/Config/{memcpy,memset}.c
+# Makensisw is skipped because its resource file fails to build with
+# llvm-rc, but we don't need makensisw.
+scons \
+ PATH=$PATH \
+ CC="clang --sysroot $MOZ_FETCHES_DIR/sysroot-x86_64-linux-gnu" \
+ CXX="clang++ --sysroot $MOZ_FETCHES_DIR/sysroot-x86_64-linux-gnu" \
+ XGCC_W32_PREFIX=i686-w64-mingw32- \
+ ZLIB_W32=../zlib-1.2.13 \
+ SKIPUTILS="NSIS Menu,Makensisw" \
+ PREFIX_DEST=$INSTALL_DIR/ \
+ PREFIX_BIN=bin \
+ NSIS_CONFIG_CONST_DATA_PATH=no \
+ VERSION=3.07 \
+ install
+# --------------
+
+cd $MOZ_FETCHES_DIR
+
+tar caf nsis.tar.zst nsis
+
+mkdir -p $UPLOAD_DIR
+cp nsis.tar.* $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-mkbom-linux.sh b/taskcluster/scripts/misc/build-mkbom-linux.sh
new file mode 100755
index 0000000000..8b4a69a1ef
--- /dev/null
+++ b/taskcluster/scripts/misc/build-mkbom-linux.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building mkbom for Linux.
+mkdir -p $UPLOAD_DIR
+
+export PATH=$PATH:$MOZ_FETCHES_DIR/clang/bin
+cd $MOZ_FETCHES_DIR/bomutils
+
+make_flags="-j$(nproc)"
+make "$make_flags"
+
+cd $(mktemp -d)
+mkdir mkbom
+
+cp $MOZ_FETCHES_DIR/bomutils/build/bin/mkbom ./mkbom/mkbom
+tar caf $UPLOAD_DIR/mkbom.tar.zst ./mkbom
diff --git a/taskcluster/scripts/misc/build-mozmake.sh b/taskcluster/scripts/misc/build-mozmake.sh
new file mode 100755
index 0000000000..455496787b
--- /dev/null
+++ b/taskcluster/scripts/misc/build-mozmake.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+set -x -e -v
+
+. $GECKO_PATH/taskcluster/scripts/misc/vs-setup.sh
+
+cd $MOZ_FETCHES_DIR/make
+
+# Patch for http://savannah.gnu.org/bugs/?58656
+patch -p1 <<'EOF'
+diff --git a/src/remake.c b/src/remake.c
+index fb237c5..b2ba069 100644
+--- a/src/remake.c
++++ b/src/remake.c
+@@ -35,6 +35,13 @@ this program. If not, see <http://www.gnu.org/licenses/>. */
+ #endif
+ #ifdef WINDOWS32
+ #include <io.h>
++#include <sys/stat.h>
++#if defined(_MSC_VER) && _MSC_VER > 1200
++/* VC7 or later support _stat64 to access 64-bit file size. */
++#define stat64 _stat64
++#else
++#define stat64 stat
++#endif
+ #endif
+
+
+@@ -1466,7 +1473,11 @@ static FILE_TIMESTAMP
+ name_mtime (const char *name)
+ {
+ FILE_TIMESTAMP mtime;
++#if defined(WINDOWS32)
++ struct stat64 st;
++#else
+ struct stat st;
++#endif
+ int e;
+
+ #if defined(WINDOWS32)
+@@ -1498,7 +1509,7 @@ name_mtime (const char *name)
+ tend = &tem[0];
+ }
+
+- e = stat (tem, &st);
++ e = stat64 (tem, &st);
+ if (e == 0 && !_S_ISDIR (st.st_mode) && tend < tem + (p - name - 1))
+ {
+ errno = ENOTDIR;
+EOF
+
+chmod +w src/config.h.W32
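+# Uncomment the BATCH_MODE_ONLY_SHELL define while generating config.h (the
+# sed strips the /* */ surrounding it in config.h.W32).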
+sed "/#define BATCH_MODE_ONLY_SHELL/s/\/\*\(.*\)\*\//\1/" src/config.h.W32 > src/config.h
+make -f Basic.mk \
+ MAKE_HOST=Windows32 \
+ MKDIR.cmd='mkdir -p $1' \
+ RM.cmd='rm -f $1' \
+ CP.cmd='cp $1 $2' \
+ msvc_CC="$MOZ_FETCHES_DIR/clang/bin/clang-cl -Xclang -ivfsoverlay -Xclang $MOZ_FETCHES_DIR/vs/overlay.yaml" \
+ msvc_LD=$MOZ_FETCHES_DIR/clang/bin/lld-link
+
+mkdir mozmake
+cp WinRel/gnumake.exe mozmake/mozmake.exe
+
+tar -acvf mozmake.tar.zst mozmake
+mkdir -p $UPLOAD_DIR
+cp mozmake.tar.zst $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-msix-packaging.sh b/taskcluster/scripts/misc/build-msix-packaging.sh
new file mode 100755
index 0000000000..345057a016
--- /dev/null
+++ b/taskcluster/scripts/misc/build-msix-packaging.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+set -x -e -v
+
+cd $MOZ_FETCHES_DIR/msix-packaging
+
+export PATH=$MOZ_FETCHES_DIR/clang/bin:$PATH
+
+# makelinux.sh invokes `make` with no parallelism. These jobs run on hosts with
+# 16+ vCPUs; let's try to take advantage.
+export MAKEFLAGS=-j16
+
+./makelinux.sh --pack -- \
+ -DCMAKE_SYSROOT=$MOZ_FETCHES_DIR/sysroot \
+ -DCMAKE_EXE_LINKER_FLAGS_INIT='-fuse-ld=lld -Wl,-rpath=\$ORIGIN' \
+ -DCMAKE_SHARED_LINKER_FLAGS_INIT='-fuse-ld=lld -Wl,-rpath=\$ORIGIN' \
+ -DCMAKE_SKIP_BUILD_RPATH=TRUE
+
+mkdir msix-packaging
+cp .vs/bin/makemsix msix-packaging
+cp .vs/lib/libmsix.so msix-packaging
+
+# The `msix-packaging` tool links against libicu dynamically. It would be
+# better to link statically, but it's not easy to achieve. This copies the
+# needed libicu libraries from the sysroot, and the rpath settings above allows
+# them to be loaded, which means the consuming environment doesn't need to
+# install libicu directly.
+LD_LIBRARY_PATH=$MOZ_FETCHES_DIR/sysroot/usr/lib/x86_64-linux-gnu \
+ldd msix-packaging/libmsix.so | awk '$3 ~ /libicu/ {print $3}' | xargs -I '{}' cp '{}' msix-packaging
+
+tar caf msix-packaging.tar.zst msix-packaging
+
+mkdir -p $UPLOAD_DIR
+cp msix-packaging.tar.zst $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-nasm.sh b/taskcluster/scripts/misc/build-nasm.sh
new file mode 100755
index 0000000000..79b0887b3e
--- /dev/null
+++ b/taskcluster/scripts/misc/build-nasm.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+set -x -e -v
+
+COMPRESS_EXT=zst
+
+cd $MOZ_FETCHES_DIR/nasm-*
+
+case $(cat version) in
+2.14.02)
+ # Fix for .debug_loc section containing garbage on elf32
+ # https://bugzilla.nasm.us/show_bug.cgi?id=3392631
+ patch -p1 <<'EOF'
+diff --git a/output/outelf.c b/output/outelf.c
+index de99d076..47031e12 100644
+--- a/output/outelf.c
++++ b/output/outelf.c
+@@ -3275,7 +3275,7 @@ static void dwarf_generate(void)
+ WRITELONG(pbuf,framelen-4); /* initial length */
+
+ /* build loc section */
+- loclen = 16;
++ loclen = is_elf64() ? 16 : 8;
+ locbuf = pbuf = nasm_malloc(loclen);
+ if (is_elf32()) {
+ WRITELONG(pbuf,0); /* null beginning offset */
+EOF
+ ;;
+esac
+
+export PATH="$MOZ_FETCHES_DIR/clang/bin:$PATH"
+
+case "$1" in
+ win64)
+ TARGET=x86_64-w64-mingw32
+ CC=x86_64-w64-mingw32-clang
+ EXE=.exe
+ ;;
+ macosx64)
+ export MACOSX_DEPLOYMENT_TARGET=10.12
+ TARGET=x86_64-apple-darwin
+ CC="clang -fuse-ld=lld --target=$TARGET -isysroot $MOZ_FETCHES_DIR/MacOSX13.3.sdk"
+ EXE=
+ ;;
+ macosx64-aarch64)
+ export MACOSX_DEPLOYMENT_TARGET=11.0
+ TARGET=aarch64-apple-darwin
+ CC="clang -fuse-ld=lld --target=$TARGET -isysroot $MOZ_FETCHES_DIR/MacOSX13.3.sdk"
+ EXE=
+ ;;
+ *)
+ CC="clang --sysroot=$MOZ_FETCHES_DIR/sysroot-x86_64-linux-gnu"
+ EXE=
+ ;;
+esac
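+# Hypothetical invocations: "build-nasm.sh win64" cross-compiles nasm.exe with
+# mingw-clang; with no argument, a native Linux nasm is produced.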
+./configure CC="$CC" AR=llvm-ar RANLIB=llvm-ranlib LDFLAGS=-fuse-ld=lld ${TARGET:+--host=$TARGET}
+make -j$(nproc)
+
+mv nasm$EXE nasm-tmp
+mkdir nasm
+mv nasm-tmp nasm/nasm$EXE
+tar -acf nasm.tar.$COMPRESS_EXT nasm
+mkdir -p "$UPLOAD_DIR"
+cp nasm.tar.$COMPRESS_EXT "$UPLOAD_DIR"
diff --git a/taskcluster/scripts/misc/build-nsis.sh b/taskcluster/scripts/misc/build-nsis.sh
new file mode 100755
index 0000000000..b1b8e06248
--- /dev/null
+++ b/taskcluster/scripts/misc/build-nsis.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+set -x -e -v
+
+export PATH=$MOZ_FETCHES_DIR/clang/bin:$PATH
+
+# nsis/ contains the pre-built windows native nsis. We build a linux
+# makensis from source and install it there.
+INSTALL_DIR=$MOZ_FETCHES_DIR/nsis
+
+cd $MOZ_FETCHES_DIR/nsis-3.07-src
+patch -p1 < $GECKO_PATH/build/win32/nsis-no-underscore.patch
+scons \
+ -j $(nproc) \
+ PATH=$PATH \
+ CC="clang --sysroot $MOZ_FETCHES_DIR/sysroot-x86_64-linux-gnu" \
+ CXX="clang++ --sysroot $MOZ_FETCHES_DIR/sysroot-x86_64-linux-gnu" \
+ SKIPSTUBS=all \
+ SKIPPLUGINS=all \
+ SKIPUTILS=all \
+ SKIPMISC=all \
+ PREFIX_DEST=$INSTALL_DIR/ \
+ PREFIX_BIN=bin \
+ NSIS_CONFIG_CONST_DATA_PATH=no \
+ VERSION=3.07 \
+ install-compiler
+
+cd $MOZ_FETCHES_DIR
+
+tar caf nsis.tar.zst nsis
+
+mkdir -p $UPLOAD_DIR
+cp nsis.tar.zst $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-pkgconf.sh b/taskcluster/scripts/misc/build-pkgconf.sh
new file mode 100755
index 0000000000..bc4ec7d4bb
--- /dev/null
+++ b/taskcluster/scripts/misc/build-pkgconf.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building pkgconfs.
+PROJECT=pkgconf
+
+cd ${MOZ_FETCHES_DIR}/${PROJECT}
+
+export PATH="$MOZ_FETCHES_DIR/clang/bin:$PATH"
+
+case "$1" in
+x86_64-unknown-linux-gnu)
+ CC="clang --sysroot=$MOZ_FETCHES_DIR/sysroot-x86_64-linux-gnu"
+ EXE=
+ ;;
+x86_64-apple-darwin)
+ export MACOSX_DEPLOYMENT_TARGET=10.12
+ TARGET=$1
+ CC="clang --target=$TARGET -isysroot $MOZ_FETCHES_DIR/MacOSX13.3.sdk"
+ EXE=
+ ;;
+aarch64-apple-darwin)
+ export MACOSX_DEPLOYMENT_TARGET=11.0
+ TARGET=$1
+ CC="clang --target=$TARGET -isysroot $MOZ_FETCHES_DIR/MacOSX13.3.sdk"
+ EXE=
+ ;;
+x86_64-pc-windows-gnu)
+ TARGET=x86_64-w64-mingw32
+ CC="x86_64-w64-mingw32-clang -DPKGCONFIG_IS_STATIC=1"
+ EXE=.exe
+ ;;
+esac
+
+./configure --disable-shared CC="$CC" AR=llvm-ar RANLIB=llvm-ranlib LDFLAGS=-fuse-ld=lld ${TARGET:+--host=$TARGET}
+make -j$(nproc) V=1
+
+mv ${PROJECT}${EXE} ${PROJECT}_tmp
+mkdir ${PROJECT}
+mv ${PROJECT}_tmp ${PROJECT}/pkg-config${EXE}
+tar -acf ${PROJECT}.tar.zst ${PROJECT}
+
+mkdir -p $UPLOAD_DIR
+mv ${PROJECT}.tar.zst $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-resourcemonitor.sh b/taskcluster/scripts/misc/build-resourcemonitor.sh
new file mode 100755
index 0000000000..6643079c76
--- /dev/null
+++ b/taskcluster/scripts/misc/build-resourcemonitor.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+set -x -e -v
+
+cd "$MOZ_FETCHES_DIR"/resource-monitor/ || exit 1
+COMPRESS_EXT=zst
+
+PATH="$MOZ_FETCHES_DIR/go/bin:$PATH"
+export PATH
+
+EXE_SUFFIX=""
+
+case "$1" in
+ linux64) GOOS=linux; GOARCH=amd64 ;;
+ macos64) GOOS=darwin; GOARCH=amd64 ;;
+ windows64) GOOS=windows; GOARCH=amd64; EXE_SUFFIX=".exe" ;;
+ windows32) GOOS=windows; GOARCH=386; EXE_SUFFIX=".exe" ;;
+ *)
+ echo "Architecture $1 not recognized in build-resourcemonitor.sh" >&2
+ exit 1
+ ;;
+esac
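+# Hypothetical invocation: "build-resourcemonitor.sh windows64" cross-builds
+# resource-monitor.exe via Go's GOOS/GOARCH environment variables.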
+
+export GOOS
+export GOARCH
+export EXE_SUFFIX
+
+echo "GOOS=$GOOS"
+echo "GOARCH=$GOARCH"
+
+go build .
+
+STAGING_DIR="resource-monitor"
+mv "resource-monitor${EXE_SUFFIX}" resource-monitor.tmp
+mkdir "${STAGING_DIR}"
+
+cp resource-monitor.tmp "${STAGING_DIR}/resource-monitor${EXE_SUFFIX}"
+
+tar -acf "resource-monitor.tar.$COMPRESS_EXT" "${STAGING_DIR}"/
+mkdir -p "$UPLOAD_DIR"
+cp "resource-monitor.tar.$COMPRESS_EXT" "$UPLOAD_DIR"
diff --git a/taskcluster/scripts/misc/build-rust-based-toolchain.sh b/taskcluster/scripts/misc/build-rust-based-toolchain.sh
new file mode 100755
index 0000000000..a5939ed58e
--- /dev/null
+++ b/taskcluster/scripts/misc/build-rust-based-toolchain.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+set -x -e -v
+
+artifact=$(basename "$TOOLCHAIN_ARTIFACT")
+project=${artifact%.tar.*}
+workspace=$HOME/workspace
+
+# Exported for osx-cross-linker.
+export TARGET=$1
+shift
+
+FEATURES="$@"
+
+case "$TARGET" in
+x86_64-unknown-linux-gnu)
+ # Native Linux Build
+ export RUSTFLAGS="-Clinker=$MOZ_FETCHES_DIR/clang/bin/clang++ -C link-arg=--sysroot=$MOZ_FETCHES_DIR/sysroot-x86_64-linux-gnu -C link-arg=-fuse-ld=lld"
+ export CC=$MOZ_FETCHES_DIR/clang/bin/clang
+ export CXX=$MOZ_FETCHES_DIR/clang/bin/clang++
+ export TARGET_CFLAGS="--sysroot=$MOZ_FETCHES_DIR/sysroot-x86_64-linux-gnu"
+ export TARGET_CXXFLAGS="-D_GLIBCXX_USE_CXX11_ABI=0 --sysroot=$MOZ_FETCHES_DIR/sysroot-x86_64-linux-gnu"
+ ;;
+*-apple-darwin)
+ # Cross-compiling for Mac on Linux.
+ export PATH="$MOZ_FETCHES_DIR/clang/bin:$PATH"
+ export RUSTFLAGS="-C linker=$GECKO_PATH/taskcluster/scripts/misc/osx-cross-linker"
+ if test "$TARGET" = "aarch64-apple-darwin"; then
+ export MACOSX_DEPLOYMENT_TARGET=11.0
+ else
+ export MACOSX_DEPLOYMENT_TARGET=10.12
+ fi
+ export CC="$MOZ_FETCHES_DIR/clang/bin/clang"
+ export CXX="$MOZ_FETCHES_DIR/clang/bin/clang++"
+ export TARGET_CFLAGS="-isysroot $MOZ_FETCHES_DIR/MacOSX13.3.sdk"
+ export TARGET_CXXFLAGS="-isysroot $MOZ_FETCHES_DIR/MacOSX13.3.sdk -stdlib=libc++"
+ ;;
+*-pc-windows-msvc)
+ # Cross-compiling for Windows on Linux.
+ export CC=$MOZ_FETCHES_DIR/clang/bin/clang-cl
+ export CXX=$MOZ_FETCHES_DIR/clang/bin/clang-cl
+ export TARGET_AR=$MOZ_FETCHES_DIR/clang/bin/llvm-lib
+
+ . $GECKO_PATH/taskcluster/scripts/misc/vs-setup.sh
+ export CARGO_TARGET_I686_PC_WINDOWS_MSVC_LINKER=$MOZ_FETCHES_DIR/clang/bin/lld-link
+ export CARGO_TARGET_X86_64_PC_WINDOWS_MSVC_LINKER=$MOZ_FETCHES_DIR/clang/bin/lld-link
+ export TARGET_CFLAGS="-Xclang -ivfsoverlay -Xclang $MOZ_FETCHES_DIR/vs/overlay.yaml"
+ export TARGET_CXXFLAGS="-Xclang -ivfsoverlay -Xclang $MOZ_FETCHES_DIR/vs/overlay.yaml"
+ ;;
+esac
+
+PATH="$MOZ_FETCHES_DIR/rustc/bin:$PATH"
+
+cargo install \
+ --locked \
+ --verbose \
+ --path $MOZ_FETCHES_DIR/${FETCH-$project} \
+ --target-dir $workspace/obj \
+ --root $workspace/out \
+ --target "$TARGET" \
+ ${FEATURES:+--features "$FEATURES"}
+
+mkdir $workspace/$project
+mv $workspace/out/bin/* $workspace/$project
+tar -C $workspace -acvf $project.tar.zst $project
+mkdir -p $UPLOAD_DIR
+mv $project.tar.zst $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-sysroot-wasi.sh b/taskcluster/scripts/misc/build-sysroot-wasi.sh
new file mode 100755
index 0000000000..2c6ef551ce
--- /dev/null
+++ b/taskcluster/scripts/misc/build-sysroot-wasi.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+set -x -e -v
+
+artifact=$(basename $TOOLCHAIN_ARTIFACT)
+sysroot=${artifact%.tar.*}
+
+# Make the wasi compiler-rt available to clang.
+env UPLOAD_DIR= $GECKO_PATH/taskcluster/scripts/misc/repack-clang.sh
+
+cd $MOZ_FETCHES_DIR/wasi-sdk
+LLVM_PROJ_DIR=$MOZ_FETCHES_DIR/llvm-project
+
+mkdir -p build/install/wasi
+# The wasi-sdk build system wants to build clang itself. We trick it into
+# thinking it did, and put our own clang where it would have built its own.
+ln -s $MOZ_FETCHES_DIR/clang build/llvm
+touch build/llvm.BUILT
+
+# The wasi-sdk build system wants a clang and an ar binary in
+# build/install/$PREFIX/bin
+ln -s $MOZ_FETCHES_DIR/clang/bin build/install/wasi/bin
+ln -s llvm-ar build/install/wasi/bin/ar
+
+# Build wasi-libc, libc++ and libc++abi.
+do_make() {
+ make \
+ LLVM_PROJ_DIR=$LLVM_PROJ_DIR \
+ PREFIX=/wasi \
+ -j$(nproc) \
+ $1
+}
+
+do_make build/wasi-libc.BUILT
+
+# The wasi-sdk build system has a dependency on compiler-rt for libcxxabi,
+# but that's not actually necessary. Pretend it's already built.
+# Because compiler-rt has a dependency on wasi-libc, we can only do this
+# after wasi-libc is built.
+touch build/compiler-rt.BUILT
+
+do_make build/libcxx.BUILT
+
+mv build/install/wasi/share/wasi-sysroot $sysroot
+tar --zstd -cf $artifact $sysroot
+mkdir -p $UPLOAD_DIR
+mv $artifact $UPLOAD_DIR/
diff --git a/taskcluster/scripts/misc/build-sysroot.sh b/taskcluster/scripts/misc/build-sysroot.sh
new file mode 100755
index 0000000000..8b110eadf7
--- /dev/null
+++ b/taskcluster/scripts/misc/build-sysroot.sh
@@ -0,0 +1,127 @@
+#!/bin/sh
+
+set -x
+set -e
+
+arch=$1
+shift
+
+sysroot=$(basename $TOOLCHAIN_ARTIFACT)
+sysroot=${sysroot%%.*}
+
+# To repackage Firefox as a .deb package we bootstrap jessie systems on a
+# bullseye image. To keep the build and repackage environments consistent,
+# the build baseline used here (jessie) should be kept in sync with the
+# packaging baseline used in
+# taskcluster/docker/debian-repackage/Dockerfile
+# and python/mozbuild/mozbuild/repackaging/deb.py
+case "$arch" in
+i386|amd64)
+ dist=jessie
+ if [ -n "$PACKAGES_TASKS" ]; then
+ gcc_version=8
+ else
+ gcc_version=4.9
+ fi
+ # The Debian Jessie GPG key expired.
+ extra_apt_opt='Apt::Key::gpgvcommand "/usr/local/sbin/gpgvnoexpkeysig"'
+ ;;
+arm64)
+ dist=buster
+ gcc_version=8
+ ;;
+*)
+ echo "$arch is not supported." >&2
+ exit 1
+ ;;
+esac
+
+case "$dist" in
+jessie)
+ repo_url=https://archive.debian.org/debian
+ ;;
+*)
+ SNAPSHOT=20210208T213147Z
+ repo_url=http://snapshot.debian.org/archive/debian/$SNAPSHOT
+ ;;
+esac
+
+packages="
+ linux-libc-dev
+ libasound2-dev
+ libstdc++-${gcc_version}-dev
+ libdbus-glib-1-dev
+ libfontconfig1-dev
+ libfreetype6-dev
+ libgconf2-dev
+ libgcc-${gcc_version}-dev
+ libgtk-3-dev
+ libpango1.0-dev
+ libpulse-dev
+ libx11-xcb-dev
+ libxt-dev
+ valgrind
+ $*
+"
+
+# --keyring=... works around https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=981710
+# For a sysroot we only need libraries, headers, and pkg-config files, so we
+# exclude debug info files and valgrind files that are not useful for building.
+queue_base="$TASKCLUSTER_ROOT_URL/api/queue/v1"
+(
+ echo "deb $repo_url $dist main"
+ for task in $PACKAGES_TASKS; do
+ echo "deb [trusted=yes] $queue_base/task/$task/artifacts/public/build/ apt/"
+ done
+) | mmdebstrap \
+ --architectures=$arch \
+ --variant=extract \
+ --include=$(echo $packages | tr ' ' ,) \
+ $dist \
+ $sysroot \
+ - \
+ --aptopt=/etc/apt/apt.conf.d/99taskcluster \
+ ${extra_apt_opt:+--aptopt="$extra_apt_opt"} \
+ --dpkgopt=path-exclude="*" \
+ --dpkgopt=path-include="/lib/*" \
+ --dpkgopt=path-include="/lib32/*" \
+ --dpkgopt=path-include="/usr/include/*" \
+ --dpkgopt=path-include="/usr/lib/*" \
+ --dpkgopt=path-include="/usr/lib32/*" \
+ --dpkgopt=path-exclude="/usr/lib/debug/*" \
+ --dpkgopt=path-exclude="/usr/lib/python*" \
+ --dpkgopt=path-exclude="/usr/lib/valgrind/*" \
+ --dpkgopt=path-include="/usr/share/pkgconfig/*" \
+ --keyring=/usr/share/keyrings/debian-archive-removed-keys.gpg \
+ -v
+
+# Remove files that are created despite the path-exclude=*.
+rm -rf $sysroot/etc $sysroot/dev $sysroot/tmp $sysroot/var
+
+# Remove empty directories
+find $sysroot -depth -type d -empty -delete
+
+# Adjust symbolic links to link into the sysroot instead of absolute
+# paths that end up pointing at the host system.
+find $sysroot -type l | while read l; do
+ t=$(readlink $l)
+ case "$t" in
+ /*)
+ # We have a path in the form "$sysroot/a/b/c/d" and we want ../../..,
+ # which is how we get from d to the root of the sysroot. For that,
+ # we start from the directory containing d ("$sysroot/a/b/c"), remove
+ # all non-slash characters, leaving us with "///", replace each slash
+ # with "../", which gives us "../../../", and then remove the last
+ # slash.
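+ # Worked example (hypothetical names): a link
+ # $sysroot/usr/lib/libfoo.so -> /lib/libbar.so.1 gets rel="../..", so it
+ # is rewritten to ../../lib/libbar.so.1, which resolves inside the sysroot.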
+ rel=$(dirname $l | sed 's,[^/],,g;s,/,../,g;s,/$,,')
+ ln -sf $rel$t $l
+ ;;
+ esac
+done
+
+tar caf $sysroot.tar.zst $sysroot
+
+mkdir -p "$UPLOAD_DIR"
+mv "$sysroot.tar.zst" "$UPLOAD_DIR"
diff --git a/taskcluster/scripts/misc/build-upx.sh b/taskcluster/scripts/misc/build-upx.sh
new file mode 100755
index 0000000000..a0ad9af626
--- /dev/null
+++ b/taskcluster/scripts/misc/build-upx.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+set -x -e -v
+
+WORKSPACE=$HOME/workspace
+INSTALL_DIR=$WORKSPACE/upx
+
+mkdir -p $INSTALL_DIR/bin
+
+cd $WORKSPACE
+
+git clone -n https://github.com/upx/upx.git upx-clone
+cd upx-clone
+# https://github.com/upx/upx/releases/tag/v3.95
+git checkout 7a3637ff5a800b8bcbad20ae7f668d8c8449b014 # pin the build to this exact v3.95 commit
+git submodule update --init --recursive
+cd src
+make -j$(nproc)
+cp upx.out $INSTALL_DIR/bin/upx
+
+# --------------
+
+cd $WORKSPACE
+tar caf upx.tar.zst upx
+
+mkdir -p $UPLOAD_DIR
+cp upx.tar.* $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-winchecksec.sh b/taskcluster/scripts/misc/build-winchecksec.sh
new file mode 100755
index 0000000000..f13ef5b77b
--- /dev/null
+++ b/taskcluster/scripts/misc/build-winchecksec.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+set -e -v -x
+
+mkdir -p $UPLOAD_DIR
+
+cd $MOZ_FETCHES_DIR/winchecksec
+
+SUFFIX=
+
+case "$1" in
+x86_64-pc-windows-msvc)
+ SUFFIX=.exe
+ export PATH="$MOZ_FETCHES_DIR/clang/bin:$PATH"
+
+ . $GECKO_PATH/taskcluster/scripts/misc/vs-setup.sh
+
+ # Patch pe-parse because clang-cl doesn't support /analyze.
+ patch -p1 <<'EOF'
+--- a/pe-parse/cmake/compilation_flags.cmake
++++ b/pe-parse/cmake/compilation_flags.cmake
+@@ -1,5 +1,5 @@
+ if (MSVC)
+- list(APPEND DEFAULT_CXX_FLAGS /W4 /analyze)
++ list(APPEND DEFAULT_CXX_FLAGS /W4)
+
+ if (CMAKE_BUILD_TYPE STREQUAL "Debug" OR CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
+ list(APPEND DEFAULT_CXX_FLAGS /Zi)
+EOF
+
+ CMAKE_FLAGS='
+ -DCMAKE_CXX_COMPILER=clang-cl
+ -DCMAKE_C_COMPILER=clang-cl
+ -DCMAKE_LINKER=lld-link
+ -DCMAKE_C_FLAGS="-fuse-ld=lld -Xclang -ivfsoverlay -Xclang $MOZ_FETCHES_DIR/vs/overlay.yaml"
+ -DCMAKE_CXX_FLAGS="-fuse-ld=lld -EHsc -Xclang -ivfsoverlay -Xclang $MOZ_FETCHES_DIR/vs/overlay.yaml"
+ -DCMAKE_RC_COMPILER=llvm-rc
+ -DCMAKE_MT=llvm-mt
+ -DCMAKE_SYSTEM_NAME=Windows
+ -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded
+ '
+ ;;
+esac
+
+eval cmake \
+ -GNinja \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DBUILD_SHARED_LIBS=Off \
+ $CMAKE_FLAGS
+
+ninja -v
+
+cd ..
+tar -caf winchecksec.tar.zst winchecksec/winchecksec${SUFFIX}
+cp winchecksec.tar.zst $UPLOAD_DIR/
diff --git a/taskcluster/scripts/misc/build-wine.sh b/taskcluster/scripts/misc/build-wine.sh
new file mode 100755
index 0000000000..e292fd2e1e
--- /dev/null
+++ b/taskcluster/scripts/misc/build-wine.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+set -x -e -v
+
+WORKSPACE=$HOME/workspace
+INSTALL_DIR=$WORKSPACE/wine
+
+mkdir -p $INSTALL_DIR
+mkdir -p $WORKSPACE/build/wine
+mkdir -p $WORKSPACE/build/wine64
+
+cd $WORKSPACE/build/wine64
+$MOZ_FETCHES_DIR/wine-source/configure --enable-win64 --without-x --without-freetype --prefix=$INSTALL_DIR/
+make -j$(nproc)
+
+cd $WORKSPACE/build/wine
+$MOZ_FETCHES_DIR/wine-source/configure --with-wine64=../wine64 --without-x --without-freetype --prefix=$INSTALL_DIR/
+make -j$(nproc)
+make install
+
+cd $WORKSPACE/build/wine64
+make install
+
+# --------------
+
+cd $WORKSPACE/
+tar caf wine.tar.zst wine
+
+mkdir -p $UPLOAD_DIR
+cp wine.tar.* $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-xar-linux.sh b/taskcluster/scripts/misc/build-xar-linux.sh
new file mode 100755
index 0000000000..5c1706b10e
--- /dev/null
+++ b/taskcluster/scripts/misc/build-xar-linux.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building xar for Linux.
+mkdir -p $UPLOAD_DIR
+
+export PATH=$PATH:$MOZ_FETCHES_DIR/clang/bin
+cd $MOZ_FETCHES_DIR/xar/xar
+
+./autogen.sh --prefix=/builds/worker --enable-static
+make_flags="-j$(nproc)"
+make $make_flags
+
+cd $(mktemp -d)
+mkdir xar
+
+cp $MOZ_FETCHES_DIR/xar/xar/src/xar ./xar/xar
+tar caf $UPLOAD_DIR/xar.tar.zst ./xar
diff --git a/taskcluster/scripts/misc/fetch-chromium.py b/taskcluster/scripts/misc/fetch-chromium.py
new file mode 100644
index 0000000000..d9a03261d1
--- /dev/null
+++ b/taskcluster/scripts/misc/fetch-chromium.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python3 -u
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+This script downloads the latest chromium build (or a manually
+specified revision) for a given platform, then uploads the archive
+with the build's revision stored in a .REVISION file inside it.
+"""
+
+import argparse
+import errno
+import os
+import shutil
+import subprocess
+import tempfile
+
+import requests
+from redo import retriable
+
+LAST_CHANGE_URL = (
+ # formatted with platform
+ "https://www.googleapis.com/download/storage/v1/b/"
+ "chromium-browser-snapshots/o/{}%2FLAST_CHANGE?alt=media"
+)
+
+CHROMIUM_BASE_URL = (
+ # formatted with (platform/revision/archive)
+ "https://www.googleapis.com/download/storage/v1/b/"
+ "chromium-browser-snapshots/o/{}%2F{}%2F{}?alt=media"
+)
+
+
+CHROMIUM_INFO = {
+ "linux": {
+ "platform": "Linux_x64",
+ "chromium": "chrome-linux.zip",
+ "result": "chromium-linux.tar.bz2",
+ "chromedriver": "chromedriver_linux64.zip",
+ },
+ "win32": {
+ "platform": "Win",
+ "chromium": "chrome-win.zip",
+ "result": "chromium-win32.tar.bz2",
+ "chromedriver": "chromedriver_win32.zip",
+ },
+ "win64": {
+ "platform": "Win",
+ "chromium": "chrome-win.zip",
+ "result": "chromium-win64.tar.bz2",
+ "chromedriver": "chromedriver_win32.zip",
+ },
+ "mac": {
+ "platform": "Mac",
+ "chromium": "chrome-mac.zip",
+ "result": "chromium-mac.tar.bz2",
+ "chromedriver": "chromedriver_mac64.zip",
+ },
+}
+
+
+def log(msg):
+ print("build-chromium: %s" % msg)
+
+
+@retriable(attempts=7, sleeptime=5, sleepscale=2)
+def fetch_file(url, filepath):
+ """Download a file from the given url to a given file."""
+ size = 4096
+ r = requests.get(url, stream=True)
+ r.raise_for_status()
+
+ with open(filepath, "wb") as fd:
+ for chunk in r.iter_content(size):
+ fd.write(chunk)
+
+
+def unzip(zippath, target):
+ """Unzips an archive to the target location."""
+ log("Unpacking archive at: %s to: %s" % (zippath, target))
+ unzip_command = ["unzip", "-q", "-o", zippath, "-d", target]
+ subprocess.check_call(unzip_command)
+
+
+@retriable(attempts=7, sleeptime=5, sleepscale=2)
+def fetch_chromium_revision(platform):
+ """Get the revision of the latest chromium build."""
+ chromium_platform = CHROMIUM_INFO[platform]["platform"]
+ revision_url = LAST_CHANGE_URL.format(chromium_platform)
+
+ log("Getting revision number for latest %s chromium build..." % chromium_platform)
+
+ # Expecting a file with a single number indicating the latest
+ # chromium build with a chromedriver that we can download
+ r = requests.get(revision_url, timeout=30)
+ r.raise_for_status()
+
+ chromium_revision = r.content.decode("utf-8")
+ return chromium_revision.strip()
+
+
+def fetch_chromium_build(platform, revision, zippath):
+ """Download a chromium build for a given revision, or the latest."""
+ if not revision:
+ revision = fetch_chromium_revision(platform)
+
+ download_platform = CHROMIUM_INFO[platform]["platform"]
+ download_url = CHROMIUM_BASE_URL.format(
+ download_platform, revision, CHROMIUM_INFO[platform]["chromium"]
+ )
+
+ log("Downloading %s chromium build revision %s..." % (download_platform, revision))
+ log(download_url)
+ fetch_file(download_url, zippath)
+ return revision
+
+
+def fetch_chromedriver(platform, revision, chromium_dir):
+ """Get the chromedriver for the given revision and repackage it."""
+ download_url = CHROMIUM_BASE_URL.format(
+ CHROMIUM_INFO[platform]["platform"],
+ revision,
+ CHROMIUM_INFO[platform]["chromedriver"],
+ )
+
+ tmpzip = os.path.join(tempfile.mkdtemp(), "cd-tmp.zip")
+ log("Downloading chromedriver from %s" % download_url)
+ fetch_file(download_url, tmpzip)
+
+ tmppath = tempfile.mkdtemp()
+ unzip(tmpzip, tmppath)
+
+ # Find the chromedriver then copy it to the chromium directory
+ cd_path = None
+ for dirpath, _, filenames in os.walk(tmppath):
+ for filename in filenames:
+ if filename == "chromedriver" or filename == "chromedriver.exe":
+ cd_path = os.path.join(dirpath, filename)
+ break
+ if cd_path is not None:
+ break
+ if cd_path is None:
+ raise Exception("Could not find chromedriver binary in %s" % tmppath)
+ log("Copying chromedriver from: %s to: %s" % (cd_path, chromium_dir))
+ shutil.copy(cd_path, chromium_dir)
+
+
+def build_chromium_archive(platform, revision=None):
+ """
+ Download and store a chromium build for a given platform.
+
+ Retrieves either the latest version, or uses a pre-defined version if
+ the `--revision` option is given a revision.
+ """
+ upload_dir = os.environ.get("UPLOAD_DIR")
+ if upload_dir:
+ # Create the upload directory if it doesn't exist.
+ try:
+ log("Creating upload directory in %s..." % os.path.abspath(upload_dir))
+ os.makedirs(upload_dir)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ # Make a temporary location for the file
+ tmppath = tempfile.mkdtemp()
+ tmpzip = os.path.join(tmppath, "tmp-chromium.zip")
+
+ revision = fetch_chromium_build(platform, revision, tmpzip)
+
+ # Unpack archive in `tmpzip` to store the revision number and
+ # the chromedriver
+ unzip(tmpzip, tmppath)
+
+ dirs = [
+ d
+ for d in os.listdir(tmppath)
+ if os.path.isdir(os.path.join(tmppath, d)) and d.startswith("chrome-")
+ ]
+
+ if len(dirs) > 1:
+ raise Exception(
+ "Too many directories starting with `chrome-` after extracting."
+ )
+ elif len(dirs) == 0:
+ raise Exception(
+ "Could not find any directories after extraction of chromium zip."
+ )
+
+ chromium_dir = os.path.join(tmppath, dirs[0])
+ revision_file = os.path.join(chromium_dir, ".REVISION")
+ with open(revision_file, "w+") as f:
+ f.write(str(revision))
+
+ # Get and store the chromedriver
+ fetch_chromedriver(platform, revision, chromium_dir)
+
+ tar_file = CHROMIUM_INFO[platform]["result"]
+ tar_command = ["tar", "cjf", tar_file, "-C", tmppath, dirs[0]]
+ log("Added revision to %s file." % revision_file)
+
+ log("Tarring with the command: %s" % str(tar_command))
+ subprocess.check_call(tar_command)
+
+ upload_dir = os.environ.get("UPLOAD_DIR")
+ if upload_dir:
+ # Move the tarball to the output directory for upload.
+ log("Moving %s to the upload directory..." % tar_file)
+ shutil.copy(tar_file, os.path.join(upload_dir, tar_file))
+
+ shutil.rmtree(tmppath)
+
+
+def parse_args():
+ """Read command line arguments and return options."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--platform", help="Platform of the chromium build to fetch.", required=True
+ )
+ parser.add_argument(
+ "--revision",
+ help="Revision of chromium to fetch. "
+ "(Defaults to the newest chromium build).",
+ default=None,
+ )
+
+ return parser.parse_args()
+
+
+if __name__ == "__main__":
+ args = vars(parse_args())
+ build_chromium_archive(**args)
diff --git a/taskcluster/scripts/misc/fetch-content b/taskcluster/scripts/misc/fetch-content
new file mode 100755
index 0000000000..f3160fad5d
--- /dev/null
+++ b/taskcluster/scripts/misc/fetch-content
@@ -0,0 +1,881 @@
+#!/usr/bin/python3 -u
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import bz2
+import concurrent.futures
+import contextlib
+import datetime
+import gzip
+import hashlib
+import json
+import lzma
+import multiprocessing
+import os
+import pathlib
+import random
+import re
+import stat
+import subprocess
+import sys
+import tarfile
+import tempfile
+import time
+import urllib.parse
+import urllib.request
+import zipfile
+
+try:
+ import zstandard
+except ImportError:
+ zstandard = None
+
+try:
+ import certifi
+except ImportError:
+ certifi = None
+
+
+CONCURRENCY = multiprocessing.cpu_count()
+
+
+def log(msg):
+ print(msg, file=sys.stderr)
+ sys.stderr.flush()
+
+
+class IntegrityError(Exception):
+ """Represents an integrity error when downloading a URL."""
+
+
+def ZstdCompressor(*args, **kwargs):
+ if not zstandard:
+ raise ValueError("zstandard Python package not available")
+ return zstandard.ZstdCompressor(*args, **kwargs)
+
+
+def ZstdDecompressor(*args, **kwargs):
+ if not zstandard:
+ raise ValueError("zstandard Python package not available")
+ return zstandard.ZstdDecompressor(*args, **kwargs)
+
+
+@contextlib.contextmanager
+def rename_after_close(fname, *args, **kwargs):
+ """
+ Context manager that opens a temporary file to use as a writer,
+ and closes the file on context exit, renaming it to the expected
+ file name in case of success, or removing it in case of failure.
+
+ Takes the same options as open(), but must be used as a context
+ manager.
+ """
+ path = pathlib.Path(fname)
+ tmp = path.with_name("%s.tmp" % path.name)
+ try:
+ with tmp.open(*args, **kwargs) as fh:
+ yield fh
+ except Exception:
+ tmp.unlink()
+ raise
+ else:
+ tmp.rename(fname)
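+
+# Hypothetical usage:
+#     with rename_after_close("out.bin", "wb") as fh:
+#         fh.write(b"...")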
+
+
+# The following is copied from
+# https://github.com/mozilla-releng/redo/blob/6d07678a014e0c525e54a860381a165d34db10ff/redo/__init__.py#L15-L85
+def retrier(attempts=5, sleeptime=10, max_sleeptime=300, sleepscale=1.5, jitter=1):
+ """
+ A generator function that sleeps between retries, handles exponential
+ backoff and jitter. The action you are retrying is meant to run after
+ retrier yields.
+
+ At each iteration, we sleep for sleeptime + random.randint(-jitter, jitter).
+ Afterwards sleeptime is multiplied by sleepscale for the next iteration.
+
+ Args:
+ attempts (int): maximum number of times to try; defaults to 5
+ sleeptime (float): how many seconds to sleep between tries; defaults to
+ 10s
+ max_sleeptime (float): the longest we'll sleep, in seconds; defaults to
+ 300s (five minutes)
+ sleepscale (float): how much to multiply the sleep time by each
+ iteration; defaults to 1.5
+ jitter (int): random jitter to introduce to sleep time each iteration.
+ the amount is chosen at random between [-jitter, +jitter]
+ defaults to 1
+
+ Yields:
+ None, a maximum of `attempts` number of times
+
+ Example:
+ >>> n = 0
+ >>> for _ in retrier(sleeptime=0, jitter=0):
+ ... if n == 3:
+ ... # We did the thing!
+ ... break
+ ... n += 1
+ >>> n
+ 3
+
+ >>> n = 0
+ >>> for _ in retrier(sleeptime=0, jitter=0):
+ ... if n == 6:
+ ... # We did the thing!
+ ... break
+ ... n += 1
+ ... else:
+ ... print("max tries hit")
+ max tries hit
+ """
+ jitter = jitter or 0 # py35 barfs on the next line if jitter is None
+ if jitter > sleeptime:
+ # To prevent negative sleep times
+ raise Exception(
+ "jitter ({}) must be less than sleep time ({})".format(jitter, sleeptime)
+ )
+
+ sleeptime_real = sleeptime
+ for _ in range(attempts):
+ log("attempt %i/%i" % (_ + 1, attempts))
+
+ yield sleeptime_real
+
+ if jitter:
+ sleeptime_real = sleeptime + random.randint(-jitter, jitter)
+ # our jitter should scale along with the sleeptime
+ jitter = int(jitter * sleepscale)
+ else:
+ sleeptime_real = sleeptime
+
+ sleeptime *= sleepscale
+
+ if sleeptime_real > max_sleeptime:
+ sleeptime_real = max_sleeptime
+
+ # Don't need to sleep the last time
+ if _ < attempts - 1:
+ log(
+ "sleeping for %.2fs (attempt %i/%i)" % (sleeptime_real, _ + 1, attempts)
+ )
+ time.sleep(sleeptime_real)
+
+
+def stream_download(url, sha256=None, size=None):
+ """Download a URL to a generator, optionally with content verification.
+
+ If ``sha256`` or ``size`` are defined, the downloaded URL will be
+ validated against those requirements and ``IntegrityError`` will be
+ raised if expectations do not match.
+
+ Because verification cannot occur until the file is completely downloaded
+ it is recommended for consumers to not do anything meaningful with the
+ data if content verification is being used. To securely handle retrieved
+ content, it should be streamed to a file or memory and only operated
+ on after the generator is exhausted without raising.
+ """
+ log("Downloading %s" % url)
+
+ h = hashlib.sha256()
+ length = 0
+
+ t0 = time.time()
+ with urllib.request.urlopen(
+ url, timeout=60, cafile=certifi.where()
+ ) if certifi else urllib.request.urlopen(url, timeout=60) as fh:
+ if not url.endswith(".gz") and fh.info().get("Content-Encoding") == "gzip":
+ fh = gzip.GzipFile(fileobj=fh)
+
+ while True:
+ chunk = fh.read(65536)
+ if not chunk:
+ break
+
+ h.update(chunk)
+ length += len(chunk)
+
+ yield chunk
+
+ duration = time.time() - t0
+ digest = h.hexdigest()
+
+ log(
+ "%s resolved to %d bytes with sha256 %s in %.3fs"
+ % (url, length, digest, duration)
+ )
+
+ if size:
+ if size == length:
+ log("Verified size of %s" % url)
+ else:
+ raise IntegrityError(
+ "size mismatch on %s: wanted %d; got %d" % (url, size, length)
+ )
+
+ if sha256:
+ if digest == sha256:
+ log("Verified sha256 integrity of %s" % url)
+ else:
+ raise IntegrityError(
+ "sha256 mismatch on %s: wanted %s; got %s" % (url, sha256, digest)
+ )
+
+
+def download_to_path(url, path, sha256=None, size=None):
+ """Download a URL to a filesystem path, possibly with verification."""
+
+ # We download to a temporary file and rename at the end so there's
+ # no chance of the final file being partially written or containing
+ # bad data.
+ try:
+ path.unlink()
+ except FileNotFoundError:
+ pass
+
+ for _ in retrier(attempts=5, sleeptime=60):
+ try:
+ log("Downloading %s to %s" % (url, path))
+
+ with rename_after_close(path, "wb") as fh:
+ for chunk in stream_download(url, sha256=sha256, size=size):
+ fh.write(chunk)
+
+ return
+ except IntegrityError:
+ raise
+ except Exception as e:
+ log("Download failed: {}".format(e))
+ continue
+
+ raise Exception("Download failed, no more retries!")
+
+
+def download_to_memory(url, sha256=None, size=None):
+ """Download a URL to memory, possibly with verification."""
+
+ data = b""
+ for _ in retrier(attempts=5, sleeptime=60):
+ try:
+ log("Downloading %s" % (url))
+
+ for chunk in stream_download(url, sha256=sha256, size=size):
+ data += chunk
+
+ return data
+ except IntegrityError:
+ raise
+ except Exception as e:
+ log("Download failed: {}".format(e))
+ continue
+
+ raise Exception("Download failed, no more retries!")
+
+
+def gpg_verify_path(path: pathlib.Path, public_key_data: bytes, signature_data: bytes):
+ """Verify that a filesystem path verifies using GPG.
+
+ Takes a Path defining a file to verify. ``public_key_data`` contains
+ bytes with GPG public key data. ``signature_data`` contains a signed
+ GPG document to use with ``gpg --verify``.
+ """
+ log("Validating GPG signature of %s" % path)
+ log("GPG key data:\n%s" % public_key_data.decode("ascii"))
+
+ with tempfile.TemporaryDirectory() as td:
+ try:
+ # --batch since we're running unattended.
+ gpg_args = ["gpg", "--homedir", td, "--batch"]
+
+ log("Importing GPG key...")
+ subprocess.run(gpg_args + ["--import"], input=public_key_data, check=True)
+
+ log("Verifying GPG signature...")
+ subprocess.run(
+ gpg_args + ["--verify", "-", "%s" % path],
+ input=signature_data,
+ check=True,
+ )
+
+ log("GPG signature verified!")
+ finally:
+ # There is a race between the agent self-terminating and
+ # shutil.rmtree() from the temporary directory cleanup that can
+ # lead to exceptions. Kill the agent before cleanup to prevent this.
+ env = dict(os.environ)
+ env["GNUPGHOME"] = td
+ subprocess.run(["gpgconf", "--kill", "gpg-agent"], env=env)
+
+
+def open_tar_stream(path: pathlib.Path):
+ """Open the given tar archive and return a decompressed binary stream."""
+ if path.suffix == ".bz2":
+ return bz2.open(str(path), "rb")
+ elif path.suffix in (".gz", ".tgz"):
+ return gzip.open(str(path), "rb")
+ elif path.suffix == ".xz":
+ return lzma.open(str(path), "rb")
+ elif path.suffix == ".zst":
+ dctx = ZstdDecompressor()
+ return dctx.stream_reader(path.open("rb"))
+ elif path.suffix == ".tar":
+ return path.open("rb")
+ else:
+ raise ValueError("unknown archive format for tar file: %s" % path)
+
+
+def archive_type(path: pathlib.Path):
+ """Attempt to identify a path as an extractable archive."""
+ if path.suffixes[-2:-1] == [".tar"] or path.suffixes[-1:] == [".tgz"]:
+ return "tar"
+ elif path.suffix == ".zip":
+ return "zip"
+ else:
+ return None
+
+
+def extract_archive(path, dest_dir, typ):
+ """Extract an archive to a destination directory."""
+
+ # Resolve paths to absolute variants.
+ path = path.resolve()
+ dest_dir = dest_dir.resolve()
+
+ log("Extracting %s to %s" % (path, dest_dir))
+ t0 = time.time()
+
+ # We pipe input to the decompressor program so that we can apply
+ # custom decompressors that the program may not know about.
+ if typ == "tar":
+ ifh = open_tar_stream(path)
+ # On Windows, the tar program doesn't support things like symbolic
+ # links, while Windows itself does. The tarfile module in python
+ # does support them, so use that. But since tarfile is significantly
+ # slower than the tar program on Linux, only use tarfile on Windows
+ # (where it is not much slower, presumably because of Windows'
+ # notoriously bad I/O).
+ if sys.platform == "win32":
+ tar = tarfile.open(fileobj=ifh, mode="r|")
+ tar.extractall(str(dest_dir))
+ args = []
+ else:
+ args = ["tar", "xf", "-"]
+ pipe_stdin = True
+ elif typ == "zip":
+ # unzip from stdin has wonky behavior. We don't use a pipe for it.
+ ifh = open(os.devnull, "rb")
+ args = ["unzip", "-o", str(path)]
+ pipe_stdin = False
+ else:
+ raise ValueError("unknown archive format: %s" % path)
+
+ if args:
+ with ifh, subprocess.Popen(
+ args, cwd=str(dest_dir), bufsize=0, stdin=subprocess.PIPE
+ ) as p:
+ while True:
+ if not pipe_stdin:
+ break
+
+ chunk = ifh.read(131072)
+ if not chunk:
+ break
+
+ p.stdin.write(chunk)
+
+ if p.returncode:
+ raise Exception("%r exited %d" % (args, p.returncode))
+
+ log("%s extracted in %.3fs" % (path, time.time() - t0))
+
+
+def repack_archive(
+ orig: pathlib.Path, dest: pathlib.Path, strip_components=0, prefix=""
+):
+ assert orig != dest
+ log("Repacking as %s" % dest)
+ orig_typ = archive_type(orig)
+ typ = archive_type(dest)
+ if not orig_typ:
+ raise Exception("Archive type not supported for %s" % orig.name)
+ if not typ:
+ raise Exception("Archive type not supported for %s" % dest.name)
+
+ if dest.suffixes[-2:] != [".tar", ".zst"]:
+ raise Exception("Only producing .tar.zst archives is supported.")
+
+ if strip_components or prefix:
+
+ def filter(name):
+ if strip_components:
+ stripped = "/".join(name.split("/")[strip_components:])
+ if not stripped:
+ raise Exception(
+ "Stripping %d components would remove files" % strip_components
+ )
+ name = stripped
+ return prefix + name
+
+ else:
+ filter = None
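+ # e.g. strip_components=1 with prefix="gn/" maps a member named
+ # "repo-0123abc/src/main.cc" to "gn/src/main.cc" (hypothetical names).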
+
+ with rename_after_close(dest, "wb") as fh:
+ ctx = ZstdCompressor()
+ if orig_typ == "zip":
+ assert typ == "tar"
+ zip = zipfile.ZipFile(orig)
+ # Convert the zip stream to a tar on the fly.
+ with ctx.stream_writer(fh) as compressor, tarfile.open(
+ fileobj=compressor, mode="w:"
+ ) as tar:
+ for zipinfo in zip.infolist():
+ if zipinfo.is_dir():
+ continue
+ tarinfo = tarfile.TarInfo()
+ filename = zipinfo.filename
+ tarinfo.name = filter(filename) if filter else filename
+ tarinfo.size = zipinfo.file_size
+ # Zip files don't record the timezone they were created in,
+ # which makes reliably converting their dates to timestamps
+ # awkward. We care about reproducibility rather than
+ # accuracy, so we pick UTC.
+ dt = datetime.datetime(
+ *zipinfo.date_time, tzinfo=datetime.timezone.utc
+ )
+ tarinfo.mtime = dt.timestamp()
+ # 0 is MS-DOS, 3 is UNIX. Only in the latter case do we
+ # get anything useful for the tar file mode.
+ if zipinfo.create_system == 3:
+ mode = zipinfo.external_attr >> 16
+ else:
+ mode = 0o0644
+ tarinfo.mode = stat.S_IMODE(mode)
+ if stat.S_ISLNK(mode):
+ tarinfo.type = tarfile.SYMTYPE
+ tarinfo.linkname = zip.read(filename).decode()
+ tar.addfile(tarinfo, zip.open(filename))
+ elif stat.S_ISREG(mode) or stat.S_IFMT(mode) == 0:
+ tar.addfile(tarinfo, zip.open(filename))
+ else:
+ raise Exception("Unsupported file mode %o" % stat.S_IFMT(mode))
+
+ elif orig_typ == "tar":
+ if typ == "zip":
+ raise Exception("Repacking a tar to zip is not supported")
+ assert typ == "tar"
+
+ ifh = open_tar_stream(orig)
+ if filter:
+ # To apply the filter, we need to open the tar stream and
+ # tweak it.
+ origtar = tarfile.open(fileobj=ifh, mode="r|")
+ with ctx.stream_writer(fh) as compressor, tarfile.open(
+ fileobj=compressor,
+ mode="w:",
+ format=origtar.format,
+ ) as tar:
+ for tarinfo in origtar:
+ if tarinfo.isdir():
+ continue
+ tarinfo.name = filter(tarinfo.name)
+ if "path" in tarinfo.pax_headers:
+ tarinfo.pax_headers["path"] = filter(
+ tarinfo.pax_headers["path"]
+ )
+ if tarinfo.isfile():
+ tar.addfile(tarinfo, origtar.extractfile(tarinfo))
+ else:
+ tar.addfile(tarinfo)
+ else:
+ # We only change compression here. The tar stream is unchanged.
+ ctx.copy_stream(ifh, fh)
+
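+# Illustrative call (paths hypothetical): turn a downloaded zip into a
+# prefixed .tar.zst, dropping the archive's top-level directory:
+#
+#   repack_archive(
+#       pathlib.Path("archive.zip"),
+#       pathlib.Path("out/proj.tar.zst"),
+#       strip_components=1,
+#       prefix="proj/",
+#   )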
+
+def fetch_and_extract(url, dest_dir, extract=True, sha256=None, size=None):
+ """Fetch a URL and extract it to a destination path.
+
+ If the downloaded URL is an archive, it is extracted automatically
+ and the archive is deleted. Otherwise the file remains in place in
+ the destination directory.
+ """
+
+ basename = urllib.parse.urlparse(url).path.split("/")[-1]
+ dest_path = dest_dir / basename
+
+ download_to_path(url, dest_path, sha256=sha256, size=size)
+
+ if not extract:
+ return
+
+ typ = archive_type(dest_path)
+ if typ:
+ extract_archive(dest_path, dest_dir, typ)
+ log("Removing %s" % dest_path)
+ dest_path.unlink()
+
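+# Illustrative use of fetch_and_extract (URL and digest hypothetical):
+#
+#   fetch_and_extract(
+#       "https://example.com/toolchain.tar.zst",
+#       pathlib.Path("fetches"),
+#       sha256="<expected sha256 hex digest>",
+#       size=1234,
+#   )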
+
+def fetch_urls(downloads):
+ """Fetch the given (url, dest_dir, ...) download tuples concurrently."""
+ with concurrent.futures.ThreadPoolExecutor(CONCURRENCY) as e:
+ fs = []
+
+ for download in downloads:
+ fs.append(e.submit(fetch_and_extract, *download))
+
+ for f in fs:
+ f.result()
+
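+# Each entry is a tuple of positional arguments for fetch_and_extract,
+# e.g. (url, dest_dir, extract, sha256) as built by command_task_artifacts()
+# below.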
+
+def _git_checkout_github_archive(dest_path: pathlib.Path, repo: str,
+ commit: str, prefix: str):
+ 'Use the GitHub archive endpoint to speed up cloning of GitHub repositories'
+ repo = repo.rstrip('/')
+ github_url = '{repo}/archive/{commit}.tar.gz'.format(**locals())
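+ # e.g. repo "https://github.com/org/proj" and commit "deadbeef" (both
+ # hypothetical) yield "https://github.com/org/proj/archive/deadbeef.tar.gz".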
+
+ with tempfile.TemporaryDirectory() as td:
+ temp_dir = pathlib.Path(td)
+ dl_dest = temp_dir / 'archive.tar.gz'
+ download_to_path(github_url, dl_dest)
+ repack_archive(dl_dest, dest_path,
+ strip_components=1,
+ prefix=prefix + '/')
+
+
+def _github_submodule_required(repo: str, commit: str):
+ 'Probe for a .gitmodules file on GitHub to check if submodules are used'
+ url = '{repo}/blob/{commit}/.gitmodules'.format(**locals())
+ try:
+ status_code = urllib.request.urlopen(url).getcode()
+ return status_code == 200
+ except Exception:
+ return False
+
+
+def git_checkout_archive(
+ dest_path: pathlib.Path,
+ repo: str,
+ commit: str,
+ prefix=None,
+ ssh_key=None,
+ include_dot_git=False,
+):
+ """Produce an archive of the files comprising a Git checkout."""
+ dest_path.parent.mkdir(parents=True, exist_ok=True)
+
+ if not prefix:
+ prefix = repo.rstrip("/").rsplit("/", 1)[-1]
+
+ if dest_path.suffixes[-2:] != [".tar", ".zst"]:
+ raise Exception("Only producing .tar.zst archives is supported.")
+
+ if repo.startswith('https://github.com/'):
+ if not include_dot_git and not _github_submodule_required(repo, commit):
+ log("Using github archive service to speedup archive creation")
+ # Always log sha1 info, either from commit or resolved from repo.
+ if re.match(r"^[a-fA-F0-9]{40}$", commit):
+ revision = commit
+ else:
+ ref_output = subprocess.check_output(["git", "ls-remote", repo,
+ 'refs/heads/' + commit])
+ revision, _ = ref_output.decode().split(maxsplit=1)
+ log("Fetching revision {}".format(revision))
+ return _git_checkout_github_archive(dest_path, repo, commit, prefix)
+
+ with tempfile.TemporaryDirectory() as td:
+ temp_dir = pathlib.Path(td)
+
+ git_dir = temp_dir / prefix
+
+ # This could be faster with a shallow clone. However, Git requires a ref
+ # to initiate a clone. Since the commit-ish may not refer to a ref, we
+ # simply perform a full clone followed by a checkout.
+ print("cloning %s to %s" % (repo, git_dir))
+
+ env = os.environ.copy()
+ keypath = ""
+ if ssh_key:
+ taskcluster_secret_url = api(
+ os.environ.get("TASKCLUSTER_PROXY_URL"),
+ "secrets",
+ "v1",
+ "secret/{keypath}".format(keypath=ssh_key),
+ )
+ taskcluster_secret = b"".join(stream_download(taskcluster_secret_url))
+ taskcluster_secret = json.loads(taskcluster_secret)
+ sshkey = taskcluster_secret["secret"]["ssh_privkey"]
+
+ keypath = temp_dir.joinpath("ssh-key")
+ keypath.write_text(sshkey)
+ keypath.chmod(0o600)
+
+ env = {
+ "GIT_SSH_COMMAND": "ssh -o 'StrictHostKeyChecking no' -i {keypath}".format(
+ keypath=keypath
+ )
+ }
+
+ subprocess.run(["git", "clone", "-n", repo, str(git_dir)], check=True, env=env)
+
+ # Always use a detached head so that git prints out what it checked out.
+ subprocess.run(
+ ["git", "checkout", "--detach", commit], cwd=str(git_dir), check=True
+ )
+
+ # When including the .git, we want --depth 1, but a direct clone would not
+ # necessarily be able to give us the right commit.
+ if include_dot_git:
+ initial_clone = git_dir.with_name(git_dir.name + ".orig")
+ git_dir.rename(initial_clone)
+ subprocess.run(
+ [
+ "git",
+ "clone",
+ "file://" + str(initial_clone),
+ str(git_dir),
+ "--depth",
+ "1",
+ ],
+ check=True,
+ )
+ subprocess.run(
+ ["git", "remote", "set-url", "origin", repo],
+ cwd=str(git_dir),
+ check=True,
+ )
+
+ # --depth 1 can induce more work on the server side, so only use it for
+ # submodule initialization when we want to keep the .git directory.
+ depth = ["--depth", "1"] if include_dot_git else []
+ subprocess.run(
+ ["git", "submodule", "update", "--init"] + depth,
+ cwd=str(git_dir),
+ check=True,
+ )
+
+ if keypath:
+ os.remove(keypath)
+
+ print("creating archive %s of commit %s" % (dest_path, commit))
+ exclude_dot_git = [] if include_dot_git else ["--exclude=.git"]
+ proc = subprocess.Popen(
+ [
+ "tar",
+ "cf",
+ "-",
+ ]
+ + exclude_dot_git
+ + [
+ "-C",
+ str(temp_dir),
+ prefix,
+ ],
+ stdout=subprocess.PIPE,
+ )
+
+ with rename_after_close(dest_path, "wb") as out:
+ ctx = ZstdCompressor()
+ ctx.copy_stream(proc.stdout, out)
+
+ proc.wait()
+
+
+def command_git_checkout_archive(args):
+ dest = pathlib.Path(args.dest)
+
+ try:
+ git_checkout_archive(
+ dest,
+ args.repo,
+ args.commit,
+ prefix=args.path_prefix,
+ ssh_key=args.ssh_key_secret,
+ include_dot_git=args.include_dot_git,
+ )
+ except Exception:
+ try:
+ dest.unlink()
+ except FileNotFoundError:
+ pass
+
+ raise
+
+
+def command_static_url(args):
+ gpg_sig_url = args.gpg_sig_url
+ gpg_env_key = args.gpg_key_env
+
+ if bool(gpg_sig_url) != bool(gpg_env_key):
+ print("--gpg-sig-url and --gpg-key-env must both be defined")
+ return 1
+
+ if gpg_sig_url:
+ gpg_signature = b"".join(stream_download(gpg_sig_url))
+ gpg_key = os.environb[gpg_env_key.encode("ascii")]
+
+ dest = pathlib.Path(args.dest)
+ dest.parent.mkdir(parents=True, exist_ok=True)
+
+ basename = urllib.parse.urlparse(args.url).path.split("/")[-1]
+ if basename.endswith("".join(dest.suffixes)):
+ dl_dest = dest
+ else:
+ dl_dest = dest.parent / basename
+
+ try:
+ download_to_path(args.url, dl_dest, sha256=args.sha256, size=args.size)
+
+ if gpg_sig_url:
+ gpg_verify_path(dl_dest, gpg_key, gpg_signature)
+
+ if dl_dest != dest or args.strip_components or args.add_prefix:
+ repack_archive(dl_dest, dest, args.strip_components, args.add_prefix)
+ except Exception:
+ try:
+ dl_dest.unlink()
+ except FileNotFoundError:
+ pass
+
+ raise
+
+ if dl_dest != dest:
+ log("Removing %s" % dl_dest)
+ dl_dest.unlink()
+
+
+def api(root_url, service, version, path):
+ # taskcluster-lib-urls is not available when this script runs, so
+ # simulate its behavior:
+ return "{root_url}/api/{service}/{version}/{path}".format(
+ root_url=root_url, service=service, version=version, path=path
+ )
+
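+# For example, api("https://tc.example.net", "queue", "v1",
+# "task/T/artifacts/a") returns
+# "https://tc.example.net/api/queue/v1/task/T/artifacts/a" (values
+# illustrative).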
+
+def get_hash(fetch, root_url):
+ path = "task/{task}/artifacts/{artifact}".format(
+ task=fetch["task"], artifact="public/chain-of-trust.json"
+ )
+ url = api(root_url, "queue", "v1", path)
+ cot = json.loads(download_to_memory(url))
+ return cot["artifacts"][fetch["artifact"]]["sha256"]
+
+
+def command_task_artifacts(args):
+ start = time.monotonic()
+ fetches = json.loads(os.environ["MOZ_FETCHES"])
+ downloads = []
+ for fetch in fetches:
+ extdir = pathlib.Path(args.dest)
+ if "dest" in fetch:
+ # Note: normpath doesn't like pathlib.Path in python 3.5
+ extdir = pathlib.Path(os.path.normpath(str(extdir.joinpath(fetch["dest"]))))
+ extdir.mkdir(parents=True, exist_ok=True)
+ root_url = os.environ["TASKCLUSTER_ROOT_URL"]
+ sha256 = None
+ if fetch.get("verify-hash"):
+ sha256 = get_hash(fetch, root_url)
+ if fetch["artifact"].startswith("public/"):
+ path = "task/{task}/artifacts/{artifact}".format(
+ task=fetch["task"], artifact=fetch["artifact"]
+ )
+ url = api(root_url, "queue", "v1", path)
+ else:
+ url = ("{proxy_url}/api/queue/v1/task/{task}/artifacts/{artifact}").format(
+ proxy_url=os.environ["TASKCLUSTER_PROXY_URL"],
+ task=fetch["task"],
+ artifact=fetch["artifact"],
+ )
+ downloads.append((url, extdir, fetch["extract"], sha256))
+
+ fetch_urls(downloads)
+ end = time.monotonic()
+
+ perfherder_data = {
+ "framework": {"name": "build_metrics"},
+ "suites": [
+ {
+ "name": "fetch_content",
+ "value": end - start,
+ "lowerIsBetter": True,
+ "shouldAlert": False,
+ "subtests": [],
+ }
+ ],
+ }
+ print("PERFHERDER_DATA: {}".format(json.dumps(perfherder_data)), file=sys.stderr)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ subparsers = parser.add_subparsers(title="sub commands")
+
+ git_checkout = subparsers.add_parser(
+ "git-checkout-archive",
+ help="Obtain an archive of files from a Git repository checkout",
+ )
+ git_checkout.set_defaults(func=command_git_checkout_archive)
+ git_checkout.add_argument(
+ "--path-prefix", help="Prefix for paths in produced archive"
+ )
+ git_checkout.add_argument("repo", help="URL to Git repository to be cloned")
+ git_checkout.add_argument("commit", help="Git commit to check out")
+ git_checkout.add_argument("dest", help="Destination path of archive")
+ git_checkout.add_argument(
+ "--ssh-key-secret", help="The scope path of the ssh key to use for checkout"
+ )
+ git_checkout.add_argument(
+ "--include-dot-git", action="store_true", help="Include the .git directory"
+ )
+
+ url = subparsers.add_parser("static-url", help="Download a static URL")
+ url.set_defaults(func=command_static_url)
+ url.add_argument("--sha256", required=True, help="SHA-256 of downloaded content")
+ url.add_argument(
+ "--size", required=True, type=int, help="Size of downloaded content, in bytes"
+ )
+ url.add_argument(
+ "--gpg-sig-url",
+ help="URL containing signed GPG document validating " "URL to fetch",
+ )
+ url.add_argument(
+ "--gpg-key-env", help="Environment variable containing GPG key to validate"
+ )
+ url.add_argument(
+ "--strip-components",
+ type=int,
+ default=0,
+ help="Number of leading components to strip from file "
+ "names in the downloaded archive",
+ )
+ url.add_argument(
+ "--add-prefix",
+ default="",
+ help="Prefix to add to file names in the downloaded " "archive",
+ )
+ url.add_argument("url", help="URL to fetch")
+ url.add_argument("dest", help="Destination path")
+
+ artifacts = subparsers.add_parser("task-artifacts", help="Fetch task artifacts")
+ artifacts.set_defaults(func=command_task_artifacts)
+ artifacts.add_argument(
+ "-d",
+ "--dest",
+ default=os.environ.get("MOZ_FETCHES_DIR"),
+ help="Destination directory which will contain all "
+ "artifacts (defaults to $MOZ_FETCHES_DIR)",
+ )
+
+ args = parser.parse_args()
+
+ if not args.dest:
+ parser.error(
+ "no destination directory specified, either pass in --dest "
+ "or set $MOZ_FETCHES_DIR"
+ )
+
+ return args.func(args)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/taskcluster/scripts/misc/get_vs.py b/taskcluster/scripts/misc/get_vs.py
new file mode 100755
index 0000000000..d630abedb8
--- /dev/null
+++ b/taskcluster/scripts/misc/get_vs.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python3
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import os
+import shutil
+import ssl
+from pathlib import Path
+from tempfile import TemporaryDirectory
+from urllib import request
+
+import certifi
+import yaml
+from buildconfig import topsrcdir
+from vsdownload import downloadPackages, extractPackages
+
+# Hack to hook certifi
+_urlopen = request.urlopen
+
+
+def urlopen(url, data=None):
+ return _urlopen(
+ url, data, context=ssl.create_default_context(cafile=certifi.where())
+ )
+
+
+request.urlopen = urlopen
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Download and build a Visual Studio artifact"
+ )
+ parser.add_argument("manifest", help="YAML manifest of the contents to download")
+ parser.add_argument("outdir", help="Output directory")
+ args = parser.parse_args()
+
+ out_dir = Path(args.outdir)
+ with open(Path(topsrcdir) / args.manifest) as f:
+ selected = yaml.safe_load(f.read())
+ with TemporaryDirectory(prefix="get_vs", dir=".") as tmpdir:
+ tmpdir = Path(tmpdir)
+ dl_cache = tmpdir / "cache"
+ downloadPackages(selected, dl_cache)
+ unpacked = tmpdir / "unpack"
+ extractPackages(selected, dl_cache, unpacked)
+ vfs = {}
+ # Fill the output directory with all the paths in lowercase form for
+ # cross-compiles.
+ for subpath in ("VC", "Windows Kits/10", "DIA SDK"):
+ dest = subpath
+ # When running on Windows, SDK files are extracted under Windows Kits,
+ # but on other platforms, they end up in Program Files/Windows Kits.
+ program_files_subpath = unpacked / "Program Files" / subpath
+ if program_files_subpath.exists():
+ subpath = program_files_subpath
+ else:
+ subpath = unpacked / subpath
+ dest = Path(dest)
+ for root, dirs, files in os.walk(subpath):
+ relpath = Path(root).relative_to(subpath)
+ for f in files:
+ path = Path(root) / f
+ mode = os.stat(path).st_mode
+ with open(path, "rb") as fh:
+ lower_f = f.lower()
+ # Ideally, we'd use the overlay for .libs too, but as of
+ # this writing it's still impractical, so lowercase them
+ # for now; that's enough.
+ if lower_f.endswith(".lib"):
+ f = lower_f
+ name = str(dest / relpath / f)
+ # Set the executable flag on .exe files; the Firefox
+ # build system expects it.
+ if lower_f.endswith(".exe"):
+ mode |= (mode & 0o444) >> 2
+ print("Adding", name)
+ out_file = out_dir / name
+ out_file.parent.mkdir(parents=True, exist_ok=True)
+ with out_file.open("wb") as out_fh:
+ shutil.copyfileobj(fh, out_fh)
+ os.chmod(out_file, mode)
+ if lower_f.endswith((".h", ".idl")):
+ vfs.setdefault(str(dest / relpath), []).append(f)
+ # Create an overlay file for use with clang's -ivfsoverlay flag.
+ overlay = {
+ "version": 0,
+ "case-sensitive": False,
+ "root-relative": "overlay-dir",
+ "overlay-relative": True,
+ "roots": [
+ {
+ "name": p,
+ "type": "directory",
+ "contents": [
+ {
+ "name": f,
+ "type": "file",
+ "external-contents": f"{p}/{f}",
+ }
+ for f in files
+ ],
+ }
+ for p, files in vfs.items()
+ ],
+ }
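+ # Sketch of the resulting overlay.yaml (paths illustrative):
+ # roots: [{name: "VC/include", type: directory, contents:
+ # [{name: foo.h, type: file, external-contents: "VC/include/foo.h"}]}]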
+ overlay_yaml = out_dir / "overlay.yaml"
+ with overlay_yaml.open("w") as fh:
+ fh.write(yaml.dump(overlay))
diff --git a/taskcluster/scripts/misc/mingw-composition.patch b/taskcluster/scripts/misc/mingw-composition.patch
new file mode 100644
index 0000000000..40edf921d8
--- /dev/null
+++ b/taskcluster/scripts/misc/mingw-composition.patch
@@ -0,0 +1,50 @@
+diff --git a/mingw-w64-headers/include/windows.ui.composition.h b/mingw-w64-headers/include/windows.ui.composition.h
+index 9dac0f1..58872d5 100644
+--- a/mingw-w64-headers/include/windows.ui.composition.h
++++ b/mingw-w64-headers/include/windows.ui.composition.h
+@@ -4916,13 +4916,13 @@ namespace ABI {
+ ICompositionDrawingSurface : public IInspectable
+ {
+ virtual HRESULT STDMETHODCALLTYPE get_AlphaMode(
+- enum DirectXAlphaMode *value) = 0;
++ ABI::Windows::Graphics::DirectX::DirectXAlphaMode *value) = 0;
+
+ virtual HRESULT STDMETHODCALLTYPE get_PixelFormat(
+- enum DirectXPixelFormat *value) = 0;
++ ABI::Windows::Graphics::DirectX::DirectXPixelFormat *value) = 0;
+
+ virtual HRESULT STDMETHODCALLTYPE get_Size(
+- struct Size *value) = 0;
++ ABI::Windows::Foundation::Size *value) = 0;
+
+ };
+ }
+@@ -5704,8 +5704,8 @@ namespace ABI {
+ {
+ virtual HRESULT STDMETHODCALLTYPE CreateDrawingSurface(
+ struct Size pixels,
+- enum DirectXPixelFormat format,
+- enum DirectXAlphaMode mode,
++ ABI::Windows::Graphics::DirectX::DirectXPixelFormat format,
++ ABI::Windows::Graphics::DirectX::DirectXAlphaMode mode,
+ ABI::Windows::UI::Composition::ICompositionDrawingSurface **result) = 0;
+
+ virtual HRESULT STDMETHODCALLTYPE add_RenderingDeviceReplaced(
+@@ -9338,7 +9338,7 @@ namespace ABI {
+ boolean value) = 0;
+
+ virtual HRESULT STDMETHODCALLTYPE get_Offset(
+- struct Vector3 *value) = 0;
++ ABI::Windows::Foundation::Numerics::Vector3 *value) = 0;
+
+ virtual HRESULT STDMETHODCALLTYPE put_Offset(
+ struct Vector3 value) = 0;
+@@ -9383,7 +9383,7 @@ namespace ABI {
+ struct Vector3 value) = 0;
+
+ virtual HRESULT STDMETHODCALLTYPE get_Size(
+- struct Vector2 *value) = 0;
++ ABI::Windows::Foundation::Numerics::Vector2 *value) = 0;
+
+ virtual HRESULT STDMETHODCALLTYPE put_Size(
+ struct Vector2 value) = 0;
diff --git a/taskcluster/scripts/misc/mingw-dispatchqueue.patch b/taskcluster/scripts/misc/mingw-dispatchqueue.patch
new file mode 100644
index 0000000000..70fd9be819
--- /dev/null
+++ b/taskcluster/scripts/misc/mingw-dispatchqueue.patch
@@ -0,0 +1,157 @@
+From 6e031273d1763ef1fd7acc11a6ed6c2a819c91ba Mon Sep 17 00:00:00 2001
+From: Tom Ritter <tom@ritter.vg>
+Date: Thu, 2 Feb 2023 15:51:46 -0500
+Subject: [PATCH 6/6] Add back IDispatcherQueueController
+
+---
+ mingw-w64-headers/include/windows.system.h | 127 +++++++++++++++++++++
+ 1 file changed, 127 insertions(+)
+
+diff --git a/mingw-w64-headers/include/windows.system.h b/mingw-w64-headers/include/windows.system.h
+index 688361148..1bb159a31 100644
+--- a/mingw-w64-headers/include/windows.system.h
++++ b/mingw-w64-headers/include/windows.system.h
+@@ -41,6 +41,22 @@ namespace ABI {
+ #endif /* __cplusplus */
+ #endif
+
++#ifndef ____x_ABI_CWindows_CSystem_CIDispatcherQueueController_FWD_DEFINED__
++#define ____x_ABI_CWindows_CSystem_CIDispatcherQueueController_FWD_DEFINED__
++typedef interface __x_ABI_CWindows_CSystem_CIDispatcherQueueController __x_ABI_CWindows_CSystem_CIDispatcherQueueController;
++#ifdef __cplusplus
++#define __x_ABI_CWindows_CSystem_CIDispatcherQueueController ABI::Windows::System::IDispatcherQueueController
++namespace ABI {
++ namespace Windows {
++ namespace System {
++ interface IDispatcherQueueController;
++ }
++ }
++}
++#endif /* __cplusplus */
++#endif
++
++
+ #ifndef ____x_ABI_CWindows_CSystem_CUser_FWD_DEFINED__
+ #define ____x_ABI_CWindows_CSystem_CUser_FWD_DEFINED__
+ #ifdef __cplusplus
+@@ -269,6 +285,117 @@ static __WIDL_INLINE HRESULT __x_ABI_CWindows_CSystem_CIUserChangedEventArgs_get
+ #endif /* ____x_ABI_CWindows_CSystem_CIUserChangedEventArgs_INTERFACE_DEFINED__ */
+ #endif /* WINDOWS_FOUNDATION_UNIVERSALAPICONTRACT_VERSION >= 0x10000 */
+
++/*****************************************************************************
++ * IDispatcherQueueController interface
++ */
++#if WINDOWS_FOUNDATION_UNIVERSALAPICONTRACT_VERSION >= 0x50000
++#ifndef ____x_ABI_CWindows_CSystem_CIDispatcherQueueController_INTERFACE_DEFINED__
++#define ____x_ABI_CWindows_CSystem_CIDispatcherQueueController_INTERFACE_DEFINED__
++
++DEFINE_GUID(IID___x_ABI_CWindows_CSystem_CIDispatcherQueueController, 0x22f34e66, 0x50db, 0x4e36, 0xa9,0x8d, 0x61,0xc0,0x1b,0x38,0x4d,0x20);
++#if defined(__cplusplus) && !defined(CINTERFACE)
++} /* extern "C" */
++namespace ABI {
++ namespace Windows {
++ namespace System {
++ MIDL_INTERFACE("22f34e66-50db-4e36-a98d-61c01b384d20")
++ IDispatcherQueueController : public IInspectable
++ {
++ };
++ }
++ }
++}
++extern "C" {
++#ifdef __CRT_UUID_DECL
++__CRT_UUID_DECL(__x_ABI_CWindows_CSystem_CIDispatcherQueueController, 0x22f34e66, 0x50db, 0x4e36, 0xa9,0x8d, 0x61,0xc0,0x1b,0x38,0x4d,0x20)
++#endif
++#else
++typedef struct __x_ABI_CWindows_CSystem_CIDispatcherQueueControllerVtbl {
++ BEGIN_INTERFACE
++
++ /*** IUnknown methods ***/
++ HRESULT (STDMETHODCALLTYPE *QueryInterface)(
++ __x_ABI_CWindows_CSystem_CIDispatcherQueueController *This,
++ REFIID riid,
++ void **ppvObject);
++
++ ULONG (STDMETHODCALLTYPE *AddRef)(
++ __x_ABI_CWindows_CSystem_CIDispatcherQueueController *This);
++
++ ULONG (STDMETHODCALLTYPE *Release)(
++ __x_ABI_CWindows_CSystem_CIDispatcherQueueController *This);
++
++ /*** IInspectable methods ***/
++ HRESULT (STDMETHODCALLTYPE *GetIids)(
++ __x_ABI_CWindows_CSystem_CIDispatcherQueueController *This,
++ ULONG *iidCount,
++ IID **iids);
++
++ HRESULT (STDMETHODCALLTYPE *GetRuntimeClassName)(
++ __x_ABI_CWindows_CSystem_CIDispatcherQueueController *This,
++ HSTRING *className);
++
++ HRESULT (STDMETHODCALLTYPE *GetTrustLevel)(
++ __x_ABI_CWindows_CSystem_CIDispatcherQueueController *This,
++ TrustLevel *trustLevel);
++
++ END_INTERFACE
++} __x_ABI_CWindows_CSystem_CIDispatcherQueueControllerVtbl;
++
++interface __x_ABI_CWindows_CSystem_CIDispatcherQueueController {
++ CONST_VTBL __x_ABI_CWindows_CSystem_CIDispatcherQueueControllerVtbl* lpVtbl;
++};
++
++#ifdef COBJMACROS
++#ifndef WIDL_C_INLINE_WRAPPERS
++/*** IUnknown methods ***/
++#define __x_ABI_CWindows_CSystem_CIDispatcherQueueController_QueryInterface(This,riid,ppvObject) (This)->lpVtbl->QueryInterface(This,riid,ppvObject)
++#define __x_ABI_CWindows_CSystem_CIDispatcherQueueController_AddRef(This) (This)->lpVtbl->AddRef(This)
++#define __x_ABI_CWindows_CSystem_CIDispatcherQueueController_Release(This) (This)->lpVtbl->Release(This)
++/*** IInspectable methods ***/
++#define __x_ABI_CWindows_CSystem_CIDispatcherQueueController_GetIids(This,iidCount,iids) (This)->lpVtbl->GetIids(This,iidCount,iids)
++#define __x_ABI_CWindows_CSystem_CIDispatcherQueueController_GetRuntimeClassName(This,className) (This)->lpVtbl->GetRuntimeClassName(This,className)
++#define __x_ABI_CWindows_CSystem_CIDispatcherQueueController_GetTrustLevel(This,trustLevel) (This)->lpVtbl->GetTrustLevel(This,trustLevel)
++#else
++/*** IUnknown methods ***/
++static FORCEINLINE HRESULT __x_ABI_CWindows_CSystem_CIDispatcherQueueController_QueryInterface(__x_ABI_CWindows_CSystem_CIDispatcherQueueController* This,REFIID riid,void **ppvObject) {
++ return This->lpVtbl->QueryInterface(This,riid,ppvObject);
++}
++static FORCEINLINE ULONG __x_ABI_CWindows_CSystem_CIDispatcherQueueController_AddRef(__x_ABI_CWindows_CSystem_CIDispatcherQueueController* This) {
++ return This->lpVtbl->AddRef(This);
++}
++static FORCEINLINE ULONG __x_ABI_CWindows_CSystem_CIDispatcherQueueController_Release(__x_ABI_CWindows_CSystem_CIDispatcherQueueController* This) {
++ return This->lpVtbl->Release(This);
++}
++/*** IInspectable methods ***/
++static FORCEINLINE HRESULT __x_ABI_CWindows_CSystem_CIDispatcherQueueController_GetIids(__x_ABI_CWindows_CSystem_CIDispatcherQueueController* This,ULONG *iidCount,IID **iids) {
++ return This->lpVtbl->GetIids(This,iidCount,iids);
++}
++static FORCEINLINE HRESULT __x_ABI_CWindows_CSystem_CIDispatcherQueueController_GetRuntimeClassName(__x_ABI_CWindows_CSystem_CIDispatcherQueueController* This,HSTRING *className) {
++ return This->lpVtbl->GetRuntimeClassName(This,className);
++}
++static FORCEINLINE HRESULT __x_ABI_CWindows_CSystem_CIDispatcherQueueController_GetTrustLevel(__x_ABI_CWindows_CSystem_CIDispatcherQueueController* This,TrustLevel *trustLevel) {
++ return This->lpVtbl->GetTrustLevel(This,trustLevel);
++}
++#endif
++#ifdef WIDL_using_Windows_System
++#define IID_IDispatcherQueueController IID___x_ABI_CWindows_CSystem_CIDispatcherQueueController
++#define IDispatcherQueueControllerVtbl __x_ABI_CWindows_CSystem_CIDispatcherQueueControllerVtbl
++#define IDispatcherQueueController __x_ABI_CWindows_CSystem_CIDispatcherQueueController
++#define IDispatcherQueueController_QueryInterface __x_ABI_CWindows_CSystem_CIDispatcherQueueController_QueryInterface
++#define IDispatcherQueueController_AddRef __x_ABI_CWindows_CSystem_CIDispatcherQueueController_AddRef
++#define IDispatcherQueueController_Release __x_ABI_CWindows_CSystem_CIDispatcherQueueController_Release
++#define IDispatcherQueueController_GetIids __x_ABI_CWindows_CSystem_CIDispatcherQueueController_GetIids
++#define IDispatcherQueueController_GetRuntimeClassName __x_ABI_CWindows_CSystem_CIDispatcherQueueController_GetRuntimeClassName
++#define IDispatcherQueueController_GetTrustLevel __x_ABI_CWindows_CSystem_CIDispatcherQueueController_GetTrustLevel
++#endif /* WIDL_using_Windows_System */
++#endif
++
++#endif
++
++#endif /* ____x_ABI_CWindows_CSystem_CIDispatcherQueueController_INTERFACE_DEFINED__ */
++#endif /* WINDOWS_FOUNDATION_UNIVERSALAPICONTRACT_VERSION >= 0x50000 */
++
+ /*
+ * Class Windows.System.User
+ */
+--
+2.25.1
+
diff --git a/taskcluster/scripts/misc/mingw-dwrite_3.patch b/taskcluster/scripts/misc/mingw-dwrite_3.patch
new file mode 100644
index 0000000000..25c7b89eea
--- /dev/null
+++ b/taskcluster/scripts/misc/mingw-dwrite_3.patch
@@ -0,0 +1,87 @@
+From a9804765e442063be37338933b9c40e3e3d01aac Mon Sep 17 00:00:00 2001
+From: Sanketh Menda <me@snkth.com>
+Date: Thu, 2 Feb 2023 12:29:03 -0500
+Subject: [PATCH 4/7] dwrite_3.h: rename GetGlyphImageFormats_ to
+ GetGlyphImageFormats
+
+Wine's WIDL currently doesn't support overloading functions, so till
+that is fixed patch dwrite_3.h to rename GlyphImageFormats_ to
+GetGlyphImageFormats.
+---
+ mingw-w64-headers/include/dwrite_3.h | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/mingw-w64-headers/include/dwrite_3.h b/mingw-w64-headers/include/dwrite_3.h
+index 205c47f04..352731bf1 100644
+--- a/mingw-w64-headers/include/dwrite_3.h
++++ b/mingw-w64-headers/include/dwrite_3.h
+@@ -8181,7 +8181,7 @@ DEFINE_GUID(IID_IDWriteFontFace4, 0x27f2a904, 0x4eb8, 0x441d, 0x96,0x78, 0x05,0x
+ MIDL_INTERFACE("27f2a904-4eb8-441d-9678-0563f53e3e2f")
+ IDWriteFontFace4 : public IDWriteFontFace3
+ {
+- virtual HRESULT STDMETHODCALLTYPE GetGlyphImageFormats_(
++ virtual HRESULT STDMETHODCALLTYPE GetGlyphImageFormats(
+ UINT16 glyph,
+ UINT32 ppem_first,
+ UINT32 ppem_last,
+@@ -8481,7 +8481,7 @@ typedef struct IDWriteFontFace4Vtbl {
+ WINBOOL *are_local);
+
+ /*** IDWriteFontFace4 methods ***/
+- HRESULT (STDMETHODCALLTYPE *GetGlyphImageFormats_)(
++ HRESULT (STDMETHODCALLTYPE *GetGlyphImageFormats)(
+ IDWriteFontFace4 *This,
+ UINT16 glyph,
+ UINT32 ppem_first,
+@@ -8562,7 +8562,7 @@ interface IDWriteFontFace4 {
+ #define IDWriteFontFace4_AreCharactersLocal(This,characters,count,enqueue_if_not,are_local) (This)->lpVtbl->AreCharactersLocal(This,characters,count,enqueue_if_not,are_local)
+ #define IDWriteFontFace4_AreGlyphsLocal(This,glyphs,count,enqueue_if_not,are_local) (This)->lpVtbl->AreGlyphsLocal(This,glyphs,count,enqueue_if_not,are_local)
+ /*** IDWriteFontFace4 methods ***/
+-#define IDWriteFontFace4_GetGlyphImageFormats_(This,glyph,ppem_first,ppem_last,formats) (This)->lpVtbl->GetGlyphImageFormats_(This,glyph,ppem_first,ppem_last,formats)
++#define IDWriteFontFace4_GetGlyphImageFormats(This,glyph,ppem_first,ppem_last,formats) (This)->lpVtbl->GetGlyphImageFormats(This,glyph,ppem_first,ppem_last,formats)
+ #define IDWriteFontFace4_GetGlyphImageFormats(This) (This)->lpVtbl->GetGlyphImageFormats(This)
+ #define IDWriteFontFace4_GetGlyphImageData(This,glyph,ppem,format,data,context) (This)->lpVtbl->GetGlyphImageData(This,glyph,ppem,format,data,context)
+ #define IDWriteFontFace4_ReleaseGlyphImageData(This,context) (This)->lpVtbl->ReleaseGlyphImageData(This,context)
+@@ -8705,8 +8705,8 @@ static __WIDL_INLINE HRESULT IDWriteFontFace4_AreGlyphsLocal(IDWriteFontFace4* T
+ return This->lpVtbl->AreGlyphsLocal(This,glyphs,count,enqueue_if_not,are_local);
+ }
+ /*** IDWriteFontFace4 methods ***/
+-static __WIDL_INLINE HRESULT IDWriteFontFace4_GetGlyphImageFormats_(IDWriteFontFace4* This,UINT16 glyph,UINT32 ppem_first,UINT32 ppem_last,DWRITE_GLYPH_IMAGE_FORMATS *formats) {
+- return This->lpVtbl->GetGlyphImageFormats_(This,glyph,ppem_first,ppem_last,formats);
++static __WIDL_INLINE HRESULT IDWriteFontFace4_GetGlyphImageFormats(IDWriteFontFace4* This,UINT16 glyph,UINT32 ppem_first,UINT32 ppem_last,DWRITE_GLYPH_IMAGE_FORMATS *formats) {
++ return This->lpVtbl->GetGlyphImageFormats(This,glyph,ppem_first,ppem_last,formats);
+ }
+ static __WIDL_INLINE DWRITE_GLYPH_IMAGE_FORMATS IDWriteFontFace4_GetGlyphImageFormats(IDWriteFontFace4* This) {
+ return This->lpVtbl->GetGlyphImageFormats(This);
+@@ -9033,7 +9033,7 @@ typedef struct IDWriteFontFace5Vtbl {
+ WINBOOL *are_local);
+
+ /*** IDWriteFontFace4 methods ***/
+- HRESULT (STDMETHODCALLTYPE *GetGlyphImageFormats_)(
++ HRESULT (STDMETHODCALLTYPE *GetGlyphImageFormats)(
+ IDWriteFontFace5 *This,
+ UINT16 glyph,
+ UINT32 ppem_first,
+@@ -9134,7 +9134,7 @@ interface IDWriteFontFace5 {
+ #define IDWriteFontFace5_AreCharactersLocal(This,characters,count,enqueue_if_not,are_local) (This)->lpVtbl->AreCharactersLocal(This,characters,count,enqueue_if_not,are_local)
+ #define IDWriteFontFace5_AreGlyphsLocal(This,glyphs,count,enqueue_if_not,are_local) (This)->lpVtbl->AreGlyphsLocal(This,glyphs,count,enqueue_if_not,are_local)
+ /*** IDWriteFontFace4 methods ***/
+-#define IDWriteFontFace5_GetGlyphImageFormats_(This,glyph,ppem_first,ppem_last,formats) (This)->lpVtbl->GetGlyphImageFormats_(This,glyph,ppem_first,ppem_last,formats)
++#define IDWriteFontFace5_GetGlyphImageFormats(This,glyph,ppem_first,ppem_last,formats) (This)->lpVtbl->GetGlyphImageFormats(This,glyph,ppem_first,ppem_last,formats)
+ #define IDWriteFontFace5_GetGlyphImageFormats(This) (This)->lpVtbl->GetGlyphImageFormats(This)
+ #define IDWriteFontFace5_GetGlyphImageData(This,glyph,ppem,format,data,context) (This)->lpVtbl->GetGlyphImageData(This,glyph,ppem,format,data,context)
+ #define IDWriteFontFace5_ReleaseGlyphImageData(This,context) (This)->lpVtbl->ReleaseGlyphImageData(This,context)
+@@ -9283,8 +9283,8 @@ static __WIDL_INLINE HRESULT IDWriteFontFace5_AreGlyphsLocal(IDWriteFontFace5* T
+ return This->lpVtbl->AreGlyphsLocal(This,glyphs,count,enqueue_if_not,are_local);
+ }
+ /*** IDWriteFontFace4 methods ***/
+-static __WIDL_INLINE HRESULT IDWriteFontFace5_GetGlyphImageFormats_(IDWriteFontFace5* This,UINT16 glyph,UINT32 ppem_first,UINT32 ppem_last,DWRITE_GLYPH_IMAGE_FORMATS *formats) {
+- return This->lpVtbl->GetGlyphImageFormats_(This,glyph,ppem_first,ppem_last,formats);
++static __WIDL_INLINE HRESULT IDWriteFontFace5_GetGlyphImageFormats(IDWriteFontFace5* This,UINT16 glyph,UINT32 ppem_first,UINT32 ppem_last,DWRITE_GLYPH_IMAGE_FORMATS *formats) {
++ return This->lpVtbl->GetGlyphImageFormats(This,glyph,ppem_first,ppem_last,formats);
+ }
+ static __WIDL_INLINE DWRITE_GLYPH_IMAGE_FORMATS IDWriteFontFace5_GetGlyphImageFormats(IDWriteFontFace5* This) {
+ return This->lpVtbl->GetGlyphImageFormats(This);
+--
+2.25.1
+
diff --git a/taskcluster/scripts/misc/mingw-enum.patch b/taskcluster/scripts/misc/mingw-enum.patch
new file mode 100644
index 0000000000..de12434ee3
--- /dev/null
+++ b/taskcluster/scripts/misc/mingw-enum.patch
@@ -0,0 +1,25 @@
+From b415d3e199de9cb2dce6290721bcfc2871f33769 Mon Sep 17 00:00:00 2001
+From: Tom Ritter <tom@ritter.vg>
+Date: Thu, 2 Feb 2023 12:26:47 -0500
+Subject: [PATCH 3/7] Fix enum int issues
+
+---
+ mingw-w64-headers/include/windows.foundation.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mingw-w64-headers/include/windows.foundation.h b/mingw-w64-headers/include/windows.foundation.h
+index fd66e27d3..7981f3380 100644
+--- a/mingw-w64-headers/include/windows.foundation.h
++++ b/mingw-w64-headers/include/windows.foundation.h
+@@ -647,7 +647,7 @@ static __WIDL_INLINE HRESULT __x_ABI_CWindows_CFoundation_CIAsyncActionCompleted
+ namespace ABI {
+ namespace Windows {
+ namespace Foundation {
+- enum PropertyType {
++ enum PropertyType : int {
+ PropertyType_Empty = 0,
+ PropertyType_UInt8 = 1,
+ PropertyType_Int16 = 2,
+--
+2.25.1
+
diff --git a/taskcluster/scripts/misc/mingw-ts_sd.patch b/taskcluster/scripts/misc/mingw-ts_sd.patch
new file mode 100644
index 0000000000..c76f5f1dab
--- /dev/null
+++ b/taskcluster/scripts/misc/mingw-ts_sd.patch
@@ -0,0 +1,33 @@
+From 8e23d493352ada53b3a766f14e2e93484353c15c Mon Sep 17 00:00:00 2001
+From: Tom Ritter <tom@ritter.vg>
+Date: Wed, 8 Feb 2023 10:52:16 -0500
+Subject: [PATCH 11/11] TS_SD_ defines
+
+---
+ mingw-w64-headers/include/textstor.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/mingw-w64-headers/include/textstor.h b/mingw-w64-headers/include/textstor.h
+index 0681cab92..64b5ebec5 100644
+--- a/mingw-w64-headers/include/textstor.h
++++ b/mingw-w64-headers/include/textstor.h
+@@ -68,6 +68,16 @@ extern "C" {
+
+ #define TS_SD_LOADING (0x2)
+
++#define TS_SD_RESERVED (0x4)
++
++#define TS_SD_TKBAUTOCORRECTENABLE (0x8)
++
++#define TS_SD_TKBPREDICTIONENABLE (0x10)
++
++#define TS_SD_UIINTEGRATIONENABLE (0x20)
++
++#define TS_SD_INPUTPANEMANUALDISPLAYENABLE (0x40)
++
+ #define TS_SS_DISJOINTSEL (0x1)
+
+ #define TS_SS_REGIONS (0x2)
+--
+2.25.1
+
diff --git a/taskcluster/scripts/misc/mingw-unknown.patch b/taskcluster/scripts/misc/mingw-unknown.patch
new file mode 100644
index 0000000000..2e29e60106
--- /dev/null
+++ b/taskcluster/scripts/misc/mingw-unknown.patch
@@ -0,0 +1,46 @@
+From 753c3ad7018936ef9a9d2af8b75efbfa14c149b7 Mon Sep 17 00:00:00 2001
+From: Tom Ritter <tom@ritter.vg>
+Date: Thu, 2 Feb 2023 12:26:22 -0500
+Subject: [PATCH 2/7] Add back the IUnknown_QI functions
+
+---
+ mingw-w64-headers/include/unknwn.h | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/mingw-w64-headers/include/unknwn.h b/mingw-w64-headers/include/unknwn.h
+index f3ada04a2..f33e8f270 100644
+--- a/mingw-w64-headers/include/unknwn.h
++++ b/mingw-w64-headers/include/unknwn.h
+@@ -169,6 +169,29 @@ static __WIDL_INLINE ULONG IUnknown_Release(IUnknown* This) {
+
+ #endif
+
++HRESULT STDMETHODCALLTYPE IUnknown_QueryInterface_Proxy(
++ IUnknown* This,
++ REFIID riid,
++ void **ppvObject);
++void __RPC_STUB IUnknown_QueryInterface_Stub(
++ IRpcStubBuffer* This,
++ IRpcChannelBuffer* pRpcChannelBuffer,
++ PRPC_MESSAGE pRpcMessage,
++ DWORD* pdwStubPhase);
++ULONG STDMETHODCALLTYPE IUnknown_AddRef_Proxy(
++ IUnknown* This);
++void __RPC_STUB IUnknown_AddRef_Stub(
++ IRpcStubBuffer* This,
++ IRpcChannelBuffer* pRpcChannelBuffer,
++ PRPC_MESSAGE pRpcMessage,
++ DWORD* pdwStubPhase);
++ULONG STDMETHODCALLTYPE IUnknown_Release_Proxy(
++ IUnknown* This);
++void __RPC_STUB IUnknown_Release_Stub(
++ IRpcStubBuffer* This,
++ IRpcChannelBuffer* pRpcChannelBuffer,
++ PRPC_MESSAGE pRpcMessage,
++ DWORD* pdwStubPhase);
+
+ #endif /* __IUnknown_INTERFACE_DEFINED__ */
+
+--
+2.25.1
+
diff --git a/taskcluster/scripts/misc/mingw-widl.patch b/taskcluster/scripts/misc/mingw-widl.patch
new file mode 100644
index 0000000000..225a908ad7
--- /dev/null
+++ b/taskcluster/scripts/misc/mingw-widl.patch
@@ -0,0 +1,35 @@
+From 534ecbfb4da9a27c287a9a44ea18ef44ccf2aac2 Mon Sep 17 00:00:00 2001
+From: Tom Ritter <tom@ritter.vg>
+Date: Thu, 2 Feb 2023 13:28:39 -0500
+Subject: [PATCH 5/7] Fix widl
+
+In commit c94f44f9b455 (in wine's repo) open_typelib was changed
+from returning a file descriptor (null on error) to aborting if
+an error was encountered.
+
+This is incorrect, because read_importlib in typelib.c has a
+fallback behavior where it calls open_typelib again if it
+fails the first time. And _then_ it will error if it couldn't do
+it either time.
+
+Restore the original behavior for open_typelib
+---
+ mingw-w64-tools/widl/src/widl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mingw-w64-tools/widl/src/widl.c b/mingw-w64-tools/widl/src/widl.c
+index 986aa3624..39bc8ac21 100644
+--- a/mingw-w64-tools/widl/src/widl.c
++++ b/mingw-w64-tools/widl/src/widl.c
+@@ -710,7 +710,7 @@ int open_typelib( const char *name )
+ TRYOPEN( strmake( "%s%s/%s", default_dirs[i], pe_dir, name ));
+ }
+ }
+- error( "cannot find %s\n", name );
++ return -1;
+ #undef TRYOPEN
+ }
+
+--
+2.25.1
+
diff --git a/taskcluster/scripts/misc/moz.build b/taskcluster/scripts/misc/moz.build
new file mode 100644
index 0000000000..3b5b0e1e58
--- /dev/null
+++ b/taskcluster/scripts/misc/moz.build
@@ -0,0 +1,8 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+with Files("verify-updatebot.py"):
+ BUG_COMPONENT = ("Developer Infrastructure", "Mach Vendor & Updatebot")
diff --git a/taskcluster/scripts/misc/osx-cross-linker b/taskcluster/scripts/misc/osx-cross-linker
new file mode 100755
index 0000000000..1e685db5fb
--- /dev/null
+++ b/taskcluster/scripts/misc/osx-cross-linker
@@ -0,0 +1,8 @@
+#!/bin/sh
+
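+# Linker wrapper for macOS cross-builds: drives clang with lld against the
+# fetched macOS SDK. $TARGET and $MOZ_FETCHES_DIR come from the task
+# environment.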
+exec $MOZ_FETCHES_DIR/clang/bin/clang -v \
+ -fuse-ld=lld \
+ -mmacosx-version-min=${MACOSX_DEPLOYMENT_TARGET:-10.12} \
+ -target $TARGET \
+ -isysroot $MOZ_FETCHES_DIR/MacOSX13.3.sdk \
+ "$@"
diff --git a/taskcluster/scripts/misc/pack-cpython.sh b/taskcluster/scripts/misc/pack-cpython.sh
new file mode 100755
index 0000000000..71a11adb07
--- /dev/null
+++ b/taskcluster/scripts/misc/pack-cpython.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+set -x -e -v
+
+# This script extracts the Python binary for Windows from the installer.
+
+ARTIFACT_NAME=win64-cpython.tar.zst
+PYTHON_INSTALLER=`echo $MOZ_FETCHES_DIR/python-3.*-amd64.exe`
+WINE=$MOZ_FETCHES_DIR/wine/bin/wine
+
+cabextract $PYTHON_INSTALLER
+
+tardir=python
+mkdir $tardir
+pushd $tardir
+msiextract ../*
+rm api-ms-win-*
+
+# bundle pip
+$WINE python.exe -m ensurepip
+$WINE python.exe -m pip install --upgrade pip==23.0
+$WINE python.exe -m pip install --only-binary ':all:' -r ${GECKO_PATH}/build/psutil_requirements.txt -r ${GECKO_PATH}/build/zstandard_requirements.txt
+
+# Extra symlinks to keep the install layout consistent with Linux and OSX.
+ln -s python.exe python3.exe
+chmod u+x python3.exe
+
+ln -s ./Scripts/pip3.exe pip3.exe
+chmod u+x pip3.exe
+
+
+popd
+
+tar caf `basename ${TOOLCHAIN_ARTIFACT}` ${tardir}
+
+mkdir -p $UPLOAD_DIR
+mv `basename ${TOOLCHAIN_ARTIFACT}` $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/pack.sh b/taskcluster/scripts/misc/pack.sh
new file mode 100755
index 0000000000..f19feb5053
--- /dev/null
+++ b/taskcluster/scripts/misc/pack.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -x
+set -e
+set -o pipefail
+
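+# Usage: pack.sh <path/to/dir>
+# Packs <dir> into <dir>.tar.zst (compressed with the in-tree zstdpy
+# helper) and moves the result to $UPLOAD_DIR.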
+[ -z "$1" ] && echo Missing argument && exit 1
+
+dir=$(dirname "$1")
+name=$(basename "$1")
+
+case "$(uname -s)" in
+Darwin)
+ TAR_FLAGS=--no-fflags
+ ;;
+*)
+ TAR_FLAGS=
+ ;;
+esac
+
+(cd "$dir"; find "$name"/* -not -type d -print0 | tar $TAR_FLAGS -cvf - --null -T -) | python3 $GECKO_PATH/taskcluster/scripts/misc/zstdpy > "$name.tar.zst"
+
+mkdir -p "$UPLOAD_DIR"
+mv "$name.tar.zst" "$UPLOAD_DIR"
diff --git a/taskcluster/scripts/misc/private_local_toolchain.sh b/taskcluster/scripts/misc/private_local_toolchain.sh
new file mode 100755
index 0000000000..df255a8576
--- /dev/null
+++ b/taskcluster/scripts/misc/private_local_toolchain.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -x
+set -e
+set -o pipefail
+
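+# Usage: private_local_toolchain.sh <script> [args...]
+# Runs the given sibling script through `mach python` to build the
+# toolchain directory derived from $TOOLCHAIN_ARTIFACT, then packs it
+# with pack.sh.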
+script=$1
+shift
+artifact=$(basename $TOOLCHAIN_ARTIFACT)
+dir=${artifact%.tar.*}
+
+$GECKO_PATH/mach python --virtualenv build $(dirname $0)/$script "$@" $dir
+
+$(dirname $0)/pack.sh $dir
diff --git a/taskcluster/scripts/misc/repack-android-avd-linux.sh b/taskcluster/scripts/misc/repack-android-avd-linux.sh
new file mode 100755
index 0000000000..f36fda905d
--- /dev/null
+++ b/taskcluster/scripts/misc/repack-android-avd-linux.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+set -x -e -v
+
+# Initialize XVFB for the AVD
+. /builds/worker/scripts/xvfb.sh
+
+cleanup() {
+ local rv=$?
+ cleanup_xvfb
+ exit $rv
+}
+trap cleanup EXIT INT
+
+start_xvfb '1024x768x24' 2
+
+# This script is for fetching and repacking a prewarmed Android AVD (for
+# Linux), used to run the Android emulator in tests.
+
+UPLOAD_DIR=/builds/worker/artifacts/
+AVD_JSON_CONFIG="$1"
+
+mkdir -p $HOME/artifacts $UPLOAD_DIR
+
+# Populate /builds/worker/.mozbuild/android-device
+cd $GECKO_PATH
+./mach python python/mozboot/mozboot/android.py --artifact-mode --prewarm-avd --avd-manifest="$AVD_JSON_CONFIG" --no-interactive --list-packages
+
+tar cavf $UPLOAD_DIR/android-avd-linux.tar.zst -C /builds/worker/.mozbuild android-device
+
+ls -al $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/repack-android-emulator-linux.sh b/taskcluster/scripts/misc/repack-android-emulator-linux.sh
new file mode 100755
index 0000000000..be6a61640d
--- /dev/null
+++ b/taskcluster/scripts/misc/repack-android-emulator-linux.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for fetching and repacking the Android emulator (for
+# Linux), used to run Android tests.
+
+mkdir -p $UPLOAD_DIR
+
+# Populate /builds/worker/.mozbuild/android-emulator-linux.
+cd $GECKO_PATH
+./mach python python/mozboot/mozboot/android.py --emulator-only --no-interactive --list-packages
+
+# Remove extra files we don't need
+rm -rfv /builds/worker/.mozbuild/android-sdk-linux/tools
+mkdir /builds/worker/.mozbuild/android-sdk-linux/system-images
+mkdir /builds/worker/.mozbuild/android-sdk-linux/platforms
+find /builds/worker/.mozbuild/android-sdk-linux/emulator/qemu -type f -not -name "*x86*" -print -delete
+
+tar cavf $UPLOAD_DIR/android-emulator-linux.tar.zst -C /builds/worker/.mozbuild android-sdk-linux bundletool.jar
+
+ls -al $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/repack-android-ndk-linux.sh b/taskcluster/scripts/misc/repack-android-ndk-linux.sh
new file mode 100755
index 0000000000..f48b60c3e1
--- /dev/null
+++ b/taskcluster/scripts/misc/repack-android-ndk-linux.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for fetching and repacking the Android NDK (for
+# Linux), the tools required to produce native Android programs.
+
+mkdir -p $UPLOAD_DIR
+
+# Populate /builds/worker/.mozbuild/android-ndk-$VER.
+cd $GECKO_PATH
+./mach python python/mozboot/mozboot/android.py --ndk-only --no-interactive
+
+# Don't generate a tarball with a versioned NDK directory.
+mv $HOME/.mozbuild/android-ndk-* $HOME/.mozbuild/android-ndk
+tar cavf $UPLOAD_DIR/android-ndk.tar.zst -C /builds/worker/.mozbuild android-ndk
+
+ls -al $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/repack-android-sdk-linux.sh b/taskcluster/scripts/misc/repack-android-sdk-linux.sh
new file mode 100755
index 0000000000..ee2b068701
--- /dev/null
+++ b/taskcluster/scripts/misc/repack-android-sdk-linux.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for fetching and repacking the Android SDK (for
+# Linux), the tools required to produce Android packages.
+
+mkdir -p $UPLOAD_DIR
+
+# Populate /builds/worker/.mozbuild/android-sdk-linux.
+cd $GECKO_PATH
+./mach python python/mozboot/mozboot/android.py --artifact-mode --no-interactive --list-packages
+
+tar cavf $UPLOAD_DIR/android-sdk-linux.tar.zst -C /builds/worker/.mozbuild android-sdk-linux bundletool.jar
+
+ls -al $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/repack-android-system-images-linux.sh b/taskcluster/scripts/misc/repack-android-system-images-linux.sh
new file mode 100755
index 0000000000..395061ba9e
--- /dev/null
+++ b/taskcluster/scripts/misc/repack-android-system-images-linux.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for fetching and repacking Android system images (for
+# Linux), used by the Android emulator.
+
+AVD_JSON_CONFIG="$1"
+
+mkdir -p $UPLOAD_DIR
+
+# Populate /builds/worker/.mozbuild/android-sdk-linux.
+cd $GECKO_PATH
+./mach python python/mozboot/mozboot/android.py --artifact-mode --system-images-only --avd-manifest="$AVD_JSON_CONFIG" --no-interactive --list-packages
+
+tar cavf $UPLOAD_DIR/android-system-images-linux.tar.zst -C /builds/worker/.mozbuild android-sdk-linux/system-images
+
+ls -al $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/repack-clang.sh b/taskcluster/scripts/misc/repack-clang.sh
new file mode 100755
index 0000000000..4a1b3ed01e
--- /dev/null
+++ b/taskcluster/scripts/misc/repack-clang.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+set -x -e -v
+
+shopt -s nullglob
+
+# This script is for repacking clang for cross targets on a Linux host.
+
+cd $MOZ_FETCHES_DIR
+
+# We have a clang toolchain in $MOZ_FETCHES_DIR/clang
+# We have some compiler-rts in $MOZ_FETCHES_DIR/compiler-rt*
+# We have some libunwinds in $MOZ_FETCHES_DIR/libunwind*
+# We copy everything from the compiler-rts into clang/lib/clang/$version/
+# and everything from the libunwinds into clang/
+clang_ver_dir=$(echo clang/lib/clang/*/include)
+clang_ver_dir=${clang_ver_dir%/include}
+[ -n "$clang_ver_dir" ] && for c in compiler-rt* libunwind*; do
+ case $c in
+ compiler-rt*)
+ clang_dir=$clang_ver_dir
+ ;;
+ libunwind*)
+ clang_dir=clang
+ ;;
+ esac
+ find $c -mindepth 1 -type d | while read d; do
+ mkdir -p "$clang_dir/${d#$c/}"
+ find $d -mindepth 1 -maxdepth 1 -not -type d | while read f; do
+ target_file="$clang_dir/${f#$c/}"
+ case $d in
+ compiler-rt-*/lib/darwin)
+ if [ -f "$target_file" ]; then
+ # Unify overlapping files for darwin/
+ $MOZ_FETCHES_DIR/cctools/bin/lipo -create "$f" "$target_file" -output "$target_file.new"
+ mv "$target_file.new" "$target_file"
+ continue
+ fi
+ ;;
+ esac
+ if [ -f "$target_file" ] && ! diff -q "$f" "$target_file" 2>/dev/null; then
+ echo "Cannot copy $f because it is already in ${target_file%/*}" >&2 && exit 1
+ fi
+ cp "$f" "$target_file"
+ done
+ done
+done
+
+if [ -n "$UPLOAD_DIR" ]; then
+ tar caf clang.tar.zst clang
+ mkdir -p $UPLOAD_DIR
+ mv clang.tar.zst $UPLOAD_DIR
+fi
diff --git a/taskcluster/scripts/misc/repack-jdk-linux.sh b/taskcluster/scripts/misc/repack-jdk-linux.sh
new file mode 100755
index 0000000000..2d13e360e5
--- /dev/null
+++ b/taskcluster/scripts/misc/repack-jdk-linux.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for fetching and repacking the OpenJDK (for
+# Linux)
+
+AVD_JSON_CONFIG="$1"
+
+mkdir -p $UPLOAD_DIR
+
+# Populate /builds/worker/.mozbuild/jdk
+cd $GECKO_PATH
+./mach python python/mozboot/mozboot/android.py --jdk-only
+
+tar cavf $UPLOAD_DIR/jdk-linux.tar.zst -C /builds/worker/.mozbuild jdk
+
+ls -al $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/repack-node.sh b/taskcluster/scripts/misc/repack-node.sh
new file mode 100755
index 0000000000..d3880e4799
--- /dev/null
+++ b/taskcluster/scripts/misc/repack-node.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for repacking Node (and NPM) from nodejs.org.
+
+mkdir -p "$UPLOAD_DIR"
+
+cd "$MOZ_FETCHES_DIR"
+
+# npx doesn't have great security characteristics (it downloads and executes
+# stuff directly out of npm at runtime), so let's not risk it getting into
+# anyone's PATH who doesn't already have it there:
+rm -f node/bin/npx node/bin/npx.exe
+tar caf "$UPLOAD_DIR"/node.tar.zst node
diff --git a/taskcluster/scripts/misc/repack_rust.py b/taskcluster/scripts/misc/repack_rust.py
new file mode 100755
index 0000000000..909ff379dc
--- /dev/null
+++ b/taskcluster/scripts/misc/repack_rust.py
@@ -0,0 +1,647 @@
+#!/usr/bin/env python3
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+This script downloads and repacks official rust language builds
+with the necessary tool and target support for the Firefox
+build environment.
+"""
+
+import argparse
+import errno
+import hashlib
+import os
+import shutil
+import subprocess
+import tarfile
+import textwrap
+from contextlib import contextmanager
+
+import requests
+import toml
+import zstandard
+
+
+def log(msg):
+ print("repack: %s" % msg, flush=True)
+
+
+def fetch_file(url):
+ """Download a file from the given url if it's not already present.
+
+ Returns the SHA-2 256-bit hash of the received file."""
+ filename = os.path.basename(url)
+ sha = hashlib.sha256()
+ size = 4096
+ if os.path.exists(filename):
+ with open(filename, "rb") as fd:
+ while True:
+ block = fd.read(size)
+ if not block:
+ return sha.hexdigest()
+ sha.update(block)
+ log("Could not calculate checksum!")
+ return None
+ r = requests.get(url, stream=True)
+ r.raise_for_status()
+ with open(filename, "wb") as fd:
+ for chunk in r.iter_content(size):
+ fd.write(chunk)
+ sha.update(chunk)
+ return sha.hexdigest()
+
+
+def check_call_with_input(cmd, input_data):
+ """Invoke a command, passing the given input bytes over stdin.
+
+ This is like subprocess.check_call, but allows piping
+ input to interactive commands."""
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
+ p.communicate(input_data)
+ if p.wait():
+ raise subprocess.CalledProcessError(p.returncode, cmd)
+
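+# e.g. check_call_with_input(["gpg", "--import"], key_bytes) feeds the key
+# over stdin, as setup_gpg() does below.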
+
+def setup_gpg():
+ """Add the signing key to the current gpg config.
+
+ Import a hard-coded copy of the release signing public key
+ and mark it trusted in the gpg database so subsequent
+ signature checks can succeed or fail cleanly."""
+ keyid = "0x85AB96E6FA1BE5FE"
+ log("Importing signing key %s..." % keyid)
+ key = b"""
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFJEwMkBEADlPACa2K7reD4x5zd8afKx75QYKmxqZwywRbgeICeD4bKiQoJZ
+dUjmn1LgrGaXuBMKXJQhyA34e/1YZel/8et+HPE5XpljBfNYXWbVocE1UMUTnFU9
+CKXa4AhJ33f7we2/QmNRMUifw5adPwGMg4D8cDKXk02NdnqQlmFByv0vSaArR5kn
+gZKnLY6o0zZ9Buyy761Im/ShXqv4ATUgYiFc48z33G4j+BDmn0ryGr1aFdP58tHp
+gjWtLZs0iWeFNRDYDje6ODyu/MjOyuAWb2pYDH47Xu7XedMZzenH2TLM9yt/hyOV
+xReDPhvoGkaO8xqHioJMoPQi1gBjuBeewmFyTSPS4deASukhCFOcTsw/enzJagiS
+ZAq6Imehduke+peAL1z4PuRmzDPO2LPhVS7CDXtuKAYqUV2YakTq8MZUempVhw5n
+LqVaJ5/XiyOcv405PnkT25eIVVVghxAgyz6bOU/UMjGQYlkUxI7YZ9tdreLlFyPR
+OUL30E8q/aCd4PGJV24yJ1uit+yS8xjyUiMKm4J7oMP2XdBN98TUfLGw7SKeAxyU
+92BHlxg7yyPfI4TglsCzoSgEIV6xoGOVRRCYlGzSjUfz0bCMCclhTQRBkegKcjB3
+sMTyG3SPZbjTlCqrFHy13e6hGl37Nhs8/MvXUysq2cluEISn5bivTKEeeQARAQAB
+tERSdXN0IExhbmd1YWdlIChUYWcgYW5kIFJlbGVhc2UgU2lnbmluZyBLZXkpIDxy
+dXN0LWtleUBydXN0LWxhbmcub3JnPokCOAQTAQIAIgUCUkTAyQIbAwYLCQgHAwIG
+FQgCCQoLBBYCAwECHgECF4AACgkQhauW5vob5f5fYQ//b1DWK1NSGx5nZ3zYZeHJ
+9mwGCftIaA2IRghAGrNf4Y8DaPqR+w1OdIegWn8kCoGfPfGAVW5XXJg+Oxk6QIaD
+2hJojBUrq1DALeCZVewzTVw6BN4DGuUexsc53a8DcY2Yk5WE3ll6UKq/YPiWiPNX
+9r8FE2MJwMABB6mWZLqJeg4RCrriBiCG26NZxGE7RTtPHyppoVxWKAFDiWyNdJ+3
+UnjldWrT9xFqjqfXWw9Bhz8/EoaGeSSbMIAQDkQQpp1SWpljpgqvctZlc5fHhsG6
+lmzW5RM4NG8OKvq3UrBihvgzwrIfoEDKpXbk3DXqaSs1o81NH5ftVWWbJp/ywM9Q
+uMC6n0YWiMZMQ1cFBy7tukpMkd+VPbPkiSwBhPkfZIzUAWd74nanN5SKBtcnymgJ
++OJcxfZLiUkXRj0aUT1GLA9/7wnikhJI+RvwRfHBgrssXBKNPOfXGWajtIAmZc2t
+kR1E8zjBVLId7r5M8g52HKk+J+y5fVgJY91nxG0zf782JjtYuz9+knQd55JLFJCO
+hhbv3uRvhvkqgauHagR5X9vCMtcvqDseK7LXrRaOdOUDrK/Zg/abi5d+NIyZfEt/
+ObFsv3idAIe/zpU6xa1nYNe3+Ixlb6mlZm3WCWGxWe+GvNW/kq36jZ/v/8pYMyVO
+p/kJqnf9y4dbufuYBg+RLqC5Ag0EUkTAyQEQANxy2tTSeRspfrpBk9+ju+KZ3zc4
+umaIsEa5DxJ2zIKHywVAR67Um0K1YRG07/F5+tD9TIRkdx2pcmpjmSQzqdk3zqa9
+2Zzeijjz2RNyBY8qYmyE08IncjTsFFB8OnvdXcsAgjCFmI1BKnePxrABL/2k8X18
+aysPb0beWqQVsi5FsSpAHu6k1kaLKc+130x6Hf/YJAjeo+S7HeU5NeOz3zD+h5bA
+Q25qMiVHX3FwH7rFKZtFFog9Ogjzi0TkDKKxoeFKyADfIdteJWFjOlCI9KoIhfXq
+Et9JMnxApGqsJElJtfQjIdhMN4Lnep2WkudHAfwJ/412fe7wiW0rcBMvr/BlBGRY
+vM4sTgN058EwIuY9Qmc8RK4gbBf6GsfGNJjWozJ5XmXElmkQCAvbQFoAfi5TGfVb
+77QQrhrQlSpfIYrvfpvjYoqj618SbU6uBhzh758gLllmMB8LOhxWtq9eyn1rMWyR
+KL1fEkfvvMc78zP+Px6yDMa6UIez8jZXQ87Zou9EriLbzF4QfIYAqR9LUSMnLk6K
+o61tSFmFEDobC3tc1jkSg4zZe/wxskn96KOlmnxgMGO0vJ7ASrynoxEnQE8k3WwA
++/YJDwboIR7zDwTy3Jw3mn1FgnH+c7Rb9h9geOzxKYINBFz5Hd0MKx7kZ1U6WobW
+KiYYxcCmoEeguSPHABEBAAGJAh8EGAECAAkFAlJEwMkCGwwACgkQhauW5vob5f7f
+FA//Ra+itJF4NsEyyhx4xYDOPq4uj0VWVjLdabDvFjQtbBLwIyh2bm8uO3AY4r/r
+rM5WWQ8oIXQ2vvXpAQO9g8iNlFez6OLzbfdSG80AG74pQqVVVyCQxD7FanB/KGge
+tAoOstFxaCAg4nxFlarMctFqOOXCFkylWl504JVIOvgbbbyj6I7qCUmbmqazBSMU
+K8c/Nz+FNu2Uf/lYWOeGogRSBgS0CVBcbmPUpnDHLxZWNXDWQOCxbhA1Uf58hcyu
+036kkiWHh2OGgJqlo2WIraPXx1cGw1Ey+U6exbtrZfE5kM9pZzRG7ZY83CXpYWMp
+kyVXNWmf9JcIWWBrXvJmMi0FDvtgg3Pt1tnoxqdilk6yhieFc8LqBn6CZgFUBk0t
+NSaWk3PsN0N6Ut8VXY6sai7MJ0Gih1gE1xadWj2zfZ9sLGyt2jZ6wK++U881YeXA
+ryaGKJ8sIs182hwQb4qN7eiUHzLtIh8oVBHo8Q4BJSat88E5/gOD6IQIpxc42iRL
+T+oNZw1hdwNyPOT1GMkkn86l3o7klwmQUWCPm6vl1aHp3omo+GHC63PpNFO5RncJ
+Ilo3aBKKmoE5lDSMGE8KFso5awTo9z9QnVPkRsk6qeBYit9xE3x3S+iwjcSg0nie
+aAkc0N00nc9V9jfPvt4z/5A5vjHh+NhFwH5h2vBJVPdsz6m5Ag0EVI9keAEQAL3R
+oVsHncJTmjHfBOV4JJsvCum4DuJDZ/rDdxauGcjMUWZaG338ZehnDqG1Yn/ys7zE
+aKYUmqyT+XP+M2IAQRTyxwlU1RsDlemQfWrESfZQCCmbnFScL0E7cBzy4xvtInQe
+UaFgJZ1BmxbzQrx+eBBdOTDv7RLnNVygRmMzmkDhxO1IGEu1+3ETIg/DxFE7VQY0
+It/Ywz+nHu1o4Hemc/GdKxu9hcYvcRVc/Xhueq/zcIM96l0m+CFbs0HMKCj8dgMe
+Ng6pbbDjNM+cV+5BgpRdIpE2l9W7ImpbLihqcZt47J6oWt/RDRVoKOzRxjhULVyV
+2VP9ESr48HnbvxcpvUAEDCQUhsGpur4EKHFJ9AmQ4zf91gWLrDc6QmlACn9o9ARU
+fOV5aFsZI9ni1MJEInJTP37stz/uDECRie4LTL4O6P4Dkto8ROM2wzZq5CiRNfnT
+PP7ARfxlCkpg+gpLYRlxGUvRn6EeYwDtiMQJUQPfpGHSvThUlgDEsDrpp4SQSmdA
+CB+rvaRqCawWKoXs0In/9wylGorRUupeqGC0I0/rh+f5mayFvORzwy/4KK4QIEV9
+aYTXTvSRl35MevfXU1Cumlaqle6SDkLr3ZnFQgJBqap0Y+Nmmz2HfO/pohsbtHPX
+92SN3dKqaoSBvzNGY5WT3CsqxDtik37kR3f9/DHpABEBAAGJBD4EGAECAAkFAlSP
+ZHgCGwICKQkQhauW5vob5f7BXSAEGQECAAYFAlSPZHgACgkQXLSpNHs7CdwemA/+
+KFoGuFqU0uKT9qblN4ugRyil5itmTRVffl4tm5OoWkW8uDnu7Ue3vzdzy+9NV8X2
+wRG835qjXijWP++AGuxgW6LB9nV5OWiKMCHOWnUjJQ6pNQMAgSN69QzkFXVF/q5f
+bkma9TgSbwjrVMyPzLSRwq7HsT3V02Qfr4cyq39QeILGy/NHW5z6LZnBy3BaVSd0
+lGjCEc3yfH5OaB79na4W86WCV5n4IT7cojFM+LdL6P46RgmEtWSG3/CDjnJl6BLR
+WqatRNBWLIMKMpn+YvOOL9TwuP1xbqWr1vZ66wksm53NIDcWhptpp0KEuzbU0/Dt
+OltBhcX8tOmO36LrSadX9rwckSETCVYklmpAHNxPml011YNDThtBidvsicw1vZwR
+HsXn+txlL6RAIRN+J/Rw3uOiJAqN9Qgedpx2q+E15t8MiTg/FXtB9SysnskFT/BH
+z0USNKJUY0btZBw3eXWzUnZf59D8VW1M/9JwznCHAx0c9wy/gRDiwt9w4RoXryJD
+VAwZg8rwByjldoiThUJhkCYvJ0R3xH3kPnPlGXDW49E9R8C2umRC3cYOL4U9dOQ1
+5hSlYydF5urFGCLIvodtE9q80uhpyt8L/5jj9tbwZWv6JLnfBquZSnCGqFZRfXlb
+Jphk9+CBQWwiZSRLZRzqQ4ffl4xyLuolx01PMaatkQbRaw/+JpgRNlurKQ0PsTrO
+8tztO/tpBBj/huc2DGkSwEWvkfWElS5RLDKdoMVs/j5CLYUJzZVikUJRm7m7b+OA
+P3W1nbDhuID+XV1CSBmGifQwpoPTys21stTIGLgznJrIfE5moFviOLqD/LrcYlsq
+CQg0yleu7SjOs//8dM3mC2FyLaE/dCZ8l2DCLhHw0+ynyRAvSK6aGCmZz6jMjmYF
+MXgiy7zESksMnVFMulIJJhR3eB0wx2GitibjY/ZhQ7tD3i0yy9ILR07dFz4pgkVM
+afxpVR7fmrMZ0t+yENd+9qzyAZs0ksxORoc2ze90SCx2jwEX/3K+m4I0hP2H/w5W
+gqdvuRLiqf+4BGW4zqWkLLlNIe/okt0r82SwHtDN0Ui1asmZTGj6sm8SXtwx+5cE
+38MttWqjDiibQOSthRVcETByRYM8KcjYSUCi4PoBc3NpDONkFbZm6XofR/f5mTcl
+2jDw6fIeVc4Hd1jBGajNzEqtneqqbdAkPQaLsuD2TMkQfTDJfE/IljwjrhDa9Mi+
+odtnMWq8vlwOZZ24/8/BNK5qXuCYL67O7AJB4ZQ6BT+g4z96iRLbupzu/XJyXkQF
+rOY/Ghegvn7fDrnt2KC9MpgeFBXzUp+k5rzUdF8jbCx5apVjA1sWXB9Kh3L+DUwF
+Mve696B5tlHyc1KxjHR6w9GRsh4=
+=5FXw
+-----END PGP PUBLIC KEY BLOCK-----
+"""
+ check_call_with_input(["gpg", "--import"], key)
+ check_call_with_input(
+ ["gpg", "--command-fd", "0", "--edit-key", keyid], b"trust\n5\ny\n"
+ )
+
+
+def verify_sha(filename, sha):
+ """Verify that the checksum file matches the given sha digest."""
+ sha_filename = filename + ".sha256"
+ with open(sha_filename) as f:
+ # Older sha256 files would contain `sha filename`, but more recent
+ # ones only contain `sha`.
+ checksum = f.readline().split()[0]
+ if checksum != sha:
+ raise ValueError("Checksum mismatch in %s" % filename)
+ return True
+ log("No checksum file for %s!" % filename)
+ return False
+
+
+def fetch(url, validate=True):
+ """Download and verify a package url."""
+ base = os.path.basename(url)
+ log("Fetching %s..." % base)
+ if validate:
+ fetch_file(url + ".asc")
+ fetch_file(url + ".sha256")
+ sha = fetch_file(url)
+ if validate:
+ log("Verifying %s..." % base)
+ verify_sha(base, sha)
+ subprocess.check_call(
+ ["gpg", "--keyid-format", "0xlong", "--verify", base + ".asc", base]
+ )
+ return sha
+
+
+def install(filename, target):
+ """Run a package's installer script against the given target directory."""
+ log("Unpacking %s..." % filename)
+ subprocess.check_call(["tar", "xf", filename])
+ basename = filename.split(".tar")[0]
+ log("Installing %s..." % basename)
+ install_cmd = [os.path.join(basename, "install.sh")]
+ install_cmd += ["--prefix=" + os.path.abspath(target)]
+ install_cmd += ["--disable-ldconfig"]
+ subprocess.check_call(install_cmd)
+ log("Cleaning %s..." % basename)
+ shutil.rmtree(basename)
+
+
+def package(manifest, pkg, target):
+ """Pull out the package dict for a particular package and target
+ from the given manifest."""
+ version = manifest["pkg"][pkg]["version"]
+ if target in manifest["pkg"][pkg]["target"]:
+ info = manifest["pkg"][pkg]["target"][target]
+ else:
+ # rust-src is the same for all targets, and has a literal '*' in the
+ # section key/name instead of a target
+ info = manifest["pkg"][pkg]["target"]["*"]
+ if "xz_url" in info:
+ info["url"] = info.pop("xz_url")
+ info["hash"] = info.pop("xz_hash")
+ return (version, info)
+
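+# package() returns a (version, info) pair, e.g. ("1.70.0", {"url": ...,
+# "hash": ..., "available": True}) (values illustrative).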
+
+def fetch_package(manifest, pkg, host):
+ version, info = package(manifest, pkg, host)
+ if not info["available"]:
+ log("%s marked unavailable for %s" % (pkg, host))
+ raise KeyError
+
+ log("%s %s\n %s\n %s" % (pkg, version, info["url"], info["hash"]))
+ sha = fetch(info["url"], info["hash"] is not None)
+ if info["hash"] and sha != info["hash"]:
+ log(
+ "Checksum mismatch: package resource is different from manifest"
+ "\n %s" % sha
+ )
+ raise AssertionError
+ return info
+
+
+def fetch_std(manifest, targets):
+ stds = []
+ for target in targets:
+ stds.append(fetch_package(manifest, "rust-std", target))
+ # not available for i686
+ if target != "i686-unknown-linux-musl":
+ stds.append(fetch_package(manifest, "rust-analysis", target))
+ return stds
+
+
+def fetch_optional(manifest, pkg, host):
+ try:
+ return fetch_package(manifest, pkg, host)
+ except KeyError:
+ # The package is not available, oh well!
+ return None
+
+
+@contextmanager
+def chdir(path):
+ d = os.getcwd()
+ log('cd "%s"' % path)
+ os.chdir(path)
+ try:
+ yield
+ finally:
+ log('cd "%s"' % d)
+ os.chdir(d)
+
+
+def build_tar_package(name, base, directory):
+ name = os.path.realpath(name)
+ log("tarring {} from {}/{}".format(name, base, directory))
+ assert name.endswith(".tar.zst")
+
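+    # Stream the tar archive through the zstd compressor so the whole
+    # package is never held in memory at once.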
+ cctx = zstandard.ZstdCompressor()
+ with open(name, "wb") as f, cctx.stream_writer(f) as z:
+ with tarfile.open(mode="w|", fileobj=z) as tf:
+ with chdir(base):
+ tf.add(directory)
+
+
+def fetch_manifest(channel="stable", host=None, targets=()):
+ if channel.startswith("bors-"):
+ assert host
+ rev = channel[len("bors-") :]
+ base_url = "https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rustc-builds"
+ manifest = {
+ "date": "some date",
+ "pkg": {},
+ }
+
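+    # CI builds publish no checksums, so availability is probed with an
+    # HTTP HEAD request and the hash check is skipped (hash=None).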
+ def target(url):
+ return {
+ "url": url,
+ "hash": None,
+ "available": requests.head(url).status_code == 200,
+ }
+
+ for pkg in (
+ "cargo",
+ "rustc",
+ "rustfmt-preview",
+ "clippy-preview",
+ "rust-analyzer-preview",
+ ):
+ manifest["pkg"][pkg] = {
+ "version": "bors",
+ "target": {
+ host: target(
+ "{}/{}/{}-nightly-{}.tar.xz".format(base_url, rev, pkg, host)
+ ),
+ },
+ }
+ manifest["pkg"]["rust-src"] = {
+ "version": "bors",
+ "target": {
+ "*": target("{}/{}/rust-src-nightly.tar.xz".format(base_url, rev)),
+ },
+ }
+ for pkg in ("rust-std", "rust-analysis"):
+ manifest["pkg"][pkg] = {
+ "version": "bors",
+ "target": {
+ t: target(
+ "{}/{}/{}-nightly-{}.tar.xz".format(base_url, rev, pkg, t)
+ )
+ for t in sorted(set(targets) | set([host]))
+ },
+ }
+ return manifest
+ if "-" in channel:
+ channel, date = channel.split("-", 1)
+ prefix = "/" + date
+ else:
+ prefix = ""
+ url = "https://static.rust-lang.org/dist%s/channel-rust-%s.toml" % (prefix, channel)
+ req = requests.get(url)
+ req.raise_for_status()
+ manifest = toml.loads(req.text)
+ if manifest["manifest-version"] != "2":
+ raise NotImplementedError(
+ "Unrecognized manifest version %s." % manifest["manifest-version"]
+ )
+ return manifest
+
+
+def patch_src(patch, module):
+ log("Patching Rust src... {} with {}".format(module, patch))
+ patch = os.path.realpath(patch)
+ subprocess.check_call(["patch", "-d", module, "-p1", "-i", patch, "--fuzz=0", "-s"])
+
+
+def build_src(install_dir, host, targets, patches):
+ install_dir = os.path.abspath(install_dir)
+ fetches = os.environ["MOZ_FETCHES_DIR"]
+ rust_dir = os.path.join(fetches, "rust")
+ patch_dir = os.path.join(os.environ["GECKO_PATH"], "build", "build-rust")
+
+ # Clear and remake any previous install directory.
+ try:
+ shutil.rmtree(install_dir)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ os.makedirs(install_dir)
+
+ # Patch the src (see the --patch flag's description for details)
+ for p in patches:
+ module, colon, file = p.partition(":")
+ if not colon:
+ module, file = "", p
+ patch_file = os.path.join(patch_dir, file)
+ patch_module = os.path.join(rust_dir, module)
+ patch_src(patch_file, patch_module)
+
+ log("Building Rust...")
+
+ example_config = ""
+ for example_toml in ("config.example.toml", "config.toml.example"):
+ path = os.path.join(rust_dir, example_toml)
+ if os.path.exists(path):
+ with open(path) as file:
+ example_config = file.read()
+ break
+
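+    # The option was renamed upstream from `ignore-git` to `omit-git-hash`;
+    # inspect the example config to see which name this source tree expects.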
+ if "ignore-git" in example_config:
+ omit_git_hash = "ignore-git"
+ else:
+ omit_git_hash = "omit-git-hash"
+
+ # Rust builds are configured primarily through a config.toml file.
+ #
+ # `sysconfdir` is overloaded to be relative instead of absolute.
+ # This is the default of `install.sh`, but for whatever reason
+ # `x.py install` has its own default of `/etc` which we don't want.
+ #
+ # `missing-tools` is set so `rustfmt` is allowed to fail. This means
+ # we can "succeed" at building Rust while failing to build, say, Cargo.
+ # Ideally the build system would have better granularity:
+ # https://github.com/rust-lang/rust/issues/79249
+ base_config = textwrap.dedent(
+ """
+ [build]
+ docs = false
+ sanitizers = true
+ extended = true
+ tools = ["analysis", "cargo", "rustfmt", "clippy", "src", "rust-analyzer"]
+
+ [rust]
+ {omit_git_hash} = false
+ use-lld = true
+
+ [install]
+ prefix = "{prefix}"
+ sysconfdir = "etc"
+
+ [dist]
+ missing-tools = true
+
+ [llvm]
+ download-ci-llvm = false
+ """.format(
+ prefix=install_dir,
+ omit_git_hash=omit_git_hash,
+ )
+ )
+
+ # Rust requires these to be specified per-target
+ target_config = textwrap.dedent(
+ """
+ [target.{target}]
+ cc = "clang"
+ cxx = "clang++"
+ linker = "clang"
+
+ """
+ )
+
+ final_config = base_config
+ for target in sorted(set(targets) | set([host])):
+ final_config = final_config + target_config.format(target=target)
+
+ with open(os.path.join(rust_dir, "config.toml"), "w") as file:
+ file.write(final_config)
+
+ # Setup the env so compilers and toolchains are visible
+ clang = os.path.join(fetches, "clang")
+ clang_bin = os.path.join(clang, "bin")
+ clang_lib = os.path.join(clang, "lib")
+
+ env = os.environ.copy()
+ env.update(
+ {
+ "PATH": os.pathsep.join((clang_bin, os.environ["PATH"])),
+ "LD_LIBRARY_PATH": clang_lib,
+ }
+ )
+
+ # x.py install does everything we need for us.
+ # If you're running into issues, consider using `-vv` to debug it.
+ command = ["python3", "x.py", "install", "-v", "--host", host]
+ for target in targets:
+ command.extend(["--target", target])
+
+ subprocess.check_call(command, stderr=subprocess.STDOUT, env=env, cwd=rust_dir)
+
+
+def repack(
+ host,
+ targets,
+ channel="stable",
+ cargo_channel=None,
+ patches=[],
+):
+ install_dir = "rustc"
+ if channel == "dev":
+ build_src(install_dir, host, targets, patches)
+ else:
+ if patches:
+ raise ValueError(
+ 'Patch specified, but channel "%s" is not "dev"!'
+ "\nPatches are only for building from source." % channel
+ )
+ log("Repacking rust for %s supporting %s..." % (host, targets))
+ manifest = fetch_manifest(channel, host, targets)
+ log("Using manifest for rust %s as of %s." % (channel, manifest["date"]))
+ if cargo_channel == channel:
+ cargo_manifest = manifest
+ else:
+ cargo_manifest = fetch_manifest(cargo_channel, host, targets)
+ log(
+ "Using manifest for cargo %s as of %s."
+ % (cargo_channel, cargo_manifest["date"])
+ )
+
+ log("Fetching packages...")
+ rustc = fetch_package(manifest, "rustc", host)
+ cargo = fetch_package(cargo_manifest, "cargo", host)
+ stds = fetch_std(manifest, targets)
+ rustsrc = fetch_package(manifest, "rust-src", host)
+ rustfmt = fetch_optional(manifest, "rustfmt-preview", host)
+ clippy = fetch_optional(manifest, "clippy-preview", host)
+ rust_analyzer = fetch_optional(manifest, "rust-analyzer-preview", host)
+
+ log("Installing packages...")
+
+ # Clear any previous install directory.
+ try:
+ shutil.rmtree(install_dir)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ install(os.path.basename(rustc["url"]), install_dir)
+ install(os.path.basename(cargo["url"]), install_dir)
+ install(os.path.basename(rustsrc["url"]), install_dir)
+ if rustfmt:
+ install(os.path.basename(rustfmt["url"]), install_dir)
+ if clippy:
+ install(os.path.basename(clippy["url"]), install_dir)
+ if rust_analyzer:
+ install(os.path.basename(rust_analyzer["url"]), install_dir)
+    for std in stds:
+        install(os.path.basename(std["url"]), install_dir)
+
+ log("Creating archive...")
+ tar_file = install_dir + ".tar.zst"
+ build_tar_package(tar_file, ".", install_dir)
+ shutil.rmtree(install_dir)
+ log("%s is ready." % tar_file)
+
+ upload_dir = os.environ.get("UPLOAD_DIR")
+ if upload_dir:
+ # Create the upload directory if it doesn't exist.
+ try:
+ log("Creating upload directory in %s..." % os.path.abspath(upload_dir))
+ os.makedirs(upload_dir)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ # Move the tarball to the output directory for upload.
+ log("Moving %s to the upload directory..." % tar_file)
+ shutil.move(tar_file, upload_dir)
+
+
+def expand_platform(name):
+ """Expand a shortcut name to a full Rust platform string."""
+ platforms = {
+ "android": "armv7-linux-androideabi",
+ "android_x86": "i686-linux-android",
+ "android_x86-64": "x86_64-linux-android",
+ "android_aarch64": "aarch64-linux-android",
+ "linux64": "x86_64-unknown-linux-gnu",
+ "linux32": "i686-unknown-linux-gnu",
+ "mac": "x86_64-apple-darwin",
+ "macos": "x86_64-apple-darwin",
+ "mac64": "x86_64-apple-darwin",
+ "mac32": "i686-apple-darwin",
+ "win64": "x86_64-pc-windows-msvc",
+ "win32": "i686-pc-windows-msvc",
+ "mingw32": "i686-pc-windows-gnu",
+ }
+ return platforms.get(name, name)
+
+
+def validate_channel(channel):
+ """Require a specific release version.
+
+ Packaging from meta-channels, like `stable`, `beta`, or `nightly`
+ doesn't give repeatable output. Reject such channels."""
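+    # e.g. "stable" or "nightly" alone are rejected, while "1.24.0" or
+    # "beta-2018-02-20" are accepted.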
+ channel_prefixes = ("stable", "beta", "nightly")
+ if any([channel.startswith(c) for c in channel_prefixes]):
+ if "-" not in channel:
+ raise ValueError(
+ 'Generic channel "%s" specified!'
+ "\nPlease give a specific release version"
+ ' like "1.24.0" or "beta-2018-02-20".' % channel
+ )
+
+
+def args():
+ """Read command line arguments and return options."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--channel",
+ help="Release channel to use:"
+ " 1.xx.y, beta-yyyy-mm-dd,"
+ " nightly-yyyy-mm-dd,"
+ " bors-$rev (grab a build from rust's CI),"
+ " or dev (build from source).",
+ required=True,
+ )
+ parser.add_argument(
+ "--patch",
+ dest="patches",
+ action="append",
+ default=[],
+ help="apply the given patch file to a dev build."
+ " Patch files should be placed in /build/build-rust."
+ " Patches can be prefixed with `module-path:` to specify they"
+ " apply to that git submodule in the Rust source."
+ " e.g. `src/llvm-project:mypatch.diff` patches rust's llvm."
+ " Can be given more than once.",
+ )
+ parser.add_argument(
+ "--cargo-channel",
+ help="Release channel version to use for cargo."
+ " Defaults to the same as --channel.",
+ )
+ parser.add_argument(
+ "--host",
+ help="Host platform for the toolchain executable:"
+ " e.g. linux64 or aarch64-linux-android."
+ " Defaults to linux64.",
+ )
+ parser.add_argument(
+ "--target",
+ dest="targets",
+ action="append",
+ default=[],
+ help="Additional target platform to support:"
+ " e.g. linux32 or i686-pc-windows-gnu."
+ " can be given more than once.",
+ )
+ args = parser.parse_args()
+ if not args.cargo_channel:
+ args.cargo_channel = args.channel
+ validate_channel(args.channel)
+ validate_channel(args.cargo_channel)
+ if not args.host:
+ args.host = "linux64"
+ args.host = expand_platform(args.host)
+ args.targets = [expand_platform(t) for t in args.targets]
+
+ return args
+
+
+if __name__ == "__main__":
+ args = vars(args())
+ setup_gpg()
+ repack(**args)
diff --git a/taskcluster/scripts/misc/run-profileserver-macos.sh b/taskcluster/scripts/misc/run-profileserver-macos.sh
new file mode 100755
index 0000000000..61873a273a
--- /dev/null
+++ b/taskcluster/scripts/misc/run-profileserver-macos.sh
@@ -0,0 +1,20 @@
+#! /bin/bash -vex
+set -x -e
+
+####
+# Taskcluster friendly wrapper for running the profileserver on macOS
+####
+
+export UPLOAD_PATH=../../artifacts
+mkdir -p $UPLOAD_PATH
+
+export JARLOG_FILE="en-US.log"
+
+export LLVM_PROFDATA=$MOZ_FETCHES_DIR/clang/bin/llvm-profdata
+
+set -v
+
+./mach python python/mozbuild/mozbuild/action/install.py $MOZ_FETCHES_DIR/target.dmg $MOZ_FETCHES_DIR
+./mach python build/pgo/profileserver.py --binary $MOZ_FETCHES_DIR/*.app/Contents/MacOS/firefox
+
+tar -Jcvf $UPLOAD_PATH/profdata.tar.xz merged.profdata en-US.log
diff --git a/taskcluster/scripts/misc/run-profileserver.sh b/taskcluster/scripts/misc/run-profileserver.sh
new file mode 100755
index 0000000000..dd0ad31f05
--- /dev/null
+++ b/taskcluster/scripts/misc/run-profileserver.sh
@@ -0,0 +1,42 @@
+#! /bin/bash -vex
+
+set -x -e
+
+echo "running as" $(id)
+
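+# `:` is a no-op; ${VAR:=default} inside it assigns a default only when the
+# task environment leaves the variable unset.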
+: NEED_XVFB ${NEED_XVFB:=false}
+: UPLOAD_PATH ${UPLOAD_PATH:=$HOME/artifacts}
+export UPLOAD_PATH
+
+####
+# Taskcluster friendly wrapper for running the profileserver
+####
+
+PGO_RUNDIR=obj-firefox/dist
+export JARLOG_FILE="en-US.log"
+export LLVM_PROFDATA=$MOZ_FETCHES_DIR/clang/bin/llvm-profdata
+
+set -v
+
+if $NEED_XVFB; then
+    # run Xvfb in the background
+ . /builds/worker/scripts/xvfb.sh
+
+ cleanup() {
+ local rv=$?
+ cleanup_xvfb
+ exit $rv
+ }
+ trap cleanup EXIT INT
+
+ start_xvfb '1024x768x24' 2
+fi
+
+# Move our fetched firefox into objdir/dist so the jarlog entries will match
+# the paths when the final PGO stage packages the build.
+mkdir -p $PGO_RUNDIR
+mkdir -p $UPLOAD_PATH
+mv $MOZ_FETCHES_DIR/firefox $PGO_RUNDIR
+./mach python build/pgo/profileserver.py --binary $PGO_RUNDIR/firefox/firefox
+
+tar -acvf $UPLOAD_PATH/profdata.tar.xz merged.profdata en-US.log
diff --git a/taskcluster/scripts/misc/source-test-clang-setup.sh b/taskcluster/scripts/misc/source-test-clang-setup.sh
new file mode 100755
index 0000000000..5388b6376e
--- /dev/null
+++ b/taskcluster/scripts/misc/source-test-clang-setup.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+source $HOME/checkouts/gecko/taskcluster/scripts/misc/source-test-common.sh
+
+# Add clang-tidy to PATH
+export PATH=$MOZ_FETCHES_DIR/clang-tidy/bin:$PATH
+
+# Use toolchain clang
+export LD_LIBRARY_PATH=$MOZ_FETCHES_DIR/clang/lib
+
+# Write custom mozconfig
+export MOZCONFIG=$GECKO_PATH/mozconfig
+
+# Add to mozconfig all the appropriate options
+cat <<EOT >> $MOZCONFIG
+# Enable debug mode
+ac_add_options --enable-debug
+# Enable clang-plugin in order to have all defines activated for static-analysis
+ac_add_options --enable-clang-plugin
+# Enable GC zeal, a testing and debugging feature that helps find GC-related bugs in JSAPI applications.
+ac_add_options --enable-gczeal
+# Do not treat warnings as errors
+ac_add_options --disable-warnings-as-errors
+EOT
+
+# Mach looks up clang-tidy in clang-tools
+mkdir -p $MOZBUILD_STATE_PATH/clang-tools
+ln -s $MOZ_FETCHES_DIR/clang-tidy $MOZBUILD_STATE_PATH/clang-tools/clang-tidy
diff --git a/taskcluster/scripts/misc/source-test-common.sh b/taskcluster/scripts/misc/source-test-common.sh
new file mode 100755
index 0000000000..eb2409cf4e
--- /dev/null
+++ b/taskcluster/scripts/misc/source-test-common.sh
@@ -0,0 +1,16 @@
+#! /bin/bash -vex
+
+set -x -e
+
+export MOZBUILD_STATE_PATH=$HOME/workspace
+
+# Add toolchain binaries to PATH to run ./mach configure
+export PATH=$MOZ_FETCHES_DIR/clang/bin:$PATH
+export PATH=$MOZ_FETCHES_DIR/rustc/bin:$PATH
+export PATH=$MOZ_FETCHES_DIR/cbindgen:$PATH
+export PATH=$MOZ_FETCHES_DIR/nasm:$PATH
+export PATH=$MOZ_FETCHES_DIR/node/bin:$PATH
+
+# Use clang as host compiler
+export CC=$MOZ_FETCHES_DIR/clang/bin/clang
+export CXX=$MOZ_FETCHES_DIR/clang/bin/clang++
diff --git a/taskcluster/scripts/misc/source-test-infer-setup.sh b/taskcluster/scripts/misc/source-test-infer-setup.sh
new file mode 100755
index 0000000000..57786013c5
--- /dev/null
+++ b/taskcluster/scripts/misc/source-test-infer-setup.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+source $GECKO_PATH/taskcluster/scripts/misc/source-test-common.sh
+
+# Write custom mozconfig
+MOZCONFIG=$GECKO_PATH/mozconfig
+echo "ac_add_options --enable-project=mobile/android" > $MOZCONFIG
+echo "ac_add_options --target=arm-linux-androideabi" >> $MOZCONFIG
+echo "ac_add_options --with-android-sdk=${MOZ_FETCHES_DIR}/android-sdk-linux" >> $MOZCONFIG
+echo "ac_add_options --with-android-ndk=${MOZ_FETCHES_DIR}/android-ndk" >> $MOZCONFIG
+
+# Write custom gradle properties
+export GRADLE_USER_HOME=$HOME/workspace/gradle
+mkdir -p $GRADLE_USER_HOME
+echo "org.gradle.daemon=false" >> ${GRADLE_USER_HOME}/gradle.properties
+
+# Mach looks up infer in infer...
+mkdir -p $MOZBUILD_STATE_PATH/infer/infer
+mv $MOZ_FETCHES_DIR/infer/{bin,lib} $MOZBUILD_STATE_PATH/infer/infer
diff --git a/taskcluster/scripts/misc/summarize-tgdiff.py b/taskcluster/scripts/misc/summarize-tgdiff.py
new file mode 100644
index 0000000000..f3a265ed66
--- /dev/null
+++ b/taskcluster/scripts/misc/summarize-tgdiff.py
@@ -0,0 +1,54 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import json
+import pathlib
+
+
+def filter_changes(line):
+ # Skip diff headers
+ if line.startswith("---") or line.startswith("+++"):
+ return False
+
+ # Only count lines that changed
+ return line.startswith("-") or line.startswith("+")
+
+
+def run():
+
+ parser = argparse.ArgumentParser(
+        description="Classify output of taskgraph for CI analysis"
+ )
+ parser.add_argument(
+ "path",
+ type=pathlib.Path,
+ help="Folder containing all the TXT files from taskgraph target.",
+ )
+ parser.add_argument(
+ "threshold",
+ type=int,
+ help="Minimum number of lines to trigger a warning on taskgraph output.",
+ )
+ args = parser.parse_args()
+
+ out = {"files": {}, "status": "OK", "threshold": args.threshold}
+ for path in args.path.glob("*.txt"):
+
+ with path.open() as f:
+ nb = len(list(filter(filter_changes, f.readlines())))
+
+ out["files"][path.stem] = {
+ "nb": nb,
+ "status": "WARNING" if nb >= args.threshold else "OK",
+ }
+
+ if nb >= args.threshold:
+ out["status"] = "WARNING"
+
+ (args.path / "summary.json").write_text(json.dumps(out, sort_keys=True, indent=4))
+
+
+if __name__ == "__main__":
+ run()
diff --git a/taskcluster/scripts/misc/tooltool-download.sh b/taskcluster/scripts/misc/tooltool-download.sh
new file mode 100644
index 0000000000..5f971d725a
--- /dev/null
+++ b/taskcluster/scripts/misc/tooltool-download.sh
@@ -0,0 +1,21 @@
+# Fetch a tooltool manifest.
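+# Expects GECKO_PATH, MOZ_FETCHES_DIR and (optionally) UPLOAD_DIR and
+# TOOLTOOL_CACHE to be provided by the calling task's environment.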
+
+cd $MOZ_FETCHES_DIR
+
+TOOLTOOL_DL_FLAGS=
+
+if [ -n "$UPLOAD_DIR" ]; then
+ TOOLTOOL_DL_FLAGS="${TOOLTOOL_DL_FLAGS=} --artifact-manifest $UPLOAD_DIR/toolchains.json"
+fi
+
+: TOOLTOOL_CACHE ${TOOLTOOL_CACHE:=/builds/worker/tooltool-cache}
+export TOOLTOOL_CACHE
+
+if [ -z "$TOOLTOOL_MANIFEST" ]; then
+ echo This script should not be used when there is no tooltool manifest set
+ exit 1
+fi
+
+${GECKO_PATH}/mach artifact toolchain -v${TOOLTOOL_DL_FLAGS} --tooltool-manifest "${GECKO_PATH}/${TOOLTOOL_MANIFEST}"${TOOLTOOL_CACHE:+ --cache-dir ${TOOLTOOL_CACHE}} --retry 5
+
+cd $OLDPWD
diff --git a/taskcluster/scripts/misc/unify.sh b/taskcluster/scripts/misc/unify.sh
new file mode 100755
index 0000000000..85c57667d8
--- /dev/null
+++ b/taskcluster/scripts/misc/unify.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+set -x -e
+
+export LIPO=$MOZ_FETCHES_DIR/cctools/bin/x86_64-apple-darwin-lipo
+
+for i in x64 aarch64; do
+ $GECKO_PATH/mach python -m mozbuild.action.unpack_dmg $MOZ_FETCHES_DIR/$i/target.dmg $i
+done
+$GECKO_PATH/mach python $GECKO_PATH/toolkit/mozapps/installer/unify.py x64/*.app aarch64/*.app
+$GECKO_PATH/mach python -m mozbuild.action.make_dmg x64 target.dmg
+
+mkdir -p $UPLOAD_DIR
+mv target.dmg $UPLOAD_DIR/
+
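+# List the extracted x64 fetches so each one can be paired with its
+# aarch64 counterpart below.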
+python3 -c '
+import json
+import os
+
+for artifact in json.loads(os.environ["MOZ_FETCHES"]):
+ if artifact.get("extract") and artifact.get("dest", "").startswith("x64"):
+ print(artifact["dest"], os.path.basename(artifact["artifact"]))
+' | while read dir artifact; do
+ if [ "$artifact" = target.crashreporter-symbols.zip ]; then
+ $GECKO_PATH/mach python $GECKO_PATH/python/mozbuild/mozbuild/action/unify_symbols.py $MOZ_FETCHES_DIR/$dir $MOZ_FETCHES_DIR/aarch64${dir#x64}
+ else
+ $GECKO_PATH/mach python $GECKO_PATH/python/mozbuild/mozbuild/action/unify_tests.py $MOZ_FETCHES_DIR/$dir $MOZ_FETCHES_DIR/aarch64${dir#x64}
+ fi
+
+ case $artifact in
+ *.tar.gz)
+ find $MOZ_FETCHES_DIR/$dir -not -type d -printf '%P\0' | tar -C $MOZ_FETCHES_DIR/$dir --owner=0:0 --group=0:0 -zcf $artifact --no-recursion --null -T -
+ ;;
+ *.zip)
+ $GECKO_PATH/mach python $GECKO_PATH/python/mozbuild/mozbuild/action/zip.py -C $MOZ_FETCHES_DIR/$dir $PWD/$artifact '*'
+ ;;
+ esac
+ mv $artifact $UPLOAD_DIR/
+done
diff --git a/taskcluster/scripts/misc/unpack-sdk.py b/taskcluster/scripts/misc/unpack-sdk.py
new file mode 100644
index 0000000000..290370db42
--- /dev/null
+++ b/taskcluster/scripts/misc/unpack-sdk.py
@@ -0,0 +1,87 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import hashlib
+import os
+import shutil
+import stat
+import sys
+import tempfile
+from io import BytesIO
+from urllib.request import urlopen
+
+from mozpack.macpkg import Pbzx, uncpio, unxar
+
+
+def unpack_sdk(url, sha256, extract_prefix, out_dir="."):
+ with tempfile.TemporaryFile() as pkg:
+ hash = hashlib.sha256()
+ with urlopen(url) as fh:
+ # Equivalent to shutil.copyfileobj, but computes sha256 at the same time.
+ while True:
+ buf = fh.read(1024 * 1024)
+ if not buf:
+ break
+ hash.update(buf)
+ pkg.write(buf)
+ digest = hash.hexdigest()
+ if digest != sha256:
+ raise Exception(f"(actual) {digest} != (expected) {sha256}")
+
+ pkg.seek(0, os.SEEK_SET)
+
+ for name, content in unxar(pkg):
+ if name in ("Payload", "Content"):
+ extract_payload(content, extract_prefix, out_dir)
+
+
+def extract_payload(fileobj, extract_prefix, out_dir="."):
+ hardlinks = {}
+ for path, st, content in uncpio(Pbzx(fileobj)):
+ # When there are hardlinks, normally a cpio stream is supposed to
+ # contain the data for all of them, but, even with compression, that
+ # can be a waste of space, so in some cpio streams (*cough* *cough*,
+ # Apple's, e.g. in Xcode), the files after the first one have dummy
+ # data.
+ # As we may be filtering the first file out (if it doesn't match
+ # extract_prefix), we need to keep its data around (we're not going
+ # to be able to rewind).
+ # We could do something fancy in the case where the first file is not
+ # filtered out, but in practice, it's not worth the extra complexity.
+ if stat.S_ISREG(st.mode) and st.nlink > 1:
+ key = (st.dev, st.ino)
+ hardlink = hardlinks.get(key)
+ if hardlink:
+ hardlink[0] -= 1
+ if hardlink[0] == 0:
+ del hardlinks[key]
+ else:
+ hardlink = hardlinks[key] = [st.nlink - 1, BytesIO(content.read())]
+ content = hardlink[1]
+ content.seek(0)
+
+ if not path:
+ continue
+ path = path.decode()
+ if not path.startswith(extract_prefix):
+ continue
+ path = os.path.join(out_dir, path[len(extract_prefix) :].lstrip("/"))
+ if stat.S_ISDIR(st.mode):
+ os.makedirs(path, exist_ok=True)
+ else:
+ parent = os.path.dirname(path)
+ if parent:
+ os.makedirs(parent, exist_ok=True)
+
+ if stat.S_ISLNK(st.mode):
+ os.symlink(content.read(), path)
+ elif stat.S_ISREG(st.mode):
+ with open(path, "wb") as out:
+ shutil.copyfileobj(content, out)
+ else:
+ raise Exception(f"File mode {st.mode:o} is not supported")
+
+
+if __name__ == "__main__":
+ unpack_sdk(*sys.argv[1:])
diff --git a/taskcluster/scripts/misc/verify-devtools-bundle.py b/taskcluster/scripts/misc/verify-devtools-bundle.py
new file mode 100644
index 0000000000..901db0eb08
--- /dev/null
+++ b/taskcluster/scripts/misc/verify-devtools-bundle.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Check that the current sourcemap and worker bundles built for DevTools are up to date.
+This job should fail if any file impacting the bundle creation was modified without
+regenerating the bundles.
+
+This check should be run after building the bundles via:
+cd devtools/client/debugger
+yarn && node bin/bundle.js
+
+Those steps are done in the devtools-verify-bundle job, prior to calling this script.
+The script will only run `hg status devtools/` and check that no change is
+detected by Mercurial.
+"""
+
+import argparse
+import json
+import subprocess
+import sys
+
+# Ignore module-manifest.json updates which can randomly happen when
+# building bundles.
+hg_exclude = "devtools/client/debugger/bin/module-manifest.json"
+
+print("Run `hg status devtools/`")
+status = (
+ subprocess.check_output(["hg", "status", "-n", "devtools/", "-X", hg_exclude])
+ .decode("utf-8")
+ .split("\n")
+)
+print(" status:")
+print("-" * 80)
+
+doc = "https://firefox-source-docs.mozilla.org/devtools/tests/node-tests.html#devtools-bundle"
+
+failures = {}
+for l in status:
+ if not l:
+ # Ignore empty lines
+ continue
+
+ failures[l] = [
+ {
+ "path": l,
+ "line": None,
+ "column": None,
+ "level": "error",
+ "message": l
+ + " is outdated and needs to be regenerated, "
+ + f"instructions at: {doc}",
+ }
+ ]
+
+
+diff = subprocess.check_output(["hg", "diff", "devtools/", "-X", hg_exclude]).decode(
+ "utf-8"
+)
+
+# Revert all the changes created by `node bin/bundle.js`
+subprocess.check_output(["hg", "revert", "-C", "devtools/"])
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--output", required=True)
+args = parser.parse_args()
+
+with open(args.output, "w") as fp:
+ json.dump(failures, fp, indent=2)
+
+if len(failures) > 0:
+ print(
+ "TEST-UNEXPECTED-FAIL | devtools-bundle | DevTools bundles need to be regenerated, "
+ + f"instructions at: {doc}"
+ )
+
+ print("The following devtools bundles were detected as outdated:")
+ for failure in failures:
+ print(failure)
+
+ print(f"diff:{diff}")
+
+ sys.exit(1)
diff --git a/taskcluster/scripts/misc/vs-cleanup.sh b/taskcluster/scripts/misc/vs-cleanup.sh
new file mode 100644
index 0000000000..8bb93b266f
--- /dev/null
+++ b/taskcluster/scripts/misc/vs-cleanup.sh
@@ -0,0 +1,13 @@
+case "$(uname -s)" in
+MINGW*|MSYS*)
+ # For some reason, by the time the task finishes, and when run-task
+ # starts its cleanup, there is still a vctip.exe (MSVC telemetry-related
+ # process) running and using a dll that run-task can't then delete.
+ # "For some reason", because the same doesn't happen with other tasks.
+ # In fact, this used to happen with older versions of MSVC for other
+ # tasks, and stopped when upgrading to 15.8.4...
+ taskkill -f -im vctip.exe || true
+ # Same with the mspdbsrv process.
+ taskkill -f -im mspdbsrv.exe || true
+ ;;
+esac
diff --git a/taskcluster/scripts/misc/vs-setup.sh b/taskcluster/scripts/misc/vs-setup.sh
new file mode 100644
index 0000000000..3721f93114
--- /dev/null
+++ b/taskcluster/scripts/misc/vs-setup.sh
@@ -0,0 +1,42 @@
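+# Locate the MSVC tools, Windows SDK and CRT inside the fetched VS layout,
+# falling back through the known version directories so a single script
+# works across the different toolchain fetches.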
+VSDIR=vs
+VSPATH="${MOZ_FETCHES_DIR}/${VSDIR}"
+UNIX_VSPATH="$(cd ${MOZ_FETCHES_DIR} && pwd)/${VSDIR}"
+VCDIR=VC/Tools/MSVC/14.16.27023
+if [ ! -d "${VSPATH}/${VCDIR}" ]; then
+ VCDIR=VC/Tools/MSVC/14.29.30133
+fi
+if [ ! -d "${VSPATH}/${VCDIR}" ]; then
+ VCDIR=VC/Tools/MSVC/14.35.32215
+fi
+SDKDIR="Windows Kits/10"
+SDK_VERSION=10.0.17134.0
+if [ ! -d "${VSPATH}/${SDKDIR}/Lib/${SDK_VERSION}" ]; then
+ SDK_VERSION=10.0.19041.0
+fi
+if [ ! -d "${VSPATH}/${SDKDIR}/Lib/${SDK_VERSION}" ]; then
+ SDK_VERSION=10.0.22621.0
+fi
+
+case "$TARGET" in
+aarch64-pc-windows-msvc)
+ SDK_CPU=arm64
+ ;;
+i686-pc-windows-msvc)
+ SDK_CPU=x86
+ ;;
+*)
+ SDK_CPU=x64
+ ;;
+esac
+
+CRT_DIR="microsoft.vc141.crt"
+if [ ! -d "${UNIX_VSPATH}/redist/${SDK_CPU}/$CRT_DIR" ]; then
+ CRT_DIR="microsoft.vc142.crt"
+fi
+if [ ! -d "${UNIX_VSPATH}/redist/${SDK_CPU}/$CRT_DIR" ]; then
+ CRT_DIR="microsoft.vc143.crt"
+fi
+
+export INCLUDE="${VSPATH}/${VCDIR}/include;${VSPATH}/${VCDIR}/atlmfc/include;${VSPATH}/${SDKDIR}/Include/${SDK_VERSION}/ucrt;${VSPATH}/${SDKDIR}/Include/${SDK_VERSION}/shared;${VSPATH}/${SDKDIR}/Include/${SDK_VERSION}/um;${VSPATH}/${SDKDIR}/Include/${SDK_VERSION}/winrt;${VSPATH}/dia sdk/include"
+export LIB="${VSPATH}/${VCDIR}/lib/${SDK_CPU};${VSPATH}/${VCDIR}/atlmfc/lib/${SDK_CPU};${VSPATH}/${SDKDIR}/Lib/${SDK_VERSION}/um/${SDK_CPU};${VSPATH}/${SDKDIR}/Lib/${SDK_VERSION}/ucrt/${SDK_CPU};${VSPATH}/dia sdk/lib/amd64"
+export PATH="${UNIX_VSPATH}/${VCDIR}/bin/hostx64/${SDK_CPU}:${UNIX_VSPATH}/${VCDIR}/bin/hostx86/x86:${UNIX_VSPATH}/${SDKDIR}/bin/${SDK_VERSION}/${SDK_CPU}:${UNIX_VSPATH}/redist/${SDK_CPU}/$CRT_DIR:${UNIX_VSPATH}/${SDKDIR}/redist/ucrt/dlls/${SDK_CPU}:${UNIX_VSPATH}/dia sdk/bin/amd64:$PATH"
diff --git a/taskcluster/scripts/misc/wr-cargotest-macos-build.sh b/taskcluster/scripts/misc/wr-cargotest-macos-build.sh
new file mode 100755
index 0000000000..23b4ccedec
--- /dev/null
+++ b/taskcluster/scripts/misc/wr-cargotest-macos-build.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+set -x -e -v
+
+source ${GECKO_PATH}/taskcluster/scripts/misc/wr-macos-cross-build-setup.sh
+
+export UPLOAD_DIR="${HOME}/artifacts"
+mkdir -p "${UPLOAD_DIR}"
+
+# Do a cross-build for cargo test run
+pushd "${GECKO_PATH}/gfx/wr"
+CARGOFLAGS="-vv --frozen --target=${TARGET_TRIPLE}" \
+ CARGOTESTFLAGS="--no-run" \
+ ci-scripts/macos-debug-tests.sh
+# Package up the test binaries
+cd "target/${TARGET_TRIPLE}"
+mkdir cargo-test-binaries
+mv debug cargo-test-binaries/
+find cargo-test-binaries/debug/deps -maxdepth 1 -type f -executable -print0 > binaries.lst
+tar cjf cargo-test-binaries.tar.bz2 --null -T binaries.lst
+mv cargo-test-binaries.tar.bz2 "${UPLOAD_DIR}"
+# Clean the build
+cd "${GECKO_PATH}/gfx/wr"
+rm -rf target
+popd
diff --git a/taskcluster/scripts/misc/wr-macos-cross-build-setup.sh b/taskcluster/scripts/misc/wr-macos-cross-build-setup.sh
new file mode 100755
index 0000000000..1c9faa4b9b
--- /dev/null
+++ b/taskcluster/scripts/misc/wr-macos-cross-build-setup.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+set -x -e -v
+
+export TARGET_TRIPLE="x86_64-apple-darwin"
+
+MACOS_SYSROOT="${MOZ_FETCHES_DIR}/MacOSX13.3.sdk"
+CLANGDIR="${MOZ_FETCHES_DIR}/clang"
+
+# Deploy the wrench dependencies
+mv ${MOZ_FETCHES_DIR}/wrench-deps/{vendor,.cargo} "${GECKO_PATH}/gfx/wr/"
+
+# Building wrench with the `headless` feature also builds the osmesa-src crate,
+# which includes building C++ code. We have to do a bunch of shenanigans
+# to make this cross-compile properly.
+
+pushd "${MOZ_FETCHES_DIR}/clang/bin"
+
+# Add a pkg-config cross-compile wrapper. Without this, the configure script
+# will use pkg-config from the host, which will find host libraries that are
+# not what we want. This script is stolen from
+# https://autotools.io/pkgconfig/cross-compiling.html
+cat > ${TARGET_TRIPLE}-pkg-config <<END_PKGCONFIG_WRAPPER
+#!/bin/sh
+export PKG_CONFIG_DIR=
+export PKG_CONFIG_LIBDIR=${MACOS_SYSROOT}/usr/lib/pkgconfig:${MACOS_SYSROOT}/usr/share/pkgconfig
+export PKG_CONFIG_SYSROOT_DIR=${MACOS_SYSROOT}
+exec pkg-config "\$@"
+END_PKGCONFIG_WRAPPER
+chmod +x "${TARGET_TRIPLE}-pkg-config"
+popd
+
+[ -d "${MOZ_FETCHES_DIR}/clang-mac/clang" ] && cat > ${MOZ_FETCHES_DIR}/clang-mac/clang/bin/llvm-config <<EOF_LLVM_CONFIG
+#!/bin/sh
+${MOZ_FETCHES_DIR}/clang/bin/llvm-config "\$@" | sed 's,${MOZ_FETCHES_DIR}/clang,${MOZ_FETCHES_DIR}/clang-mac/clang,g;s,-lLLVM-[0-9]\+,-lLLVM,g'
+EOF_LLVM_CONFIG
+
+export PATH="${MOZ_FETCHES_DIR}/rustc/bin:${MOZ_FETCHES_DIR}/clang/bin:${MOZ_FETCHES_DIR}/wrench-deps/meson:${PATH}"
+
+# Tell the configure script where to find zlib, because otherwise it tries
+# to use pkg-config to find it, which fails (no .pc file in the macos SDK).
+export ZLIB_CFLAGS="-I${MACOS_SYSROOT}/usr/include"
+export ZLIB_LIBS="-L${MACOS_SYSROOT}/usr/lib -lz"
+
+# Set up compiler and flags for cross-compile. Careful to only export the
+# target-specific CFLAGS/CXXFLAGS variables, to not break any host builds.
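+# (cargo's cc crate consults CFLAGS_<target_with_underscores> before the
+# generic CFLAGS, which is presumably why the variables are named this way)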
+export CC="${CLANGDIR}/bin/clang"
+TARGET_CFLAGS="-fuse-ld=lld -target ${TARGET_TRIPLE} -mmacosx-version-min=10.12 --rtlib=compiler-rt --sysroot ${MACOS_SYSROOT} -Qunused-arguments"
+export CFLAGS_${TARGET_TRIPLE//-/_}="${TARGET_CFLAGS}"
+export CXX="${CLANGDIR}/bin/clang++"
+TARGET_CXXFLAGS="-fuse-ld=lld -target ${TARGET_TRIPLE} -mmacosx-version-min=10.12 --rtlib=compiler-rt --sysroot ${MACOS_SYSROOT} -stdlib=libc++ -Qunused-arguments"
+export CXXFLAGS_${TARGET_TRIPLE//-/_}="${TARGET_CXXFLAGS}"
+export AR="${CLANGDIR}/bin/llvm-ar"
+
+# See documentation in cargo-linker for why we need this. TL;DR is that passing
+# the right arguments to the linker when invoked by cargo is nigh impossible
+# without this.
+export MOZ_CARGO_WRAP_LD="${CC}"
+export MOZ_CARGO_WRAP_LD_CXX="${CXX}"
+export MOZ_CARGO_WRAP_LDFLAGS="${TARGET_CFLAGS}"
+export CARGO_TARGET_X86_64_APPLE_DARWIN_LINKER="${GECKO_PATH}/build/cargo-linker"
diff --git a/taskcluster/scripts/misc/wrench-android-build.sh b/taskcluster/scripts/misc/wrench-android-build.sh
new file mode 100755
index 0000000000..975ba8b85b
--- /dev/null
+++ b/taskcluster/scripts/misc/wrench-android-build.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+set -x -e -v
+
+MODE=${1?"First argument must be debug|release"}
+
+pushd "${MOZ_FETCHES_DIR}"
+mv wrench-deps/{vendor,.cargo,cargo-apk} ${GECKO_PATH}/gfx/wr
+popd
+
+pushd "${GECKO_PATH}/gfx/wr/wrench"
+# These things come from the toolchain dependencies of the job that invokes
+# this script (webrender-wrench-android-build).
+export PATH="${PATH}:${MOZ_FETCHES_DIR}/rustc/bin"
+export PATH="${PATH}:${JAVA_HOME}/bin"
+export ANDROID_SDK_ROOT="${MOZ_FETCHES_DIR}/android-sdk-linux"
+export ANDROID_NDK_ROOT="${MOZ_FETCHES_DIR}/android-ndk"
+
+if [ "$MODE" == "debug" ]; then
+ ../cargo-apk/bin/cargo-apk apk build --frozen --verbose --lib
+elif [ "$MODE" == "release" ]; then
+ ../cargo-apk/bin/cargo-apk apk build --frozen --verbose --lib --release
+else
+ echo "Unknown mode '${MODE}'; must be 'debug' or 'release'"
+ exit 1
+fi
+popd
diff --git a/taskcluster/scripts/misc/wrench-deps-vendoring.sh b/taskcluster/scripts/misc/wrench-deps-vendoring.sh
new file mode 100755
index 0000000000..f81dc1be93
--- /dev/null
+++ b/taskcluster/scripts/misc/wrench-deps-vendoring.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+set -x -e -v
+
+# This script uses `cargo vendor` to download all the dependencies needed
+# to build `wrench` (a tool used for standalone testing of webrender), and
+# exports those dependencies as a tarball. This avoids having to download
+# these dependencies on every test job that uses `wrench`.
+
+UPLOAD_DIR=$HOME/artifacts
+
+cd $GECKO_PATH
+export PATH=$PATH:$MOZ_FETCHES_DIR/rustc/bin:$HOME/.cargo/bin
+cd gfx/wr/
+mkdir .cargo
+cargo vendor --sync ./Cargo.toml > .cargo/config
+mkdir wrench-deps
+mv vendor .cargo wrench-deps/
+mkdir wrench-deps/cargo-apk
+# Until there's a version of cargo-apk published on crates.io that has
+# https://github.com/rust-windowing/android-ndk-rs/pull/236, we need to use
+# an unpublished version. Additionally, until we update the NDK version used
+# in gecko we must use our own patched version. See bug 1615148.
+(cd $MOZ_FETCHES_DIR/android-ndk-rs/cargo-apk; cargo update -p home --precise 0.5.5)
+cargo install --locked --path $MOZ_FETCHES_DIR/android-ndk-rs/cargo-apk --root wrench-deps/cargo-apk cargo-apk
+
+ci-scripts/install-meson.sh
+mv meson wrench-deps/meson
+
+mkdir -p $UPLOAD_DIR
+tar caf $UPLOAD_DIR/wrench-deps.tar.zst wrench-deps
diff --git a/taskcluster/scripts/misc/wrench-macos-build.sh b/taskcluster/scripts/misc/wrench-macos-build.sh
new file mode 100755
index 0000000000..43842d4510
--- /dev/null
+++ b/taskcluster/scripts/misc/wrench-macos-build.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+set -x -e -v
+
+source ${GECKO_PATH}/taskcluster/scripts/misc/wr-macos-cross-build-setup.sh
+
+# The osmesa-src build which we do as part of the headless build below
+# doesn't seem to always use CFLAGS/CXXFLAGS where expected. Instead we
+# just squash those flags into CC/CXX and everything works out.
+# Export HOST_CC and HOST_CXX without the squashed flags, so that host
+# builds use them and don't see the target flags.
+export HOST_CC="${CC}"
+export HOST_CXX="${CXX}"
+CFLAGS_VAR="CFLAGS_${TARGET_TRIPLE//-/_}"
+CXXFLAGS_VAR="CXXFLAGS_${TARGET_TRIPLE//-/_}"
+export CC="${CC} ${!CFLAGS_VAR}"
+export ${CFLAGS_VAR}=
+export CXX="${CXX} ${!CXXFLAGS_VAR}"
+export ${CXXFLAGS_VAR}=
+
+export MESON_CROSSFILE=${GECKO_PATH}/gfx/wr/ci-scripts/etc/wr-darwin.meson
+export UPLOAD_DIR="${HOME}/artifacts"
+mkdir -p "${UPLOAD_DIR}"
+
+# Do a cross-build without the `headless` feature
+pushd "${GECKO_PATH}/gfx/wr/wrench"
+cargo build --release -vv --frozen --target=${TARGET_TRIPLE}
+# Package up the resulting wrench binary
+cd "../target/${TARGET_TRIPLE}"
+mkdir -p wrench-macos/bin
+mv release/wrench wrench-macos/bin/
+tar cjf wrench-macos.tar.bz2 wrench-macos
+mv wrench-macos.tar.bz2 "${UPLOAD_DIR}"
+# Clean the build
+cd "${GECKO_PATH}/gfx/wr"
+rm -rf target
+popd
+
+# Do a cross-build with the `headless` feature
+pushd "${GECKO_PATH}/gfx/wr/wrench"
+cargo build --release -vv --frozen --target=${TARGET_TRIPLE} --features headless
+# Package up the wrench binary and some libraries that we will need
+cd "../target/${TARGET_TRIPLE}"
+
+# Copy the native macOS libLLVM as dynamic dependency
+cp "${MOZ_FETCHES_DIR}/clang-mac/clang/lib/libLLVM.dylib" release/build/osmesa-src*/out/mesa/src/gallium/targets/osmesa/
+
+mkdir wrench-macos-headless
+mv release wrench-macos-headless/
+tar cjf wrench-macos-headless.tar.bz2 \
+ wrench-macos-headless/release/wrench \
+ wrench-macos-headless/release/build/osmesa-src*/out/mesa/src/gallium/targets/osmesa \
+ wrench-macos-headless/release/build/osmesa-src*/out/mesa/src/mapi/shared-glapi
+mv wrench-macos-headless.tar.bz2 "${UPLOAD_DIR}"
+
+# Clean the build
+cd "${GECKO_PATH}/gfx/wr"
+rm -rf target
+popd
diff --git a/taskcluster/scripts/misc/wrench-windows-tests.sh b/taskcluster/scripts/misc/wrench-windows-tests.sh
new file mode 100644
index 0000000000..52b3a32173
--- /dev/null
+++ b/taskcluster/scripts/misc/wrench-windows-tests.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+set -x -e -v
+
+# This script runs the windows CI scripts for standalone WebRender. The CI
+# scripts build WebRender in various "standalone" (without Gecko)
+# configurations and also run WebRender's reftest suite using the `wrench`
+# tool in the WebRender repository.
+# The builds involved require a number of dependencies to be available,
+# which is all handled below.
+
+cd $GECKO_PATH
+
+export PATH=$PATH:$(cd $MOZ_FETCHES_DIR && pwd)/rustc/bin
+
+. taskcluster/scripts/misc/vs-setup.sh
+
+# Move the wrench-deps vendored crates into place
+mv ${MOZ_FETCHES_DIR}/wrench-deps/{vendor,.cargo} gfx/wr
+cd gfx/wr
+
+# This is needed for the WebRender standalone reftests
+powershell.exe 'iex (Get-Content -Raw ci-scripts\set-screenresolution.ps1); Set-ScreenResolution 1920 1080'
+
+# Run the CI scripts
+export CARGOFLAGS='--verbose --frozen'
+cmd.exe /c 'ci-scripts\windows-tests.cmd'
+
+. $GECKO_PATH/taskcluster/scripts/misc/vs-cleanup.sh
diff --git a/taskcluster/scripts/misc/zstdpy b/taskcluster/scripts/misc/zstdpy
new file mode 100755
index 0000000000..7fc27fded6
--- /dev/null
+++ b/taskcluster/scripts/misc/zstdpy
@@ -0,0 +1,79 @@
+#!/usr/bin/env python3
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""This script compresses and decompresses data using the zstandard compression
+format, as provided by the python-zstandard module.
+
+Data is provided on stdin and output on stdout."""
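+# Example invocations, mirroring the zstd CLI flags defined below:
+#   zstdpy -z -l 19 < input > output.zst
+#   zstdpy -T 0 big.tar > big.tar.zst
+#   zstdpy -d < output.zst > input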
+
+import sys
+from argparse import ArgumentParser
+
+import zstandard
+
+
+def main(argv=None):
+ parser = ArgumentParser(description=__doc__)
+ parser.set_defaults(mode="compress")
+ parser.add_argument(
+ "-z",
+ "--compress",
+ dest="mode",
+ action="store_const",
+ const="compress",
+ help="compress the data (this is the default)",
+ )
+ parser.add_argument(
+ "-d",
+ "--decompress",
+ dest="mode",
+ action="store_const",
+ const="decompress",
+ help="decompress the data",
+ )
+ parser.add_argument(
+ "-T",
+ "--threads",
+ dest="threads",
+ default=1,
+ type=int,
+ help="Compress using # working threads. If 0, use number of CPUs on the system. (default 1)",
+ )
+ parser.add_argument(
+ "-l",
+ "--level",
+ dest="level",
+ default=3,
+ type=int,
+ help="Compression level from 1-22 (default 3)",
+ )
+ parser.add_argument(
+ "file",
+ nargs="?",
+ help="File to compress/decompress. Default is stdin.",
+ )
+
+ args = parser.parse_args(argv)
+
+ # The zstd commandline tool uses 0 to specify number of threads equal to
+ # the number of CPUs whereas the python module uses negative numbers to
+ # flag this behavior. Emulate the zstd commandline utility's behavior here
+ if args.threads == 0:
+ args.threads = -1
+
+ if args.file:
+ in_file = open(args.file, "rb")
+ else:
+ in_file = sys.stdin.buffer
+
+ if args.mode == "compress":
+ ctx = zstandard.ZstdCompressor(level=args.level, threads=args.threads)
+ elif args.mode == "decompress":
+ ctx = zstandard.ZstdDecompressor()
+
+ ctx.copy_stream(in_file, sys.stdout.buffer)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/taskcluster/scripts/run-task b/taskcluster/scripts/run-task
new file mode 100755
index 0000000000..fb0d32f8f5
--- /dev/null
+++ b/taskcluster/scripts/run-task
@@ -0,0 +1,1005 @@
+#!/usr/bin/python3 -u
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""Run a task after performing common actions.
+
+This script is meant to be the "driver" for TaskCluster based tasks.
+It receives some common arguments to control the run-time environment.
+
+It performs actions as requested from the arguments. Then it executes
+the requested process and prints its output, prefixing it with the
+current time to improve log usefulness.
+"""
+
+import sys
+
+
+if sys.version_info[0:2] < (3, 5):
+ print('run-task requires Python 3.5+')
+ sys.exit(1)
+
+
+import argparse
+import datetime
+import errno
+import io
+import json
+import os
+import random
+import re
+import shutil
+import signal
+import socket
+import stat
+import subprocess
+
+import urllib.error
+import urllib.request
+
+from threading import Thread
+
+FINGERPRINT_URL = 'http://taskcluster/secrets/v1/secret/project/taskcluster/gecko/hgfingerprint'
+FALLBACK_FINGERPRINT = {
+ 'fingerprints':
+ "sha256:4D:EB:21:6E:35:2F:99:C6:8F:C3:47:9B:57:B8:6C:17:15:8F:86:09:D4:6C:17:1D:87:B0:DE:F9:0E:51:70:FC,"
+ "sha256:90:85:39:A8:4F:47:20:58:98:0D:48:4D:8A:AC:71:DB:5C:AF:76:44:F1:B1:3E:56:92:FF:21:8C:C9:A9:F7:11"
+}
+
+HGMOINTERNAL_CONFIG_URL = 'http://taskcluster/secrets/v1/secret/project/taskcluster/gecko/hgmointernal'
+
+CACHE_UID_GID_MISMATCH = '''
+There is a UID/GID mismatch on the cache. This likely means:
+
+a) different tasks are running as different users/groups
+b) different Docker images have different UID/GID for the same user/group
+
+Our cache policy is that the UID/GID for ALL tasks must be consistent
+for the lifetime of the cache. This eliminates permissions problems due
+to file/directory user/group ownership.
+
+To make this error go away, ensure that all Docker images use
+a consistent UID/GID and that all tasks using this cache run as
+the same user/group.
+'''
+
+
+NON_EMPTY_VOLUME = '''
+error: volume %s is not empty
+
+Our Docker image policy requires volumes to be empty.
+
+The volume was likely populated as part of building the Docker image.
+Change the Dockerfile and anything run from it to not create files in
+any VOLUME.
+
+A lesser possibility is that you stumbled upon a TaskCluster platform bug
+where it fails to use new volumes for tasks.
+'''
+
+
+FETCH_CONTENT_NOT_FOUND = '''
+error: fetch-content script not found
+
+The script at `taskcluster/scripts/misc/fetch-content` could not be
+detected in the current environment.
+
+If this task clones gecko, make sure the GECKO_PATH environment variable
+is set to the proper location. Otherwise, the script may need to be mounted
+or added to the task's Docker image and placed on the PATH.
+'''
+
+# The exit code to use when caches should be purged and the task retried.
+# This is EX_OSFILE (from sysexits.h):
+# Some system file does not exist, cannot be opened, or has some
+# sort of error (e.g., syntax error).
+EXIT_PURGE_CACHE = 72
+
+
+IS_MACOSX = sys.platform == 'darwin'
+IS_POSIX = os.name == 'posix'
+IS_WINDOWS = os.name == 'nt'
+
+
+def print_line(prefix, m):
+ now = datetime.datetime.utcnow().isoformat().encode('utf-8')
+ # slice microseconds to 3 decimals.
+ now = now[:-3] if now[-7:-6] == b'.' else now
+ bytes = b'[%s %sZ] %s' % (prefix, now, m)
+ written = 0
+ while written < len(bytes):
+ written += (sys.stdout.buffer.write(bytes[written:]) or 0)
+ sys.stdout.buffer.flush()
+
+
+def run_and_prefix_output(prefix, args, *, extra_env=None, cwd=None):
+ """Runs a process and prefixes its output with the time.
+
+ Returns the process exit code.
+ """
+ print_line(
+ prefix,
+ b"executing %r%s\n" % (args, b"in %s" % (cwd.encode("utf-8"),) if cwd else b""),
+ )
+
+ env = dict(os.environ)
+ env.update(extra_env or {})
+
+ # Note: TaskCluster's stdin is a TTY. This attribute is lost
+ # when we pass sys.stdin to the invoked process. If we cared
+ # to preserve stdin as a TTY, we could make this work. But until
+ # someone needs it, don't bother.
+
+ # We want stdout to be bytes on Python 3. That means we can't use
+ # universal_newlines=True (because it implies text mode). But
+ # p.stdout.readline() won't work for bytes text streams. So, on Python 3,
+ # we manually install a latin1 stream wrapper. This allows us to readline()
+ # and preserves bytes, without losing any data.
+
+ p = subprocess.Popen(args,
+ # Disable buffering because we want to receive output
+ # as it is generated so timestamps in logs are
+ # accurate.
+ bufsize=0,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ stdin=sys.stdin.fileno(),
+ env=env,
+ cwd=cwd)
+
+ stdout = io.TextIOWrapper(p.stdout, encoding='latin1')
+
+ while True:
+ data = stdout.readline().encode('latin1')
+
+ if data == b'':
+ break
+
+ print_line(prefix, data)
+
+ return p.wait()
+
+
+def get_posix_user_group(user, group):
+ import grp
+ import pwd
+
+ try:
+ user_record = pwd.getpwnam(user)
+ except KeyError:
+ print('could not find user %s; specify a valid user with --user' % user)
+ sys.exit(1)
+
+ try:
+ group_record = grp.getgrnam(group)
+ except KeyError:
+ print('could not find group %s; specify a valid group with --group' %
+ group)
+ sys.exit(1)
+
+ # Most tasks use worker:worker. We require they have a specific numeric ID
+ # because otherwise it is too easy for files written to caches to have
+ # mismatched numeric IDs, which results in permissions errors.
+ if user_record.pw_name == 'worker' and user_record.pw_uid != 1000:
+ print('user `worker` must have uid=1000; got %d' % user_record.pw_uid)
+ sys.exit(1)
+
+ if group_record.gr_name == 'worker' and group_record.gr_gid != 1000:
+ print('group `worker` must have gid=1000; got %d' % group_record.gr_gid)
+ sys.exit(1)
+
+ # Find all groups to which this user is a member.
+ gids = [g.gr_gid for g in grp.getgrall() if group in g.gr_mem]
+
+ return user_record, group_record, gids
+
+
+def write_audit_entry(path, msg):
+ now = datetime.datetime.utcnow().isoformat().encode('utf-8')
+ with open(path, 'ab') as fh:
+ fh.write(b'[%sZ %s] %s\n' % (
+ now, os.environb.get(b'TASK_ID', b'UNKNOWN'), msg))
+
+
+WANTED_DIR_MODE = stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR
+
+
+def set_dir_permissions(path, uid, gid):
+ st = os.lstat(path)
+
+ if st.st_uid != uid or st.st_gid != gid:
+ os.chown(path, uid, gid)
+
+ # Also make sure dirs are writable in case we need to delete
+ # them.
+ if st.st_mode & WANTED_DIR_MODE != WANTED_DIR_MODE:
+ os.chmod(path, st.st_mode | WANTED_DIR_MODE)
+
+
+def chown_recursive(path, user, group, uid, gid):
+ print_line(b'chown',
+ b'recursively changing ownership of %s to %s:%s\n' %
+ (path.encode('utf-8'), user.encode('utf-8'), group.encode(
+ 'utf-8')))
+
+ set_dir_permissions(path, uid, gid)
+
+ for root, dirs, files in os.walk(path):
+ for d in dirs:
+ set_dir_permissions(os.path.join(root, d), uid, gid)
+
+ for f in files:
+ # File may be a symlink that points to nowhere. In which case
+ # os.chown() would fail because it attempts to follow the
+ # symlink. We only care about directory entries, not what
+ # they point to. So setting the owner of the symlink should
+ # be sufficient.
+ os.lchown(os.path.join(root, f), uid, gid)
+
+
+def configure_cache_posix(cache, user, group,
+ untrusted_caches, running_as_root):
+ """Configure a cache path on POSIX platforms.
+
+ For each cache, we write out a special file denoting attributes and
+ capabilities of run-task and the task being executed. These attributes
+ are used by subsequent run-task invocations to validate that use of
+ the cache is acceptable.
+
+ We /could/ blow away the cache data on requirements mismatch.
+ While this would be convenient, this could result in "competing" tasks
+ effectively undoing the other's work. This would slow down task
+ execution in aggregate. Without monitoring for this, people may not notice
+ the problem and tasks would be slower than they could be. We follow the
+ principle of "fail fast" to ensure optimal task execution.
+
+ We also write an audit log of who used the caches. This log is printed
+ during failures to help aid debugging.
+ """
+
+ our_requirements = {
+ # Include a version string that we can bump whenever to trigger
+ # fresh caches. The actual value is not relevant and doesn't need
+ # to follow any explicit order. Since taskgraph bakes this file's
+ # hash into cache names, any change to this file/version is sufficient
+ # to force the use of a new cache.
+ b'version=1',
+ # Include the UID and GID the task will run as to ensure that tasks
+ # with different UID and GID don't share the same cache.
+ b'uid=%d' % user.pw_uid,
+ b'gid=%d' % group.gr_gid,
+ }
+
+ requires_path = os.path.join(cache, '.cacherequires')
+ audit_path = os.path.join(cache, '.cachelog')
+
+ # The cache is empty. Configure it.
+ if not os.listdir(cache):
+ print_line(b'cache', b'cache %s is empty; writing requirements: '
+ b'%s\n' % (
+ cache.encode('utf-8'), b' '.join(sorted(our_requirements))))
+
+ # We write a requirements file so future invocations know what the
+ # requirements are.
+ with open(requires_path, 'wb') as fh:
+ fh.write(b'\n'.join(sorted(our_requirements)))
+
+ # And make it read-only as a precaution against deletion.
+ os.chmod(requires_path, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
+
+ write_audit_entry(audit_path,
+ b'created; requirements: %s' %
+ b', '.join(sorted(our_requirements)))
+
+ set_dir_permissions(cache, user.pw_uid, group.gr_gid)
+ return
+
+ # The cache has content and we have a requirements file. Validate
+ # requirements alignment.
+ if os.path.exists(requires_path):
+ with open(requires_path, 'rb') as fh:
+ wanted_requirements = set(fh.read().splitlines())
+
+ print_line(b'cache', b'cache %s exists; requirements: %s\n' % (
+ cache.encode('utf-8'), b' '.join(sorted(wanted_requirements))))
+
+ missing = wanted_requirements - our_requirements
+
+ # Allow requirements mismatch for uid/gid if and only if caches
+ # are untrusted. This allows cache behavior on Try to be
+ # reasonable. Otherwise, random tasks could "poison" cache
+ # usability by introducing uid/gid mismatches. For untrusted
+ # environments like Try, this is a perfectly reasonable thing to
+ # allow.
+ if missing and untrusted_caches and running_as_root and \
+ all(s.startswith((b'uid=', b'gid=')) for s in missing):
+ print_line(b'cache',
+ b'cache %s uid/gid mismatch; this is acceptable '
+ b'because caches for this task are untrusted; '
+ b'changing ownership to facilitate cache use\n' %
+ cache.encode('utf-8'))
+ chown_recursive(cache, user.pw_name, group.gr_name, user.pw_uid,
+ group.gr_gid)
+
+ # And write out the updated reality.
+ with open(requires_path, 'wb') as fh:
+ fh.write(b'\n'.join(sorted(our_requirements)))
+
+ write_audit_entry(audit_path,
+ b'chown; requirements: %s' %
+ b', '.join(sorted(our_requirements)))
+
+ elif missing:
+ print('error: requirements for populated cache %s differ from '
+ 'this task' % cache)
+ print('cache requirements: %s' % ' '.join(sorted(
+ s.decode('utf-8') for s in wanted_requirements)))
+ print('our requirements: %s' % ' '.join(sorted(
+ s.decode('utf-8') for s in our_requirements)))
+ if any(s.startswith((b'uid=', b'gid=')) for s in missing):
+ print(CACHE_UID_GID_MISMATCH)
+
+ write_audit_entry(audit_path,
+ b'requirements mismatch; wanted: %s' %
+ b', '.join(sorted(our_requirements)))
+
+ print('')
+ print('audit log:')
+ with open(audit_path, 'r') as fh:
+ print(fh.read())
+
+ return True
+ else:
+ write_audit_entry(audit_path, b'used')
+
+ # We don't need to adjust permissions here because the cache is
+ # associated with a uid/gid and the first task should have set
+ # a proper owner/group.
+
+ return
+
+ # The cache has content and no requirements file. This shouldn't
+ # happen because run-task should be the first thing that touches a
+ # cache.
+ print('error: cache %s is not empty and is missing a '
+ '.cacherequires file; the cache names for this task are '
+ 'likely mis-configured or TASKCLUSTER_CACHES is not set '
+ 'properly' % cache)
+
+ write_audit_entry(audit_path, b'missing .cacherequires')
+ return True
+
+
+def configure_volume_posix(volume, user, group, running_as_root):
+ # The only time we should see files in the volume is if the Docker
+ # image build put files there.
+ #
+ # For the sake of simplicity, our policy is that volumes should be
+ # empty. This also has the advantage that an empty volume looks
+ # a lot like an empty cache. Tasks can rely on caches being
+ # swapped in and out on any volume without any noticeable change
+ # of behavior.
+ volume_files = os.listdir(volume)
+ if volume_files:
+ print(NON_EMPTY_VOLUME % volume)
+ print('entries in root directory: %s' %
+ ' '.join(sorted(volume_files)))
+ sys.exit(1)
+
+ # The volume is almost certainly owned by root:root. Chown it so it
+ # is writable.
+
+ if running_as_root:
+ print_line(b'volume', b'changing ownership of volume %s '
+ b'to %d:%d\n' % (volume.encode('utf-8'),
+ user.pw_uid,
+ group.gr_gid))
+ set_dir_permissions(volume, user.pw_uid, group.gr_gid)
+
+
+def vcs_checkout(source_repo, dest, store_path,
+ base_repo=None, revision=None, branch=None,
+ fetch_hgfingerprint=False, sparse_profile=None):
+ # Specify method to checkout a revision. This defaults to revisions as
+ # SHA-1 strings, but also supports symbolic revisions like `tip` via the
+ # branch flag.
+ if revision:
+ revision_flag = '--revision'
+ revision_value = revision
+ elif branch:
+ revision_flag = '--branch'
+ revision_value = branch
+ else:
+ print('revision is not specified for checkout')
+ sys.exit(1)
+
+ if IS_MACOSX or IS_POSIX:
+ hg_bin = 'hg'
+ elif IS_WINDOWS:
+ # This is where OCC installs it in the AMIs.
+ hg_bin = r'C:\Program Files\Mercurial\hg.exe'
+ if not os.path.exists(hg_bin):
+ print('could not find Mercurial executable: %s' % hg_bin)
+ sys.exit(1)
+
+ store_path = os.path.abspath(store_path)
+ args = [
+ hg_bin,
+ 'robustcheckout',
+ '--sharebase', store_path,
+ '--purge',
+ ]
+
+ # Obtain certificate fingerprints. Without this, the checkout will use the fingerprint
+ # on the system, which is managed some other way (such as puppet)
+ if fetch_hgfingerprint:
+ try:
+ print_line(b'vcs', b'fetching hg.mozilla.org fingerprint from %s\n' %
+ FINGERPRINT_URL.encode('utf-8'))
+ res = urllib.request.urlopen(FINGERPRINT_URL, timeout=10)
+ secret = res.read()
+ try:
+ secret = json.loads(secret.decode('utf-8'))
+ except ValueError:
+ print_line(b'vcs', b'invalid JSON in hg fingerprint secret')
+ sys.exit(1)
+ except (urllib.error.URLError, socket.timeout):
+            print_line(b'vcs', b'Unable to retrieve current hg.mozilla.org fingerprint '
+                       b'using the secret service, using fallback instead.')
+ # XXX This fingerprint will not be accurate if running on an old
+ # revision after the server fingerprint has changed.
+ secret = {'secret': FALLBACK_FINGERPRINT}
+
+ hgmo_fingerprint = secret['secret']['fingerprints']
+ args.extend([
+ '--config', 'hostsecurity.hg.mozilla.org:fingerprints=%s' % hgmo_fingerprint,
+ ])
+
+ if base_repo:
+ args.extend(['--upstream', base_repo])
+ if sparse_profile:
+ args.extend(['--sparseprofile', sparse_profile])
+
+ dest = os.path.abspath(dest)
+ args.extend([
+ revision_flag, revision_value,
+ source_repo, dest,
+ ])
+
+ res = run_and_prefix_output(b'vcs', args,
+ extra_env={'PYTHONUNBUFFERED': '1'})
+ if res:
+ sys.exit(res)
+
+ # Update the current revision hash and ensure that it is well formed.
+ revision = subprocess.check_output(
+ [hg_bin, 'log',
+ '--rev', '.',
+ '--template', '{node}'],
+ cwd=dest,
+ # Triggers text mode on Python 3.
+ universal_newlines=True)
+
+ assert re.match('^[a-f0-9]{40}$', revision)
+
+ msg = ("TinderboxPrint:<a href={source_repo}/rev/{revision} "
+ "title='Built from {repo_name} revision {revision}'>"
+ "{revision}</a>\n".format(revision=revision,
+ source_repo=source_repo,
+ repo_name=source_repo.split('/')[-1]))
+
+ print_line(b'vcs', msg.encode('utf-8'))
+
+ return revision
+
+
+def fetch_artifacts():
+ print_line(b'fetches', b'fetching artifacts\n')
+
+ fetch_content = shutil.which('fetch-content')
+ if not fetch_content and os.environ.get('GECKO_PATH'):
+ fetch_content = os.path.join(os.environ['GECKO_PATH'], 'taskcluster',
+ 'scripts', 'misc', 'fetch-content')
+
+ if not fetch_content or not os.path.isfile(fetch_content):
+ fetch_content = os.path.join(os.path.dirname(__file__),
+ 'fetch-content')
+
+ if not os.path.isfile(fetch_content):
+ print(FETCH_CONTENT_NOT_FOUND)
+ sys.exit(1)
+
+ cmd = [sys.executable, '-u', fetch_content, 'task-artifacts']
+ res = run_and_prefix_output(b'fetches', cmd)
+ if res:
+ sys.exit(res)
+
+ print_line(b'fetches', b'finished fetching artifacts\n')
+
+
+def add_vcs_arguments(parser, project, name):
+ """Adds arguments to ArgumentParser to control VCS options for a project."""
+
+ parser.add_argument('--%s-checkout' % project,
+ help='Directory where %s checkout should be created' %
+ name)
+ parser.add_argument('--%s-sparse-profile' % project,
+ help='Path to sparse profile for %s checkout' % name)
+
+
+def resolve_checkout_url(base_repo, head_repo):
+ """Resolve the Mercurial URL to perform a checkout against, either the
+ public hg.mozilla.org service or a CI-only regional mirror.
+
+ The config will be of the form:
+ {
+ "aws/us-west-2": { # key built from `TASKCLUSTER_WORKER_LOCATION` variable
+ "rate": 0.5,
+ "domain": "us-west-2.hgmointernal.net"
+ },
+ "google/us-central1": {...}
+ }
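+
+    For illustration (assumed values), a TASKCLUSTER_WORKER_LOCATION of
+    {"cloud": "aws", "region": "us-west-2"} produces the lookup key
+    "aws/us-west-2" for the config above.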
+ """
+ worker_location = os.getenv('TASKCLUSTER_WORKER_LOCATION')
+ if not worker_location:
+ print_line(b'vcs', b'TASKCLUSTER_WORKER_LOCATION environment variable not set; '
+ b'using public hg.mozilla.org service\n')
+ return base_repo, head_repo
+
+ try:
+ worker_location = json.loads(worker_location)
+ except json.JSONDecodeError:
+ print_line(b'vcs', b'Could not decode TASKCLUSTER_WORKER_LOCATION environment variable '
+ b'as JSON. Content: %s\n' % worker_location.encode('utf-8'))
+ print_line(b'vcs', b'using public hg.mozilla.org service\n')
+ return base_repo, head_repo
+
+ if 'cloud' not in worker_location or 'region' not in worker_location:
+ print_line(b'vcs', b'TASKCLUSTER_WORKER_LOCATION missing required keys; '
+ b'using public hg.mozilla.org service\n')
+ return base_repo, head_repo
+
+ config_key = '%(cloud)s/%(region)s' % worker_location
+
+ try:
+ print_line(b'vcs', b'fetching hgmointernal config from %s\n' %
+ HGMOINTERNAL_CONFIG_URL.encode('utf-8'))
+
+ # Get the hgmointernal config Taskcluster secret
+ res = urllib.request.urlopen(HGMOINTERNAL_CONFIG_URL, timeout=10)
+ hgmointernal_config = json.loads(res.read().decode('utf-8'))['secret']
+
+ # Use public hg service if region not yet supported
+ if config_key not in hgmointernal_config:
+ print_line(b'vcs', b'region %s not yet supported; using public '
+ b'hg.mozilla.org service\n' % config_key.encode('utf-8'))
+
+ return base_repo, head_repo
+
+ # Only send a percentage of traffic to the internal mirror
+ rate = float(hgmointernal_config[config_key]['rate'])
+
+ if random.random() > rate:
+ print_line(b'vcs', b'hgmointernal rate miss; using '
+ b'public hg.mozilla.org service\n')
+ return base_repo, head_repo
+
+ print_line(b'vcs', b'hgmointernal rate hit; cloning from '
+ b'private hgweb mirror\n')
+
+ mirror_domain = hgmointernal_config[config_key]['domain']
+
+ if base_repo and base_repo.startswith('https://hg.mozilla.org'):
+ base_repo = base_repo.replace('hg.mozilla.org', mirror_domain, 1)
+
+ if head_repo and head_repo.startswith('https://hg.mozilla.org'):
+ head_repo = head_repo.replace('hg.mozilla.org', mirror_domain, 1)
+
+ return base_repo, head_repo
+
+ except (KeyError, ValueError):
+        print_line(b'vcs', b'invalid or incomplete hgmointernal config; '
+                   b'falling back to public hg.mozilla.org service\n')
+
+ except (urllib.error.URLError, socket.timeout):
+ print_line(b'vcs', b'Unable to retrieve hgmointernal config using '
+ b'the secret service; falling back to public hg.mozilla.org '
+ b'service\n')
+
+ return base_repo, head_repo
+
+
+def collect_vcs_options(args, project):
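+    """Gather VCS options for `project` from CLI arguments and environment.
+
+    For a project named 'gecko' (illustrative), this reads the
+    GECKO_BASE_REPOSITORY, GECKO_HEAD_REPOSITORY, GECKO_HEAD_REV and
+    GECKO_HEAD_REF environment variables.
+    """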
+ checkout = getattr(args, '%s_checkout' % project)
+ sparse_profile = getattr(args, '%s_sparse_profile' % project)
+
+ env_prefix = project.upper()
+
+ base_repo = os.environ.get('%s_BASE_REPOSITORY' % env_prefix)
+ head_repo = os.environ.get('%s_HEAD_REPOSITORY' % env_prefix)
+ revision = os.environ.get('%s_HEAD_REV' % env_prefix)
+ branch = os.environ.get('%s_HEAD_REF' % env_prefix)
+
+ store_path = os.environ.get('HG_STORE_PATH')
+
+ # Expand ~ in some paths.
+ if checkout:
+ checkout = os.path.expanduser(checkout)
+ if store_path:
+ store_path = os.path.expanduser(store_path)
+
+ # Some callers set the base repository to mozilla-central for historical
+ # reasons. Switch to mozilla-unified because robustcheckout works best
+ # with it.
+ if base_repo == 'https://hg.mozilla.org/mozilla-central':
+ base_repo = 'https://hg.mozilla.org/mozilla-unified'
+
+ # No need to check the hgmointernal config if we aren't performing
+ # a checkout.
+ if checkout:
+ base_repo, head_repo = resolve_checkout_url(base_repo, head_repo)
+
+ return {
+ 'store-path': store_path,
+ 'project': project,
+ 'env-prefix': env_prefix,
+ 'checkout': checkout,
+ 'sparse-profile': sparse_profile,
+ 'base-repo': base_repo,
+ 'head-repo': head_repo,
+ 'revision': revision,
+ 'branch': branch,
+ }
+
+
+def vcs_checkout_from_args(args, project):
+ options = collect_vcs_options(args, project)
+
+ if not options['checkout']:
+ if options['branch'] and not options['revision']:
+ print('task should be defined in terms of non-symbolic revision')
+ sys.exit(1)
+ return
+
+ os.environ['%s_HEAD_REV' % options['env-prefix']] = vcs_checkout(
+ options['head-repo'],
+ options['checkout'],
+ options['store-path'],
+ base_repo=options['base-repo'],
+ revision=options['revision'],
+ fetch_hgfingerprint=args.fetch_hgfingerprint,
+ branch=options['branch'],
+ sparse_profile=options['sparse-profile'])
+
+
+def maybe_run_resource_monitoring():
+ """Run the resource monitor if available.
+
+ Discussion in https://github.com/taskcluster/taskcluster-rfcs/pull/160
+ and https://bugzil.la/1648051
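+
+    Runs only when both MOZ_FETCHES (the monitor binary is among the
+    fetches) and RESOURCE_MONITOR_OUTPUT (the output destination) are set.
+    Returns the monitor subprocess, or None.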
+ """
+ if 'MOZ_FETCHES' not in os.environ:
+ return
+ if 'RESOURCE_MONITOR_OUTPUT' not in os.environ:
+ return
+
+ prefix = b'resource_monitor'
+
+ executable = '{}/resource-monitor/resource-monitor{}'.format(
+ os.environ.get('MOZ_FETCHES_DIR'), '.exe' if IS_WINDOWS else '')
+
+ if not os.path.exists(executable) or not os.access(executable, os.X_OK):
+ print_line(prefix, b"%s not executable\n" % executable.encode('utf-8'))
+ return
+ args = [
+ executable,
+ '-process',
+ str(os.getpid()),
+ '-output',
+ os.environ["RESOURCE_MONITOR_OUTPUT"],
+ ]
+ print_line(prefix, b"Resource monitor starting: %s\n" % str(args).encode('utf-8'))
+ # Avoid environment variables the payload doesn't need.
+ del os.environ['RESOURCE_MONITOR_OUTPUT']
+
+ # Without CREATE_NEW_PROCESS_GROUP Windows signals will attempt to kill run-task, too.
+ process = subprocess.Popen(args,
+ bufsize=0,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ creationflags=subprocess.CREATE_NEW_PROCESS_GROUP if IS_WINDOWS else 0,
+ cwd=os.getcwd())
+
+ def capture_output():
+ fh = io.TextIOWrapper(process.stdout, encoding='latin1')
+ while True:
+ data = fh.readline().encode('latin1')
+ if data == b'':
+ break
+ print_line(prefix, data)
+
+    monitor_thread = Thread(target=capture_output)
+    monitor_thread.start()
+ return process
+
+
+def main(args):
+ print_line(b'setup', b'run-task started in %s\n' % os.getcwd().encode('utf-8'))
+ running_as_root = IS_POSIX and os.getuid() == 0
+
+ # Set a reasonable limit to the number of open files.
+ # Running under docker inherits the system defaults, which are not subject
+ # to the "standard" limits set by pam_limits.so, and while they work well
+ # for servers that may receive a lot of connections, they cause performance
+ # problems for things that close file descriptors before forking (for good
+ # reasons), like python's `subprocess.Popen(..., close_fds=True)` (and while
+ # the default was close_fds=False in python2, that changed in python3).
+ # In some cases, Firefox does the same thing when spawning subprocesses.
+ # Processes spawned by this one will inherit the limit set here.
+ try:
+ import resource
+ # Keep the hard limit the same, though, allowing processes to change their
+ # soft limit if they need to (Firefox does, for instance).
+ (soft, hard) = resource.getrlimit(resource.RLIMIT_NOFILE)
+ limit = os.environ.get('MOZ_LIMIT_NOFILE')
+ if limit:
+ limit = int(limit)
+ else:
+ # If no explicit limit is given, use 1024 if it's less than the current
+ # soft limit. For instance, the default on macOS is 256, so we'd pick
+ # that rather than 1024.
+ limit = min(soft, 1024)
+ # Now apply the limit, if it's different from the original one.
+ if limit != soft:
+ resource.setrlimit(resource.RLIMIT_NOFILE, (limit, hard))
+ except ImportError:
+ # The resource module is UNIX only.
+ pass
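+
+    # For example (illustrative): MOZ_LIMIT_NOFILE=4096 in the task's
+    # environment raises the soft limit to 4096, subject to the inherited
+    # hard limit.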
+
+    # Arguments up to '--' are ours; arguments after it are for the main
+    # task to be executed.
+ try:
+ i = args.index('--')
+ our_args = args[0:i]
+ task_args = args[i + 1:]
+ except ValueError:
+ our_args = args
+ task_args = []
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--user', default='worker', help='user to run as')
+ parser.add_argument('--group', default='worker', help='group to run as')
+ parser.add_argument('--task-cwd', help='directory to run the provided command in')
+
+ add_vcs_arguments(parser, 'gecko', 'Firefox')
+ add_vcs_arguments(parser, 'comm', 'Comm')
+
+    parser.add_argument('--fetch-hgfingerprint', action='store_true',
+                        help='Fetch the latest hgfingerprint from the secrets '
+                             'store, using the taskclusterProxy')
+
+ args = parser.parse_args(our_args)
+
+ uid = gid = gids = None
+ if IS_POSIX and running_as_root:
+ user, group, gids = get_posix_user_group(args.user, args.group)
+ uid = user.pw_uid
+ gid = group.gr_gid
+
+ if running_as_root and os.path.exists("/dev/kvm"):
+ # Ensure kvm permissions for worker, required for Android x86
+ st = os.stat("/dev/kvm")
+ os.chmod("/dev/kvm", st.st_mode | 0o666)
+
+ # Validate caches.
+ #
+ # Taskgraph should pass in a list of paths that are caches via an
+ # environment variable (which we don't want to pass down to child
+ # processes).
+
+ if 'TASKCLUSTER_CACHES' in os.environ:
+ caches = os.environ['TASKCLUSTER_CACHES'].split(';')
+ del os.environ['TASKCLUSTER_CACHES']
+ else:
+ caches = []
+
+ if 'TASKCLUSTER_UNTRUSTED_CACHES' in os.environ:
+ untrusted_caches = True
+ del os.environ['TASKCLUSTER_UNTRUSTED_CACHES']
+ else:
+ untrusted_caches = False
+
+ for cache in caches:
+ if not os.path.isdir(cache):
+ print('error: cache %s is not a directory; this should never '
+ 'happen' % cache)
+ return 1
+
+ if running_as_root:
+ purge = configure_cache_posix(cache, user, group, untrusted_caches,
+ running_as_root)
+
+ if purge:
+ return EXIT_PURGE_CACHE
+
+ if 'TASKCLUSTER_VOLUMES' in os.environ:
+ volumes = os.environ['TASKCLUSTER_VOLUMES'].split(';')
+ del os.environ['TASKCLUSTER_VOLUMES']
+ else:
+ volumes = []
+
+ if volumes and not IS_POSIX:
+ print('assertion failed: volumes not expected on Windows')
+ return 1
+
+ # Sanitize volumes.
+ for volume in volumes:
+ # If a volume is a cache, it was dealt with above.
+ if volume in caches:
+ print_line(b'volume', b'volume %s is a cache\n' %
+ volume.encode('utf-8'))
+ continue
+
+ if running_as_root:
+ configure_volume_posix(volume, user, group, running_as_root)
+
+ all_caches_and_volumes = set(map(os.path.normpath, caches))
+ all_caches_and_volumes |= set(map(os.path.normpath, volumes))
+
+ def path_in_cache_or_volume(path):
+ path = os.path.normpath(path)
+
+ while path:
+ if path in all_caches_and_volumes:
+ return True
+
+ path, child = os.path.split(path)
+ if not child:
+ break
+
+ return False
+
+ def prepare_checkout_dir(checkout):
+ if not checkout:
+ return
+
+ # The checkout path becomes the working directory. Since there are
+ # special cache files in the cache's root directory and working
+ # directory purging could blow them away, disallow this scenario.
+ if os.path.exists(os.path.join(checkout, '.cacherequires')):
+ print('error: cannot perform vcs checkout into cache root: %s' %
+ checkout)
+ sys.exit(1)
+
+ # TODO given the performance implications, consider making this a fatal
+ # error.
+ if not path_in_cache_or_volume(checkout):
+ print_line(b'vcs', b'WARNING: vcs checkout path (%s) not in cache '
+ b'or volume; performance will likely suffer\n' %
+ checkout.encode('utf-8'))
+
+ # Ensure the directory for the source checkout exists.
+ try:
+ os.makedirs(os.path.dirname(checkout))
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ # And that it is owned by the appropriate user/group.
+ if running_as_root:
+ os.chown(os.path.dirname(checkout), uid, gid)
+
+ def prepare_hg_store_path():
+ # And ensure the shared store path exists and has proper permissions.
+ if 'HG_STORE_PATH' not in os.environ:
+ print('error: HG_STORE_PATH environment variable not set')
+ sys.exit(1)
+
+ store_path = os.environ['HG_STORE_PATH']
+
+ if not path_in_cache_or_volume(store_path):
+ print_line(b'vcs', b'WARNING: HG_STORE_PATH (%s) not in cache or '
+ b'volume; performance will likely suffer\n' %
+ store_path.encode('utf-8'))
+
+ try:
+ os.makedirs(store_path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ if running_as_root:
+ os.chown(store_path, uid, gid)
+
+ prepare_checkout_dir(args.gecko_checkout)
+ if args.gecko_checkout or args.comm_checkout:
+ prepare_hg_store_path()
+
+ if IS_POSIX and running_as_root:
+ # Drop permissions to requested user.
+ # This code is modeled after what `sudo` was observed to do in a Docker
+ # container. We do not bother calling setrlimit() because containers have
+ # their own limits.
+ print_line(b'setup', b'running as %s:%s\n' % (
+ args.user.encode('utf-8'), args.group.encode('utf-8')))
+
+ os.setgroups(gids)
+ os.umask(0o22)
+ os.setresgid(gid, gid, gid)
+ os.setresuid(uid, uid, uid)
+
+ vcs_checkout_from_args(args, 'gecko')
+ vcs_checkout_from_args(args, 'comm')
+
+ resource_process = None
+
+ try:
+ for k in ('GECKO_PATH', 'MOZ_FETCHES_DIR', 'UPLOAD_DIR', 'MOZ_PYTHON_HOME'):
+ if k in os.environ:
+ # Normalize paths to use forward slashes. Some shell scripts
+ # tolerate that better on Windows.
+ os.environ[k] = os.path.abspath(os.environ[k]).replace(os.sep, '/')
+ print_line(b'setup', b'%s is %s\n' % (
+ k.encode('utf-8'),
+ os.environ[k].encode('utf-8')))
+
+ if 'MOZ_FETCHES' in os.environ:
+ fetch_artifacts()
+
+        # If Python is a fetch dependency, add it to the PATH and set the
+        # Mozilla-specific MOZPYTHONHOME to relocate binaries.
+        if 'MOZ_PYTHON_HOME' in os.environ:
+            print_line(b'setup',
+                       b'Setting up local python environment\n')
+ prev = [os.environ['PATH']] if 'PATH' in os.environ else []
+
+ moz_python_home = os.environ['MOZ_PYTHON_HOME']
+ if IS_WINDOWS:
+ ext = '.exe'
+ moz_python_bindir = moz_python_home
+ else:
+ ext = ''
+ moz_python_bindir = moz_python_home + '/bin'
+
+            # Just a sanity check.
+ candidate = os.path.join(moz_python_bindir, f'python3{ext}')
+ if not os.path.exists(candidate):
+ raise RuntimeError("Inconsistent Python installation: "
+ "archive found, but no python3 binary "
+ "detected")
+
+ new = os.environ['PATH'] = os.pathsep.join([moz_python_bindir]
+ + prev)
+
+ # Relocate the python binary. Standard way uses PYTHONHOME, but
+ # this conflicts with system python (e.g. used by hg) so we
+ # maintain a small patch to use MOZPYTHONHOME instead.
+ os.environ['MOZPYTHONHOME'] = moz_python_home
+
+ print_line(b'setup',
+ b'updated PATH with python artifact: '
+ + new.encode() + b'\n')
+
+ resource_process = maybe_run_resource_monitoring()
+
+ return run_and_prefix_output(b'task', task_args, cwd=args.task_cwd)
+ finally:
+ if resource_process:
+ print_line(b'resource_monitor', b'terminating\n')
+ if IS_WINDOWS:
+ # .terminate() on Windows is not a graceful shutdown, due to
+ # differences in signals. CTRL_BREAK_EVENT will work provided
+ # the subprocess is in a different process group, so this script
+ # isn't also killed.
+ os.kill(resource_process.pid, signal.CTRL_BREAK_EVENT)
+ else:
+ resource_process.terminate()
+ resource_process.wait()
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/taskcluster/scripts/tester/run-wizard b/taskcluster/scripts/tester/run-wizard
new file mode 100755
index 0000000000..4ec5a5d337
--- /dev/null
+++ b/taskcluster/scripts/tester/run-wizard
@@ -0,0 +1,176 @@
+#!/usr/bin/env python3
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import datetime
+import os
+import subprocess
+import sys
+import time
+from distutils.spawn import find_executable
+from textwrap import wrap
+
+here = os.path.dirname(os.path.abspath(__file__))
+MOZHARNESS_WORKDIR = os.path.expanduser(os.path.join('~', 'workspace', 'build'))
+
+MACH_SETUP_FINISHED = """
+Mozharness has finished downloading the build and tests to:
+{}
+
+A limited mach environment has also been set up and added to the $PATH, but
+it may be missing the command you need. To see a list of commands, run:
+ $ mach help
+""".lstrip().format(MOZHARNESS_WORKDIR)
+
+MACH_SETUP_FAILED = """
+Could not set up mach environment, no mach binary detected.
+""".lstrip()
+
+
+def call(cmd, **kwargs):
+ print(" ".join(cmd))
+ return subprocess.call(cmd, **kwargs)
+
+
+def wait_for_run_mozharness(timeout=60):
+ starttime = datetime.datetime.now()
+ while datetime.datetime.now() - starttime < datetime.timedelta(seconds=timeout):
+ if os.path.isfile(os.path.join(here, 'run-mozharness')):
+ break
+ time.sleep(0.2)
+ else:
+ print("Timed out after %d seconds waiting for the 'run-mozharness' binary" % timeout)
+ return 1
+
+
+def setup_mach_environment():
+ mach_src = os.path.join(MOZHARNESS_WORKDIR, 'tests', 'mach')
+ if not os.path.isfile(mach_src):
+ return 1
+
+ mach_dest = os.path.expanduser(os.path.join('~', 'bin', 'mach'))
+ if os.path.exists(mach_dest):
+ os.remove(mach_dest)
+ os.symlink(mach_src, mach_dest)
+ return 0
+
+
+def run_mozharness(*args):
+ wait_for_run_mozharness()
+ try:
+ return call(['run-mozharness'] + list(args))
+ finally:
+ setup_mach_environment()
+
+
+def setup():
+ """Run the mozharness script without the 'run-tests' action.
+
+ This will do all the necessary setup steps like creating a virtualenv and
+ downloading the tests and firefox binary. But it stops before running the
+ tests.
+ """
+ status = run_mozharness('--no-run-tests')
+
+ if find_executable('mach'):
+ print(MACH_SETUP_FINISHED)
+ else:
+ print(MACH_SETUP_FAILED)
+
+ return status
+
+
+def clone():
+ """Clone the correct gecko repository and update to the proper revision."""
+ base_repo = os.environ['GECKO_HEAD_REPOSITORY']
+ dest = os.path.expanduser(os.path.join('~', 'gecko'))
+
+    # Specify the method to check out a revision. This defaults to revisions
+    # as SHA-1 strings, but also supports symbolic revisions like `tip` via
+    # the branch flag.
+ if os.environ.get('GECKO_HEAD_REV'):
+        revision_flag = '--revision'
+ revision = os.environ['GECKO_HEAD_REV']
+ elif os.environ.get('GECKO_HEAD_REF'):
+        revision_flag = '--branch'
+ revision = os.environ['GECKO_HEAD_REF']
+ else:
+ print('revision is not specified for checkout')
+ return 1
+
+ # TODO Bug 1301382 - pin hg.mozilla.org fingerprint.
+    # Use str (not bytes) arguments so call() can join them for logging.
+    call([
+        '/usr/bin/hg', 'robustcheckout',
+        '--sharebase', os.environ['HG_STORE_PATH'],
+        '--purge',
+        '--upstream', 'https://hg.mozilla.org/mozilla-unified',
+        revision_flag, revision,
+        base_repo, dest
+    ])
+ print("Finished cloning to {} at revision {}.".format(dest, revision))
+
+
+def exit():
+ pass
+
+
+OPTIONS = [
+ ('Resume task', run_mozharness,
+ "Resume the original task without modification. This can be useful for "
+ "passively monitoring it from another shell."),
+ ('Setup task', setup,
+ "Setup the task (download the application and tests) but don't run the "
+ "tests just yet. The tests can be run with a custom configuration later. "
+ "This will provide a mach environment (experimental)."),
+ ('Clone gecko', clone,
+ "Perform a clone of gecko using the task's repo and update it to the "
+ "task's revision."),
+ ('Exit', exit, "Exit this wizard and return to the shell.")
+]
+
+
+def _fmt_options():
+ max_line_len = 60
+ max_name_len = max(len(o[0]) for o in OPTIONS)
+
+ # TODO Pad will be off if there are more than 9 options.
+ pad = ' ' * (max_name_len+6)
+
+ msg = []
+ for i, (name, _, desc) in enumerate(OPTIONS):
+ desc = wrap(desc, width=max_line_len)
+ desc = [desc[0]] + [pad + l for l in desc[1:]]
+
+ optstr = '{}) {} - {}\n'.format(
+ i+1, name.ljust(max_name_len), '\n'.join(desc))
+ msg.append(optstr)
+ msg.append("Select one of the above options: ")
+ return '\n'.join(msg)
+
+
+def wizard():
+ print("This wizard can help you get started with some common debugging "
+ "workflows.\nWhat would you like to do?\n")
+ print(_fmt_options(), end="")
+ choice = None
+ while True:
+        choice = input()
+ try:
+ choice = int(choice)-1
+ if 0 <= choice < len(OPTIONS):
+ break
+ except ValueError:
+ pass
+
+ print("Must provide an integer from 1-{}:".format(len(OPTIONS)))
+
+ func = OPTIONS[choice][1]
+ ret = func()
+
+ print("Use the 'run-wizard' command to start this wizard again.")
+ return ret
+
+
+if __name__ == '__main__':
+ sys.exit(wizard())
diff --git a/taskcluster/scripts/tester/test-linux.sh b/taskcluster/scripts/tester/test-linux.sh
new file mode 100755
index 0000000000..1d87c1f2b8
--- /dev/null
+++ b/taskcluster/scripts/tester/test-linux.sh
@@ -0,0 +1,287 @@
+#! /bin/bash -xe
+
+set -x -e
+
+echo "running as" $(id)
+
+# Detect distribution
+. /etc/os-release
+if [ "${ID}" == "ubuntu" ]; then
+ DISTRIBUTION="Ubuntu"
+elif [ "${ID}" == "debian" ]; then
+ DISTRIBUTION="Debian"
+else
+ DISTRIBUTION="Unknown"
+fi
+
+# Detect release version if supported
+FILE="/etc/lsb-release"
+if [ -e $FILE ] ; then
+ . /etc/lsb-release
+ RELEASE="${DISTRIB_RELEASE}"
+else
+ RELEASE="unknown"
+fi
+
+####
+# Taskcluster friendly wrapper for performing fx desktop tests via mozharness.
+####
+
+# Inputs, with defaults
+
+: GECKO_PATH ${GECKO_PATH}
+: MOZHARNESS_PATH ${MOZHARNESS_PATH}
+: MOZHARNESS_URL ${MOZHARNESS_URL}
+: MOZHARNESS_SCRIPT ${MOZHARNESS_SCRIPT}
+: MOZHARNESS_CONFIG ${MOZHARNESS_CONFIG}
+: MOZHARNESS_OPTIONS ${MOZHARNESS_OPTIONS}
+: MOZ_ENABLE_WAYLAND ${MOZ_ENABLE_WAYLAND}
+: NEED_XVFB ${NEED_XVFB:=true}
+: NEED_WINDOW_MANAGER ${NEED_WINDOW_MANAGER:=false}
+: NEED_PULSEAUDIO ${NEED_PULSEAUDIO:=false}
+: NEED_COMPIZ ${NEED_COMPIZ:=false}
+: START_VNC ${START_VNC:=false}
+: TASKCLUSTER_INTERACTIVE ${TASKCLUSTER_INTERACTIVE:=false}
+: mozharness args "${@}"
+: WORKING_DIR ${WORKING_DIR:=$(pwd)}
+: WORKSPACE ${WORKSPACE:=${WORKING_DIR%/}/workspace}
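+
+# Illustrative invocation (values assumed, not canonical):
+#   MOZHARNESS_URL=https://example.com/mozharness.zip \
+#   MOZHARNESS_SCRIPT=desktop_unittest.py \
+#   MOZHARNESS_CONFIG=unittests/linux_unittest.py \
+#   ./test-linux.sh --total-chunks=4 --this-chunk=1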
+
+set -v
+mkdir -p "$WORKSPACE"
+cd "$WORKSPACE"
+
+fail() {
+ echo # make sure error message is on a new line
+ echo "[test-linux.sh:error]" "${@}"
+ exit 1
+}
+
+# start pulseaudio
+maybe_start_pulse() {
+ if $NEED_PULSEAUDIO; then
+ # call pulseaudio for Ubuntu only
+ if [ $DISTRIBUTION == "Ubuntu" ]; then
+ pulseaudio --daemonize --log-level=4 --log-time=1 --log-target=stderr --start --fail -vvvvv --exit-idle-time=-1 --cleanup-shm --dump-conf
+ fi
+ fi
+}
+
+# test required parameters are supplied
+if [ -z "${MOZHARNESS_PATH}" -a -z "${MOZHARNESS_URL}" ]; then
+ fail "MOZHARNESS_PATH or MOZHARNESS_URL must be defined";
+fi
+
+if [[ -z ${MOZHARNESS_SCRIPT} ]]; then fail "MOZHARNESS_SCRIPT is not set"; fi
+if [[ -z ${MOZHARNESS_CONFIG} ]]; then fail "MOZHARNESS_CONFIG is not set"; fi
+
+if [ -n "$MOZ_ENABLE_WAYLAND" ]; then
+ NEED_XVFB=true
+ NEED_WINDOW_MANAGER=true
+fi
+
+# make sure artifact directories exist
+mkdir -p "$WORKSPACE/logs"
+mkdir -p "$WORKING_DIR/artifacts/public"
+mkdir -p "$WORKSPACE/build/blobber_upload_dir"
+
+cleanup_mutter() {
+ local mutter_pids=`ps aux | grep 'mutter --wayland' | grep -v grep | awk '{print $2}'`
+ if [ "$mutter_pids" != "" ]; then
+ echo "Killing the following Mutter processes: $mutter_pids"
+ sudo kill $mutter_pids
+ else
+ echo "No Mutter processes to kill"
+ fi
+}
+
+cleanup() {
+ local rv=$?
+ if [[ -s $HOME/.xsession-errors ]]; then
+ # To share X issues
+ cp "$HOME/.xsession-errors" "$WORKING_DIR/artifacts/public/xsession-errors.log"
+ fi
+  if [ -n "$MOZ_ENABLE_WAYLAND" ]; then
+ cleanup_mutter
+ fi
+ if $NEED_XVFB; then
+ cleanup_xvfb
+ fi
+ exit $rv
+}
+trap cleanup EXIT INT
+
+# Download mozharness with exponential backoff.
+# curl already applies exponential backoff, but apparently not for all
+# failure cases, as we keep getting failed downloads with a 404 code.
+download_mozharness() {
+ local max_attempts=10
+ local timeout=1
+ local attempt=0
+
+ echo "Downloading mozharness"
+
+    while [[ $attempt -lt $max_attempts ]]; do
+        if curl --fail -o mozharness.zip --retry 10 -L "$MOZHARNESS_URL"; then
+ rm -rf mozharness
+ if unzip -q mozharness.zip -d mozharness; then
+ return 0
+ fi
+ echo "error unzipping mozharness.zip" >&2
+ else
+ echo "failed to download mozharness zip" >&2
+ fi
+ echo "Download failed, retrying in $timeout seconds..." >&2
+ sleep $timeout
+ timeout=$((timeout*2))
+ attempt=$((attempt+1))
+ done
+
+ fail "Failed to download and unzip mozharness"
+}
+
+# Download mozharness if we're told to.
+if [ -n "${MOZHARNESS_URL}" ]; then
+ download_mozharness
+ rm mozharness.zip
+
+ if ! [ -d mozharness ]; then
+ fail "mozharness zip did not contain mozharness/"
+ fi
+
+    MOZHARNESS_PATH=$(pwd)/mozharness
+fi
+
+# Run Xvfb in the background, if necessary.
+if $NEED_XVFB; then
+ # note that this file is not available when run under native-worker
+ . $HOME/scripts/xvfb.sh
+ start_xvfb '1600x1200x24' 0
+fi
+
+if $START_VNC; then
+ x11vnc > "$WORKING_DIR/artifacts/public/x11vnc.log" 2>&1 &
+fi
+
+if $NEED_WINDOW_MANAGER; then
+ # This is read by xsession to select the window manager
+ if [ $DISTRIBUTION == "Ubuntu" ] && [ $RELEASE == "16.04" ]; then
+ echo DESKTOP_SESSION=ubuntu > $HOME/.xsessionrc
+ elif [ $DISTRIBUTION == "Ubuntu" ] && [ $RELEASE == "18.04" ]; then
+ echo export DESKTOP_SESSION=gnome > $HOME/.xsessionrc
+    echo export XDG_CURRENT_DESKTOP=GNOME >> $HOME/.xsessionrc
+    if [ -n "$MOZ_ENABLE_WAYLAND" ]; then
+ echo export XDG_SESSION_TYPE=wayland >> $HOME/.xsessionrc
+ else
+ echo export XDG_SESSION_TYPE=x11 >> $HOME/.xsessionrc
+ fi
+ else
+ :
+ fi
+
+ # DISPLAY has already been set above
+ # XXX: it would be ideal to add a semaphore logic to make sure that the
+ # window manager is ready
+ /etc/X11/Xsession 2>&1 &
+
+ # Turn off the screen saver and screen locking
+ gsettings set org.gnome.desktop.screensaver idle-activation-enabled false
+ gsettings set org.gnome.desktop.screensaver lock-enabled false
+ gsettings set org.gnome.desktop.screensaver lock-delay 3600
+
+ # Disable the screen saver
+ xset s off s reset
+
+ # This starts the gnome-keyring-daemon with an unlocked login keyring. libsecret uses this to
+ # store secrets. Firefox uses libsecret to store a key that protects sensitive information like
+ # credit card numbers.
+ if test -z "$DBUS_SESSION_BUS_ADDRESS" ; then
+ # if not found, launch a new one
+ eval `dbus-launch --sh-syntax`
+ fi
+ eval `echo '' | /usr/bin/gnome-keyring-daemon -r -d --unlock --components=secrets`
+
+  # Run mutter as a nested Wayland compositor to provide a Wayland
+  # environment on top of Xvfb.
+  if [ -n "$MOZ_ENABLE_WAYLAND" ]; then
+ env | grep "DISPLAY"
+ export XDG_RUNTIME_DIR=$WORKING_DIR
+ mutter --display=:0 --wayland --nested &
+ export WAYLAND_DISPLAY=wayland-0
+ retry_count=0
+ max_retries=5
+ until [ $retry_count -gt $max_retries ]; do
+ if [ -S "$XDG_RUNTIME_DIR/$WAYLAND_DISPLAY" ]; then
+ retry_count=$(($max_retries + 1))
+ else
+ retry_count=$(($retry_count + 1))
+ echo "Waiting for Mutter, retry: $retry_count"
+ sleep 2
+ fi
+ done
+ fi
+fi
+
+if [[ $NEED_COMPIZ == true ]] && [[ $RELEASE == 16.04 ]]; then
+ compiz 2>&1 &
+elif [[ $NEED_COMPIZ == true ]] && [[ $RELEASE == 18.04 ]]; then
+ compiz --replace 2>&1 &
+fi
+
+# Bug 1607713 - set cursor position to 0,0 to avoid odd libx11 interaction
+if $NEED_WINDOW_MANAGER && [ "$DISPLAY" == ':0' ]; then
+ xwit -root -warp 0 0
+fi
+
+maybe_start_pulse
+
+# For telemetry purposes, the build process wants information about the
+# source it is running
+export MOZ_SOURCE_REPO="${GECKO_HEAD_REPOSITORY}"
+export MOZ_SOURCE_CHANGESET="${GECKO_HEAD_REV}"
+
+# support multiple, space delimited, config files
+config_cmds=""
+for cfg in $MOZHARNESS_CONFIG; do
+ config_cmds="${config_cmds} --config-file ${MOZHARNESS_PATH}/configs/${cfg}"
+done
+
+if [ -n "$MOZHARNESS_OPTIONS" ]; then
+ options=""
+ for option in $MOZHARNESS_OPTIONS; do
+ options="$options --$option"
+ done
+fi
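+
+# For example (assumed values): MOZHARNESS_CONFIG="unittests/linux_unittest.py"
+# expands to "--config-file ${MOZHARNESS_PATH}/configs/unittests/linux_unittest.py",
+# and MOZHARNESS_OPTIONS="total-chunks=4 this-chunk=1" expands to
+# "--total-chunks=4 --this-chunk=1".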
+
+# Use |mach python| if a source checkout exists so in-tree packages are
+# available.
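+# For example (illustrative): python becomes "python3 ${GECKO_PATH}/mach
+# python" when a checkout exists, and "python2.7" otherwise.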
+[[ -x "${GECKO_PATH}/mach" ]] && python="${PYTHON:-python3} ${GECKO_PATH}/mach python" || python="${PYTHON:-python2.7}"
+
+# Save the computed mozharness command to a binary which is useful for
+# interactive mode.
+mozharness_bin="$HOME/bin/run-mozharness"
+mkdir -p $(dirname $mozharness_bin)
+
+echo -e "#!/usr/bin/env bash
+# Some mozharness scripts assume base_work_dir is in
+# the current working directory, see bug 1279237
+cd "$WORKSPACE"
+cmd=\"${python} ${MOZHARNESS_PATH}/scripts/${MOZHARNESS_SCRIPT} ${config_cmds} ${options} ${@} \${@}\"
+echo \"Running: \${cmd}\"
+exec \${cmd}" > ${mozharness_bin}
+chmod +x ${mozharness_bin}
+
+# In interactive mode, the user will be prompted with options for what to do.
+if ! $TASKCLUSTER_INTERACTIVE; then
+ # run the given mozharness script and configs, but pass the rest of the
+ # arguments in from our own invocation
+ ${mozharness_bin};
+fi
+
+# Run a custom mach command (this is typically used by action tasks to run
+# harnesses in a particular way)
+if [ "$CUSTOM_MACH_COMMAND" ]; then
+ eval "'$WORKSPACE/build/venv/bin/python' '$WORKSPACE/build/tests/mach' ${CUSTOM_MACH_COMMAND} ${@}"
+ exit $?
+fi