author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:02:58 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:02:58 +0000
commit     698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree       173a775858bd501c378080a10dca74132f05bc50 /src/ci/docker/scripts
parent     Initial commit. (diff)
Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/ci/docker/scripts')
-rw-r--r--  src/ci/docker/scripts/android-base-apt-get.sh      |  19
-rw-r--r--  src/ci/docker/scripts/android-ndk.sh               |  31
-rwxr-xr-x  src/ci/docker/scripts/android-sdk-manager.py       | 192
-rwxr-xr-x  src/ci/docker/scripts/android-sdk.sh               |  29
-rwxr-xr-x  src/ci/docker/scripts/android-start-emulator.sh    |  16
-rwxr-xr-x  src/ci/docker/scripts/cmake.sh                     |  34
-rw-r--r--  src/ci/docker/scripts/cross-apt-packages.sh        |  29
-rw-r--r--  src/ci/docker/scripts/crosstool-ng-1.24.sh         |  13
-rw-r--r--  src/ci/docker/scripts/crosstool-ng.sh              |  12
-rw-r--r--  src/ci/docker/scripts/emscripten.sh                |  24
-rwxr-xr-x  src/ci/docker/scripts/freebsd-toolchain.sh         |  83
-rw-r--r--  src/ci/docker/scripts/illumos-toolchain.sh         | 177
-rw-r--r--  src/ci/docker/scripts/make3.sh                     |  10
-rw-r--r--  src/ci/docker/scripts/musl-patch-configure.diff    |  13
-rw-r--r--  src/ci/docker/scripts/musl-toolchain.sh            |  74
-rw-r--r--  src/ci/docker/scripts/musl.sh                      |  43
-rw-r--r--  src/ci/docker/scripts/qemu-bare-bones-addentropy.c |  33
-rw-r--r--  src/ci/docker/scripts/qemu-bare-bones-rcS          |  28
-rw-r--r--  src/ci/docker/scripts/rustbuild-setup.sh           |   5
-rw-r--r--  src/ci/docker/scripts/sccache.sh                   |  20
20 files changed, 885 insertions, 0 deletions
diff --git a/src/ci/docker/scripts/android-base-apt-get.sh b/src/ci/docker/scripts/android-base-apt-get.sh
new file mode 100644
index 000000000..f1761f806
--- /dev/null
+++ b/src/ci/docker/scripts/android-base-apt-get.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+set -ex
+
+apt-get update
+apt-get install -y --no-install-recommends \
+ ca-certificates \
+ cmake \
+ curl \
+ file \
+ g++ \
+ git \
+ libssl-dev \
+ make \
+ ninja-build \
+ pkg-config \
+ python3 \
+ sudo \
+ unzip \
+ xz-utils
diff --git a/src/ci/docker/scripts/android-ndk.sh b/src/ci/docker/scripts/android-ndk.sh
new file mode 100644
index 000000000..ba70c62ea
--- /dev/null
+++ b/src/ci/docker/scripts/android-ndk.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+set -ex
+
+URL=https://dl.google.com/android/repository
+
+download_ndk() {
+ mkdir -p /android/ndk
+ cd /android/ndk
+ curl -fO $URL/$1
+ unzip -q $1
+ rm $1
+ mv android-ndk-* ndk
+}
+
+make_standalone_toolchain() {
+ # See https://developer.android.com/ndk/guides/standalone_toolchain.htm
+ python3 /android/ndk/ndk/build/tools/make_standalone_toolchain.py \
+ --install-dir /android/ndk/$1-$2 \
+ --arch $1 \
+ --api $2
+}
+
+remove_ndk() {
+ rm -rf /android/ndk/ndk
+}
+
+download_and_make_toolchain() {
+ download_ndk $1 && \
+ make_standalone_toolchain $2 $3 && \
+ remove_ndk
+}
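
This file only defines helpers; the per-target Dockerfiles are expected to source it and then call them. A minimal sketch of such a call (the NDK archive name and API level here are illustrative, not pinned by this commit):

    # Hypothetical usage from a Dockerfile RUN step
    . /scripts/android-ndk.sh
    # Download the NDK zip, build a standalone arm toolchain for API level 14,
    # then delete the unpacked NDK to keep the image small.
    download_and_make_toolchain android-ndk-r15c-linux-x86_64.zip arm 14
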
diff --git a/src/ci/docker/scripts/android-sdk-manager.py b/src/ci/docker/scripts/android-sdk-manager.py
new file mode 100755
index 000000000..c9e2961f6
--- /dev/null
+++ b/src/ci/docker/scripts/android-sdk-manager.py
@@ -0,0 +1,192 @@
+#!/usr/bin/env python3
+# Simpler reimplementation of Android's sdkmanager
+# Extra features of this implementation are pinning and mirroring
+
+# These URLs are the Google repositories containing the list of available
+# packages and their versions. The list has been generated by listing the URLs
+# fetched while executing `tools/bin/sdkmanager --list`
+BASE_REPOSITORY = "https://dl.google.com/android/repository/"
+REPOSITORIES = [
+ "sys-img/android/sys-img2-1.xml",
+ "sys-img/android-wear/sys-img2-1.xml",
+ "sys-img/android-wear-cn/sys-img2-1.xml",
+ "sys-img/android-tv/sys-img2-1.xml",
+ "sys-img/google_apis/sys-img2-1.xml",
+ "sys-img/google_apis_playstore/sys-img2-1.xml",
+ "addon2-1.xml",
+ "glass/addon2-1.xml",
+ "extras/intel/addon2-1.xml",
+ "repository2-1.xml",
+]
+
+# Available hosts: linux, macosx and windows
+HOST_OS = "linux"
+
+# Mirroring options
+MIRROR_BUCKET = "rust-lang-ci-mirrors"
+MIRROR_BUCKET_REGION = "us-west-1"
+MIRROR_BASE_DIR = "rustc/android/"
+
+import argparse
+import hashlib
+import os
+import subprocess
+import sys
+import tempfile
+import urllib.request
+import xml.etree.ElementTree as ET
+
+class Package:
+ def __init__(self, path, url, sha1, deps=None):
+ if deps is None:
+ deps = []
+ self.path = path.strip()
+ self.url = url.strip()
+ self.sha1 = sha1.strip()
+ self.deps = deps
+
+ def download(self, base_url):
+ _, file = tempfile.mkstemp()
+ url = base_url + self.url
+ subprocess.run(["curl", "-o", file, url], check=True)
+ # Ensure there are no hash mismatches
+ with open(file, "rb") as f:
+ sha1 = hashlib.sha1(f.read()).hexdigest()
+ if sha1 != self.sha1:
+ raise RuntimeError(
+ "hash mismatch for package " + self.path + ": " +
+ sha1 + " vs " + self.sha1 + " (known good)"
+ )
+ return file
+
+ def __repr__(self):
+ return "<Package "+self.path+" at "+self.url+" (sha1="+self.sha1+")"
+
+def fetch_url(url):
+ page = urllib.request.urlopen(url)
+ return page.read()
+
+def fetch_repository(base, repo_url):
+ packages = {}
+ root = ET.fromstring(fetch_url(base + repo_url))
+ for package in root:
+ if package.tag != "remotePackage":
+ continue
+ path = package.attrib["path"]
+
+ for archive in package.find("archives"):
+ host_os = archive.find("host-os")
+ if host_os is not None and host_os.text != HOST_OS:
+ continue
+ complete = archive.find("complete")
+ url = os.path.join(os.path.dirname(repo_url), complete.find("url").text)
+ sha1 = complete.find("checksum").text
+
+ deps = []
+ dependencies = package.find("dependencies")
+ if dependencies is not None:
+ for dep in dependencies:
+ deps.append(dep.attrib["path"])
+
+ packages[path] = Package(path, url, sha1, deps)
+ break
+
+ return packages
+
+def fetch_repositories():
+ packages = {}
+ for repo in REPOSITORIES:
+ packages.update(fetch_repository(BASE_REPOSITORY, repo))
+ return packages
+
+class Lockfile:
+ def __init__(self, path):
+ self.path = path
+ self.packages = {}
+ if os.path.exists(path):
+ with open(path) as f:
+ for line in f:
+ path, url, sha1 = line.split(" ")
+ self.packages[path] = Package(path, url, sha1)
+
+ def add(self, packages, name, *, update=True):
+ if name not in packages:
+ raise NameError("package not found: " + name)
+ if not update and name in self.packages:
+ return
+ self.packages[name] = packages[name]
+ for dep in packages[name].deps:
+ self.add(packages, dep, update=False)
+
+ def save(self):
+ packages = list(sorted(self.packages.values(), key=lambda p: p.path))
+ with open(self.path, "w") as f:
+ for package in packages:
+ f.write(package.path + " " + package.url + " " + package.sha1 + "\n")
+
+def cli_add_to_lockfile(args):
+ lockfile = Lockfile(args.lockfile)
+ packages = fetch_repositories()
+ for package in args.packages:
+ lockfile.add(packages, package)
+ lockfile.save()
+
+def cli_update_mirror(args):
+ lockfile = Lockfile(args.lockfile)
+ for package in lockfile.packages.values():
+ path = package.download(BASE_REPOSITORY)
+ subprocess.run([
+ "aws", "s3", "mv", path,
+ "s3://" + MIRROR_BUCKET + "/" + MIRROR_BASE_DIR + package.url,
+ "--profile=" + args.awscli_profile,
+ ], check=True)
+
+def cli_install(args):
+ lockfile = Lockfile(args.lockfile)
+ for package in lockfile.packages.values():
+ # Download the file from the mirror into a temp file
+ url = "https://" + MIRROR_BUCKET + ".s3-" + MIRROR_BUCKET_REGION + \
+ ".amazonaws.com/" + MIRROR_BASE_DIR
+ downloaded = package.download(url)
+ # Extract the file in a temporary directory
+ extract_dir = tempfile.mkdtemp()
+ subprocess.run([
+ "unzip", "-q", downloaded, "-d", extract_dir,
+ ], check=True)
+ # Figure out the prefix used in the zip
+ subdirs = [d for d in os.listdir(extract_dir) if not d.startswith(".")]
+        if len(subdirs) != 1:
+            raise RuntimeError("extracted archive does not contain exactly one directory")
+ # Move the extracted files in the proper directory
+ dest = os.path.join(args.dest, package.path.replace(";", "/"))
+ os.makedirs("/".join(dest.split("/")[:-1]), exist_ok=True)
+ os.rename(os.path.join(extract_dir, subdirs[0]), dest)
+ os.unlink(downloaded)
+
+def cli():
+ parser = argparse.ArgumentParser()
+ subparsers = parser.add_subparsers()
+
+ add_to_lockfile = subparsers.add_parser("add-to-lockfile")
+ add_to_lockfile.add_argument("lockfile")
+ add_to_lockfile.add_argument("packages", nargs="+")
+ add_to_lockfile.set_defaults(func=cli_add_to_lockfile)
+
+ update_mirror = subparsers.add_parser("update-mirror")
+ update_mirror.add_argument("lockfile")
+ update_mirror.add_argument("--awscli-profile", default="default")
+ update_mirror.set_defaults(func=cli_update_mirror)
+
+ install = subparsers.add_parser("install")
+ install.add_argument("lockfile")
+ install.add_argument("dest")
+ install.set_defaults(func=cli_install)
+
+ args = parser.parse_args()
+ if not hasattr(args, "func"):
+ print("error: a subcommand is required (see --help)")
+ exit(1)
+ args.func(args)
+
+if __name__ == "__main__":
+ cli()
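
The three subcommands are meant to be chained: pin a package into the lockfile, push the pinned archives to the CI mirror, then install from the mirror on the builders. A sketch of that workflow using the CLI defined above (the package name is illustrative):

    # Pin a package (plus its dependencies) into the lockfile
    ./android-sdk-manager.py add-to-lockfile android-sdk.lock "platform-tools"
    # Upload the pinned archives to the rust-lang-ci-mirrors bucket
    ./android-sdk-manager.py update-mirror android-sdk.lock --awscli-profile default
    # On the builder: fetch from the mirror and unpack into the SDK directory
    ./android-sdk-manager.py install android-sdk.lock /android/sdk
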
diff --git a/src/ci/docker/scripts/android-sdk.sh b/src/ci/docker/scripts/android-sdk.sh
new file mode 100755
index 000000000..23360d309
--- /dev/null
+++ b/src/ci/docker/scripts/android-sdk.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+set -ex
+
+export ANDROID_HOME=/android/sdk
+PATH=$PATH:"${ANDROID_HOME}/tools/bin"
+LOCKFILE="${ANDROID_HOME}/android-sdk.lock"
+
+# To add a new package to the SDK or to update an existing one, run:
+#
+# android-sdk-manager.py add-to-lockfile $LOCKFILE <package-name>
+#
+# Then, after every lockfile update the mirror has to be synchronized as well:
+#
+# android-sdk-manager.py update-mirror $LOCKFILE
+#
+/scripts/android-sdk-manager.py install "${LOCKFILE}" "${ANDROID_HOME}"
+
+details=$(cat "${LOCKFILE}" \
+ | grep system-images \
+ | sed 's/^system-images;android-\([0-9]\+\);default;\([a-z0-9-]\+\) /\1 \2 /g')
+api="$(echo "${details}" | awk '{print($1)}')"
+abi="$(echo "${details}" | awk '{print($2)}')"
+
+# See https://developer.android.com/studio/command-line/avdmanager.html for
+# usage of `avdmanager`.
+echo no | avdmanager create avd \
+ -n "$abi-$api" \
+ -k "system-images;android-$api;default;$abi"
diff --git a/src/ci/docker/scripts/android-start-emulator.sh b/src/ci/docker/scripts/android-start-emulator.sh
new file mode 100755
index 000000000..09f0d1375
--- /dev/null
+++ b/src/ci/docker/scripts/android-start-emulator.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+set -ex
+
+# Setting SHELL to a file instead of a symlink helps the Android
+# emulator identify the system.
+export SHELL=/bin/bash
+
+# Using the default qemu2 engine makes time::tests::since_epoch fail because
+# the emulator date is set to the Unix epoch (in the armeabi-v7a-18 image). With
+# the classic engine the emulator starts with the current date and the tests run
+# fine. If another image is used, this needs to be evaluated again.
+nohup nohup emulator @armeabi-v7a-18 \
+ -engine classic -no-window -partition-size 2047 0<&- &>/dev/null &
+
+exec "$@"
diff --git a/src/ci/docker/scripts/cmake.sh b/src/ci/docker/scripts/cmake.sh
new file mode 100755
index 000000000..f124dbdaa
--- /dev/null
+++ b/src/ci/docker/scripts/cmake.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+set -ex
+
+hide_output() {
+ set +x
+ on_err="
+echo ERROR: An error was encountered with the build.
+cat /tmp/cmake_build.log
+exit 1
+"
+ trap "$on_err" ERR
+ bash -c "while true; do sleep 30; echo \$(date) - building ...; done" &
+ PING_LOOP_PID=$!
+ "$@" &> /tmp/cmake_build.log
+ trap - ERR
+ kill $PING_LOOP_PID
+ rm /tmp/cmake_build.log
+ set -x
+}
+
+# LLVM 12 requires CMake 3.13.4 or higher.
+# This script is not necessary for images using Ubuntu 20.04 or newer.
+CMAKE=3.13.4
+curl -L https://github.com/Kitware/CMake/releases/download/v$CMAKE/cmake-$CMAKE.tar.gz | tar xzf -
+
+mkdir cmake-build
+cd cmake-build
+hide_output ../cmake-$CMAKE/configure
+hide_output make -j$(nproc)
+hide_output make install
+
+cd ..
+rm -rf cmake-build
+rm -rf cmake-$CMAKE
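
The hide_output pattern used here (and repeated in several scripts below) silences a long, chatty build but still surfaces the full log if anything fails. A minimal sketch of the same idea, assuming bash and dropping the CI keep-alive ping loop:

    #!/bin/bash
    set -e
    log=/tmp/quiet-build.log
    # On any command failure, dump the captured log before exiting.
    trap 'echo "ERROR: the build failed; dumping the log:"; cat "$log"' ERR
    make -j"$(nproc)" &> "$log"    # any long, noisy command
    trap - ERR                     # clear the handler once the command succeeded
    rm -f "$log"
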
diff --git a/src/ci/docker/scripts/cross-apt-packages.sh b/src/ci/docker/scripts/cross-apt-packages.sh
new file mode 100644
index 000000000..2f8bf1194
--- /dev/null
+++ b/src/ci/docker/scripts/cross-apt-packages.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+ automake \
+ bison \
+ bzip2 \
+ ca-certificates \
+ cmake \
+ curl \
+ file \
+ flex \
+ g++ \
+ gawk \
+ gdb \
+ git \
+ gperf \
+ help2man \
+ libncurses-dev \
+ libssl-dev \
+ libtool-bin \
+ make \
+ ninja-build \
+ patch \
+ pkg-config \
+ python3 \
+ sudo \
+ texinfo \
+ unzip \
+ wget \
+ xz-utils
diff --git a/src/ci/docker/scripts/crosstool-ng-1.24.sh b/src/ci/docker/scripts/crosstool-ng-1.24.sh
new file mode 100644
index 000000000..3a40f6cdd
--- /dev/null
+++ b/src/ci/docker/scripts/crosstool-ng-1.24.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+set -ex
+
+# Mirrored from https://github.com/crosstool-ng/crosstool-ng/archive/crosstool-ng-1.24.0.tar.gz
+url="https://ci-mirrors.rust-lang.org/rustc/crosstool-ng-1.24.0.tar.gz"
+curl -Lf $url | tar xzf -
+cd crosstool-ng-crosstool-ng-1.24.0
+./bootstrap
+./configure --prefix=/usr/local
+make -j$(nproc)
+make install
+cd ..
+rm -rf crosstool-ng-crosstool-ng-1.24.0
diff --git a/src/ci/docker/scripts/crosstool-ng.sh b/src/ci/docker/scripts/crosstool-ng.sh
new file mode 100644
index 000000000..1d0c28c8e
--- /dev/null
+++ b/src/ci/docker/scripts/crosstool-ng.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+set -ex
+
+url="https://github.com/crosstool-ng/crosstool-ng/archive/crosstool-ng-1.22.0.tar.gz"
+curl -Lf $url | tar xzf -
+cd crosstool-ng-crosstool-ng-1.22.0
+./bootstrap
+./configure --prefix=/usr/local
+make -j$(nproc)
+make install
+cd ..
+rm -rf crosstool-ng-crosstool-ng-1.22.0
diff --git a/src/ci/docker/scripts/emscripten.sh b/src/ci/docker/scripts/emscripten.sh
new file mode 100644
index 000000000..56dc96283
--- /dev/null
+++ b/src/ci/docker/scripts/emscripten.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -ex
+
+hide_output() {
+ set +x
+ on_err="
+echo ERROR: An error was encountered with the build.
+cat /tmp/build.log
+exit 1
+"
+ trap "$on_err" ERR
+ bash -c "while true; do sleep 30; echo \$(date) - building ...; done" &
+ PING_LOOP_PID=$!
+ "$@" &> /tmp/build.log
+ trap - ERR
+ kill $PING_LOOP_PID
+ rm -f /tmp/build.log
+ set -x
+}
+
+git clone https://github.com/emscripten-core/emsdk.git /emsdk-portable
+cd /emsdk-portable
+hide_output ./emsdk install 1.39.20
+./emsdk activate 1.39.20
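
After `emsdk activate`, later layers or shell sessions still have to bring the toolchain into their environment; a hedged sketch of what that typically looks like (paths assume the /emsdk-portable checkout above):

    # Load the emsdk environment (EMSDK, PATH entries for emcc/em++, etc.)
    . /emsdk-portable/emsdk_env.sh
    # Sanity-check that the pinned version is the active one
    emcc --version
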
diff --git a/src/ci/docker/scripts/freebsd-toolchain.sh b/src/ci/docker/scripts/freebsd-toolchain.sh
new file mode 100755
index 000000000..4a4cac1b7
--- /dev/null
+++ b/src/ci/docker/scripts/freebsd-toolchain.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+# ignore-tidy-linelength
+
+set -eux
+
+arch=$1
+binutils_version=2.25.1
+freebsd_version=12.3
+triple=$arch-unknown-freebsd12
+sysroot=/usr/local/$triple
+
+hide_output() {
+ set +x
+ local on_err="
+echo ERROR: An error was encountered with the build.
+cat /tmp/build.log
+exit 1
+"
+ trap "$on_err" ERR
+ bash -c "while true; do sleep 30; echo \$(date) - building ...; done" &
+ local ping_loop_pid=$!
+ "$@" &> /tmp/build.log
+ trap - ERR
+ kill $ping_loop_pid
+ set -x
+}
+
+# First up, build binutils
+mkdir binutils
+cd binutils
+curl https://ftp.gnu.org/gnu/binutils/binutils-${binutils_version}.tar.bz2 | tar xjf -
+mkdir binutils-build
+cd binutils-build
+hide_output ../binutils-${binutils_version}/configure \
+ --target="$triple" --with-sysroot="$sysroot"
+hide_output make -j"$(getconf _NPROCESSORS_ONLN)"
+hide_output make install
+cd ../..
+rm -rf binutils
+
+# Next, download the FreeBSD libraries and header files
+mkdir -p "$sysroot"
+case $arch in
+ (x86_64) freebsd_arch=amd64 ;;
+ (i686) freebsd_arch=i386 ;;
+esac
+
+files_to_extract=(
+"./usr/include"
+"./usr/lib/*crt*.o"
+)
+# Try to unpack only the libraries the build needs, to save space.
+for lib in c cxxrt gcc_s m thr util; do
+ files_to_extract=("${files_to_extract[@]}" "./lib/lib${lib}.*" "./usr/lib/lib${lib}.*")
+done
+for lib in c++ c_nonshared compiler_rt execinfo gcc pthread rt ssp_nonshared procstat devstat kvm; do
+ files_to_extract=("${files_to_extract[@]}" "./usr/lib/lib${lib}.*")
+done
+
+# Originally downloaded from:
+# URL=https://download.freebsd.org/ftp/releases/${freebsd_arch}/${freebsd_version}-RELEASE/base.txz
+URL=https://ci-mirrors.rust-lang.org/rustc/2022-05-06-freebsd-${freebsd_version}-${freebsd_arch}-base.txz
+curl "$URL" | tar xJf - -C "$sysroot" --wildcards "${files_to_extract[@]}"
+
+# Clang can do cross-builds out of the box, if we give it the right
+# flags. (The local binutils seem to work, but they set the ELF
+# header "OS/ABI" (EI_OSABI) field to SysV rather than FreeBSD, so
+# there might be other problems.)
+#
+# The --target option is last because the cross-build of LLVM uses
+# --target without an OS version ("-freebsd" vs. "-freebsd12"). This
+# makes Clang default to libstdc++ (which no longer exists), and also
+# controls other features, like GNU-style symbol table hashing and
+# anything predicated on the version number in the __FreeBSD__
+# preprocessor macro.
+for tool in clang clang++; do
+ tool_path=/usr/local/bin/${triple}-${tool}
+ cat > "$tool_path" <<EOF
+#!/bin/sh
+exec $tool --sysroot=$sysroot --prefix=${sysroot}/bin "\$@" --target=$triple
+EOF
+ chmod +x "$tool_path"
+done
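
As a concrete illustration of the wrapper scripts generated above, cross-compiling a trivial program would look roughly like this (hello.c is a made-up test file):

    x86_64-unknown-freebsd12-clang -O2 -o hello hello.c
    # which the wrapper expands to roughly:
    #   clang --sysroot=/usr/local/x86_64-unknown-freebsd12 \
    #         --prefix=/usr/local/x86_64-unknown-freebsd12/bin \
    #         -O2 -o hello hello.c --target=x86_64-unknown-freebsd12
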
diff --git a/src/ci/docker/scripts/illumos-toolchain.sh b/src/ci/docker/scripts/illumos-toolchain.sh
new file mode 100644
index 000000000..3f1d5f342
--- /dev/null
+++ b/src/ci/docker/scripts/illumos-toolchain.sh
@@ -0,0 +1,177 @@
+#!/bin/bash
+
+set -o errexit
+set -o pipefail
+set -o xtrace
+
+ARCH="$1"
+PHASE="$2"
+
+JOBS="$(getconf _NPROCESSORS_ONLN)"
+
+case "$ARCH" in
+x86_64)
+ SYSROOT_MACH='i386'
+ ;;
+*)
+ printf 'ERROR: unknown architecture: %s\n' "$ARCH"
+ exit 1
+esac
+
+BUILD_TARGET="$ARCH-pc-solaris2.10"
+
+#
+# The illumos and the Solaris build both use the same GCC-level host triple,
+# though different versions of GCC are used and with different configure
+# options. To ensure as little accidental cross-pollination as possible, we
+# build the illumos toolchain in a specific directory tree and just symlink the
+# expected tools into /usr/local/bin at the end. We omit /usr/local/bin from
+# PATH here for similar reasons.
+#
+PREFIX="/opt/illumos/$ARCH"
+export PATH="$PREFIX/bin:/usr/bin:/bin:/usr/sbin:/sbin"
+
+#
+# NOTE: The compiler version selected here is more specific than might appear.
+# GCC 7.X releases do not appear to cross-compile correctly for Solaris
+# targets, at least insofar as they refuse to enable TLS in libstdc++. When
+# changing the GCC version in future, one must carefully verify that TLS is
+# enabled in all of the static libraries we intend to include in output
+# binaries.
+#
+GCC_VERSION='8.4.0'
+GCC_SUM='e30a6e52d10e1f27ed55104ad233c30bd1e99cfb5ff98ab022dc941edd1b2dd4'
+GCC_BASE="gcc-$GCC_VERSION"
+GCC_TAR="gcc-$GCC_VERSION.tar.xz"
+GCC_URL="https://ftp.gnu.org/gnu/gcc/$GCC_BASE/$GCC_TAR"
+
+SYSROOT_VER='20181213-de6af22ae73b-v1'
+SYSROOT_SUM='ee792d956dfa6967453cebe9286a149143290d296a8ce4b8a91d36bea89f8112'
+SYSROOT_TAR="illumos-sysroot-$SYSROOT_MACH-$SYSROOT_VER.tar.gz"
+SYSROOT_URL='https://github.com/illumos/sysroot/releases/download/'
+SYSROOT_URL+="$SYSROOT_VER/$SYSROOT_TAR"
+SYSROOT_DIR="$PREFIX/sysroot"
+
+BINUTILS_VERSION='2.25.1'
+BINUTILS_SUM='b5b14added7d78a8d1ca70b5cb75fef57ce2197264f4f5835326b0df22ac9f22'
+BINUTILS_BASE="binutils-$BINUTILS_VERSION"
+BINUTILS_TAR="$BINUTILS_BASE.tar.bz2"
+BINUTILS_URL="https://ftp.gnu.org/gnu/binutils/$BINUTILS_TAR"
+
+
+download_file() {
+ local file="$1"
+ local url="$2"
+ local sum="$3"
+
+ while :; do
+ if [[ -f "$file" ]]; then
+ if ! h="$(sha256sum "$file" | awk '{ print $1 }')"; then
+ printf 'ERROR: reading hash\n' >&2
+ exit 1
+ fi
+
+ if [[ "$h" == "$sum" ]]; then
+ return 0
+ fi
+
+ printf 'WARNING: hash mismatch: %s != expected %s\n' \
+ "$h" "$sum" >&2
+ rm -f "$file"
+ fi
+
+ printf 'Downloading: %s\n' "$url"
+ if ! curl -f -L -o "$file" "$url"; then
+ rm -f "$file"
+ sleep 1
+ fi
+ done
+}
+
+
+case "$PHASE" in
+sysroot)
+ download_file "/tmp/$SYSROOT_TAR" "$SYSROOT_URL" "$SYSROOT_SUM"
+ mkdir -p "$SYSROOT_DIR"
+ cd "$SYSROOT_DIR"
+ tar -xzf "/tmp/$SYSROOT_TAR"
+ rm -f "/tmp/$SYSROOT_TAR"
+ ;;
+
+binutils)
+ download_file "/tmp/$BINUTILS_TAR" "$BINUTILS_URL" "$BINUTILS_SUM"
+ mkdir -p /ws/src/binutils
+ cd /ws/src/binutils
+ tar -xjf "/tmp/$BINUTILS_TAR"
+ rm -f "/tmp/$BINUTILS_TAR"
+
+ mkdir -p /ws/build/binutils
+ cd /ws/build/binutils
+ "/ws/src/binutils/$BINUTILS_BASE/configure" \
+ --prefix="$PREFIX" \
+ --target="$BUILD_TARGET" \
+ --program-prefix="$ARCH-illumos-" \
+ --with-sysroot="$SYSROOT_DIR"
+
+ make -j "$JOBS"
+
+ mkdir -p "$PREFIX"
+ make install
+
+ cd /
+ rm -rf /ws/src/binutils /ws/build/binutils
+ ;;
+
+gcc)
+ download_file "/tmp/$GCC_TAR" "$GCC_URL" "$GCC_SUM"
+ mkdir -p /ws/src/gcc
+ cd /ws/src/gcc
+ tar -xJf "/tmp/$GCC_TAR"
+ rm -f "/tmp/$GCC_TAR"
+
+ mkdir -p /ws/build/gcc
+ cd /ws/build/gcc
+ export CFLAGS='-fPIC'
+ export CXXFLAGS='-fPIC'
+ export CXXFLAGS_FOR_TARGET='-fPIC'
+ export CFLAGS_FOR_TARGET='-fPIC'
+ "/ws/src/gcc/$GCC_BASE/configure" \
+ --prefix="$PREFIX" \
+ --target="$BUILD_TARGET" \
+ --program-prefix="$ARCH-illumos-" \
+ --with-sysroot="$SYSROOT_DIR" \
+ --with-gnu-as \
+ --with-gnu-ld \
+ --disable-nls \
+ --disable-libgomp \
+ --disable-libquadmath \
+ --disable-libssp \
+ --disable-libvtv \
+ --disable-libcilkrts \
+ --disable-libada \
+ --disable-libsanitizer \
+ --disable-libquadmath-support \
+ --disable-shared \
+ --enable-tls
+
+ make -j "$JOBS"
+
+ mkdir -p "$PREFIX"
+ make install
+
+ #
+ # Link toolchain commands into /usr/local/bin so that cmake and others
+ # can find them:
+ #
+ (cd "$PREFIX/bin" && ls -U) | grep "^$ARCH-illumos-" |
+ xargs -t -I% ln -s "$PREFIX/bin/%" '/usr/local/bin/'
+
+ cd /
+ rm -rf /ws/src/gcc /ws/build/gcc
+ ;;
+
+*)
+ printf 'ERROR: unknown phase "%s"\n' "$PHASE" >&2
+ exit 100
+ ;;
+esac
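
The script is phase-driven; a Dockerfile would typically invoke it once per phase, in dependency order:

    # Build the pieces in order: sysroot first, then binutils, then gcc
    bash illumos-toolchain.sh x86_64 sysroot
    bash illumos-toolchain.sh x86_64 binutils
    bash illumos-toolchain.sh x86_64 gcc
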
diff --git a/src/ci/docker/scripts/make3.sh b/src/ci/docker/scripts/make3.sh
new file mode 100644
index 000000000..283700d06
--- /dev/null
+++ b/src/ci/docker/scripts/make3.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+set -ex
+
+curl -f https://ftp.gnu.org/gnu/make/make-3.81.tar.gz | tar xzf -
+cd make-3.81
+./configure --prefix=/usr
+make
+make install
+cd ..
+rm -rf make-3.81
diff --git a/src/ci/docker/scripts/musl-patch-configure.diff b/src/ci/docker/scripts/musl-patch-configure.diff
new file mode 100644
index 000000000..6e106b450
--- /dev/null
+++ b/src/ci/docker/scripts/musl-patch-configure.diff
@@ -0,0 +1,13 @@
+diff --git a/configure b/configure
+index 86801281..ed2f7998 100755
+--- a/configure
++++ b/configure
+@@ -398,7 +398,7 @@ test "$debug" = yes && CFLAGS_AUTO=-g
+ #
+ printf "checking whether we should preprocess assembly to add debugging information... "
+ if fnmatch '-g*|*\ -g*' "$CFLAGS_AUTO $CFLAGS" &&
+- test -f "tools/add-cfi.$ARCH.awk" &&
++ test -f "$srcdir/tools/add-cfi.$ARCH.awk" &&
+ printf ".file 1 \"srcfile.s\"\n.line 1\n.cfi_startproc\n.cfi_endproc" | $CC -g -x assembler -c -o /dev/null 2>/dev/null -
+ then
+ ADD_CFI=yes
diff --git a/src/ci/docker/scripts/musl-toolchain.sh b/src/ci/docker/scripts/musl-toolchain.sh
new file mode 100644
index 000000000..e358b8139
--- /dev/null
+++ b/src/ci/docker/scripts/musl-toolchain.sh
@@ -0,0 +1,74 @@
+#!/bin/sh
+# This script runs `musl-cross-make` to prepare a C toolchain (Binutils, GCC, musl itself)
+# and builds the static libunwind that we distribute for the static target.
+#
+# Versions of the toolchain components are configurable in `musl-cross-make/Makefile`;
+# musl, unlike glibc, is forward compatible, so upgrading it shouldn't break old distributions.
+# Right now we have: Binutils 2.31.1, GCC 9.2.0, musl 1.1.24.
+
+# ignore-tidy-linelength
+
+set -ex
+
+hide_output() {
+ set +x
+ on_err="
+echo ERROR: An error was encountered with the build.
+cat /tmp/build.log
+exit 1
+"
+ trap "$on_err" ERR
+ bash -c "while true; do sleep 30; echo \$(date) - building ...; done" &
+ PING_LOOP_PID=$!
+ "$@" &> /tmp/build.log
+ trap - ERR
+ kill $PING_LOOP_PID
+ rm /tmp/build.log
+ set -x
+}
+
+ARCH=$1
+TARGET=$ARCH-linux-musl
+
+# Don't depend on the mirrors of sabotage linux that musl-cross-make uses.
+LINUX_HEADERS_SITE=https://ci-mirrors.rust-lang.org/rustc/sabotage-linux-tarballs
+
+OUTPUT=/usr/local
+shift
+
+# Ancient binutils versions don't understand debug symbols produced by more recent tools.
+# Apparently applying `-fPIC` everywhere allows them to link successfully.
+# Enable debug info. If we don't do so, users can't debug into musl code,
+# debuggers can't walk the stack, etc. Fixes #90103.
+export CFLAGS="-fPIC -g1 $CFLAGS"
+
+git clone https://github.com/richfelker/musl-cross-make # -b v0.9.9
+cd musl-cross-make
+# A few commits ahead of v0.9.9 to include the cowpatch fix:
+git checkout a54eb56f33f255dfca60be045f12a5cfaf5a72a9
+
+# Fix the cfi detection script in musl's configure so cfi is generated
+# when debug info is asked for. This patch is derived from
+# https://git.musl-libc.org/cgit/musl/commit/?id=c4d4028dde90562f631edf559fbc42d8ec1b29de.
+# When we upgrade to a version that includes this commit, we can remove the patch.
+mkdir patches/musl-1.1.24
+cp ../musl-patch-configure.diff patches/musl-1.1.24/0001-fix-cfi-detection.diff
+
+hide_output make -j$(nproc) TARGET=$TARGET MUSL_VER=1.1.24 LINUX_HEADERS_SITE=$LINUX_HEADERS_SITE
+hide_output make install TARGET=$TARGET MUSL_VER=1.1.24 LINUX_HEADERS_SITE=$LINUX_HEADERS_SITE OUTPUT=$OUTPUT
+
+cd -
+
+# Install the musl dynamic loader so that dynamically linked binaries can run
+ln -s $OUTPUT/$TARGET/lib/libc.so /lib/ld-musl-$ARCH.so.1
+echo $OUTPUT/$TARGET/lib >> /etc/ld-musl-$ARCH.path
+
+# Now that musl has bootstrapped itself, create proper toolchain symlinks to make builds and tests easier
+if [ "$REPLACE_CC" = "1" ]; then
+ for exec in cc gcc; do
+ ln -s $TARGET-gcc /usr/local/bin/$exec
+ done
+ for exec in cpp c++ g++; do
+ ln -s $TARGET-g++ /usr/local/bin/$exec
+ done
+fi
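
For reference, the script takes the target architecture as its first argument and honours the REPLACE_CC environment variable at the end; a sketch of an invocation:

    # Build an x86_64-linux-musl cross toolchain into /usr/local and symlink it
    # in as the default cc/gcc and c++/g++ for the image.
    REPLACE_CC=1 sh musl-toolchain.sh x86_64
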
diff --git a/src/ci/docker/scripts/musl.sh b/src/ci/docker/scripts/musl.sh
new file mode 100644
index 000000000..3e5dc4af0
--- /dev/null
+++ b/src/ci/docker/scripts/musl.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+set -ex
+
+hide_output() {
+ set +x
+ on_err="
+echo ERROR: An error was encountered with the build.
+cat /tmp/build.log
+exit 1
+"
+ trap "$on_err" ERR
+ bash -c "while true; do sleep 30; echo \$(date) - building ...; done" &
+ PING_LOOP_PID=$!
+ "$@" &> /tmp/build.log
+ trap - ERR
+ kill $PING_LOOP_PID
+ rm /tmp/build.log
+ set -x
+}
+
+TAG=$1
+shift
+
+# Ancient binutils versions don't understand debug symbols produced by more recent tools.
+# Apparently applying `-fPIC` everywhere allows them to link successfully.
+export CFLAGS="-fPIC $CFLAGS"
+
+MUSL=musl-1.1.24
+
+# may have been downloaded in a previous run
+if [ ! -d $MUSL ]; then
+ curl https://www.musl-libc.org/releases/$MUSL.tar.gz | tar xzf -
+fi
+
+cd $MUSL
+./configure --enable-debug --disable-shared --prefix=/musl-$TAG "$@"
+if [ "$TAG" = "i586" -o "$TAG" = "i686" ]; then
+ hide_output make -j$(nproc) AR=ar RANLIB=ranlib
+else
+ hide_output make -j$(nproc)
+fi
+hide_output make install
+hide_output make clean
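
This script, by contrast, builds only musl itself into /musl-$TAG, and everything after the tag is passed straight to musl's ./configure. An illustrative call (the CC override is an assumption, not taken from this commit):

    # Build musl for a 32-bit x86 image; configure will honour CC if a
    # cross compiler is wanted.
    CC=i686-linux-gnu-gcc sh musl.sh i686
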
diff --git a/src/ci/docker/scripts/qemu-bare-bones-addentropy.c b/src/ci/docker/scripts/qemu-bare-bones-addentropy.c
new file mode 100644
index 000000000..815b5b04f
--- /dev/null
+++ b/src/ci/docker/scripts/qemu-bare-bones-addentropy.c
@@ -0,0 +1,33 @@
+#include <assert.h>
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <linux/random.h>
+
+#define N 2048
+
+struct entropy {
+ int ent_count;
+ int size;
+ unsigned char data[N];
+};
+
+int main() {
+ struct entropy buf;
+ ssize_t n;
+
+ int random_fd = open("/dev/random", O_RDWR);
+ assert(random_fd >= 0);
+
+ while ((n = read(0, &buf.data, N)) > 0) {
+ buf.ent_count = n * 8;
+ buf.size = n;
+ if (ioctl(random_fd, RNDADDENTROPY, &buf) != 0) {
+ perror("failed to add entropy");
+ }
+ }
+
+ return 0;
+}
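
The rcS script below expects this program to exist as a static /addentropy binary inside the initramfs; the build step is not part of this commit, but would be something along these lines (the cross compiler name is illustrative):

    # Cross-compile statically so the helper can run inside the bare-bones
    # initramfs as /addentropy.
    arm-linux-gnueabihf-gcc -static -O2 -o addentropy qemu-bare-bones-addentropy.c
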
diff --git a/src/ci/docker/scripts/qemu-bare-bones-rcS b/src/ci/docker/scripts/qemu-bare-bones-rcS
new file mode 100644
index 000000000..3c29bedc1
--- /dev/null
+++ b/src/ci/docker/scripts/qemu-bare-bones-rcS
@@ -0,0 +1,28 @@
+#!/bin/sh
+mount -t proc none /proc
+mount -t sysfs none /sys
+/sbin/mdev -s
+
+# Fill up our entropy pool; if we don't do this then anything with a hash map
+# will likely block forever, as the kernel is pretty unlikely to have enough
+# entropy.
+/addentropy < /addentropy
+cat /dev/urandom | head -n 2048 | /addentropy
+
+# Set up the IP that QEMU expects. This configures eth0 with the public IP that
+# QEMU will communicate with, as well as the loopback 127.0.0.1 address.
+ifconfig eth0 10.0.2.15
+ifconfig lo up
+
+# Configure DNS resolution of 'localhost' to work
+echo 'hosts: files dns' >> /ubuntu/etc/nsswitch.conf
+echo '127.0.0.1 localhost' >> /ubuntu/etc/hosts
+
+# prepare the chroot
+mount -t proc proc /ubuntu/proc/
+mount --rbind /sys /ubuntu/sys/
+mount --rbind /dev /ubuntu/dev/
+
+# Execute our `testd` inside the ubuntu chroot
+cp /testd /ubuntu/testd
+chroot /ubuntu /testd &
diff --git a/src/ci/docker/scripts/rustbuild-setup.sh b/src/ci/docker/scripts/rustbuild-setup.sh
new file mode 100644
index 000000000..baf2a6868
--- /dev/null
+++ b/src/ci/docker/scripts/rustbuild-setup.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+set -ex
+
+groupadd -r rustbuild && useradd -m -r -g rustbuild rustbuild
+mkdir /x-tools && chown rustbuild:rustbuild /x-tools
diff --git a/src/ci/docker/scripts/sccache.sh b/src/ci/docker/scripts/sccache.sh
new file mode 100644
index 000000000..6c713e1f8
--- /dev/null
+++ b/src/ci/docker/scripts/sccache.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+# ignore-tidy-linelength
+
+set -ex
+
+case "$(uname -m)" in
+ x86_64)
+ url="https://ci-mirrors.rust-lang.org/rustc/2021-08-24-sccache-v0.2.15-x86_64-unknown-linux-musl"
+ ;;
+ aarch64)
+ url="https://ci-mirrors.rust-lang.org/rustc/2021-08-25-sccache-v0.2.15-aarch64-unknown-linux-musl"
+ ;;
+ *)
+ echo "unsupported architecture: $(uname -m)"
+ exit 1
+esac
+
+curl -fo /usr/local/bin/sccache "${url}"
+chmod +x /usr/local/bin/sccache
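
Once installed, sccache is normally used as a compiler wrapper; a hedged example of how a CI build might point compilations at it (the variables are standard sccache/cargo ones, not taken from this diff):

    # Route C/C++ and Rust compilations through the cache
    export CC="sccache cc"
    export CXX="sccache c++"
    export RUSTC_WRAPPER=/usr/local/bin/sccache
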