Diffstat (limited to 'src/ci/scripts')
-rwxr-xr-x | src/ci/scripts/checkout-submodules.sh | 68
-rwxr-xr-x | src/ci/scripts/collect-cpu-stats.sh | 9
-rwxr-xr-x | src/ci/scripts/disable-git-crlf-conversion.sh | 13
-rwxr-xr-x | src/ci/scripts/dump-environment.sh | 19
-rwxr-xr-x | src/ci/scripts/enable-docker-ipv6.sh | 15
-rwxr-xr-x | src/ci/scripts/install-awscli.sh | 37
-rwxr-xr-x | src/ci/scripts/install-clang.sh | 70
-rwxr-xr-x | src/ci/scripts/install-mingw.sh | 72
-rwxr-xr-x | src/ci/scripts/install-msys2.sh | 32
-rwxr-xr-x | src/ci/scripts/install-ninja.sh | 16
-rwxr-xr-x | src/ci/scripts/install-sccache.sh | 20
-rwxr-xr-x | src/ci/scripts/install-wix.sh | 17
-rwxr-xr-x | src/ci/scripts/run-build-from-ci.sh | 21
-rwxr-xr-x | src/ci/scripts/setup-environment.sh | 47
-rwxr-xr-x | src/ci/scripts/should-skip-this.sh | 52
-rwxr-xr-x | src/ci/scripts/upload-artifacts.sh | 47
-rwxr-xr-x | src/ci/scripts/verify-backported-commits.sh | 142
-rwxr-xr-x | src/ci/scripts/verify-channel.sh | 38
-rwxr-xr-x | src/ci/scripts/verify-line-endings.sh | 24
-rwxr-xr-x | src/ci/scripts/verify-stable-version-number.sh | 30
20 files changed, 789 insertions, 0 deletions
diff --git a/src/ci/scripts/checkout-submodules.sh b/src/ci/scripts/checkout-submodules.sh
new file mode 100755
index 000000000..f6cb8f8a6
--- /dev/null
+++ b/src/ci/scripts/checkout-submodules.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+# Check out all our submodules, but more quickly than using git by using one of
+# our custom scripts
+
+set -o errexit
+set -o pipefail
+set -o nounset
+
+if [ ! -d ".git" ]; then
+    echo "Error: This must run in the root of the repository"
+    exit 1
+fi
+
+ci_dir=$(cd $(dirname $0) && pwd)/..
+. "$ci_dir/shared.sh"
+
+# On the beta channel we'll be automatically calculating the prerelease version
+# via the git history, so unshallow our shallow clone from CI.
+if [ "$(releaseChannel)" = "beta" ]; then
+    git fetch origin --unshallow beta master
+fi
+
+function fetch_github_commit_archive {
+    local module=$1
+    local cached="download-${module//\//-}.tar.gz"
+    retry sh -c "rm -f $cached && \
+        curl -f -sSL -o $cached $2"
+    mkdir $module
+    touch "$module/.git"
+    # On Windows, the default behavior is to emulate symlinks by copying
+    # files. However, that ends up being order-dependent while extracting,
+    # which can cause a failure if the symlink comes first. This env var
+    # causes tar to use real symlinks instead, which are allowed to dangle.
+    export MSYS=winsymlinks:nativestrict
+    tar -C $module --strip-components=1 -xf $cached
+    rm $cached
+}
+
+included="src/llvm-project src/doc/book src/doc/rust-by-example"
+modules="$(git config --file .gitmodules --get-regexp '\.path$' | cut -d' ' -f2)"
+modules=($modules)
+use_git=""
+urls="$(git config --file .gitmodules --get-regexp '\.url$' | cut -d' ' -f2)"
+urls=($urls)
+# shellcheck disable=SC2068
+for i in ${!modules[@]}; do
+    module=${modules[$i]}
+    if [[ " $included " = *" $module "* ]]; then
+        commit="$(git ls-tree HEAD $module | awk '{print $3}')"
+        git rm $module
+        url=${urls[$i]}
+        url=${url/\.git/}
+        fetch_github_commit_archive $module "$url/archive/$commit.tar.gz" &
+        bg_pids[${i}]=$!
+        continue
+    else
+        use_git="$use_git $module"
+    fi
+done
+retry sh -c "git submodule deinit -f $use_git && \
+    git submodule sync && \
+    git submodule update -j 16 --init --recursive --depth 1 $use_git"
+STATUS=0
+for pid in ${bg_pids[*]}
+do
+    wait $pid || STATUS=1
+done
+exit ${STATUS}
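
checkout-submodules.sh above special-cases the largest submodules: instead of cloning them, it downloads a GitHub archive of the exact commit the superproject pins. A stripped-down sketch of that flow for one module (the rust-lang/llvm-project URL is assumed from .gitmodules; the retries and cleanup of the real script are omitted):

    # The gitlink object recorded in the parent tree is the pinned commit.
    module=src/llvm-project
    commit="$(git ls-tree HEAD "$module" | awk '{print $3}')"
    mkdir -p "$module"
    # Fetch only that commit as a tarball and unpack it in place.
    curl -fsSL "https://github.com/rust-lang/llvm-project/archive/$commit.tar.gz" \
        | tar -xz --strip-components=1 -C "$module"
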
diff --git a/src/ci/scripts/collect-cpu-stats.sh b/src/ci/scripts/collect-cpu-stats.sh
new file mode 100755
index 000000000..853b4628f
--- /dev/null
+++ b/src/ci/scripts/collect-cpu-stats.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# Spawn a background process to collect CPU usage statistics which we'll upload
+# at the end of the build. See the comments in the script here for more
+# information.
+
+set -euo pipefail
+IFS=$'\n\t'
+
+python3 src/ci/cpu-usage-over-time.py &> cpu-usage.csv &
diff --git a/src/ci/scripts/disable-git-crlf-conversion.sh b/src/ci/scripts/disable-git-crlf-conversion.sh
new file mode 100755
index 000000000..6de080a9f
--- /dev/null
+++ b/src/ci/scripts/disable-git-crlf-conversion.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Disable automatic line ending conversion, which is enabled by default on
+# GitHub's Windows image. Having the conversion enabled caused regressions both
+# in our test suite (it broke miri tests) and in the ecosystem, since we
+# started shipping install scripts with CRLF endings instead of the old LF.
+#
+# Note that we do this a couple times during the build as the PATH and current
+# user/directory change, e.g. when mingw is enabled.
+
+set -euo pipefail
+IFS=$'\n\t'
+
+git config --replace-all --global core.autocrlf false
diff --git a/src/ci/scripts/dump-environment.sh b/src/ci/scripts/dump-environment.sh
new file mode 100755
index 000000000..c6774b52a
--- /dev/null
+++ b/src/ci/scripts/dump-environment.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# This script dumps information about the build environment to stdout.
+
+set -euo pipefail
+IFS=$'\n\t'
+
+echo "environment variables:"
+printenv | sort
+echo
+
+echo "disk usage:"
+df -h
+echo
+
+echo "biggest files in the working dir:"
+set +o pipefail
+du . | sort -nr | head -n100
+set -o pipefail
+echo
diff --git a/src/ci/scripts/enable-docker-ipv6.sh b/src/ci/scripts/enable-docker-ipv6.sh
new file mode 100755
index 000000000..03d5a75e2
--- /dev/null
+++ b/src/ci/scripts/enable-docker-ipv6.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Looks like docker containers have IPv6 disabled by default, so let's turn it
+# on since libstd tests require it
+
+set -euo pipefail
+IFS=$'\n\t'
+
+source "$(cd "$(dirname "$0")" && pwd)/../shared.sh"
+
+if isLinux; then
+    sudo mkdir -p /etc/docker
+    echo '{"ipv6":true,"fixed-cidr-v6":"fd9a:8454:6789:13f7::/64"}' \
+        | sudo tee /etc/docker/daemon.json
+    sudo service docker restart
+fi
diff --git a/src/ci/scripts/install-awscli.sh b/src/ci/scripts/install-awscli.sh
new file mode 100755
index 000000000..3d8f0de7a
--- /dev/null
+++ b/src/ci/scripts/install-awscli.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# This script downloads and installs awscli from the packages mirrored in our
+# own S3 bucket. This follows the recommendations at:
+#
+#     https://packaging.python.org/guides/index-mirrors-and-caches/#caching-with-pip
+#
+# To create a new mirrored copy you can run the command:
+#
+#     pip wheel awscli
+#
+# Before compressing please make sure all the wheels end with `-none-any.whl`.
+# If that's not the case you'll need to remove the non-cross-platform ones and
+# replace them with the .tar.gz downloaded from https://pypi.org. Also make
+# sure it's possible to call this script with both Python 2 and Python 3.
+
+set -euo pipefail
+IFS=$'\n\t'
+
+source "$(cd "$(dirname "$0")" && pwd)/../shared.sh"
+
+MIRROR="${MIRRORS_BASE}/2019-07-27-awscli.tar"
+DEPS_DIR="/tmp/awscli-deps"
+
+pip="pip"
+pipflags=""
+if isLinux; then
+    pip="pip3"
+    pipflags="--user"
+
+    sudo apt-get install -y python3-setuptools python3-wheel
+    ciCommandAddPath "${HOME}/.local/bin"
+fi
+
+mkdir -p "${DEPS_DIR}"
+curl "${MIRROR}" | tar xf - -C "${DEPS_DIR}"
+"${pip}" install ${pipflags} --no-index "--find-links=${DEPS_DIR}" awscli
+rm -rf "${DEPS_DIR}"
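
install-awscli.sh never reaches PyPI at build time: it unpacks a pre-built wheel archive and points pip at it with --no-index --find-links. A rough sketch of both halves of that workflow, with placeholder directory names rather than the real mirror layout:

    # Producing the mirror (done once, outside CI):
    pip wheel awscli -w awscli-deps/
    tar -cf 2019-07-27-awscli.tar -C awscli-deps .

    # Consuming it in CI, fully offline:
    mkdir -p /tmp/awscli-deps
    tar -xf 2019-07-27-awscli.tar -C /tmp/awscli-deps
    pip3 install --user --no-index --find-links=/tmp/awscli-deps awscli
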
diff --git a/src/ci/scripts/install-clang.sh b/src/ci/scripts/install-clang.sh
new file mode 100755
index 000000000..0bc8a0389
--- /dev/null
+++ b/src/ci/scripts/install-clang.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+# ignore-tidy-linelength
+# This script installs clang on the local machine. Note that we don't install
+# clang on Linux since its compiler story is just so different. Each container
+# has its own toolchain configured appropriately already.
+
+set -euo pipefail
+IFS=$'\n\t'
+
+source "$(cd "$(dirname "$0")" && pwd)/../shared.sh"
+
+# Update both macOS's and Windows's tarballs when bumping the version here.
+LLVM_VERSION="14.0.5"
+
+if isMacOS; then
+    # If the job selects a specific Xcode version, use that instead of
+    # downloading our own version.
+    if [[ ${USE_XCODE_CLANG-0} -eq 1 ]]; then
+        bindir="$(xcode-select --print-path)/Toolchains/XcodeDefault.xctoolchain/usr/bin"
+    else
+        file="${MIRRORS_BASE}/clang%2Bllvm-${LLVM_VERSION}-x86_64-apple-darwin.tar.xz"
+        retry curl -f "${file}" -o "clang+llvm-${LLVM_VERSION}-x86_64-apple-darwin.tar.xz"
+        tar xJf "clang+llvm-${LLVM_VERSION}-x86_64-apple-darwin.tar.xz"
+        bindir="$(pwd)/clang+llvm-${LLVM_VERSION}-x86_64-apple-darwin/bin"
+    fi
+
+    ciCommandSetEnv CC "${bindir}/clang"
+    ciCommandSetEnv CXX "${bindir}/clang++"
+
+    # macOS 10.15 onwards doesn't have libraries in /usr/include anymore: those
+    # are now located deep into the filesystem, under Xcode's own files. The
+    # native clang is configured to use the correct path, but our custom one
+    # doesn't. This sets the SDKROOT environment variable to the SDK so that
+    # our own clang can figure out the correct include path on its own.
+    ciCommandSetEnv SDKROOT "$(xcrun --sdk macosx --show-sdk-path)"
+
+    # Configure `AR` specifically so rustbuild doesn't try to infer it as
+    # `clang-ar` by accident.
+    ciCommandSetEnv AR "ar"
+elif isWindows && [[ ${CUSTOM_MINGW-0} -ne 1 ]]; then
+
+    if [[ ${WINDOWS_SDK_20348_HACK-0} -eq 1 ]]; then
+        rm -rf '/c/Program Files (x86)/Windows Kits/10/include/10.0.20348.0'
+        mv '/c/Program Files (x86)/Windows Kits/10/include/'10.0.{19041,20348}.0
+    fi
+
+    # If we're compiling for MSVC then we, like most other distribution builders,
+    # switch to clang as the compiler. This'll allow us eventually to enable LTO
+    # amongst LLVM and rustc. Note that we only do this on MSVC as I don't think
+    # clang has an output mode compatible with MinGW that we need. If it does we
+    # should switch to clang for MinGW as well!
+    #
+    # The LLVM installer is an NSIS installer, which we can extract with 7z. We
+    # don't want to run the installer directly; extracting it is more reliable
+    # in CI environments.
+
+    mkdir -p citools/clang-rust
+    cd citools
+    retry curl -f "${MIRRORS_BASE}/LLVM-${LLVM_VERSION}-win64.exe" \
+        -o "LLVM-${LLVM_VERSION}-win64.exe"
+    7z x -oclang-rust/ "LLVM-${LLVM_VERSION}-win64.exe"
+    ciCommandSetEnv RUST_CONFIGURE_ARGS \
+        "${RUST_CONFIGURE_ARGS} --set llvm.clang-cl=$(pwd)/clang-rust/bin/clang-cl.exe"
+fi
+
+if isWindows; then
+    # GitHub image 20210928.2 added LLVM, but it is broken (and we don't want
+    # to use it anyways).
+    rm -rf /c/Program\ Files/LLVM
+fi
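
install-clang.sh relies on ciCommandSetEnv and ciCommandAddPath from src/ci/shared.sh, which this diff doesn't touch. On GitHub Actions those helpers presumably append to the standard GITHUB_ENV and GITHUB_PATH files; the following is a guess at their shape, not the actual implementation:

    # Hypothetical equivalents of the shared.sh helpers.
    ciCommandSetEnv() {
        echo "setting environment variable $1 to $2"
        echo "$1=$2" >> "$GITHUB_ENV"    # visible to every later step
    }
    ciCommandAddPath() {
        echo "adding $1 to the PATH"
        echo "$1" >> "$GITHUB_PATH"
    }
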
diff --git a/src/ci/scripts/install-mingw.sh b/src/ci/scripts/install-mingw.sh
new file mode 100755
index 000000000..1685fbbbb
--- /dev/null
+++ b/src/ci/scripts/install-mingw.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+# If we need to download a custom MinGW, do so here and set the path
+# appropriately.
+#
+# Here we also do a pretty heinous thing which is to mangle the MinGW
+# installation we just downloaded. Currently, as of this writing, we're using
+# MinGW-w64 builds of gcc, and that's currently at 6.3.0. We use 6.3.0 as it
+# appears to be the first version which contains a fix for #40546, builds
+# randomly failing during LLVM due to ar.exe/ranlib.exe failures.
+#
+# Unfortunately, though, 6.3.0 *also* is the first version of MinGW-w64 builds
+# to contain a regression in gdb (#40184). As a result if we were to use the
+# gdb provided (7.11.1) then we would fail all debuginfo tests.
+#
+# In order to fix spurious failures (pretty high priority) we use 6.3.0. To
+# avoid disabling gdb tests we download an *old* version of gdb, specifically
+# that found inside the 6.2.0 distribution. We then overwrite the 6.3.0 gdb
+# with the 6.2.0 gdb to get tests passing.
+#
+# Note that we don't literally overwrite the gdb.exe binary because it appears
+# to just use gdborig.exe, so that's the binary we deal with instead.
+#
+# Otherwise install MinGW through `pacman`

+set -euo pipefail
+IFS=$'\n\t'
+
+source "$(cd "$(dirname "$0")" && pwd)/../shared.sh"
+
+MINGW_ARCHIVE_32="i686-6.3.0-release-posix-dwarf-rt_v5-rev2.7z"
+MINGW_ARCHIVE_64="x86_64-6.3.0-release-posix-seh-rt_v5-rev2.7z"
+
+if isWindows; then
+    case "${CI_JOB_NAME}" in
+        *i686*)
+            bits=32
+            arch=i686
+            mingw_archive="${MINGW_ARCHIVE_32}"
+            ;;
+        *x86_64*)
+            bits=64
+            arch=x86_64
+            mingw_archive="${MINGW_ARCHIVE_64}"
+            ;;
+        *aarch64*)
+            # aarch64 is a cross-compiled target. Use the x86_64
+            # mingw, since that's the host architecture.
+            bits=64
+            arch=x86_64
+            mingw_archive="${MINGW_ARCHIVE_64}"
+            ;;
+        *)
+            echo "src/ci/scripts/install-mingw.sh can't detect the builder's architecture"
+            echo "please tweak it to recognize the builder named '${CI_JOB_NAME}'"
+            exit 1
+            ;;
+    esac
+
+    if [[ "${CUSTOM_MINGW-0}" -ne 1 ]]; then
+        pacman -S --noconfirm --needed mingw-w64-$arch-toolchain mingw-w64-$arch-cmake \
+            mingw-w64-$arch-gcc \
+            mingw-w64-$arch-python # the python package is actually for python3
+        ciCommandAddPath "$(ciCheckoutPath)/msys2/mingw${bits}/bin"
+    else
+        mingw_dir="mingw${bits}"
+
+        curl -o mingw.7z "${MIRRORS_BASE}/${mingw_archive}"
+        7z x -y mingw.7z > /dev/null
+        curl -o "${mingw_dir}/bin/gdborig.exe" "${MIRRORS_BASE}/2017-04-20-${bits}bit-gdborig.exe"
+        ciCommandAddPath "$(pwd)/${mingw_dir}/bin"
+    fi
+fi
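
Everything in install-mingw.sh keys off CI_JOB_NAME: a job name containing i686 selects the 32-bit archive, while x86_64 and aarch64 names both end up on the 64-bit x86_64 host toolchain. A tiny local check of which branch a given name hits (the job names below are examples, not an exhaustive list):

    for CI_JOB_NAME in dist-i686-mingw dist-x86_64-mingw dist-aarch64-msvc; do
        case "${CI_JOB_NAME}" in
            *i686*)    echo "${CI_JOB_NAME}: 32-bit MinGW" ;;
            *x86_64*)  echo "${CI_JOB_NAME}: 64-bit MinGW" ;;
            *aarch64*) echo "${CI_JOB_NAME}: 64-bit MinGW (x86_64 host)" ;;
        esac
    done
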
[[ -f "${python_home}/python3.exe" ]]; then + cp "${python_home}/python.exe" "${python_home}/python3.exe" + fi + ciCommandAddPath "C:\\hostedtoolcache\\windows\\Python\\${native_python_version}\\x64" + ciCommandAddPath "C:\\hostedtoolcache\\windows\\Python\\${native_python_version}\\x64\\Scripts" +fi diff --git a/src/ci/scripts/install-ninja.sh b/src/ci/scripts/install-ninja.sh new file mode 100755 index 000000000..b8261d8a6 --- /dev/null +++ b/src/ci/scripts/install-ninja.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Note that this is originally from the github releases patch of Ninja + +set -euo pipefail +IFS=$'\n\t' + +source "$(cd "$(dirname "$0")" && pwd)/../shared.sh" + +if isWindows; then + mkdir ninja + curl -o ninja.zip "${MIRRORS_BASE}/2017-03-15-ninja-win.zip" + 7z x -oninja ninja.zip + rm ninja.zip + ciCommandSetEnv "RUST_CONFIGURE_ARGS" "${RUST_CONFIGURE_ARGS} --enable-ninja" + ciCommandAddPath "$(pwd)/ninja" +fi diff --git a/src/ci/scripts/install-sccache.sh b/src/ci/scripts/install-sccache.sh new file mode 100755 index 000000000..e143152f3 --- /dev/null +++ b/src/ci/scripts/install-sccache.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# This script installs sccache on the local machine. Note that we don't install +# sccache on Linux since it's installed elsewhere through all the containers. + +set -euo pipefail +IFS=$'\n\t' + +source "$(cd "$(dirname "$0")" && pwd)/../shared.sh" + +if isMacOS; then + curl -fo /usr/local/bin/sccache "${MIRRORS_BASE}/2021-08-25-sccache-v0.2.15-x86_64-apple-darwin" + chmod +x /usr/local/bin/sccache +elif isWindows; then + mkdir -p sccache + curl -fo sccache/sccache.exe "${MIRRORS_BASE}/2018-04-26-sccache-x86_64-pc-windows-msvc" + ciCommandAddPath "$(pwd)/sccache" +fi + +# FIXME: we should probably install sccache outside the containers and then +# mount it inside the containers so we can centralize all installation here. diff --git a/src/ci/scripts/install-wix.sh b/src/ci/scripts/install-wix.sh new file mode 100755 index 000000000..688f1a49c --- /dev/null +++ b/src/ci/scripts/install-wix.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# We use the WIX toolset to create combined installers for Windows, and these +# binaries are downloaded from https://github.com/wixtoolset/wix3 originally + +set -euo pipefail +IFS=$'\n\t' + +source "$(cd "$(dirname "$0")" && pwd)/../shared.sh" + +if isWindows; then + ciCommandSetEnv WIX "$(pwd)/wix" + + curl -O "${MIRRORS_BASE}/wix311-binaries.zip" + mkdir -p wix/bin + cd wix/bin + 7z x ../../wix311-binaries.zip +fi diff --git a/src/ci/scripts/run-build-from-ci.sh b/src/ci/scripts/run-build-from-ci.sh new file mode 100755 index 000000000..c02117f45 --- /dev/null +++ b/src/ci/scripts/run-build-from-ci.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Start the CI build. You shouldn't run this locally: call either src/ci/run.sh +# or src/ci/docker/run.sh instead. + +set -euo pipefail +IFS=$'\n\t' + +source "$(cd "$(dirname "$0")" && pwd)/../shared.sh" + +export CI="true" +export SRC=. 
diff --git a/src/ci/scripts/run-build-from-ci.sh b/src/ci/scripts/run-build-from-ci.sh
new file mode 100755
index 000000000..c02117f45
--- /dev/null
+++ b/src/ci/scripts/run-build-from-ci.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Start the CI build. You shouldn't run this locally: call either src/ci/run.sh
+# or src/ci/docker/run.sh instead.
+
+set -euo pipefail
+IFS=$'\n\t'
+
+source "$(cd "$(dirname "$0")" && pwd)/../shared.sh"
+
+export CI="true"
+export SRC=.
+
+# Remove any preexisting rustup installation since it can interfere
+# with the cargotest step and its auto-detection of things like Clippy in
+# the environment
+rustup self uninstall -y || true
+if [ -z "${IMAGE+x}" ]; then
+    src/ci/run.sh
+else
+    src/ci/docker/run.sh "${IMAGE}"
+fi
diff --git a/src/ci/scripts/setup-environment.sh b/src/ci/scripts/setup-environment.sh
new file mode 100755
index 000000000..0bc35f932
--- /dev/null
+++ b/src/ci/scripts/setup-environment.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+# This script guesses some environment variables based on the builder name and
+# the current platform, to reduce the amount of variables defined in the CI
+# configuration.
+
+set -euo pipefail
+IFS=$'\n\t'
+
+source "$(cd "$(dirname "$0")" && pwd)/../shared.sh"
+
+# Load extra environment variables
+vars="${EXTRA_VARIABLES-}"
+echo "${vars}" | jq '' >/dev/null # Validate JSON and exit on errors
+for key in $(echo "${vars}" | jq "keys[]" -r); do
+    # On Windows, for whatever reason, $key contains the BOM character in it,
+    # and that messes up `jq ".${key}"`. This line strips the BOM from the key.
+    #
+    # https://unix.stackexchange.com/a/381263
+    key="$(echo "${key}" | sed '1s/^\xEF\xBB\xBF//')"
+
+    echo "adding extra environment variable ${key}"
+    value="$(echo "${vars}" | jq ".${key}" -r)"
+    export "${key}"="${value}"
+    ciCommandSetEnv "${key}" "${value}"
+done
+
+# Builders starting with `dist-` are dist builders, but if they also end with
+# `-alt` they are alternate dist builders.
+if [[ "${CI_JOB_NAME}" = dist-* ]]; then
+    if [[ "${CI_JOB_NAME}" = *-alt ]]; then
+        echo "alternate dist builder detected, setting DEPLOY_ALT=1"
+        ciCommandSetEnv DEPLOY_ALT 1
+    else
+        echo "normal dist builder detected, setting DEPLOY=1"
+        ciCommandSetEnv DEPLOY 1
+    fi
+fi
+
+# All the Linux builds happen inside Docker.
+if isLinux; then
+    if [[ -z "${IMAGE+x}" ]]; then
+        echo "linux builder detected, using docker to run the build"
+        ciCommandSetEnv IMAGE "${CI_JOB_NAME}"
+    else
+        echo "a custom docker image is already set"
+    fi
+fi
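
setup-environment.sh expects EXTRA_VARIABLES to hold a JSON object and turns each key into an exported variable that is also persisted for later steps. A small illustration of that mapping, using an invented payload:

    # Given this job configuration...
    export EXTRA_VARIABLES='{"DIST_REQUIRE_ALL_TOOLS":"1","CODEGEN_BACKENDS":"llvm"}'
    # ...the jq loop above behaves roughly like:
    export DIST_REQUIRE_ALL_TOOLS=1
    export CODEGEN_BACKENDS=llvm
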
diff --git a/src/ci/scripts/should-skip-this.sh b/src/ci/scripts/should-skip-this.sh
new file mode 100755
index 000000000..c863f1b68
--- /dev/null
+++ b/src/ci/scripts/should-skip-this.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Set the SKIP_JOB environment variable if this job is supposed to only run
+# when submodules are updated and they were not. The following time consuming
+# tasks will be skipped when the environment variable is present.
+
+set -euo pipefail
+IFS=$'\n\t'
+
+source "$(cd "$(dirname "$0")" && pwd)/../shared.sh"
+
+if [[ -n "${CI_ONLY_WHEN_SUBMODULES_CHANGED-}" ]]; then
+    git fetch "https://github.com/$GITHUB_REPOSITORY" "$GITHUB_BASE_REF"
+    BASE_COMMIT="$(git merge-base FETCH_HEAD HEAD)"
+
+    echo "Searching for toolstate changes between $BASE_COMMIT and $(git rev-parse HEAD)"
+
+    if git diff "$BASE_COMMIT" | grep --quiet "^index .* 160000"; then
+        # Submodules pseudo-files inside git have the 160000 permissions, so when
+        # those files are present in the diff a submodule was updated.
+        echo "Submodules were updated"
+    elif ! git diff --quiet "$BASE_COMMIT" -- src/tools/clippy src/tools/rustfmt; then
+        # There is not an easy blanket search for subtrees. For now, manually list
+        # the subtrees.
+        echo "Clippy or rustfmt subtrees were updated"
+    elif ! (git diff --quiet "$BASE_COMMIT" -- \
+        src/test/rustdoc-gui \
+        src/librustdoc \
+        src/ci/docker/host-x86_64/x86_64-gnu-tools/Dockerfile \
+        src/ci/docker/host-x86_64/x86_64-gnu-tools/browser-ui-test.version \
+        src/tools/rustdoc-gui); then
+        # There was a change in either rustdoc or in its GUI tests.
+        echo "Rustdoc was updated"
+    else
+        echo "Not executing this job since no submodules nor subtrees were updated"
+        ciCommandSetEnv SKIP_JOB 1
+        exit 0
+    fi
+fi
+
+if [[ -n "${CI_ONLY_WHEN_CHANNEL-}" ]]; then
+    if [[ "${CI_ONLY_WHEN_CHANNEL}" = "$(cat src/ci/channel)" ]]; then
+        echo "The channel is the expected one"
+    else
+        echo "Not executing this job as the channel is not the expected one"
+        ciCommandSetEnv SKIP_JOB 1
+        exit 0
+    fi
+fi
+
+
+echo "Executing the job since there is no skip rule preventing the execution"
+exit 0
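
should-skip-this.sh decides whether submodules changed by looking for gitlink entries in the diff: a submodule pointer is stored with the special 160000 mode, so any bump shows up on an "index" line. A self-contained version of that check (the hashes in the comment are invented):

    # A submodule bump appears in `git diff` output like this:
    #
    #   diff --git a/src/llvm-project b/src/llvm-project
    #   index 1a2b3c4d5e6..6f7a8b9c0d1 160000
    #
    BASE_COMMIT="$(git merge-base origin/master HEAD)"
    git diff "$BASE_COMMIT" | grep --quiet "^index .* 160000" \
        && echo "a submodule was bumped" \
        || echo "no submodule changes"
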
diff --git a/src/ci/scripts/upload-artifacts.sh b/src/ci/scripts/upload-artifacts.sh
new file mode 100755
index 000000000..ffa1859fc
--- /dev/null
+++ b/src/ci/scripts/upload-artifacts.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+# Upload all the artifacts to our S3 bucket. All the files inside ${upload_dir}
+# will be uploaded to the deploy bucket and eventually signed and released in
+# static.rust-lang.org.
+
+set -euo pipefail
+IFS=$'\n\t'
+
+source "$(cd "$(dirname "$0")" && pwd)/../shared.sh"
+
+upload_dir="$(mktemp -d)"
+
+build_dir=build
+if isLinux; then
+    build_dir=obj/build
+fi
+
+# Release tarballs produced by a dist builder.
+if [[ "${DEPLOY-0}" -eq "1" ]] || [[ "${DEPLOY_ALT-0}" -eq "1" ]]; then
+    dist_dir="${build_dir}/dist"
+    rm -rf "${dist_dir}/doc"
+    cp -r "${dist_dir}"/* "${upload_dir}"
+fi
+
+# CPU usage statistics.
+cp cpu-usage.csv "${upload_dir}/cpu-${CI_JOB_NAME}.csv"
+
+# Build metrics generated by x.py.
+cp "${build_dir}/metrics.json" "${upload_dir}/metrics-${CI_JOB_NAME}.json"
+
+# Toolstate data.
+if [[ -n "${DEPLOY_TOOLSTATES_JSON+x}" ]]; then
+    cp /tmp/toolstate/toolstates.json "${upload_dir}/${DEPLOY_TOOLSTATES_JSON}"
+fi
+
+echo "Files that will be uploaded:"
+ls -lah "${upload_dir}"
+echo
+
+deploy_dir="rustc-builds"
+if [[ "${DEPLOY_ALT-0}" -eq "1" ]]; then
+    deploy_dir="rustc-builds-alt"
+fi
+deploy_url="s3://${DEPLOY_BUCKET}/${deploy_dir}/$(ciCommit)"
+
+retry aws s3 cp --storage-class INTELLIGENT_TIERING \
+    --no-progress --recursive --acl public-read "${upload_dir}" "${deploy_url}"
diff --git a/src/ci/scripts/verify-backported-commits.sh b/src/ci/scripts/verify-backported-commits.sh
new file mode 100755
index 000000000..d3da6d1ac
--- /dev/null
+++ b/src/ci/scripts/verify-backported-commits.sh
@@ -0,0 +1,142 @@
+#!/bin/bash
+# Ensure commits in beta are in master & commits in stable are in beta + master.
+set -euo pipefail
+IFS=$'\n\t'
+
+source "$(cd "$(dirname "$0")" && pwd)/../shared.sh"
+
+# We don't care about commits that predate this automation check, so we pass a
+# `<limit>` argument to `git cherry`.
+BETA_LIMIT="53fd98ca776cb875bc9e5514f56b52eb74f9e7a9"
+STABLE_LIMIT="a178d0322ce20e33eac124758e837cbd80a6f633"
+
+verify_backported_commits_main() {
+    ci_base_branch=$(ciBaseBranch)
+
+    if [[ "$ci_base_branch" != "beta" && "$ci_base_branch" != "stable" ]]; then
+        echo 'Skipping. This is only run when merging to the beta or stable branches.'
+        exit 0
+    fi
+
+    if [[ $ci_base_branch == "beta" ]]; then
+        verify_cherries master "$BETA_LIMIT" \
+            || exit 1
+
+    elif [[ $ci_base_branch == "stable" ]]; then
+        (verify_cherries master "$STABLE_LIMIT" \
+            & verify_cherries beta "$STABLE_LIMIT") \
+            || exit 1

+    fi
+}
+
+# Verify all commits in `HEAD` are backports of a commit in <upstream>. See
+# https://git-scm.com/docs/git-cherry for an explanation of the arguments.
+#
+# $1 = <upstream>
+# $2 = <limit>
+verify_cherries() {
+    # commits that lack a `backport-of` comment.
+    local no_backports=()
+    # commits with an incorrect `backport-of` comment.
+    local bad_backports=()
+
+    commits=$(git cherry "origin/$1" HEAD "$2")
+
+    if [[ -z "$commits" ]]; then
+        echo "All commits in \`HEAD\` are present in \`$1\`"
+        return 0
+    fi
+
+    commits=$(echo "$commits" | grep '^\+' | cut -c 3-)
+
+    while read sha; do
+        # Check each commit in <current>..<upstream>
+        backport_sha=$(get_backport "$sha")
+
+        if [[ "$backport_sha" == "nothing" ]]; then
+            echo "✓ \`$sha\` backports nothing"
+            continue
+        fi
+
+        if [[ -z "$backport_sha" ]]; then
+            no_backports+=("$sha")
+            continue
+        fi
+
+        if ! is_in_master "$backport_sha"; then
+            bad_backports+=("$sha")
+            continue
+        fi
+
+        echo "✓ \`$sha\` backports \`$backport_sha\`"
+    done <<< "$commits"
+
+    failure=0
+
+    if [ ${#no_backports[@]} -ne 0 ]; then
+        echo 'Error: Could not find backports for all commits.'
+        echo
+        echo 'All commits in \`HEAD\` are required to have a corresponding upstream commit.'
+        echo 'It looks like the following commits:'
+        echo
+        for commit in "${no_backports[@]}"; do
+            echo "    $commit"
+        done
+        echo
+        echo "do not match any commits in \`$1\`. If this was intended, add the text"
+        echo '\`backport-of: <SHA of a commit already in master>\`'
+        echo 'somewhere in the message of each of these commits.'
+        echo
+        failure=1
+    fi
+
+    if [ ${#bad_backports[@]} -ne 0 ]; then
+        echo 'Error: Found incorrectly marked commits.'
+        echo
+        echo 'The following commits:'
+        echo
+        for commit in "${bad_backports[@]}"; do
+            echo "    $commit"
+        done
+        echo
+        echo 'have commit messages marked \`backport-of: <SHA>\`, but the SHA is not in'
+        echo '\`master\`.'
+        echo
+        failure=1
+    fi
+
+    return $failure
+}
+
+# Get the backport of a commit. It echoes one of:
+#
+# 1. A SHA of the backported commit
+# 2. The string "nothing"
+# 3. An empty string
+#
+# $1 = <sha>
+get_backport() {
+    # This regex is:
+    #
+    # ^.* - throw away any extra starting characters
+    # backport-of: - prefix
+    # \s\? - optional space
+    # \(\) - capture group
+    # [a-f0-9]\+\|nothing - a SHA or the text 'nothing'
+    # .* - throw away any extra ending characters
+    # \1 - replace it with the first match
+    # {s//\1/p;q} - print the first occurrence and quit
+    #
+    git show -s --format=%B "$1" \
+        | sed -n '/^.*backport-of:\s\?\([a-f0-9]\+\|nothing\).*/{s//\1/p;q}'
+}
+
+# Check if a commit is in master.
+#
+# $1 = <sha>
+is_in_master() {
+    git merge-base --is-ancestor "$1" origin/master 2> /dev/null
+}
+
+verify_backported_commits_main
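
verify-backported-commits.sh extracts the referenced SHA from a commit message with the sed expression documented inline above. A quick demonstration of what it pulls out (the message and SHA are invented):

    $ printf 'Fix a beta regression\n\nbackport-of: 1a2b3c4d5e6f\n' \
        | sed -n '/^.*backport-of:\s\?\([a-f0-9]\+\|nothing\).*/{s//\1/p;q}'
    1a2b3c4d5e6f
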
diff --git a/src/ci/scripts/verify-channel.sh b/src/ci/scripts/verify-channel.sh
new file mode 100755
index 000000000..cd28748a4
--- /dev/null
+++ b/src/ci/scripts/verify-channel.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# We want to make sure all PRs are targeting the right branch when they're
+# opened, otherwise we risk (for example) to land a beta-specific change to the
+# master branch. This script ensures the branch of the PR matches the channel.
+
+set -euo pipefail
+IFS=$'\n\t'
+
+source "$(cd "$(dirname "$0")" && pwd)/../shared.sh"
+
+if isCiBranch auto || isCiBranch try || isCiBranch try-perf; then
+    echo "channel verification is only executed on PR builds"
+    exit
+fi
+
+channel=$(cat "$(ciCheckoutPath)/src/ci/channel")
+case "${channel}" in
+    nightly)
+        channel_branch="master"
+        ;;
+    beta)
+        channel_branch="beta"
+        ;;
+    stable)
+        channel_branch="stable"
+        ;;
+    *)
+        echo "error: unknown channel defined in src/ci/channel: ${channel}"
+        exit 1
+esac
+
+branch="$(ciBaseBranch)"
+if [[ "${branch}" != "${channel_branch}" ]]; then
+    echo "error: PRs changing the \`${channel}\` channel should be sent to the \
+\`${channel_branch}\` branch!"
+
+    exit 1
+fi
diff --git a/src/ci/scripts/verify-line-endings.sh b/src/ci/scripts/verify-line-endings.sh
new file mode 100755
index 000000000..f3cac13ea
--- /dev/null
+++ b/src/ci/scripts/verify-line-endings.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# See also the disable for autocrlf, this just checks that it worked.
+#
+# We check both in rust-lang/rust and in a submodule to make sure both are
+# accurate. Submodules are checked out significantly later than the main
+# repository in this script, so settings can (and do!) change between then.
+#
+# Linux (and maybe macOS) builders don't currently have dos2unix so just only
+# run this step on Windows.
+
+set -euo pipefail
+IFS=$'\n\t'
+
+source "$(cd "$(dirname "$0")" && pwd)/../shared.sh"
+
+if isWindows; then
+    # print out the git configuration so we can better investigate failures in
+    # the following
+    git config --list --show-origin
+    dos2unix -ih Cargo.lock src/tools/rust-installer/install-template.sh
+    endings=$(dos2unix -ic Cargo.lock src/tools/rust-installer/install-template.sh)
+    # if endings has non-zero length, error out
+    if [ -n "$endings" ]; then exit 1 ; fi
+fi
diff --git a/src/ci/scripts/verify-stable-version-number.sh b/src/ci/scripts/verify-stable-version-number.sh
new file mode 100755
index 000000000..82eb3833c
--- /dev/null
+++ b/src/ci/scripts/verify-stable-version-number.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# On the stable channel, check whether we're trying to build artifacts with the
+# same version number of a release that's already been published, and fail the
+# build if that's the case.
+#
+# It's a mistake whenever that happens: the release process won't start if it
+# detects a duplicate version number, and the artifacts would have to be
+# rebuilt anyway.
+
+set -euo pipefail
+IFS=$'\n\t'
+
+if [[ "$(cat src/ci/channel)" != "stable" ]]; then
+    echo "This script only works on the stable channel. Skipping the check."
+    exit 0
+fi
+
+version="$(cat src/version)"
+url="https://static.rust-lang.org/dist/channel-rust-${version}.toml"
+
+if curl --silent --fail "${url}" >/dev/null; then
+    echo "The version number ${version} matches an existing release."
+    echo
+    echo "If you're trying to prepare a point release, remember to change the"
+    echo "version number in the src/version file."
+    exit 1
+else
+    echo "The version number ${version} does not match any released version!"
+    exit 0
+fi
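
Finally, verify-stable-version-number.sh treats the existence of the channel manifest on static.rust-lang.org as the signal that a stable version has already shipped; curl --fail exits non-zero on a 404, so the probe doubles as the check. For example (the version numbers are only illustrative):

    # An already-published version: the manifest exists, so the probe succeeds.
    curl --silent --fail "https://static.rust-lang.org/dist/channel-rust-1.66.0.toml" >/dev/null \
        && echo "1.66.0 was already released"
    # An unpublished version: the manifest is missing and curl fails.
    curl --silent --fail "https://static.rust-lang.org/dist/channel-rust-1.99.0.toml" >/dev/null \
        || echo "1.99.0 has not been released"
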