author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
commit     2aa4a82499d4becd2284cdb482213d541b8804dd
tree       b80bf8bf13c3766139fbacc530efd0dd9d54394c /build/build-clang
parent     Initial commit.
Adding upstream version 86.0.1. (upstream/86.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'build/build-clang')
-rw-r--r--  build/build-clang/README                                                          57
-rw-r--r--  build/build-clang/android-mangling-error.patch                                    34
-rw-r--r--  build/build-clang/bug47258-extract-symbols-mbcs.patch                             13
-rwxr-xr-x  build/build-clang/build-clang.py                                                1067
-rw-r--r--  build/build-clang/clang-10-linux64.json                                           27
-rw-r--r--  build/build-clang/clang-11-android.json                                           55
-rw-r--r--  build/build-clang/clang-11-linux64-aarch64-cross.json                             21
-rw-r--r--  build/build-clang/clang-11-linux64.json                                           24
-rw-r--r--  build/build-clang/clang-11-macosx64.json                                          22
-rwxr-xr-x  build/build-clang/clang-11-mingw.json                                             14
-rw-r--r--  build/build-clang/clang-11-win64-2stage.json                                      14
-rw-r--r--  build/build-clang/clang-11-win64.json                                             18
-rw-r--r--  build/build-clang/clang-5.0-linux64.json                                          12
-rw-r--r--  build/build-clang/clang-7-linux64.json                                            19
-rw-r--r--  build/build-clang/clang-linux64.json                                              28
-rw-r--r--  build/build-clang/clang-tidy-ci.patch                                             26
-rw-r--r--  build/build-clang/clang-tidy-external-linux64.json                                17
-rw-r--r--  build/build-clang/clang-tidy-linux64.json                                         16
-rw-r--r--  build/build-clang/clang-tidy-macosx64.json                                        23
-rw-r--r--  build/build-clang/clang-tidy-no-errors.patch                                      12
-rw-r--r--  build/build-clang/clang-tidy-win64.json                                           15
-rw-r--r--  build/build-clang/compiler-rt-cross-compile.patch                                 15
-rw-r--r--  build/build-clang/compiler-rt-no-codesign.patch                                   21
-rw-r--r--  build/build-clang/critical_section_on_gcov_flush-rG02ce9d8ef5a8.patch             75
-rw-r--r--  build/build-clang/downgrade-mangling-error.patch                                  23
-rw-r--r--  build/build-clang/find_symbolizer_linux.patch                                     58
-rw-r--r--  build/build-clang/find_symbolizer_linux_clang_10.patch                            58
-rw-r--r--  build/build-clang/llvmorg-11-init-15486-gfc937806efd-dont-jump-to-landing-pads.patch  100
-rw-r--r--  build/build-clang/llvmorg-11-init-4265-g2dcbdba8540.patch                        106
-rw-r--r--  build/build-clang/llvmorg-11-init-4265-g2dcbdba8540_clang_10.patch               106
-rw-r--r--  build/build-clang/llvmorg-12-init-10926-gb79e990f401-LTO-new-pass-manager.patch   66
-rw-r--r--  build/build-clang/loosen-msvc-detection.patch                                     22
-rw-r--r--  build/build-clang/r350774.patch                                                   14
-rw-r--r--  build/build-clang/rG7e18aeba5062.patch                                           255
-rw-r--r--  build/build-clang/rG7e18aeba5062_clang_10.patch                                  249
-rw-r--r--  build/build-clang/rename_gcov_flush.patch                                         40
-rw-r--r--  build/build-clang/rename_gcov_flush_7.patch                                       14
-rw-r--r--  build/build-clang/rename_gcov_flush_clang_10.patch                                42
-rw-r--r--  build/build-clang/rename_gcov_flush_clang_11.patch                                26
-rw-r--r--  build/build-clang/revert-r362047-and-r362065.patch                                62
-rw-r--r--  build/build-clang/static-llvm-symbolizer.patch                                    12
-rw-r--r--  build/build-clang/tsan-hang-be41a98ac222.patch                                   100
-rw-r--r--  build/build-clang/tsan-hang-be41a98ac222_clang_10.patch                          100
-rw-r--r--  build/build-clang/unpoison-thread-stacks.patch                                    62
-rw-r--r--  build/build-clang/unpoison-thread-stacks_clang_10.patch                           64
45 files changed, 3224 insertions, 0 deletions
diff --git a/build/build-clang/README b/build/build-clang/README
new file mode 100644
index 0000000000..8906886b55
--- /dev/null
+++ b/build/build-clang/README
@@ -0,0 +1,57 @@
+build-clang.py
+==============
+
+A script to build clang from source.
+
+```
+usage: build-clang.py [-h] -c CONFIG [--clean]
+
+optional arguments:
+ -h, --help show this help message and exit
+ -c CONFIG, --config CONFIG
+ Clang configuration file
+ --clean Clean the build directory
+```
+
+Pre-requisites
+--------------
+* Working build toolchain.
+* git
+* CMake
+* Ninja
+* Python 2.7 and 3
+
+Please use the latest available CMake for your platform to avoid surprises.
+
+Config file format
+------------------
+
+build-clang.py accepts a JSON config format with the following fields:
+
+* stages: Use 1, 2, 3 or 4 to select different compiler stages. The default is 3.
+* python_path: Path to the Python 2.7 installation on the machine building clang.
+* gcc_dir: Path to the gcc toolchain installation, only required on Linux.
+* cc: Path to the bootstrapping C compiler.
+* cxx: Path to the bootstrapping C++ compiler.
+* as: Path to the assembler tool.
+* ar: Path to the library archiver tool.
+* ranlib: Path to the ranlib tool (optional).
+* libtool: Path to the libtool tool (optional).
+* ld: Path to the linker.
+* patches: Optional list of patches to apply.
+* build_type: The type of build to make. Supported types: Release, Debug, RelWithDebInfo or MinSizeRel.
+* build_libcxx: Whether to build with libcxx. The default is false.
+* build_clang_tidy: Whether to build clang-tidy with the Mozilla checks imported. The default is false.
+* osx_cross_compile: Whether to invoke CMake for OS X cross compile builds.
+* assertions: Whether to enable LLVM assertions. The default is false.
+* pgo: Whether to build with PGO (requires stages == 4). The default is false.
+
+The revisions are defined in taskcluster/ci/fetch/toolchains.yml. They are usually commit sha1s corresponding to upstream tags.
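+
+For reference, a config for a simple Linux build might look like the following.
+This is a minimal sketch: the paths are illustrative, and the checked-in
+clang-*.json files in this directory are the authoritative examples. Note that
+paths in the config may reference environment variables with `{VAR}`
+placeholders, which build-clang.py expands from the environment.
+
+```
+{
+  "stages": "3",
+  "build_libcxx": true,
+  "build_type": "Release",
+  "assertions": false,
+  "python_path": "/usr/bin/python2.7",
+  "gcc_dir": "{MOZ_FETCHES_DIR}/gcc",
+  "cc": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+  "cxx": "{MOZ_FETCHES_DIR}/gcc/bin/g++",
+  "as": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+  "patches": []
+}
+```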
+
+Environment Variables
+---------------------
+
+The following environment variables are used for cross-compile builds targeting OS X on Linux.
+
+* CROSS_CCTOOLS_PATH: Path to the cctools directory where the cross compiler toolchain is located.
+* CROSS_SYSROOT: Path to the OS X SDK directory for cross compile builds.
diff --git a/build/build-clang/android-mangling-error.patch b/build/build-clang/android-mangling-error.patch
new file mode 100644
index 0000000000..af32f59c05
--- /dev/null
+++ b/build/build-clang/android-mangling-error.patch
@@ -0,0 +1,34 @@
+Work around a segfault in clang's mangling code that is tickled when
+attempting to mangle the declaration:
+ std::__ndk1::__find_detail::__find_exactly_one_checked::__matches
+in the <tuple> header in the Android NDK.
+This codepath is exercised by MozsearchIndexer.cpp (the searchfox
+indexer) when indexing on Android. See also
+https://bugs.llvm.org/show_bug.cgi?id=40747
+
+diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
+index 2dc04f2f3d8..054fc27003d 100644
+--- a/clang/lib/AST/ItaniumMangle.cpp
++++ b/clang/lib/AST/ItaniumMangle.cpp
+@@ -3495,16 +3495,21 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
+ // ::= <expr-primary>
+ // <expr-primary> ::= L <type> <value number> E # integer literal
+ // ::= L <type <value float> E # floating literal
+ // ::= L <mangled-name> E # external name
+ // ::= fpT # 'this' expression
+ QualType ImplicitlyConvertedToType;
+
+ recurse:
++ if (!E) {
++ Out << "MOZ_WE_HACKED_AROUND_BUG_1500941";
++ return;
++ }
++
+ switch (E->getStmtClass()) {
+ case Expr::NoStmtClass:
+ #define ABSTRACT_STMT(Type)
+ #define EXPR(Type, Base)
+ #define STMT(Type, Base) \
+ case Expr::Type##Class:
+ #include "clang/AST/StmtNodes.inc"
+ // fallthrough
diff --git a/build/build-clang/bug47258-extract-symbols-mbcs.patch b/build/build-clang/bug47258-extract-symbols-mbcs.patch
new file mode 100644
index 0000000000..69a95df072
--- /dev/null
+++ b/build/build-clang/bug47258-extract-symbols-mbcs.patch
@@ -0,0 +1,13 @@
+diff --git a/llvm/utils/extract_symbols.py b/llvm/utils/extract_symbols.py
+index 43f603963a2..01fe10d36f0 100755
+--- a/llvm/utils/extract_symbols.py
++++ b/llvm/utils/extract_symbols.py
+@@ -32,7 +32,7 @@ import argparse
+ def dumpbin_get_symbols(lib):
+ process = subprocess.Popen(['dumpbin','/symbols',lib], bufsize=1,
+ stdout=subprocess.PIPE, stdin=subprocess.PIPE,
+- universal_newlines=True)
++ universal_newlines=True, encoding='mbcs')
+ process.stdin.close()
+ for line in process.stdout:
+ # Look for external symbols that are defined in some section
diff --git a/build/build-clang/build-clang.py b/build/build-clang/build-clang.py
new file mode 100755
index 0000000000..c935e3dfc8
--- /dev/null
+++ b/build/build-clang/build-clang.py
@@ -0,0 +1,1067 @@
+#!/usr/bin/python3
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Only necessary for flake8 to be happy...
+from __future__ import print_function
+
+import os
+import os.path
+import shutil
+import subprocess
+import platform
+import json
+import argparse
+import fnmatch
+import glob
+import errno
+import re
+import sys
+import tarfile
+from contextlib import contextmanager
+from distutils.dir_util import copy_tree
+
+from shutil import which
+
+import zstandard
+
+
+def symlink(source, link_name):
+ os_symlink = getattr(os, "symlink", None)
+ if callable(os_symlink):
+ os_symlink(source, link_name)
+ else:
+ if os.path.isdir(source):
+ # Fall back to copying the directory :(
+ copy_tree(source, link_name)
+
+
+def check_run(args):
+ print(" ".join(args), file=sys.stderr, flush=True)
+ if args[0] == "cmake":
+ # CMake `message(STATUS)` output, including the messages that show up when
+ # source code compile checks fail, appears on stdout, so we only capture that.
+ p = subprocess.Popen(args, stdout=subprocess.PIPE)
+ lines = []
+ for line in p.stdout:
+ lines.append(line)
+ sys.stdout.write(line.decode())
+ sys.stdout.flush()
+ r = p.wait()
+ if r != 0:
+ cmake_output_re = re.compile(b'See also "(.*/CMakeOutput.log)"')
+ cmake_error_re = re.compile(b'See also "(.*/CMakeError.log)"')
+
+ def find_first_match(re):
+ for l in lines:
+ match = re.search(l)
+ if match:
+ return match
+
+ output_match = find_first_match(cmake_output_re)
+ error_match = find_first_match(cmake_error_re)
+
+ def dump_file(log):
+ with open(log, "rb") as f:
+ print("\nContents of", log, "follow\n", file=sys.stderr)
+ print(f.read(), file=sys.stderr)
+
+ if output_match:
+ dump_file(output_match.group(1))
+ if error_match:
+ dump_file(error_match.group(1))
+ else:
+ r = subprocess.call(args)
+ assert r == 0
+
+
+def run_in(path, args):
+ with chdir(path):
+ check_run(args)
+
+
+@contextmanager
+def chdir(path):
+ d = os.getcwd()
+ print('cd "%s"' % path, file=sys.stderr)
+ os.chdir(path)
+ try:
+ yield
+ finally:
+ print('cd "%s"' % d, file=sys.stderr)
+ os.chdir(d)
+
+
+def patch(patch, srcdir):
+ patch = os.path.realpath(patch)
+ check_run(["patch", "-d", srcdir, "-p1", "-i", patch, "--fuzz=0", "-s"])
+
+
+def import_clang_tidy(source_dir, build_clang_tidy_alpha, build_clang_tidy_external):
+ clang_plugin_path = os.path.join(os.path.dirname(sys.argv[0]), "..", "clang-plugin")
+ clang_tidy_path = os.path.join(source_dir, "clang-tools-extra/clang-tidy")
+ sys.path.append(clang_plugin_path)
+ from import_mozilla_checks import do_import
+
+ import_options = {
+ "alpha": build_clang_tidy_alpha,
+ "external": build_clang_tidy_external,
+ }
+ do_import(clang_plugin_path, clang_tidy_path, import_options)
+
+
+def build_package(package_build_dir, cmake_args):
+ if not os.path.exists(package_build_dir):
+ os.mkdir(package_build_dir)
+ # If CMake has already been run, it may have been run with different
+ # arguments, so we need to re-run it. Make sure the cached copy of the
+ # previous CMake run is cleared before running it again.
+ if os.path.exists(package_build_dir + "/CMakeCache.txt"):
+ os.remove(package_build_dir + "/CMakeCache.txt")
+ if os.path.exists(package_build_dir + "/CMakeFiles"):
+ shutil.rmtree(package_build_dir + "/CMakeFiles")
+
+ run_in(package_build_dir, ["cmake"] + cmake_args)
+ run_in(package_build_dir, ["ninja", "install", "-v"])
+
+
+@contextmanager
+def updated_env(env):
+ old_env = os.environ.copy()
+ os.environ.update(env)
+ yield
+ os.environ.clear()
+ os.environ.update(old_env)
+
+
+def build_tar_package(name, base, directory):
+ name = os.path.realpath(name)
+ print("tarring {} from {}/{}".format(name, base, directory), file=sys.stderr)
+ assert name.endswith(".tar.zst")
+
+ cctx = zstandard.ZstdCompressor()
+ with open(name, "wb") as f, cctx.stream_writer(f) as z:
+ with tarfile.open(mode="w|", fileobj=z) as tf:
+ with chdir(base):
+ tf.add(directory)
+
+
+def mkdir_p(path):
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ if e.errno != errno.EEXIST or not os.path.isdir(path):
+ raise
+
+
+def delete(path):
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ else:
+ try:
+ os.unlink(path)
+ except Exception:
+ pass
+
+
+def install_libgcc(gcc_dir, clang_dir, is_final_stage):
+ gcc_bin_dir = os.path.join(gcc_dir, "bin")
+
+ # Copy over gcc toolchain bits that clang looks for, to ensure that
+ # clang is using a consistent version of ld, since the system ld may
+ # be incompatible with the output clang produces. But copy it to a
+ # target-specific directory so a cross-compiler to Mac doesn't pick
+ # up the (Linux-specific) ld with disastrous results.
+ #
+ # Only install this for the bootstrap process; we expect any consumers of
+ # the newly-built toolchain to provide an appropriate ld themselves.
+ if not is_final_stage:
+ x64_bin_dir = os.path.join(clang_dir, "x86_64-unknown-linux-gnu", "bin")
+ mkdir_p(x64_bin_dir)
+ shutil.copy2(os.path.join(gcc_bin_dir, "ld"), x64_bin_dir)
+
+ out = subprocess.check_output(
+ [os.path.join(gcc_bin_dir, "gcc"), "-print-libgcc-file-name"]
+ )
+
+ libgcc_dir = os.path.dirname(out.decode().rstrip())
+ clang_lib_dir = os.path.join(
+ clang_dir,
+ "lib",
+ "gcc",
+ "x86_64-unknown-linux-gnu",
+ os.path.basename(libgcc_dir),
+ )
+ mkdir_p(clang_lib_dir)
+ copy_tree(libgcc_dir, clang_lib_dir, preserve_symlinks=True)
+ libgcc_dir = os.path.join(gcc_dir, "lib64")
+ clang_lib_dir = os.path.join(clang_dir, "lib")
+ copy_tree(libgcc_dir, clang_lib_dir, preserve_symlinks=True)
+ libgcc_dir = os.path.join(gcc_dir, "lib32")
+ clang_lib_dir = os.path.join(clang_dir, "lib32")
+ copy_tree(libgcc_dir, clang_lib_dir, preserve_symlinks=True)
+ include_dir = os.path.join(gcc_dir, "include")
+ clang_include_dir = os.path.join(clang_dir, "include")
+ copy_tree(include_dir, clang_include_dir, preserve_symlinks=True)
+
+
+def install_import_library(build_dir, clang_dir):
+ shutil.copy2(
+ os.path.join(build_dir, "lib", "clang.lib"), os.path.join(clang_dir, "lib")
+ )
+
+
+def install_asan_symbols(build_dir, clang_dir):
+ lib_path_pattern = os.path.join("lib", "clang", "*.*.*", "lib", "windows")
+ src_path = glob.glob(
+ os.path.join(build_dir, lib_path_pattern, "clang_rt.asan_dynamic-*.pdb")
+ )
+ dst_path = glob.glob(os.path.join(clang_dir, lib_path_pattern))
+
+ if len(src_path) != 1:
+ raise Exception("Source path pattern did not resolve uniquely")
+
+ if len(dst_path) != 1:
+ raise Exception("Destination path pattern did not resolve uniquely")
+
+ shutil.copy2(src_path[0], dst_path[0])
+
+
+def is_darwin():
+ return platform.system() == "Darwin"
+
+
+def is_linux():
+ return platform.system() == "Linux"
+
+
+def is_windows():
+ return platform.system() == "Windows"
+
+
+def build_one_stage(
+ cc,
+ cxx,
+ asm,
+ ld,
+ ar,
+ ranlib,
+ libtool,
+ src_dir,
+ stage_dir,
+ package_name,
+ build_libcxx,
+ osx_cross_compile,
+ build_type,
+ assertions,
+ python_path,
+ gcc_dir,
+ libcxx_include_dir,
+ build_wasm,
+ compiler_rt_source_dir=None,
+ runtimes_source_link=None,
+ compiler_rt_source_link=None,
+ is_final_stage=False,
+ android_targets=None,
+ extra_targets=None,
+ pgo_phase=None,
+):
+ if is_final_stage and (android_targets or extra_targets):
+ # Linking compiler-rt under "runtimes" activates LLVM_RUNTIME_TARGETS
+ # and related arguments.
+ symlink(compiler_rt_source_dir, runtimes_source_link)
+ try:
+ os.unlink(compiler_rt_source_link)
+ except Exception:
+ pass
+
+ if not os.path.exists(stage_dir):
+ os.mkdir(stage_dir)
+
+ build_dir = stage_dir + "/build"
+ inst_dir = stage_dir + "/" + package_name
+
+ # cmake doesn't deal well with backslashes in paths.
+ def slashify_path(path):
+ return path.replace("\\", "/")
+
+ def cmake_base_args(cc, cxx, asm, ld, ar, ranlib, libtool, inst_dir):
+ machine_targets = "X86;ARM;AArch64" if is_final_stage else "X86"
+ cmake_args = [
+ "-GNinja",
+ "-DCMAKE_C_COMPILER=%s" % slashify_path(cc[0]),
+ "-DCMAKE_CXX_COMPILER=%s" % slashify_path(cxx[0]),
+ "-DCMAKE_ASM_COMPILER=%s" % slashify_path(asm[0]),
+ "-DCMAKE_LINKER=%s" % slashify_path(ld[0]),
+ "-DCMAKE_AR=%s" % slashify_path(ar),
+ "-DCMAKE_C_FLAGS=%s" % " ".join(cc[1:]),
+ "-DCMAKE_CXX_FLAGS=%s" % " ".join(cxx[1:]),
+ "-DCMAKE_ASM_FLAGS=%s" % " ".join(asm[1:]),
+ "-DCMAKE_EXE_LINKER_FLAGS=%s" % " ".join(ld[1:]),
+ "-DCMAKE_SHARED_LINKER_FLAGS=%s" % " ".join(ld[1:]),
+ "-DCMAKE_BUILD_TYPE=%s" % build_type,
+ "-DCMAKE_INSTALL_PREFIX=%s" % inst_dir,
+ "-DLLVM_TARGETS_TO_BUILD=%s" % machine_targets,
+ "-DLLVM_ENABLE_ASSERTIONS=%s" % ("ON" if assertions else "OFF"),
+ "-DPYTHON_EXECUTABLE=%s" % slashify_path(python_path),
+ "-DLLVM_TOOL_LIBCXX_BUILD=%s" % ("ON" if build_libcxx else "OFF"),
+ "-DLLVM_ENABLE_BINDINGS=OFF",
+ ]
+ if "TASK_ID" in os.environ:
+ cmake_args += [
+ "-DCLANG_REPOSITORY_STRING=taskcluster-%s" % os.environ["TASK_ID"],
+ ]
+ if not is_final_stage:
+ cmake_args += ["-DLLVM_ENABLE_PROJECTS=clang;compiler-rt"]
+ if build_wasm:
+ cmake_args += ["-DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly"]
+ if is_linux():
+ cmake_args += ["-DLLVM_BINUTILS_INCDIR=%s/include" % gcc_dir]
+ cmake_args += ["-DLLVM_ENABLE_LIBXML2=FORCE_ON"]
+ if is_windows():
+ cmake_args.insert(-1, "-DLLVM_EXPORT_SYMBOLS_FOR_PLUGINS=ON")
+ cmake_args.insert(-1, "-DLLVM_USE_CRT_RELEASE=MT")
+ else:
+ # libllvm as a shared library is not supported on Windows
+ cmake_args += ["-DLLVM_LINK_LLVM_DYLIB=ON"]
+ if ranlib is not None:
+ cmake_args += ["-DCMAKE_RANLIB=%s" % slashify_path(ranlib)]
+ if libtool is not None:
+ cmake_args += ["-DCMAKE_LIBTOOL=%s" % slashify_path(libtool)]
+ if osx_cross_compile:
+ cmake_args += [
+ "-DCMAKE_SYSTEM_NAME=Darwin",
+ "-DCMAKE_SYSTEM_VERSION=10.10",
+ # Xray requires an OSX 10.12 SDK (https://bugs.llvm.org/show_bug.cgi?id=38959)
+ "-DCOMPILER_RT_BUILD_XRAY=OFF",
+ "-DLIBCXXABI_LIBCXX_INCLUDES=%s" % libcxx_include_dir,
+ "-DCMAKE_OSX_SYSROOT=%s" % slashify_path(os.getenv("CROSS_SYSROOT")),
+ "-DCMAKE_FIND_ROOT_PATH=%s" % slashify_path(os.getenv("CROSS_SYSROOT")),
+ "-DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=NEVER",
+ "-DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=ONLY",
+ "-DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=ONLY",
+ "-DCMAKE_MACOSX_RPATH=ON",
+ "-DCMAKE_OSX_ARCHITECTURES=x86_64",
+ "-DDARWIN_osx_ARCHS=x86_64",
+ "-DDARWIN_osx_SYSROOT=%s" % slashify_path(os.getenv("CROSS_SYSROOT")),
+ "-DLLVM_DEFAULT_TARGET_TRIPLE=x86_64-apple-darwin",
+ ]
+ # Starting in LLVM 11 (which requires SDK 10.12) the build tries to
+ # detect the SDK version by calling xcrun. Cross-compiles don't have
+ # an xcrun, so we have to set the version explicitly.
+ if "MacOSX10.12.sdk" in os.getenv("CROSS_SYSROOT"):
+ cmake_args += [
+ "-DDARWIN_macosx_OVERRIDE_SDK_VERSION=10.12",
+ ]
+ if pgo_phase == "gen":
+ # Per https://releases.llvm.org/10.0.0/docs/HowToBuildWithPGO.html
+ cmake_args += [
+ "-DLLVM_BUILD_INSTRUMENTED=IR",
+ "-DLLVM_BUILD_RUNTIME=No",
+ ]
+ if pgo_phase == "use":
+ cmake_args += [
+ "-DLLVM_PROFDATA_FILE=%s/merged.profdata" % stage_dir,
+ ]
+ return cmake_args
+
+ cmake_args = []
+
+ runtime_targets = []
+ if is_final_stage:
+ if android_targets:
+ runtime_targets = list(sorted(android_targets.keys()))
+ if extra_targets:
+ runtime_targets.extend(sorted(extra_targets))
+
+ if runtime_targets:
+ cmake_args += [
+ "-DLLVM_BUILTIN_TARGETS=%s" % ";".join(runtime_targets),
+ "-DLLVM_RUNTIME_TARGETS=%s" % ";".join(runtime_targets),
+ ]
+
+ for target in runtime_targets:
+ cmake_args += [
+ "-DRUNTIMES_%s_COMPILER_RT_BUILD_PROFILE=ON" % target,
+ "-DRUNTIMES_%s_COMPILER_RT_BUILD_SANITIZERS=ON" % target,
+ "-DRUNTIMES_%s_COMPILER_RT_BUILD_XRAY=OFF" % target,
+ "-DRUNTIMES_%s_SANITIZER_ALLOW_CXXABI=OFF" % target,
+ "-DRUNTIMES_%s_COMPILER_RT_BUILD_LIBFUZZER=OFF" % target,
+ "-DRUNTIMES_%s_COMPILER_RT_INCLUDE_TESTS=OFF" % target,
+ "-DRUNTIMES_%s_LLVM_ENABLE_PER_TARGET_RUNTIME_DIR=OFF" % target,
+ "-DRUNTIMES_%s_LLVM_INCLUDE_TESTS=OFF" % target,
+ ]
+
+ # The above code flipped switches to build various runtime libraries on
+ # Android; we now have to provide all the necessary compiler switches to
+ # make that work.
+ if is_final_stage and android_targets:
+ cmake_args += [
+ "-DLLVM_LIBDIR_SUFFIX=64",
+ ]
+
+ android_link_flags = "-fuse-ld=lld"
+
+ for target, cfg in android_targets.items():
+ sysroot_dir = cfg["ndk_sysroot"].format(**os.environ)
+ android_gcc_dir = cfg["ndk_toolchain"].format(**os.environ)
+ android_include_dirs = cfg["ndk_includes"]
+ api_level = cfg["api_level"]
+
+ android_flags = [
+ "-isystem %s" % d.format(**os.environ) for d in android_include_dirs
+ ]
+ android_flags += ["--gcc-toolchain=%s" % android_gcc_dir]
+ android_flags += ["-D__ANDROID_API__=%s" % api_level]
+
+ # Our flags go last to override any --gcc-toolchain that may have
+ # been set earlier.
+ rt_c_flags = " ".join(cc[1:] + android_flags)
+ rt_cxx_flags = " ".join(cxx[1:] + android_flags)
+ rt_asm_flags = " ".join(asm[1:] + android_flags)
+
+ for kind in ("BUILTINS", "RUNTIMES"):
+ for var, arg in (
+ ("ANDROID", "1"),
+ ("CMAKE_ASM_FLAGS", rt_asm_flags),
+ ("CMAKE_CXX_FLAGS", rt_cxx_flags),
+ ("CMAKE_C_FLAGS", rt_c_flags),
+ ("CMAKE_EXE_LINKER_FLAGS", android_link_flags),
+ ("CMAKE_SHARED_LINKER_FLAGS", android_link_flags),
+ ("CMAKE_SYSROOT", sysroot_dir),
+ ("ANDROID_NATIVE_API_LEVEL", api_level),
+ ):
+ cmake_args += ["-D%s_%s_%s=%s" % (kind, target, var, arg)]
+
+ cmake_args += cmake_base_args(cc, cxx, asm, ld, ar, ranlib, libtool, inst_dir)
+ cmake_args += [src_dir]
+ build_package(build_dir, cmake_args)
+
+ if is_linux():
+ install_libgcc(gcc_dir, inst_dir, is_final_stage)
+ # For some reason the import library clang.lib of clang.exe is not
+ # installed, so we copy it ourselves.
+ if is_windows():
+ # The compiler-rt cmake scripts don't allow building it for multiple
+ # targets at once on Windows, so manually build the 32-bit compiler-rt
+ # during the final stage.
+ build_32_bit = False
+ if is_final_stage:
+ # Only build the 32-bit compiler-rt when we originally built for
+ # 64-bit, which we detect through the contents of the LIB
+ # environment variable, which we also adjust for a 32-bit build
+ # at the same time.
+ old_lib = os.environ["LIB"]
+ new_lib = []
+ for l in old_lib.split(os.pathsep):
+ if l.endswith("x64"):
+ l = l[:-3] + "x86"
+ build_32_bit = True
+ elif l.endswith("amd64"):
+ l = l[:-5]
+ build_32_bit = True
+ new_lib.append(l)
+ if build_32_bit:
+ os.environ["LIB"] = os.pathsep.join(new_lib)
+ compiler_rt_build_dir = stage_dir + "/compiler-rt"
+ compiler_rt_inst_dir = inst_dir + "/lib/clang/"
+ subdirs = os.listdir(compiler_rt_inst_dir)
+ assert len(subdirs) == 1
+ compiler_rt_inst_dir += subdirs[0]
+ cmake_args = cmake_base_args(
+ [os.path.join(inst_dir, "bin", "clang-cl.exe"), "-m32"] + cc[1:],
+ [os.path.join(inst_dir, "bin", "clang-cl.exe"), "-m32"] + cxx[1:],
+ [os.path.join(inst_dir, "bin", "clang-cl.exe"), "-m32"] + asm[1:],
+ ld,
+ ar,
+ ranlib,
+ libtool,
+ compiler_rt_inst_dir,
+ )
+ cmake_args += [
+ "-DLLVM_CONFIG_PATH=%s"
+ % slashify_path(os.path.join(inst_dir, "bin", "llvm-config")),
+ os.path.join(src_dir, "projects", "compiler-rt"),
+ ]
+ build_package(compiler_rt_build_dir, cmake_args)
+ os.environ["LIB"] = old_lib
+ if is_final_stage:
+ install_import_library(build_dir, inst_dir)
+ install_asan_symbols(build_dir, inst_dir)
+
+
+# Return the absolute path of a build tool. We first look to see if the
+# variable is defined in the config file; if it is an absolute path, we make
+# sure it points to an existing tool, and otherwise we look for a program on
+# $PATH with that name (falling back to "key" when the variable is not set).
+#
+# This expects the name of the key in the config file to match the name of
+# the tool in the default toolchain on the system (for example, "ld" on Unix
+# and "link" on Windows).
+def get_tool(config, key):
+ f = None
+ if key in config:
+ f = config[key].format(**os.environ)
+ if os.path.isabs(f):
+ if not os.path.exists(f):
+ raise ValueError("%s must point to an existing path" % key)
+ return f
+
+ # Assume that we have the name of some program that should be on PATH.
+ tool = which(f) if f else which(key)
+ if not tool:
+ raise ValueError("%s not found on PATH" % (f or key))
+ return tool
+
+
+# This function is intended to be called on the final build directory when
+# building clang-tidy. The clang-format binaries are included as well, since
+# they can be used in conjunction with clang-tidy. We also ship clangd as a
+# separate binary; it implements the language server protocol and can be used
+# as a plugin in `vscode`.
+# The function's job is to remove all of the files which won't be used for
+# clang-tidy or clang-format, to reduce the download size. Currently, when this
+# function finishes its job, it will leave final_dir with a layout like this:
+#
+# clang/
+# bin/
+# clang-apply-replacements
+# clang-format
+# clang-tidy
+# clangd
+# include/
+# * (nothing will be deleted here)
+# lib/
+# clang/
+# 4.0.0/
+# include/
+# * (nothing will be deleted here)
+# share/
+# clang/
+# clang-format-diff.py
+# clang-tidy-diff.py
+# run-clang-tidy.py
+def prune_final_dir_for_clang_tidy(final_dir, osx_cross_compile):
+ # Make sure we only have what we expect.
+ dirs = [
+ "bin",
+ "include",
+ "lib",
+ "lib32",
+ "libexec",
+ "msbuild-bin",
+ "share",
+ "tools",
+ ]
+ if is_linux():
+ dirs.append("x86_64-unknown-linux-gnu")
+ for f in glob.glob("%s/*" % final_dir):
+ if os.path.basename(f) not in dirs:
+ raise Exception("Found unknown file %s in the final directory" % f)
+ if not os.path.isdir(f):
+ raise Exception("Expected %s to be a directory" % f)
+
+ kept_binaries = ["clang-apply-replacements", "clang-format", "clang-tidy", "clangd"]
+ re_clang_tidy = re.compile(r"^(" + "|".join(kept_binaries) + r")(\.exe)?$", re.I)
+ for f in glob.glob("%s/bin/*" % final_dir):
+ if re_clang_tidy.search(os.path.basename(f)) is None:
+ delete(f)
+
+ # Keep include/ intact.
+
+ # Remove the target-specific files.
+ if is_linux():
+ if os.path.exists(os.path.join(final_dir, "x86_64-unknown-linux-gnu")):
+ shutil.rmtree(os.path.join(final_dir, "x86_64-unknown-linux-gnu"))
+
+ # In lib/, only keep lib/clang/N.M.O/include and the LLVM shared library.
+ re_ver_num = re.compile(r"^\d+\.\d+\.\d+$", re.I)
+ for f in glob.glob("%s/lib/*" % final_dir):
+ name = os.path.basename(f)
+ if name == "clang":
+ continue
+ if osx_cross_compile and name in ["libLLVM.dylib", "libclang-cpp.dylib"]:
+ continue
+ if is_linux() and (
+ fnmatch.fnmatch(name, "libLLVM*.so")
+ or fnmatch.fnmatch(name, "libclang-cpp.so*")
+ ):
+ continue
+ delete(f)
+ for f in glob.glob("%s/lib/clang/*" % final_dir):
+ if re_ver_num.search(os.path.basename(f)) is None:
+ delete(f)
+ for f in glob.glob("%s/lib/clang/*/*" % final_dir):
+ if os.path.basename(f) != "include":
+ delete(f)
+
+ # Completely remove libexec/, msbuild-bin and tools, if they exist.
+ shutil.rmtree(os.path.join(final_dir, "libexec"))
+ for d in ("msbuild-bin", "tools"):
+ d = os.path.join(final_dir, d)
+ if os.path.exists(d):
+ shutil.rmtree(d)
+
+ # In share/, only keep share/clang/*tidy*
+ re_clang_tidy = re.compile(r"format|tidy", re.I)
+ for f in glob.glob("%s/share/*" % final_dir):
+ if os.path.basename(f) != "clang":
+ delete(f)
+ for f in glob.glob("%s/share/clang/*" % final_dir):
+ if re_clang_tidy.search(os.path.basename(f)) is None:
+ delete(f)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-c",
+ "--config",
+ required=True,
+ type=argparse.FileType("r"),
+ help="Clang configuration file",
+ )
+ parser.add_argument(
+ "--clean", required=False, action="store_true", help="Clean the build directory"
+ )
+ parser.add_argument(
+ "--skip-tar",
+ required=False,
+ action="store_true",
+ help="Skip tar packaging stage",
+ )
+ parser.add_argument(
+ "--skip-checkout",
+ required=False,
+ action="store_true",
+ help="Do not checkout/revert source",
+ )
+
+ args = parser.parse_args()
+
+ if not os.path.exists("llvm/README.txt"):
+ raise Exception(
+ "The script must be run from the root directory of the llvm-project tree"
+ )
+ source_dir = os.getcwd()
+ build_dir = source_dir + "/build"
+
+ if args.clean:
+ shutil.rmtree(build_dir)
+ os.sys.exit(0)
+
+ llvm_source_dir = source_dir + "/llvm"
+ extra_source_dir = source_dir + "/clang-tools-extra"
+ clang_source_dir = source_dir + "/clang"
+ lld_source_dir = source_dir + "/lld"
+ compiler_rt_source_dir = source_dir + "/compiler-rt"
+ libcxx_source_dir = source_dir + "/libcxx"
+ libcxxabi_source_dir = source_dir + "/libcxxabi"
+
+ exe_ext = ""
+ if is_windows():
+ exe_ext = ".exe"
+
+ cc_name = "clang"
+ cxx_name = "clang++"
+ if is_windows():
+ cc_name = "clang-cl"
+ cxx_name = "clang-cl"
+
+ config_dir = os.path.dirname(args.config.name)
+ config = json.load(args.config)
+
+ stages = 3
+ if "stages" in config:
+ stages = int(config["stages"])
+ if stages not in (1, 2, 3, 4):
+ raise ValueError("We only know how to build 1, 2, 3, or 4 stages.")
+ pgo = False
+ if "pgo" in config:
+ pgo = config["pgo"]
+ if pgo not in (True, False):
+ raise ValueError("Only boolean values are accepted for pgo.")
+ if pgo and stages != 4:
+ raise ValueError("PGO is only supported in 4-stage builds.")
+ build_type = "Release"
+ if "build_type" in config:
+ build_type = config["build_type"]
+ if build_type not in ("Release", "Debug", "RelWithDebInfo", "MinSizeRel"):
+ raise ValueError(
+ "We only know how to do Release, Debug, RelWithDebInfo or "
+ "MinSizeRel builds"
+ )
+ build_libcxx = False
+ if "build_libcxx" in config:
+ build_libcxx = config["build_libcxx"]
+ if build_libcxx not in (True, False):
+ raise ValueError("Only boolean values are accepted for build_libcxx.")
+ build_wasm = False
+ if "build_wasm" in config:
+ build_wasm = config["build_wasm"]
+ if build_wasm not in (True, False):
+ raise ValueError("Only boolean values are accepted for build_wasm.")
+ build_clang_tidy = False
+ if "build_clang_tidy" in config:
+ build_clang_tidy = config["build_clang_tidy"]
+ if build_clang_tidy not in (True, False):
+ raise ValueError("Only boolean values are accepted for build_clang_tidy.")
+ build_clang_tidy_alpha = False
+ # check for build_clang_tidy_alpha only if build_clang_tidy is true
+ if build_clang_tidy and "build_clang_tidy_alpha" in config:
+ build_clang_tidy_alpha = config["build_clang_tidy_alpha"]
+ if build_clang_tidy_alpha not in (True, False):
+ raise ValueError(
+ "Only boolean values are accepted for build_clang_tidy_alpha."
+ )
+ build_clang_tidy_external = False
+ # check for build_clang_tidy_external only if build_clang_tidy is true
+ if build_clang_tidy and "build_clang_tidy_external" in config:
+ build_clang_tidy_external = config["build_clang_tidy_external"]
+ if build_clang_tidy_external not in (True, False):
+ raise ValueError(
+ "Only boolean values are accepted for build_clang_tidy_external."
+ )
+ osx_cross_compile = False
+ if "osx_cross_compile" in config:
+ osx_cross_compile = config["osx_cross_compile"]
+ if osx_cross_compile not in (True, False):
+ raise ValueError("Only boolean values are accepted for osx_cross_compile.")
+ if osx_cross_compile and not is_linux():
+ raise ValueError("osx_cross_compile can only be used on Linux.")
+ assertions = False
+ if "assertions" in config:
+ assertions = config["assertions"]
+ if assertions not in (True, False):
+ raise ValueError("Only boolean values are accepted for assertions.")
+ python_path = None
+ if "python_path" not in config:
+ raise ValueError("Config file needs to set python_path")
+ python_path = config["python_path"]
+ gcc_dir = None
+ if "gcc_dir" in config:
+ gcc_dir = config["gcc_dir"].format(**os.environ)
+ if not os.path.exists(gcc_dir):
+ raise ValueError("gcc_dir must point to an existing path")
+ ndk_dir = None
+ android_targets = None
+ if "android_targets" in config:
+ android_targets = config["android_targets"]
+ for attr in ("ndk_toolchain", "ndk_sysroot", "ndk_includes", "api_level"):
+ for target, cfg in android_targets.items():
+ if attr not in cfg:
+ raise ValueError(
+ "must specify '%s' as a key for android target: %s"
+ % (attr, target)
+ )
+ extra_targets = None
+ if "extra_targets" in config:
+ extra_targets = config["extra_targets"]
+ if not isinstance(extra_targets, list):
+ raise ValueError("extra_targets must be a list")
+ if not all(isinstance(t, str) for t in extra_targets):
+ raise ValueError("members of extra_targets should be strings")
+
+ if is_linux() and gcc_dir is None:
+ raise ValueError("Config file needs to set gcc_dir")
+
+ if is_darwin() or osx_cross_compile:
+ os.environ["MACOSX_DEPLOYMENT_TARGET"] = "10.12"
+
+ cc = get_tool(config, "cc")
+ cxx = get_tool(config, "cxx")
+ asm = get_tool(config, "ml" if is_windows() else "as")
+ ld = get_tool(config, "link" if is_windows() else "ld")
+ ar = get_tool(config, "lib" if is_windows() else "ar")
+ ranlib = None if is_windows() else get_tool(config, "ranlib")
+ libtool = None
+ if "libtool" in config:
+ libtool = get_tool(config, "libtool")
+
+ if not os.path.exists(source_dir):
+ os.makedirs(source_dir)
+
+ for p in config.get("patches", []):
+ patch(os.path.join(config_dir, p), source_dir)
+
+ compiler_rt_source_link = llvm_source_dir + "/projects/compiler-rt"
+
+ symlinks = [
+ (clang_source_dir, llvm_source_dir + "/tools/clang"),
+ (extra_source_dir, llvm_source_dir + "/tools/clang/tools/extra"),
+ (lld_source_dir, llvm_source_dir + "/tools/lld"),
+ (compiler_rt_source_dir, compiler_rt_source_link),
+ (libcxx_source_dir, llvm_source_dir + "/projects/libcxx"),
+ (libcxxabi_source_dir, llvm_source_dir + "/projects/libcxxabi"),
+ ]
+ for l in symlinks:
+ # On Windows, we have to re-copy the whole directory every time.
+ if not is_windows() and os.path.islink(l[1]):
+ continue
+ delete(l[1])
+ if os.path.exists(l[0]):
+ symlink(l[0], l[1])
+
+ package_name = "clang"
+ if build_clang_tidy:
+ package_name = "clang-tidy"
+ import_clang_tidy(source_dir, build_clang_tidy_alpha, build_clang_tidy_external)
+
+ if not os.path.exists(build_dir):
+ os.makedirs(build_dir)
+
+ libcxx_include_dir = os.path.join(llvm_source_dir, "projects", "libcxx", "include")
+
+ stage1_dir = build_dir + "/stage1"
+ stage1_inst_dir = stage1_dir + "/" + package_name
+
+ final_stage_dir = stage1_dir
+ final_inst_dir = stage1_inst_dir
+
+ if is_darwin():
+ extra_cflags = []
+ extra_cxxflags = ["-stdlib=libc++"]
+ extra_cflags2 = []
+ extra_cxxflags2 = ["-stdlib=libc++"]
+ extra_asmflags = []
+ extra_ldflags = []
+ elif is_linux():
+ extra_cflags = []
+ extra_cxxflags = []
+ # When building stage2 and stage3, we want the newly-built clang to pick
+ # up whatever headers were installed from the gcc we used to build stage1,
+ # always, rather than the system headers. Providing -gcc-toolchain
+ # encourages clang to do that.
+ extra_cflags2 = ["-fPIC", "-gcc-toolchain", stage1_inst_dir]
+ # Silence clang's warnings about arguments not being used in compilation.
+ extra_cxxflags2 = [
+ "-fPIC",
+ "-Qunused-arguments",
+ "-gcc-toolchain",
+ stage1_inst_dir,
+ ]
+ extra_asmflags = []
+ # Avoid libLLVM internal function calls going through the PLT.
+ extra_ldflags = ["-Wl,-Bsymbolic-functions"]
+ # For whatever reason, LLVM's build system will set things up to turn
+ # on -ffunction-sections and -fdata-sections, but won't turn on the
+ # corresponding option to strip unused sections. We do it explicitly
+ # here. LLVM's build system is also picky about turning on ICF, so
+ # we do that explicitly here, too.
+ extra_ldflags += ["-fuse-ld=gold", "-Wl,--gc-sections", "-Wl,--icf=safe"]
+
+ if "LD_LIBRARY_PATH" in os.environ:
+ os.environ["LD_LIBRARY_PATH"] = "%s/lib64/:%s" % (
+ gcc_dir,
+ os.environ["LD_LIBRARY_PATH"],
+ )
+ else:
+ os.environ["LD_LIBRARY_PATH"] = "%s/lib64/" % gcc_dir
+ elif is_windows():
+ extra_cflags = []
+ extra_cxxflags = []
+ # clang-cl would like to figure out what it's supposed to be emulating
+ # by looking at an MSVC install, but we don't really have that here.
+ # Force things on.
+ extra_cflags2 = []
+ extra_cxxflags2 = [
+ "-fms-compatibility-version=19.15.26726",
+ "-Xclang",
+ "-std=c++14",
+ ]
+ extra_asmflags = []
+ extra_ldflags = []
+
+ if osx_cross_compile:
+ # undo the damage done in the is_linux() block above, and also simulate
+ # the is_darwin() block above.
+ extra_cflags = []
+ extra_cxxflags = ["-stdlib=libc++"]
+ extra_cxxflags2 = ["-stdlib=libc++"]
+
+ extra_flags = [
+ "-target",
+ "x86_64-apple-darwin",
+ "-mlinker-version=137",
+ "-B",
+ "%s/bin" % os.getenv("CROSS_CCTOOLS_PATH"),
+ "-isysroot",
+ os.getenv("CROSS_SYSROOT"),
+ # technically the sysroot flag there should be enough to deduce this,
+ # but clang needs some help to figure this out.
+ "-I%s/usr/include" % os.getenv("CROSS_SYSROOT"),
+ "-iframework",
+ "%s/System/Library/Frameworks" % os.getenv("CROSS_SYSROOT"),
+ ]
+ extra_cflags += extra_flags
+ extra_cxxflags += extra_flags
+ extra_cflags2 += extra_flags
+ extra_cxxflags2 += extra_flags
+ extra_asmflags += extra_flags
+ extra_ldflags = [
+ "-Wl,-syslibroot,%s" % os.getenv("CROSS_SYSROOT"),
+ "-Wl,-dead_strip",
+ ]
+
+ upload_dir = os.getenv("UPLOAD_DIR")
+ if assertions and upload_dir:
+ extra_cflags2 += ["-fcrash-diagnostics-dir=%s" % upload_dir]
+ extra_cxxflags2 += ["-fcrash-diagnostics-dir=%s" % upload_dir]
+
+ build_one_stage(
+ [cc] + extra_cflags,
+ [cxx] + extra_cxxflags,
+ [asm] + extra_asmflags,
+ [ld] + extra_ldflags,
+ ar,
+ ranlib,
+ libtool,
+ llvm_source_dir,
+ stage1_dir,
+ package_name,
+ build_libcxx,
+ osx_cross_compile,
+ build_type,
+ assertions,
+ python_path,
+ gcc_dir,
+ libcxx_include_dir,
+ build_wasm,
+ is_final_stage=(stages == 1),
+ )
+
+ runtimes_source_link = llvm_source_dir + "/runtimes/compiler-rt"
+
+ if stages >= 2:
+ stage2_dir = build_dir + "/stage2"
+ stage2_inst_dir = stage2_dir + "/" + package_name
+ final_stage_dir = stage2_dir
+ final_inst_dir = stage2_inst_dir
+ pgo_phase = "gen" if pgo else None
+ build_one_stage(
+ [stage1_inst_dir + "/bin/%s%s" % (cc_name, exe_ext)] + extra_cflags2,
+ [stage1_inst_dir + "/bin/%s%s" % (cxx_name, exe_ext)] + extra_cxxflags2,
+ [stage1_inst_dir + "/bin/%s%s" % (cc_name, exe_ext)] + extra_asmflags,
+ [ld] + extra_ldflags,
+ ar,
+ ranlib,
+ libtool,
+ llvm_source_dir,
+ stage2_dir,
+ package_name,
+ build_libcxx,
+ osx_cross_compile,
+ build_type,
+ assertions,
+ python_path,
+ gcc_dir,
+ libcxx_include_dir,
+ build_wasm,
+ compiler_rt_source_dir,
+ runtimes_source_link,
+ compiler_rt_source_link,
+ is_final_stage=(stages == 2),
+ android_targets=android_targets,
+ extra_targets=extra_targets,
+ pgo_phase=pgo_phase,
+ )
+
+ if stages >= 3:
+ stage3_dir = build_dir + "/stage3"
+ stage3_inst_dir = stage3_dir + "/" + package_name
+ final_stage_dir = stage3_dir
+ final_inst_dir = stage3_inst_dir
+ build_one_stage(
+ [stage2_inst_dir + "/bin/%s%s" % (cc_name, exe_ext)] + extra_cflags2,
+ [stage2_inst_dir + "/bin/%s%s" % (cxx_name, exe_ext)] + extra_cxxflags2,
+ [stage2_inst_dir + "/bin/%s%s" % (cc_name, exe_ext)] + extra_asmflags,
+ [ld] + extra_ldflags,
+ ar,
+ ranlib,
+ libtool,
+ llvm_source_dir,
+ stage3_dir,
+ package_name,
+ build_libcxx,
+ osx_cross_compile,
+ build_type,
+ assertions,
+ python_path,
+ gcc_dir,
+ libcxx_include_dir,
+ build_wasm,
+ compiler_rt_source_dir,
+ runtimes_source_link,
+ compiler_rt_source_link,
+ (stages == 3),
+ extra_targets=extra_targets,
+ )
+
+ if stages >= 4:
+ stage4_dir = build_dir + "/stage4"
+ stage4_inst_dir = stage4_dir + "/" + package_name
+ final_stage_dir = stage4_dir
+ final_inst_dir = stage4_inst_dir
+ pgo_phase = None
+ if pgo:
+ pgo_phase = "use"
+ llvm_profdata = stage3_inst_dir + "/bin/llvm-profdata%s" % exe_ext
+ merge_cmd = [llvm_profdata, "merge", "-o", "merged.profdata"]
+ profraw_files = glob.glob(
+ os.path.join(stage2_dir, "build", "profiles", "*.profraw")
+ )
+ if not os.path.exists(stage4_dir):
+ os.mkdir(stage4_dir)
+ run_in(stage4_dir, merge_cmd + profraw_files)
+ build_one_stage(
+ [stage3_inst_dir + "/bin/%s%s" % (cc_name, exe_ext)] + extra_cflags2,
+ [stage3_inst_dir + "/bin/%s%s" % (cxx_name, exe_ext)] + extra_cxxflags2,
+ [stage3_inst_dir + "/bin/%s%s" % (cc_name, exe_ext)] + extra_asmflags,
+ [ld] + extra_ldflags,
+ ar,
+ ranlib,
+ libtool,
+ llvm_source_dir,
+ stage4_dir,
+ package_name,
+ build_libcxx,
+ osx_cross_compile,
+ build_type,
+ assertions,
+ python_path,
+ gcc_dir,
+ libcxx_include_dir,
+ build_wasm,
+ compiler_rt_source_dir,
+ runtimes_source_link,
+ compiler_rt_source_link,
+ (stages == 4),
+ extra_targets=extra_targets,
+ pgo_phase=pgo_phase,
+ )
+
+ if build_clang_tidy:
+ prune_final_dir_for_clang_tidy(
+ os.path.join(final_stage_dir, package_name), osx_cross_compile
+ )
+
+ # Copy the wasm32 builtins to the final_inst_dir if the wasi-sysroot is present.
+ if "wasi-sysroot" in config:
+ sysroot = config["wasi-sysroot"].format(**os.environ)
+ if os.path.isdir(sysroot):
+ for srcdir in glob.glob(
+ os.path.join(sysroot, "lib", "clang", "*", "lib", "wasi")
+ ):
+ print("Copying from wasi-sysroot srcdir %s" % srcdir)
+ # Copy the contents of the "lib/wasi" subdirectory to the
+ # appropriate location in final_inst_dir.
+ version = os.path.basename(os.path.dirname(os.path.dirname(srcdir)))
+ destdir = os.path.join(
+ final_inst_dir, "lib", "clang", version, "lib", "wasi"
+ )
+ mkdir_p(destdir)
+ copy_tree(srcdir, destdir)
+
+ if not args.skip_tar:
+ build_tar_package("%s.tar.zst" % package_name, final_stage_dir, package_name)
diff --git a/build/build-clang/clang-10-linux64.json b/build/build-clang/clang-10-linux64.json
new file mode 100644
index 0000000000..b79243b2bc
--- /dev/null
+++ b/build/build-clang/clang-10-linux64.json
@@ -0,0 +1,27 @@
+{
+ "stages": "4",
+ "pgo" : true,
+ "build_libcxx": true,
+ "build_wasm": true,
+ "build_type": "Release",
+ "assertions": false,
+ "python_path": "/usr/bin/python2.7",
+ "gcc_dir": "{MOZ_FETCHES_DIR}/gcc",
+ "cc": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "cxx": "{MOZ_FETCHES_DIR}/gcc/bin/g++",
+ "as": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "wasi-sysroot": "{MOZ_FETCHES_DIR}/wasi-sysroot",
+ "patches": [
+ "static-llvm-symbolizer.patch",
+ "find_symbolizer_linux_clang_10.patch",
+ "rename_gcov_flush_clang_10.patch",
+ "critical_section_on_gcov_flush-rG02ce9d8ef5a8.patch",
+ "rG7e18aeba5062_clang_10.patch",
+ "llvmorg-11-init-4265-g2dcbdba8540_clang_10.patch",
+ "android-mangling-error.patch",
+ "unpoison-thread-stacks_clang_10.patch",
+ "downgrade-mangling-error.patch",
+ "tsan-hang-be41a98ac222_clang_10.patch",
+ "loosen-msvc-detection.patch"
+ ]
+}
diff --git a/build/build-clang/clang-11-android.json b/build/build-clang/clang-11-android.json
new file mode 100644
index 0000000000..ff284f1212
--- /dev/null
+++ b/build/build-clang/clang-11-android.json
@@ -0,0 +1,55 @@
+{
+ "stages": "2",
+ "build_libcxx": true,
+ "build_type": "Release",
+ "assertions": false,
+ "python_path": "/usr/bin/python2.7",
+ "gcc_dir": "{MOZ_FETCHES_DIR}/gcc",
+ "cc": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "cxx": "{MOZ_FETCHES_DIR}/gcc/bin/g++",
+ "as": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "android_targets": {
+ "armv7-linux-android": {
+ "ndk_toolchain": "{MOZ_FETCHES_DIR}/android-ndk/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64",
+ "ndk_sysroot": "{MOZ_FETCHES_DIR}/android-ndk/platforms/android-16/arch-arm",
+ "ndk_includes": [
+ "{MOZ_FETCHES_DIR}/android-ndk/sysroot/usr/include/arm-linux-androideabi",
+ "{MOZ_FETCHES_DIR}/android-ndk/sysroot/usr/include"
+ ],
+ "api_level": 16
+ },
+ "i686-linux-android": {
+ "ndk_toolchain": "{MOZ_FETCHES_DIR}/android-ndk/toolchains/x86-4.9/prebuilt/linux-x86_64",
+ "ndk_sysroot": "{MOZ_FETCHES_DIR}/android-ndk/platforms/android-16/arch-x86",
+ "ndk_includes": [
+ "{MOZ_FETCHES_DIR}/android-ndk/sysroot/usr/include/i686-linux-android",
+ "{MOZ_FETCHES_DIR}/android-ndk/sysroot/usr/include"
+ ],
+ "api_level": 16
+ },
+ "aarch64-linux-android": {
+ "ndk_toolchain": "{MOZ_FETCHES_DIR}/android-ndk/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64",
+ "ndk_sysroot": "{MOZ_FETCHES_DIR}/android-ndk/platforms/android-21/arch-arm64",
+ "ndk_includes": [
+ "{MOZ_FETCHES_DIR}/android-ndk/sysroot/usr/include/aarch64-linux-android",
+ "{MOZ_FETCHES_DIR}/android-ndk/sysroot/usr/include"
+ ],
+ "api_level": 21
+ },
+ "x86_64-linux-android": {
+ "ndk_toolchain": "{MOZ_FETCHES_DIR}/android-ndk/toolchains/x86_64-4.9/prebuilt/linux-x86_64",
+ "ndk_sysroot": "{MOZ_FETCHES_DIR}/android-ndk/platforms/android-21/arch-x86_64",
+ "ndk_includes": [
+ "{MOZ_FETCHES_DIR}/android-ndk/sysroot/usr/include/x86_64-linux-android",
+ "{MOZ_FETCHES_DIR}/android-ndk/sysroot/usr/include"
+ ],
+ "api_level": 21
+ }
+ },
+ "patches": [
+ "static-llvm-symbolizer.patch",
+ "find_symbolizer_linux_clang_10.patch",
+ "rename_gcov_flush_clang_11.patch",
+ "revert-r362047-and-r362065.patch"
+ ]
+}
diff --git a/build/build-clang/clang-11-linux64-aarch64-cross.json b/build/build-clang/clang-11-linux64-aarch64-cross.json
new file mode 100644
index 0000000000..1a091815ed
--- /dev/null
+++ b/build/build-clang/clang-11-linux64-aarch64-cross.json
@@ -0,0 +1,21 @@
+{
+ "stages": "4",
+ "pgo" : true,
+ "build_libcxx": true,
+ "build_type": "Release",
+ "assertions": false,
+ "python_path": "/usr/bin/python2.7",
+ "gcc_dir": "{MOZ_FETCHES_DIR}/gcc",
+ "cc": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "cxx": "{MOZ_FETCHES_DIR}/gcc/bin/g++",
+ "as": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "extra_targets": [
+ "aarch64-unknown-linux-gnu"
+ ],
+ "patches": [
+ "static-llvm-symbolizer.patch",
+ "find_symbolizer_linux_clang_10.patch",
+ "rename_gcov_flush_clang_11.patch",
+ "android-mangling-error.patch"
+ ]
+}
diff --git a/build/build-clang/clang-11-linux64.json b/build/build-clang/clang-11-linux64.json
new file mode 100644
index 0000000000..4e8f1f0098
--- /dev/null
+++ b/build/build-clang/clang-11-linux64.json
@@ -0,0 +1,24 @@
+{
+ "stages": "4",
+ "pgo" : true,
+ "build_libcxx": true,
+ "build_wasm": true,
+ "build_type": "Release",
+ "assertions": false,
+ "python_path": "/usr/bin/python2.7",
+ "gcc_dir": "{MOZ_FETCHES_DIR}/gcc",
+ "cc": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "cxx": "{MOZ_FETCHES_DIR}/gcc/bin/g++",
+ "as": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "wasi-sysroot": "{MOZ_FETCHES_DIR}/wasi-sysroot",
+ "patches": [
+ "static-llvm-symbolizer.patch",
+ "find_symbolizer_linux_clang_10.patch",
+ "rename_gcov_flush_clang_11.patch",
+ "android-mangling-error.patch",
+ "unpoison-thread-stacks_clang_10.patch",
+ "downgrade-mangling-error.patch",
+ "llvmorg-12-init-10926-gb79e990f401-LTO-new-pass-manager.patch",
+ "loosen-msvc-detection.patch"
+ ]
+}
diff --git a/build/build-clang/clang-11-macosx64.json b/build/build-clang/clang-11-macosx64.json
new file mode 100644
index 0000000000..367c953e38
--- /dev/null
+++ b/build/build-clang/clang-11-macosx64.json
@@ -0,0 +1,22 @@
+{
+ "stages": "1",
+ "build_libcxx": true,
+ "build_type": "Release",
+ "assertions": false,
+ "osx_cross_compile": true,
+ "python_path": "/usr/bin/python2.7",
+ "gcc_dir": "{MOZ_FETCHES_DIR}/gcc",
+ "cc": "{MOZ_FETCHES_DIR}/clang/bin/clang",
+ "cxx": "{MOZ_FETCHES_DIR}/clang/bin/clang++",
+ "as": "{MOZ_FETCHES_DIR}/clang/bin/clang",
+ "ar": "{MOZ_FETCHES_DIR}/cctools/bin/x86_64-apple-darwin-ar",
+ "ranlib": "{MOZ_FETCHES_DIR}/cctools/bin/x86_64-apple-darwin-ranlib",
+ "libtool": "{MOZ_FETCHES_DIR}/cctools/bin/x86_64-apple-darwin-libtool",
+ "ld": "{MOZ_FETCHES_DIR}/clang/bin/clang",
+ "patches": [
+ "static-llvm-symbolizer.patch",
+ "rename_gcov_flush_clang_11.patch",
+ "compiler-rt-cross-compile.patch",
+ "compiler-rt-no-codesign.patch"
+ ]
+}
diff --git a/build/build-clang/clang-11-mingw.json b/build/build-clang/clang-11-mingw.json
new file mode 100755
index 0000000000..4dcd9b26c4
--- /dev/null
+++ b/build/build-clang/clang-11-mingw.json
@@ -0,0 +1,14 @@
+{
+ "stages": "4",
+ "pgo" : true,
+ "build_libcxx": true,
+ "build_type": "Release",
+ "assertions": false,
+ "python_path": "/usr/bin/python2.7",
+ "gcc_dir": "{MOZ_FETCHES_DIR}/gcc",
+ "cc": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "cxx": "{MOZ_FETCHES_DIR}/gcc/bin/g++",
+ "as": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "patches": [
+ ]
+}
diff --git a/build/build-clang/clang-11-win64-2stage.json b/build/build-clang/clang-11-win64-2stage.json
new file mode 100644
index 0000000000..10e6267dbd
--- /dev/null
+++ b/build/build-clang/clang-11-win64-2stage.json
@@ -0,0 +1,14 @@
+{
+ "stages": "2",
+ "build_libcxx": false,
+ "build_type": "Release",
+ "assertions": false,
+ "python_path": "c:/mozilla-build/python/python.exe",
+ "cc": "cl.exe",
+ "cxx": "cl.exe",
+ "ml": "ml64.exe",
+ "patches": [
+ "unpoison-thread-stacks_clang_10.patch",
+ "bug47258-extract-symbols-mbcs.patch"
+ ]
+}
diff --git a/build/build-clang/clang-11-win64.json b/build/build-clang/clang-11-win64.json
new file mode 100644
index 0000000000..ba225876a7
--- /dev/null
+++ b/build/build-clang/clang-11-win64.json
@@ -0,0 +1,18 @@
+{
+ "stages": "4",
+ "pgo" : true,
+ "build_libcxx": false,
+ "build_type": "Release",
+ "assertions": false,
+ "python_path": "c:/mozilla-build/python/python.exe",
+ "cc": "cl.exe",
+ "cxx": "cl.exe",
+ "ml": "ml64.exe",
+ "patches": [
+ "unpoison-thread-stacks_clang_10.patch",
+ "downgrade-mangling-error.patch",
+ "bug47258-extract-symbols-mbcs.patch",
+ "llvmorg-12-init-10926-gb79e990f401-LTO-new-pass-manager.patch",
+ "loosen-msvc-detection.patch"
+ ]
+}
diff --git a/build/build-clang/clang-5.0-linux64.json b/build/build-clang/clang-5.0-linux64.json
new file mode 100644
index 0000000000..0d66e6b731
--- /dev/null
+++ b/build/build-clang/clang-5.0-linux64.json
@@ -0,0 +1,12 @@
+{
+ "stages": "3",
+ "build_libcxx": true,
+ "build_type": "Release",
+ "assertions": false,
+ "python_path": "/usr/bin/python2.7",
+ "gcc_dir": "{MOZ_FETCHES_DIR}/gcc",
+ "cc": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "cxx": "{MOZ_FETCHES_DIR}/gcc/bin/g++",
+ "as": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "patches": []
+}
diff --git a/build/build-clang/clang-7-linux64.json b/build/build-clang/clang-7-linux64.json
new file mode 100644
index 0000000000..adddc0eb35
--- /dev/null
+++ b/build/build-clang/clang-7-linux64.json
@@ -0,0 +1,19 @@
+{
+ "stages": "3",
+ "build_libcxx": true,
+ "build_type": "Release",
+ "assertions": false,
+ "python_path": "/usr/bin/python2.7",
+ "gcc_dir": "{MOZ_FETCHES_DIR}/gcc",
+ "cc": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "cxx": "{MOZ_FETCHES_DIR}/gcc/bin/g++",
+ "as": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "patches": [
+ "static-llvm-symbolizer.patch",
+ "find_symbolizer_linux.patch",
+ "rename_gcov_flush_7.patch",
+ "critical_section_on_gcov_flush-rG02ce9d8ef5a8.patch",
+ "r350774.patch",
+ "android-mangling-error.patch"
+ ]
+}
diff --git a/build/build-clang/clang-linux64.json b/build/build-clang/clang-linux64.json
new file mode 100644
index 0000000000..1a25656ea4
--- /dev/null
+++ b/build/build-clang/clang-linux64.json
@@ -0,0 +1,28 @@
+{
+ "stages": "4",
+ "pgo" : true,
+ "build_libcxx": true,
+ "build_wasm": true,
+ "build_type": "Release",
+ "assertions": false,
+ "python_path": "/usr/bin/python2.7",
+ "gcc_dir": "{MOZ_FETCHES_DIR}/gcc",
+ "cc": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "cxx": "{MOZ_FETCHES_DIR}/gcc/bin/g++",
+ "as": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "wasi-sysroot": "{MOZ_FETCHES_DIR}/wasi-sysroot",
+ "patches": [
+ "static-llvm-symbolizer.patch",
+ "find_symbolizer_linux.patch",
+ "rename_gcov_flush.patch",
+ "critical_section_on_gcov_flush-rG02ce9d8ef5a8.patch",
+ "rG7e18aeba5062.patch",
+ "llvmorg-11-init-4265-g2dcbdba8540.patch",
+ "android-mangling-error.patch",
+ "unpoison-thread-stacks.patch",
+ "downgrade-mangling-error.patch",
+ "tsan-hang-be41a98ac222.patch",
+ "llvmorg-11-init-15486-gfc937806efd-dont-jump-to-landing-pads.patch",
+ "loosen-msvc-detection.patch"
+ ]
+}
diff --git a/build/build-clang/clang-tidy-ci.patch b/build/build-clang/clang-tidy-ci.patch
new file mode 100644
index 0000000000..8d5d807ddf
--- /dev/null
+++ b/build/build-clang/clang-tidy-ci.patch
@@ -0,0 +1,26 @@
+diff --git a/clang-tools-extra/clang-tidy/ClangTidy.cpp b/clang-tools-extra/clang-tidy/ClangTidy.cpp
+index d6913dfd3c07..d031a163fdd7 100644
+--- a/clang-tools-extra/clang-tidy/ClangTidy.cpp
++++ b/clang-tools-extra/clang-tidy/ClangTidy.cpp
+@@ -418,6 +418,7 @@ ClangTidyASTConsumerFactory::CreateASTConsumer(
+ if (!Check->isLanguageVersionSupported(Context.getLangOpts()))
+ continue;
+ Check->registerMatchers(&*Finder);
++ Check->registerPPCallbacks(Compiler);
+ Check->registerPPCallbacks(*SM, PP, ModuleExpanderPP);
+ }
+
+diff --git a/clang-tools-extra/clang-tidy/ClangTidyCheck.h b/clang-tools-extra/clang-tidy/ClangTidyCheck.h
+index 54b725126752..200780e86804 100644
+--- a/clang-tools-extra/clang-tidy/ClangTidyCheck.h
++++ b/clang-tools-extra/clang-tidy/ClangTidyCheck.h
+@@ -130,6 +130,9 @@ public:
+ return true;
+ }
+
++ /// This has been deprecated in clang 9 - needed by mozilla-must-override
++ virtual void registerPPCallbacks(CompilerInstance &Compiler) {}
++
+ /// Override this to register ``PPCallbacks`` in the preprocessor.
+ ///
+ /// This should be used for clang-tidy checks that analyze preprocessor-
diff --git a/build/build-clang/clang-tidy-external-linux64.json b/build/build-clang/clang-tidy-external-linux64.json
new file mode 100644
index 0000000000..55382875b1
--- /dev/null
+++ b/build/build-clang/clang-tidy-external-linux64.json
@@ -0,0 +1,17 @@
+{
+ "stages": "1",
+ "build_libcxx": true,
+ "build_type": "Release",
+ "assertions": false,
+ "build_clang_tidy": true,
+ "python_path": "/usr/bin/python2.7",
+ "gcc_dir": "{MOZ_FETCHES_DIR}/gcc",
+ "cc": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "cxx": "{MOZ_FETCHES_DIR}/gcc/bin/g++",
+ "as": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "patches": [
+ "clang-tidy-ci.patch",
+ "clang-tidy-no-errors.patch"
+ ],
+ "build_clang_tidy_external": true
+}
diff --git a/build/build-clang/clang-tidy-linux64.json b/build/build-clang/clang-tidy-linux64.json
new file mode 100644
index 0000000000..dd5d85db25
--- /dev/null
+++ b/build/build-clang/clang-tidy-linux64.json
@@ -0,0 +1,16 @@
+{
+ "stages": "1",
+ "build_libcxx": true,
+ "build_type": "Release",
+ "assertions": false,
+ "build_clang_tidy": true,
+ "python_path": "/usr/bin/python2.7",
+ "gcc_dir": "{MOZ_FETCHES_DIR}/gcc",
+ "cc": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "cxx": "{MOZ_FETCHES_DIR}/gcc/bin/g++",
+ "as": "{MOZ_FETCHES_DIR}/gcc/bin/gcc",
+ "patches": [
+ "clang-tidy-ci.patch",
+ "clang-tidy-no-errors.patch"
+ ]
+}
diff --git a/build/build-clang/clang-tidy-macosx64.json b/build/build-clang/clang-tidy-macosx64.json
new file mode 100644
index 0000000000..1295b9e4a9
--- /dev/null
+++ b/build/build-clang/clang-tidy-macosx64.json
@@ -0,0 +1,23 @@
+{
+ "stages": "1",
+ "build_libcxx": true,
+ "build_type": "Release",
+ "assertions": false,
+ "build_clang_tidy": true,
+ "osx_cross_compile": true,
+ "python_path": "/usr/bin/python2.7",
+ "gcc_dir": "{MOZ_FETCHES_DIR}/gcc",
+ "cc": "{MOZ_FETCHES_DIR}/clang/bin/clang",
+ "cxx": "{MOZ_FETCHES_DIR}/clang/bin/clang++",
+ "as": "{MOZ_FETCHES_DIR}/clang/bin/clang",
+ "ar": "{MOZ_FETCHES_DIR}/cctools/bin/x86_64-apple-darwin-ar",
+ "ranlib": "{MOZ_FETCHES_DIR}/cctools/bin/x86_64-apple-darwin-ranlib",
+ "libtool": "{MOZ_FETCHES_DIR}/cctools/bin/x86_64-apple-darwin-libtool",
+ "ld": "{MOZ_FETCHES_DIR}/clang/bin/clang",
+ "patches": [
+ "clang-tidy-ci.patch",
+ "clang-tidy-no-errors.patch",
+ "compiler-rt-cross-compile.patch",
+ "compiler-rt-no-codesign.patch"
+ ]
+}
diff --git a/build/build-clang/clang-tidy-no-errors.patch b/build/build-clang/clang-tidy-no-errors.patch
new file mode 100644
index 0000000000..57a8167021
--- /dev/null
+++ b/build/build-clang/clang-tidy-no-errors.patch
@@ -0,0 +1,12 @@
+diff --git a/clang-tools-extra/clang-tidy/ClangTidyCheck.cpp b/clang-tools-extra/clang-tidy/ClangTidyCheck.cpp
+index fbf117688bb..dc7235b1450 100644
+--- a/clang-tools-extra/clang-tidy/ClangTidyCheck.cpp
++++ b/clang-tools-extra/clang-tidy/ClangTidyCheck.cpp
+@@ -20,6 +20,7 @@ ClangTidyCheck::ClangTidyCheck(StringRef CheckName, ClangTidyContext *Context)
+
+ DiagnosticBuilder ClangTidyCheck::diag(SourceLocation Loc, StringRef Message,
+ DiagnosticIDs::Level Level) {
++ Level = Level == DiagnosticIDs::Error ? DiagnosticIDs::Warning : Level;
+ return Context->diag(CheckName, Loc, Message, Level);
+ }
+
diff --git a/build/build-clang/clang-tidy-win64.json b/build/build-clang/clang-tidy-win64.json
new file mode 100644
index 0000000000..3cf7038e98
--- /dev/null
+++ b/build/build-clang/clang-tidy-win64.json
@@ -0,0 +1,15 @@
+{
+ "stages": "1",
+ "build_libcxx": false,
+ "build_type": "Release",
+ "assertions": false,
+ "build_clang_tidy": true,
+ "python_path": "c:/mozilla-build/python/python.exe",
+ "cc": "cl.exe",
+ "cxx": "cl.exe",
+ "ml": "ml64.exe",
+ "patches": [
+ "clang-tidy-ci.patch",
+ "clang-tidy-no-errors.patch"
+ ]
+}
diff --git a/build/build-clang/compiler-rt-cross-compile.patch b/build/build-clang/compiler-rt-cross-compile.patch
new file mode 100644
index 0000000000..4ab24952ac
--- /dev/null
+++ b/build/build-clang/compiler-rt-cross-compile.patch
@@ -0,0 +1,15 @@
+Add `-target x86_64-apple-darwin' to the compiler-rt overridden CFLAGS
+
+diff --git a/compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake b/compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake
+index 28d398672..aac68bf36 100644
+--- a/compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake
++++ b/compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake
+@@ -265,7 +265,7 @@ endfunction()
+ macro(darwin_add_builtin_libraries)
+ set(DARWIN_EXCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/Darwin-excludes)
+
+- set(CFLAGS "-fPIC -O3 -fvisibility=hidden -DVISIBILITY_HIDDEN -Wall -fomit-frame-pointer")
++ set(CFLAGS "-fPIC -O3 -fvisibility=hidden -DVISIBILITY_HIDDEN -Wall -fomit-frame-pointer -target x86_64-apple-darwin -isysroot ${CMAKE_OSX_SYSROOT} -I${CMAKE_OSX_SYSROOT}/usr/include")
+ set(CMAKE_C_FLAGS "")
+ set(CMAKE_CXX_FLAGS "")
+ set(CMAKE_ASM_FLAGS "")
diff --git a/build/build-clang/compiler-rt-no-codesign.patch b/build/build-clang/compiler-rt-no-codesign.patch
new file mode 100644
index 0000000000..99d3f7e992
--- /dev/null
+++ b/build/build-clang/compiler-rt-no-codesign.patch
@@ -0,0 +1,21 @@
+Disable codesign for macosx cross-compile toolchain. Codesign only works on OSX.
+
+Index: cmake/Modules/AddCompilerRT.cmake
+===================================================================
+--- a/compiler-rt/cmake/Modules/AddCompilerRT.cmake
++++ b/compiler-rt/cmake/Modules/AddCompilerRT.cmake
+@@ -321,14 +321,6 @@
+ set_target_properties(${libname} PROPERTIES IMPORT_PREFIX "")
+ set_target_properties(${libname} PROPERTIES IMPORT_SUFFIX ".lib")
+ endif()
+- if(APPLE)
+- # Ad-hoc sign the dylibs
+- add_custom_command(TARGET ${libname}
+- POST_BUILD
+- COMMAND codesign --sign - $<TARGET_FILE:${libname}>
+- WORKING_DIRECTORY ${COMPILER_RT_LIBRARY_OUTPUT_DIR}
+- )
+- endif()
+ endif()
+
+ set(parent_target_arg)
diff --git a/build/build-clang/critical_section_on_gcov_flush-rG02ce9d8ef5a8.patch b/build/build-clang/critical_section_on_gcov_flush-rG02ce9d8ef5a8.patch
new file mode 100644
index 0000000000..c5c533a915
--- /dev/null
+++ b/build/build-clang/critical_section_on_gcov_flush-rG02ce9d8ef5a8.patch
@@ -0,0 +1,75 @@
+From 02ce9d8ef5a84bc884de4105eae5f8736ef67634 Mon Sep 17 00:00:00 2001
+From: Calixte Denizet <calixte.denizet@gmail.com>
+Date: Tue, 10 Dec 2019 13:22:33 +0100
+Subject: [PATCH] [compiler-rt] Add a critical section when flushing gcov
+ counters
+
+Summary:
+Counters can be flushed in a multi-threaded context for example when the process is forked in different threads (https://github.com/llvm/llvm-project/blob/master/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp#L632-L663).
+In order to avoid pretty bad things, a critical section is needed around the flush.
+We had a lot of crashes in this code in Firefox CI when we switched to clang for linux ccov builds and those crashes disappeared with this patch.
+
+Reviewers: marco-c, froydnj, dmajor, davidxl, vsk
+
+Reviewed By: marco-c, dmajor
+
+Subscribers: ahatanak, froydnj, dmajor, dberris, jfb, #sanitizers, llvm-commits, sylvestre.ledru
+
+Tags: #sanitizers, #llvm
+
+Differential Revision: https://reviews.llvm.org/D70910
+---
+
+diff --git a/compiler-rt/lib/profile/GCDAProfiling.c b/compiler-rt/lib/profile/GCDAProfiling.c
+index b7257db10e7..d4abc4181ed 100644
+--- a/compiler-rt/lib/profile/GCDAProfiling.c
++++ b/compiler-rt/lib/profile/GCDAProfiling.c
+@@ -62,8 +62,27 @@ typedef unsigned long long uint64_t;
+ #include "InstrProfiling.h"
+ #include "InstrProfilingUtil.h"
+
+-/* #define DEBUG_GCDAPROFILING */
++#ifndef _WIN32
++#include <pthread.h>
++static pthread_mutex_t gcov_flush_mutex = PTHREAD_MUTEX_INITIALIZER;
++static __inline void gcov_flush_lock() {
++ pthread_mutex_lock(&gcov_flush_mutex);
++}
++static __inline void gcov_flush_unlock() {
++ pthread_mutex_unlock(&gcov_flush_mutex);
++}
++#else
++#include <windows.h>
++static SRWLOCK gcov_flush_mutex = SRWLOCK_INIT;
++static __inline void gcov_flush_lock() {
++ AcquireSRWLockExclusive(&gcov_flush_mutex);
++}
++static __inline void gcov_flush_unlock() {
++ ReleaseSRWLockExclusive(&gcov_flush_mutex);
++}
++#endif
+
++/* #define DEBUG_GCDAPROFILING */
+ /*
+ * --- GCOV file format I/O primitives ---
+ */
+@@ -620,12 +639,16 @@ void llvm_register_flush_function(fn_ptr fn) {
+ }
+
+ void __custom_llvm_gcov_flush() {
++ gcov_flush_lock();
++
+ struct fn_node* curr = flush_fn_list.head;
+
+ while (curr) {
+ curr->fn();
+ curr = curr->next;
+ }
++
++ gcov_flush_unlock();
+ }
+
+ COMPILER_RT_VISIBILITY
+--
+2.24.0
+
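For readers outside compiler-rt, the standalone C sketch below is not part of the patch set above; all names in it are illustrative stand-ins. It shows the scenario the new lock guards against: several threads fork at the same time, each fork triggers a counter flush, and without a mutex the flush callbacks race with each other.

#include <pthread.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for __custom_llvm_gcov_flush(); illustrative only. */
static void flush_counters(void) {
  pthread_mutex_lock(&flush_lock);    /* the critical section the patch adds */
  /* ... walk the flush-function list and write .gcda files ... */
  pthread_mutex_unlock(&flush_lock);
}

static void *worker(void *arg) {
  (void)arg;
  flush_counters();                   /* instrumented code flushes before fork() */
  pid_t pid = fork();
  if (pid == 0)
    _exit(0);
  waitpid(pid, NULL, 0);
  return NULL;
}

int main(void) {
  pthread_t t[2];
  for (int i = 0; i < 2; i++)
    pthread_create(&t[i], NULL, worker, NULL);
  for (int i = 0; i < 2; i++)
    pthread_join(t[i], NULL);
  return 0;
}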
diff --git a/build/build-clang/downgrade-mangling-error.patch b/build/build-clang/downgrade-mangling-error.patch
new file mode 100644
index 0000000000..69f46f4dd0
--- /dev/null
+++ b/build/build-clang/downgrade-mangling-error.patch
@@ -0,0 +1,23 @@
+Downgrade unimplemented mangling diagnostic from error to remark.
+This codepath is exercised by MozsearchIndexer.cpp (the searchfox
+indexer) when indexing on Windows. We can do without having the
+unimplemented bits for now as long as the compiler doesn't fail the
+build. See also https://bugs.llvm.org/show_bug.cgi?id=39294
+
+diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
+index 8b1419074df5..4436cd118f87 100644
+--- a/clang/lib/AST/ItaniumMangle.cpp
++++ b/clang/lib/AST/ItaniumMangle.cpp
+@@ -3847,10 +3847,11 @@ recurse:
+ if (!NullOut) {
+ // As bad as this diagnostic is, it's better than crashing.
+ DiagnosticsEngine &Diags = Context.getDiags();
+- unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
++ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Remark,
+ "cannot yet mangle expression type %0");
+ Diags.Report(E->getExprLoc(), DiagID)
+ << E->getStmtClassName() << E->getSourceRange();
++ Out << "MOZ_WE_HACKED_AROUND_BUG_1418415";
+ }
+ break;
+ }
diff --git a/build/build-clang/find_symbolizer_linux.patch b/build/build-clang/find_symbolizer_linux.patch
new file mode 100644
index 0000000000..c511401c32
--- /dev/null
+++ b/build/build-clang/find_symbolizer_linux.patch
@@ -0,0 +1,58 @@
+We currently need this patch because ASan only searches PATH to find the
+llvm-symbolizer binary to symbolize ASan traces. On testing machines, this
+can be installed in PATH easily. However, for e.g. the ASan Nightly Project,
+where we ship an ASan build, including llvm-symbolizer, to the user, we
+cannot expect llvm-symbolizer to be on PATH. Instead, we should try to look
+it up next to the binary. This patch implements the functionality for Linux
+only until there is similar functionality provided upstream.
+
+diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_file.cc b/compiler-rt/lib/sanitizer_common/sanitizer_file.cc
+index cde54bf..8daade1 100644
+--- a/compiler-rt/lib/sanitizer_common/sanitizer_file.cc
++++ b/compiler-rt/lib/sanitizer_common/sanitizer_file.cc
+@@ -21,6 +21,10 @@
+ #include "sanitizer_common.h"
+ #include "sanitizer_file.h"
+
++#if SANITIZER_LINUX
++#include "sanitizer_posix.h"
++#endif
++
+ namespace __sanitizer {
+
+ void CatastrophicErrorWrite(const char *buffer, uptr length) {
+@@ -156,6 +160,34 @@ char *FindPathToBinary(const char *name) {
+ if (*end == '\0') break;
+ beg = end + 1;
+ }
++
++#if SANITIZER_LINUX
++ // If we cannot find the requested binary in PATH, we should try to locate
++ // it next to the binary, in case it is shipped with the build itself
++  // (e.g. llvm-symbolizer shipped with sanitizer build to symbolize on client).
++ if (internal_readlink("/proc/self/exe", buffer.data(), kMaxPathLength) < 0)
++ return nullptr;
++
++ uptr buf_len = internal_strlen(buffer.data());
++
++ /* Avoid using dirname() here */
++ while (buf_len > 0) {
++ if (buffer[buf_len - 1] == '/')
++ break;
++ buf_len--;
++ }
++
++ if (!buf_len)
++ return nullptr;
++
++ if (buf_len + name_len + 1 <= kMaxPathLength) {
++ internal_memcpy(&buffer[buf_len], name, name_len);
++ buffer[buf_len + name_len] = '\0';
++ if (FileExists(buffer.data()))
++ return internal_strdup(buffer.data());
++ }
++#endif
++
+ return nullptr;
+ }
+
diff --git a/build/build-clang/find_symbolizer_linux_clang_10.patch b/build/build-clang/find_symbolizer_linux_clang_10.patch
new file mode 100644
index 0000000000..1ddb02024d
--- /dev/null
+++ b/build/build-clang/find_symbolizer_linux_clang_10.patch
@@ -0,0 +1,58 @@
+We currently need this patch because ASan only searches PATH to find the
+llvm-symbolizer binary to symbolize ASan traces. On testing machines, this
+can be installed in PATH easily. However, for e.g. the ASan Nightly Project,
+where we ship an ASan build, including llvm-symbolizer, to the user, we
+cannot expect llvm-symbolizer to be on PATH. Instead, we should try to look
+it up next to the binary. This patch implements the functionality for Linux
+only until there is similar functionality provided upstream.
+
+diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
+index 79930d79425..cfb4f90c0d5 100644
+--- a/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
++++ b/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
+@@ -20,6 +20,10 @@
+ #include "sanitizer_common.h"
+ #include "sanitizer_file.h"
+
++#if SANITIZER_LINUX
++#include "sanitizer_posix.h"
++#endif
++
+ namespace __sanitizer {
+
+ void CatastrophicErrorWrite(const char *buffer, uptr length) {
+@@ -194,6 +198,34 @@ char *FindPathToBinary(const char *name) {
+ if (*end == '\0') break;
+ beg = end + 1;
+ }
++
++#if SANITIZER_LINUX
++ // If we cannot find the requested binary in PATH, we should try to locate
++ // it next to the binary, in case it is shipped with the build itself
++  // (e.g. llvm-symbolizer shipped with sanitizer build to symbolize on client).
++ if (internal_readlink("/proc/self/exe", buffer.data(), kMaxPathLength) < 0)
++ return nullptr;
++
++ uptr buf_len = internal_strlen(buffer.data());
++
++ /* Avoid using dirname() here */
++ while (buf_len > 0) {
++ if (buffer[buf_len - 1] == '/')
++ break;
++ buf_len--;
++ }
++
++ if (!buf_len)
++ return nullptr;
++
++ if (buf_len + name_len + 1 <= kMaxPathLength) {
++ internal_memcpy(&buffer[buf_len], name, name_len);
++ buffer[buf_len + name_len] = '\0';
++ if (FileExists(buffer.data()))
++ return internal_strdup(buffer.data());
++ }
++#endif
++
+ return nullptr;
+ }
+
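As a rough illustration of the lookup strategy the two patches above add, the hypothetical helper below (not part of sanitizer_common) resolves a tool that sits next to the current executable on Linux by reading /proc/self/exe and replacing the last path component:

#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Returns 0 if an executable named `name` exists next to the running binary,
 * writing its full path into `out`; returns -1 otherwise. Illustrative only. */
static int find_next_to_self(const char *name, char *out, size_t out_len) {
  char buf[PATH_MAX];
  ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
  if (n < 0)
    return -1;
  buf[n] = '\0';
  char *slash = strrchr(buf, '/');
  if (!slash)
    return -1;
  slash[1] = '\0';                    /* keep only the directory part */
  if (snprintf(out, out_len, "%s%s", buf, name) >= (int)out_len)
    return -1;
  return access(out, X_OK) == 0 ? 0 : -1;
}

int main(void) {
  char path[PATH_MAX];
  if (find_next_to_self("llvm-symbolizer", path, sizeof(path)) == 0)
    printf("found: %s\n", path);
  return 0;
}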
diff --git a/build/build-clang/llvmorg-11-init-15486-gfc937806efd-dont-jump-to-landing-pads.patch b/build/build-clang/llvmorg-11-init-15486-gfc937806efd-dont-jump-to-landing-pads.patch
new file mode 100644
index 0000000000..fee6798c59
--- /dev/null
+++ b/build/build-clang/llvmorg-11-init-15486-gfc937806efd-dont-jump-to-landing-pads.patch
@@ -0,0 +1,100 @@
+From d1c09fb47e2778538c5b1f918724d31d05497883 Mon Sep 17 00:00:00 2001
+From: Arthur Eubanks <aeubanks@google.com>
+Date: Wed, 13 May 2020 16:33:09 -0700
+Subject: [PATCH] Don't jump to landing pads in Control Flow Optimizer
+
+Summary: Likely fixes https://bugs.llvm.org/show_bug.cgi?id=45858.
+
+Subscribers: hiraditya, llvm-commits
+
+Tags: #llvm
+
+Differential Revision: https://reviews.llvm.org/D80047
+---
+ llvm/lib/CodeGen/BranchFolding.cpp | 18 ++++++------
+ llvm/test/CodeGen/X86/branchfolding-ehpad.mir | 28 +++++++++++++++++++
+ 2 files changed, 38 insertions(+), 8 deletions(-)
+ create mode 100644 llvm/test/CodeGen/X86/branchfolding-ehpad.mir
+
+diff --git a/llvm/lib/CodeGen/BranchFolding.cpp b/llvm/lib/CodeGen/BranchFolding.cpp
+index fb54b5d6c8d..4a822b58446 100644
+--- a/llvm/lib/CodeGen/BranchFolding.cpp
++++ b/llvm/lib/CodeGen/BranchFolding.cpp
+@@ -991,10 +991,10 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
+ continue;
+ }
+
+- // If one of the blocks is the entire common tail (and not the entry
+- // block, which we can't jump to), we can treat all blocks with this same
+- // tail at once. Use PredBB if that is one of the possibilities, as that
+- // will not introduce any extra branches.
++ // If one of the blocks is the entire common tail (and is not the entry
++ // block/an EH pad, which we can't jump to), we can treat all blocks with
++ // this same tail at once. Use PredBB if that is one of the possibilities,
++ // as that will not introduce any extra branches.
+ MachineBasicBlock *EntryBB =
+ &MergePotentials.front().getBlock()->getParent()->front();
+ unsigned commonTailIndex = SameTails.size();
+@@ -1002,19 +1002,21 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
+ // into the other.
+ if (SameTails.size() == 2 &&
+ SameTails[0].getBlock()->isLayoutSuccessor(SameTails[1].getBlock()) &&
+- SameTails[1].tailIsWholeBlock())
++ SameTails[1].tailIsWholeBlock() && !SameTails[1].getBlock()->isEHPad())
+ commonTailIndex = 1;
+ else if (SameTails.size() == 2 &&
+ SameTails[1].getBlock()->isLayoutSuccessor(
+- SameTails[0].getBlock()) &&
+- SameTails[0].tailIsWholeBlock())
++ SameTails[0].getBlock()) &&
++ SameTails[0].tailIsWholeBlock() &&
++ !SameTails[0].getBlock()->isEHPad())
+ commonTailIndex = 0;
+ else {
+ // Otherwise just pick one, favoring the fall-through predecessor if
+ // there is one.
+ for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
+ MachineBasicBlock *MBB = SameTails[i].getBlock();
+- if (MBB == EntryBB && SameTails[i].tailIsWholeBlock())
++ if ((MBB == EntryBB || MBB->isEHPad()) &&
++ SameTails[i].tailIsWholeBlock())
+ continue;
+ if (MBB == PredBB) {
+ commonTailIndex = i;
+diff --git a/llvm/test/CodeGen/X86/branchfolding-ehpad.mir b/llvm/test/CodeGen/X86/branchfolding-ehpad.mir
+new file mode 100644
+index 00000000000..d445cd20680
+--- /dev/null
++++ b/llvm/test/CodeGen/X86/branchfolding-ehpad.mir
+@@ -0,0 +1,28 @@
++# RUN: llc -mtriple=x86_64-windows-msvc -verify-machineinstrs -run-pass branch-folder -o - %s | FileCheck %s
++
++# Check that branch-folder does not create a fallthrough to a landing pad.
++# Also make sure that the landing pad still can be tail merged.
++---
++name: foo
++body: |
++ ; CHECK-LABEL: name: foo
++ bb.0:
++ successors: %bb.1, %bb.3
++ bb.1:
++ JCC_1 %bb.4, 5, implicit killed $eflags
++ bb.2:
++ MOV8mi $r13, 1, $noreg, 0, $noreg, 0
++ JMP_1 %bb.5
++ ; CHECK: bb.2:
++ ; CHECK-NOT: successors: {{.*}}bb.3
++ ; CHECK: bb.3 (landing-pad):
++ ; CHECK-NOT: MOV8mi
++ bb.3(landing-pad):
++ MOV8mi $r13, 1, $noreg, 0, $noreg, 0
++ JMP_1 %bb.5
++ ; CHECK: bb.4:
++ bb.4:
++ MOV8mi $r13, 2, $noreg, 0, $noreg, 0
++ bb.5:
++ RET 0
++...
+--
+2.24.1.windows.2
+
diff --git a/build/build-clang/llvmorg-11-init-4265-g2dcbdba8540.patch b/build/build-clang/llvmorg-11-init-4265-g2dcbdba8540.patch
new file mode 100644
index 0000000000..b03ae0640c
--- /dev/null
+++ b/build/build-clang/llvmorg-11-init-4265-g2dcbdba8540.patch
@@ -0,0 +1,106 @@
+diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc b/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc
+index 9a184c79798..733decfe52c 100644
+--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc
++++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc
+@@ -1021,7 +1021,7 @@ TSAN_INTERCEPTOR(int, pthread_create,
+
+ TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
+ SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
+- int tid = ThreadTid(thr, pc, (uptr)th);
++ int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = BLOCK_REAL(pthread_join)(th, ret);
+ ThreadIgnoreEnd(thr, pc);
+@@ -1034,8 +1034,8 @@ TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
+ DEFINE_REAL_PTHREAD_FUNCTIONS
+
+ TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
+- SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
+- int tid = ThreadTid(thr, pc, (uptr)th);
++ SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
++ int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ int res = REAL(pthread_detach)(th);
+ if (res == 0) {
+ ThreadDetach(thr, pc, tid);
+@@ -1055,8 +1055,8 @@ TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
+
+ #if SANITIZER_LINUX
+ TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
+- SCOPED_TSAN_INTERCEPTOR(pthread_tryjoin_np, th, ret);
+- int tid = ThreadTid(thr, pc, (uptr)th);
++ SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
++ int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(pthread_tryjoin_np)(th, ret);
+ ThreadIgnoreEnd(thr, pc);
+@@ -1069,8 +1069,8 @@ TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
+
+ TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
+ const struct timespec *abstime) {
+- SCOPED_TSAN_INTERCEPTOR(pthread_timedjoin_np, th, ret, abstime);
+- int tid = ThreadTid(thr, pc, (uptr)th);
++ SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
++ int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
+ ThreadIgnoreEnd(thr, pc);
+diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+index 3a8231bda9a..30e144fbd00 100644
+--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
++++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+@@ -772,7 +772,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
+ void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
+ ThreadType thread_type);
+ void ThreadFinish(ThreadState *thr);
+-int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
++int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
+ void ThreadJoin(ThreadState *thr, uptr pc, int tid);
+ void ThreadDetach(ThreadState *thr, uptr pc, int tid);
+ void ThreadFinalize(ThreadState *thr);
+diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc
+index fd95cfed4f5..13e457bd770 100644
+--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc
++++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc
+@@ -285,19 +285,34 @@ void ThreadFinish(ThreadState *thr) {
+ ctx->thread_registry->FinishThread(thr->tid);
+ }
+
+-static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
+- uptr uid = (uptr)arg;
+- if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
++struct ConsumeThreadContext {
++ uptr uid;
++ ThreadContextBase* tctx;
++};
++
++static bool ConsumeThreadByUid(ThreadContextBase *tctx, void *arg) {
++ ConsumeThreadContext *findCtx = (ConsumeThreadContext*)arg;
++ if (tctx->user_id == findCtx->uid && tctx->status != ThreadStatusInvalid) {
++ if (findCtx->tctx) {
++ // Ensure that user_id is unique. If it's not the case we are screwed.
++ // Something went wrong before, but now there is no way to recover.
++ // Returning a wrong thread is not an option, it may lead to very hard
++ // to debug false positives (e.g. if we join a wrong thread).
++ Report("ThreadSanitizer: dup thread with used id 0x%zx\n", findCtx->uid);
++ Die();
++ }
++ findCtx->tctx = tctx;
+ tctx->user_id = 0;
+- return true;
+ }
+ return false;
+ }
+
+-int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
+- int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
+- DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
+- return res;
++int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
++ ConsumeThreadContext findCtx = {uid, nullptr};
++ ctx->thread_registry->FindThread(ConsumeThreadByUid, &findCtx);
++ int tid = findCtx.tctx ? findCtx.tctx->tid : ThreadRegistry::kUnknownTid;
++ DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, tid);
++ return tid;
+ }
+
+ void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
diff --git a/build/build-clang/llvmorg-11-init-4265-g2dcbdba8540_clang_10.patch b/build/build-clang/llvmorg-11-init-4265-g2dcbdba8540_clang_10.patch
new file mode 100644
index 0000000000..fb487e7801
--- /dev/null
+++ b/build/build-clang/llvmorg-11-init-4265-g2dcbdba8540_clang_10.patch
@@ -0,0 +1,106 @@
+diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+index 8aea1e4ec05..a623f4fe589 100644
+--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
++++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+@@ -1016,7 +1016,7 @@ TSAN_INTERCEPTOR(int, pthread_create,
+
+ TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
+ SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
+- int tid = ThreadTid(thr, pc, (uptr)th);
++ int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = BLOCK_REAL(pthread_join)(th, ret);
+ ThreadIgnoreEnd(thr, pc);
+@@ -1029,8 +1029,8 @@ TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
+ DEFINE_REAL_PTHREAD_FUNCTIONS
+
+ TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
+- SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
+- int tid = ThreadTid(thr, pc, (uptr)th);
++ SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
++ int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ int res = REAL(pthread_detach)(th);
+ if (res == 0) {
+ ThreadDetach(thr, pc, tid);
+@@ -1050,8 +1050,8 @@ TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
+
+ #if SANITIZER_LINUX
+ TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
+- SCOPED_TSAN_INTERCEPTOR(pthread_tryjoin_np, th, ret);
+- int tid = ThreadTid(thr, pc, (uptr)th);
++ SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
++ int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(pthread_tryjoin_np)(th, ret);
+ ThreadIgnoreEnd(thr, pc);
+@@ -1064,8 +1064,8 @@ TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
+
+ TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
+ const struct timespec *abstime) {
+- SCOPED_TSAN_INTERCEPTOR(pthread_timedjoin_np, th, ret, abstime);
+- int tid = ThreadTid(thr, pc, (uptr)th);
++ SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
++ int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
+ ThreadIgnoreEnd(thr, pc);
+diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+index c38fc43a9f8..20f7a99157a 100644
+--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
++++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+@@ -775,7 +775,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
+ void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
+ ThreadType thread_type);
+ void ThreadFinish(ThreadState *thr);
+-int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
++int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
+ void ThreadJoin(ThreadState *thr, uptr pc, int tid);
+ void ThreadDetach(ThreadState *thr, uptr pc, int tid);
+ void ThreadFinalize(ThreadState *thr);
+diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
+index 0ac1ee99c47..f7068f0d331 100644
+--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
++++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
+@@ -285,19 +285,34 @@ void ThreadFinish(ThreadState *thr) {
+ ctx->thread_registry->FinishThread(thr->tid);
+ }
+
+-static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
+- uptr uid = (uptr)arg;
+- if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
++struct ConsumeThreadContext {
++ uptr uid;
++ ThreadContextBase* tctx;
++};
++
++static bool ConsumeThreadByUid(ThreadContextBase *tctx, void *arg) {
++ ConsumeThreadContext *findCtx = (ConsumeThreadContext*)arg;
++ if (tctx->user_id == findCtx->uid && tctx->status != ThreadStatusInvalid) {
++ if (findCtx->tctx) {
++ // Ensure that user_id is unique. If it's not the case we are screwed.
++ // Something went wrong before, but now there is no way to recover.
++ // Returning a wrong thread is not an option, it may lead to very hard
++ // to debug false positives (e.g. if we join a wrong thread).
++ Report("ThreadSanitizer: dup thread with used id 0x%zx\n", findCtx->uid);
++ Die();
++ }
++ findCtx->tctx = tctx;
+ tctx->user_id = 0;
+- return true;
+ }
+ return false;
+ }
+
+-int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
+- int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
+- DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
+- return res;
++int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
++ ConsumeThreadContext findCtx = {uid, nullptr};
++ ctx->thread_registry->FindThread(ConsumeThreadByUid, &findCtx);
++ int tid = findCtx.tctx ? findCtx.tctx->tid : ThreadRegistry::kUnknownTid;
++ DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, tid);
++ return tid;
+ }
+
+ void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
diff --git a/build/build-clang/llvmorg-12-init-10926-gb79e990f401-LTO-new-pass-manager.patch b/build/build-clang/llvmorg-12-init-10926-gb79e990f401-LTO-new-pass-manager.patch
new file mode 100644
index 0000000000..61c0df9214
--- /dev/null
+++ b/build/build-clang/llvmorg-12-init-10926-gb79e990f401-LTO-new-pass-manager.patch
@@ -0,0 +1,66 @@
+diff --git a/lld/COFF/Config.h b/lld/COFF/Config.h
+index 7c439176f3a4..ae969c6bdd8b 100644
+--- a/lld/COFF/Config.h
++++ b/lld/COFF/Config.h
+@@ -155,6 +155,11 @@ struct Configuration {
+ // Used for /opt:lldltocachepolicy=policy
+ llvm::CachePruningPolicy ltoCachePolicy;
+
++ // Used for /opt:[no]ltonewpassmanager
++ bool ltoNewPassManager = false;
++ // Used for /opt:[no]ltodebugpassmanager
++ bool ltoDebugPassManager = false;
++
+ // Used for /merge:from=to (e.g. /merge:.rdata=.text)
+ std::map<StringRef, StringRef> merge;
+
+diff --git a/lld/COFF/Driver.cpp b/lld/COFF/Driver.cpp
+index 9ceccef86779..db2ae241dddf 100644
+--- a/lld/COFF/Driver.cpp
++++ b/lld/COFF/Driver.cpp
+@@ -1418,6 +1418,8 @@ void LinkerDriver::link(ArrayRef<const char *> argsArr) {
+ unsigned icfLevel =
+ args.hasArg(OPT_profile) ? 0 : 1; // 0: off, 1: limited, 2: on
+ unsigned tailMerge = 1;
++ bool ltoNewPM = false;
++ bool ltoDebugPM = false;
+ for (auto *arg : args.filtered(OPT_opt)) {
+ std::string str = StringRef(arg->getValue()).lower();
+ SmallVector<StringRef, 1> vec;
+@@ -1435,6 +1437,14 @@ void LinkerDriver::link(ArrayRef<const char *> argsArr) {
+ tailMerge = 2;
+ } else if (s == "nolldtailmerge") {
+ tailMerge = 0;
++ } else if (s == "ltonewpassmanager") {
++ ltoNewPM = true;
++ } else if (s == "noltonewpassmanager") {
++ ltoNewPM = false;
++ } else if (s == "ltodebugpassmanager") {
++ ltoDebugPM = true;
++ } else if (s == "noltodebugpassmanager") {
++ ltoDebugPM = false;
+ } else if (s.startswith("lldlto=")) {
+ StringRef optLevel = s.substr(7);
+ if (optLevel.getAsInteger(10, config->ltoo) || config->ltoo > 3)
+@@ -1464,6 +1474,8 @@ void LinkerDriver::link(ArrayRef<const char *> argsArr) {
+ config->doGC = doGC;
+ config->doICF = icfLevel > 0;
+ config->tailMerge = (tailMerge == 1 && config->doICF) || tailMerge == 2;
++ config->ltoNewPassManager = ltoNewPM;
++ config->ltoDebugPassManager = ltoDebugPM;
+
+ // Handle /lldsavetemps
+ if (args.hasArg(OPT_lldsavetemps))
+diff --git a/lld/COFF/LTO.cpp b/lld/COFF/LTO.cpp
+index bb44819e60f8..e55fb544b050 100644
+--- a/lld/COFF/LTO.cpp
++++ b/lld/COFF/LTO.cpp
+@@ -82,6 +82,8 @@ static lto::Config createConfig() {
+ c.MAttrs = getMAttrs();
+ c.CGOptLevel = args::getCGOptLevel(config->ltoo);
+ c.AlwaysEmitRegularLTOObj = !config->ltoObjPath.empty();
++ c.UseNewPM = config->ltoNewPassManager;
++ c.DebugPassManager = config->ltoDebugPassManager;
+
+ if (config->saveTemps)
+ checkError(c.addSaveTemps(std::string(config->outputFile) + ".",
diff --git a/build/build-clang/loosen-msvc-detection.patch b/build/build-clang/loosen-msvc-detection.patch
new file mode 100644
index 0000000000..03cd72e929
--- /dev/null
+++ b/build/build-clang/loosen-msvc-detection.patch
@@ -0,0 +1,22 @@
+In a proper VS install, the path to cl.exe looks like:
+...\VC\Tools\MSVC\14.11.25503\bin\HostX64\x64\cl.exe
+
+In our automation, the path is just:
+...\VC\bin\HostX64\x64\cl.exe
+
+Clang tries to do some sanity-checking to make sure that the cl.exe it finds is the Microsoft compiler and not some other program. But the checks are a little too strict for us, so just look for "bin\Host*\*\cl.exe".
+
+diff --git a/clang/lib/Driver/ToolChains/MSVC.cpp b/clang/lib/Driver/ToolChains/MSVC.cpp
+index 7978a6941cb..0159e89fa27 100644
+--- a/clang/lib/Driver/ToolChains/MSVC.cpp
++++ b/clang/lib/Driver/ToolChains/MSVC.cpp
+@@ -152,8 +152,7 @@ static bool findVCToolChainViaEnvironment(std::string &Path,
+ // path components with these prefixes when walking backwards through
+ // the path.
+ // Note: empty strings match anything.
+- llvm::StringRef ExpectedPrefixes[] = {"", "Host", "bin", "",
+- "MSVC", "Tools", "VC"};
++ llvm::StringRef ExpectedPrefixes[] = {"", "Host", "bin"};
+
+ auto It = llvm::sys::path::rbegin(PathEntry);
+ auto End = llvm::sys::path::rend(PathEntry);
diff --git a/build/build-clang/r350774.patch b/build/build-clang/r350774.patch
new file mode 100644
index 0000000000..6b8640f745
--- /dev/null
+++ b/build/build-clang/r350774.patch
@@ -0,0 +1,14 @@
+diff --git a/llvm/lib/Object/Binary.cpp b/llvm/lib/Object/Binary.cpp
+index d7c25921ec3..fe41987f5c2 100644
+--- a/llvm/lib/Object/Binary.cpp
++++ b/llvm/lib/Object/Binary.cpp
+@@ -88,7 +88,8 @@ Expected<std::unique_ptr<Binary>> object::createBinary(MemoryBufferRef Buffer,
+
+ Expected<OwningBinary<Binary>> object::createBinary(StringRef Path) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
+- MemoryBuffer::getFileOrSTDIN(Path);
++ MemoryBuffer::getFileOrSTDIN(Path, /*FileSize=*/-1,
++ /*RequiresNullTerminator=*/false);
+ if (std::error_code EC = FileOrErr.getError())
+ return errorCodeToError(EC);
+ std::unique_ptr<MemoryBuffer> &Buffer = FileOrErr.get();
diff --git a/build/build-clang/rG7e18aeba5062.patch b/build/build-clang/rG7e18aeba5062.patch
new file mode 100644
index 0000000000..58947b6dd8
--- /dev/null
+++ b/build/build-clang/rG7e18aeba5062.patch
@@ -0,0 +1,255 @@
+From 779a169144581438d9e24b8b46a86704f6335e35 Mon Sep 17 00:00:00 2001
+From: Nikita Popov <nikita.ppv@gmail.com>
+Date: Sat, 16 Nov 2019 16:22:18 +0100
+Subject: [PATCH] [LVI] Restructure caching
+
+Variant on D70103. The caching is switched to always use a BB to
+cache entry map, which then contains per-value caches. A separate
+set contains value handles with a deletion callback. This allows us
+to properly invalidate overdefined values.
+
+A possible alternative would be to always cache by value first and
+have per-BB maps/sets in the each cache entry. In that case we could
+use a ValueMap and would avoid the separate value handle set. I went
+with the BB indexing at the top level to make it easier to integrate
+D69914, but possibly that's not the right choice.
+
+Differential Revision: https://reviews.llvm.org/D70376
+---
+ llvm/lib/Analysis/LazyValueInfo.cpp | 143 +++++++++-------------------
+ 1 file changed, 47 insertions(+), 96 deletions(-)
+
+diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
+index 542ff709d47..eb51744aec3 100644
+--- a/llvm/lib/Analysis/LazyValueInfo.cpp
++++ b/llvm/lib/Analysis/LazyValueInfo.cpp
+@@ -132,12 +132,9 @@ namespace {
+ /// A callback value handle updates the cache when values are erased.
+ class LazyValueInfoCache;
+ struct LVIValueHandle final : public CallbackVH {
+- // Needs to access getValPtr(), which is protected.
+- friend struct DenseMapInfo<LVIValueHandle>;
+-
+ LazyValueInfoCache *Parent;
+
+- LVIValueHandle(Value *V, LazyValueInfoCache *P)
++ LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
+ : CallbackVH(V), Parent(P) { }
+
+ void deleted() override;
+@@ -151,89 +148,63 @@ namespace {
+ /// This is the cache kept by LazyValueInfo which
+ /// maintains information about queries across the clients' queries.
+ class LazyValueInfoCache {
+- /// This is all of the cached block information for exactly one Value*.
+- /// The entries are sorted by the BasicBlock* of the
+- /// entries, allowing us to do a lookup with a binary search.
+- /// Over-defined lattice values are recorded in OverDefinedCache to reduce
+- /// memory overhead.
+- struct ValueCacheEntryTy {
+- ValueCacheEntryTy(Value *V, LazyValueInfoCache *P) : Handle(V, P) {}
+- LVIValueHandle Handle;
+- SmallDenseMap<PoisoningVH<BasicBlock>, ValueLatticeElement, 4> BlockVals;
++ /// This is all of the cached information for one basic block. It contains
++ /// the per-value lattice elements, as well as a separate set for
++ /// overdefined values to reduce memory usage.
++ struct BlockCacheEntryTy {
++ SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
++ SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
+ };
+
+- /// This tracks, on a per-block basis, the set of values that are
+- /// over-defined at the end of that block.
+- typedef DenseMap<PoisoningVH<BasicBlock>, SmallPtrSet<Value *, 4>>
+- OverDefinedCacheTy;
+- /// Keep track of all blocks that we have ever seen, so we
+- /// don't spend time removing unused blocks from our caches.
+- DenseSet<PoisoningVH<BasicBlock> > SeenBlocks;
+-
+- /// This is all of the cached information for all values,
+- /// mapped from Value* to key information.
+- DenseMap<Value *, std::unique_ptr<ValueCacheEntryTy>> ValueCache;
+- OverDefinedCacheTy OverDefinedCache;
+-
++ /// Cached information per basic block.
++ DenseMap<PoisoningVH<BasicBlock>, BlockCacheEntryTy> BlockCache;
++ /// Set of value handles used to erase values from the cache on deletion.
++ DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
+
+ public:
+ void insertResult(Value *Val, BasicBlock *BB,
+ const ValueLatticeElement &Result) {
+- SeenBlocks.insert(BB);
+-
++ auto &CacheEntry = BlockCache.try_emplace(BB).first->second;
+ // Insert over-defined values into their own cache to reduce memory
+ // overhead.
+ if (Result.isOverdefined())
+- OverDefinedCache[BB].insert(Val);
+- else {
+- auto It = ValueCache.find_as(Val);
+- if (It == ValueCache.end()) {
+- ValueCache[Val] = make_unique<ValueCacheEntryTy>(Val, this);
+- It = ValueCache.find_as(Val);
+- assert(It != ValueCache.end() && "Val was just added to the map!");
+- }
+- It->second->BlockVals[BB] = Result;
+- }
+- }
+-
+- bool isOverdefined(Value *V, BasicBlock *BB) const {
+- auto ODI = OverDefinedCache.find(BB);
+-
+- if (ODI == OverDefinedCache.end())
+- return false;
++ CacheEntry.OverDefined.insert(Val);
++ else
++ CacheEntry.LatticeElements.insert({ Val, Result });
+
+- return ODI->second.count(V);
++ auto HandleIt = ValueHandles.find_as(Val);
++ if (HandleIt == ValueHandles.end())
++ ValueHandles.insert({ Val, this });
+ }
+
+ bool hasCachedValueInfo(Value *V, BasicBlock *BB) const {
+- if (isOverdefined(V, BB))
+- return true;
+-
+- auto I = ValueCache.find_as(V);
+- if (I == ValueCache.end())
++ auto It = BlockCache.find(BB);
++ if (It == BlockCache.end())
+ return false;
+
+- return I->second->BlockVals.count(BB);
++ return It->second.OverDefined.count(V) ||
++ It->second.LatticeElements.count(V);
+ }
+
+ ValueLatticeElement getCachedValueInfo(Value *V, BasicBlock *BB) const {
+- if (isOverdefined(V, BB))
++ auto It = BlockCache.find(BB);
++ if (It == BlockCache.end())
++ return ValueLatticeElement();
++
++ if (It->second.OverDefined.count(V))
+ return ValueLatticeElement::getOverdefined();
+
+- auto I = ValueCache.find_as(V);
+- if (I == ValueCache.end())
++ auto LatticeIt = It->second.LatticeElements.find(V);
++ if (LatticeIt == It->second.LatticeElements.end())
+ return ValueLatticeElement();
+- auto BBI = I->second->BlockVals.find(BB);
+- if (BBI == I->second->BlockVals.end())
+- return ValueLatticeElement();
+- return BBI->second;
++
++ return LatticeIt->second;
+ }
+
+ /// clear - Empty the cache.
+ void clear() {
+- SeenBlocks.clear();
+- ValueCache.clear();
+- OverDefinedCache.clear();
++ BlockCache.clear();
++ ValueHandles.clear();
+ }
+
+ /// Inform the cache that a given value has been deleted.
+@@ -247,23 +218,18 @@ namespace {
+ /// OldSucc might have (unless also overdefined in NewSucc). This just
+ /// flushes elements from the cache and does not add any.
+ void threadEdgeImpl(BasicBlock *OldSucc,BasicBlock *NewSucc);
+-
+- friend struct LVIValueHandle;
+ };
+ }
+
+ void LazyValueInfoCache::eraseValue(Value *V) {
+- for (auto I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E;) {
+- // Copy and increment the iterator immediately so we can erase behind
+- // ourselves.
+- auto Iter = I++;
+- SmallPtrSetImpl<Value *> &ValueSet = Iter->second;
+- ValueSet.erase(V);
+- if (ValueSet.empty())
+- OverDefinedCache.erase(Iter);
++ for (auto &Pair : BlockCache) {
++ Pair.second.LatticeElements.erase(V);
++ Pair.second.OverDefined.erase(V);
+ }
+
+- ValueCache.erase(V);
++ auto HandleIt = ValueHandles.find_as(V);
++ if (HandleIt != ValueHandles.end())
++ ValueHandles.erase(HandleIt);
+ }
+
+ void LVIValueHandle::deleted() {
+@@ -273,18 +239,7 @@ void LVIValueHandle::deleted() {
+ }
+
+ void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
+- // Shortcut if we have never seen this block.
+- DenseSet<PoisoningVH<BasicBlock> >::iterator I = SeenBlocks.find(BB);
+- if (I == SeenBlocks.end())
+- return;
+- SeenBlocks.erase(I);
+-
+- auto ODI = OverDefinedCache.find(BB);
+- if (ODI != OverDefinedCache.end())
+- OverDefinedCache.erase(ODI);
+-
+- for (auto &I : ValueCache)
+- I.second->BlockVals.erase(BB);
++ BlockCache.erase(BB);
+ }
+
+ void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
+@@ -302,10 +257,11 @@ void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
+ std::vector<BasicBlock*> worklist;
+ worklist.push_back(OldSucc);
+
+- auto I = OverDefinedCache.find(OldSucc);
+- if (I == OverDefinedCache.end())
++ auto I = BlockCache.find(OldSucc);
++ if (I == BlockCache.end() || I->second.OverDefined.empty())
+ return; // Nothing to process here.
+- SmallVector<Value *, 4> ValsToClear(I->second.begin(), I->second.end());
++ SmallVector<Value *, 4> ValsToClear(I->second.OverDefined.begin(),
++ I->second.OverDefined.end());
+
+ // Use a worklist to perform a depth-first search of OldSucc's successors.
+ // NOTE: We do not need a visited list since any blocks we have already
+@@ -319,10 +275,10 @@ void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
+ if (ToUpdate == NewSucc) continue;
+
+ // If a value was marked overdefined in OldSucc, and is here too...
+- auto OI = OverDefinedCache.find(ToUpdate);
+- if (OI == OverDefinedCache.end())
++ auto OI = BlockCache.find(ToUpdate);
++ if (OI == BlockCache.end() || OI->second.OverDefined.empty())
+ continue;
+- SmallPtrSetImpl<Value *> &ValueSet = OI->second;
++ auto &ValueSet = OI->second.OverDefined;
+
+ bool changed = false;
+ for (Value *V : ValsToClear) {
+@@ -332,11 +288,6 @@ void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
+ // If we removed anything, then we potentially need to update
+ // blocks successors too.
+ changed = true;
+-
+- if (ValueSet.empty()) {
+- OverDefinedCache.erase(OI);
+- break;
+- }
+ }
+
+ if (!changed) continue;
+--
+2.24.0
+
diff --git a/build/build-clang/rG7e18aeba5062_clang_10.patch b/build/build-clang/rG7e18aeba5062_clang_10.patch
new file mode 100644
index 0000000000..0fc39a1b4d
--- /dev/null
+++ b/build/build-clang/rG7e18aeba5062_clang_10.patch
@@ -0,0 +1,249 @@
+From 779a169144581438d9e24b8b46a86704f6335e35 Mon Sep 17 00:00:00 2001
+From: Nikita Popov <nikita.ppv@gmail.com>
+Date: Sat, 16 Nov 2019 16:22:18 +0100
+Subject: [PATCH] [LVI] Restructure caching
+
+Variant on D70103. The caching is switched to always use a BB to
+cache entry map, which then contains per-value caches. A separate
+set contains value handles with a deletion callback. This allows us
+to properly invalidate overdefined values.
+
+A possible alternative would be to always cache by value first and
+have per-BB maps/sets in the each cache entry. In that case we could
+use a ValueMap and would avoid the separate value handle set. I went
+with the BB indexing at the top level to make it easier to integrate
+D69914, but possibly that's not the right choice.
+
+Differential Revision: https://reviews.llvm.org/D70376
+
+diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
+index bad2de9e5f5..33406a75d80 100644
+--- a/llvm/lib/Analysis/LazyValueInfo.cpp
++++ b/llvm/lib/Analysis/LazyValueInfo.cpp
+@@ -136,12 +136,10 @@ namespace {
+ /// A callback value handle updates the cache when values are erased.
+ class LazyValueInfoCache;
+ struct LVIValueHandle final : public CallbackVH {
+- // Needs to access getValPtr(), which is protected.
+- friend struct DenseMapInfo<LVIValueHandle>;
+
+ LazyValueInfoCache *Parent;
+
+- LVIValueHandle(Value *V, LazyValueInfoCache *P)
++ LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
+ : CallbackVH(V), Parent(P) { }
+
+ void deleted() override;
+@@ -155,89 +153,63 @@ namespace {
+ /// This is the cache kept by LazyValueInfo which
+ /// maintains information about queries across the clients' queries.
+ class LazyValueInfoCache {
+- /// This is all of the cached block information for exactly one Value*.
+- /// The entries are sorted by the BasicBlock* of the
+- /// entries, allowing us to do a lookup with a binary search.
+- /// Over-defined lattice values are recorded in OverDefinedCache to reduce
+- /// memory overhead.
+- struct ValueCacheEntryTy {
+- ValueCacheEntryTy(Value *V, LazyValueInfoCache *P) : Handle(V, P) {}
+- LVIValueHandle Handle;
+- SmallDenseMap<PoisoningVH<BasicBlock>, ValueLatticeElement, 4> BlockVals;
++ /// This is all of the cached information for one basic block. It contains
++ /// the per-value lattice elements, as well as a separate set for
++ /// overdefined values to reduce memory usage.
++ struct BlockCacheEntryTy {
++ SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
++ SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
+ };
+
+- /// This tracks, on a per-block basis, the set of values that are
+- /// over-defined at the end of that block.
+- typedef DenseMap<PoisoningVH<BasicBlock>, SmallPtrSet<Value *, 4>>
+- OverDefinedCacheTy;
+- /// Keep track of all blocks that we have ever seen, so we
+- /// don't spend time removing unused blocks from our caches.
+- DenseSet<PoisoningVH<BasicBlock> > SeenBlocks;
+-
+- /// This is all of the cached information for all values,
+- /// mapped from Value* to key information.
+- DenseMap<Value *, std::unique_ptr<ValueCacheEntryTy>> ValueCache;
+- OverDefinedCacheTy OverDefinedCache;
+-
++ /// Cached information per basic block.
++ DenseMap<PoisoningVH<BasicBlock>, BlockCacheEntryTy> BlockCache;
++ /// Set of value handles used to erase values from the cache on deletion.
++ DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
+
+ public:
+ void insertResult(Value *Val, BasicBlock *BB,
+ const ValueLatticeElement &Result) {
+- SeenBlocks.insert(BB);
+-
++ auto &CacheEntry = BlockCache.try_emplace(BB).first->second;
+ // Insert over-defined values into their own cache to reduce memory
+ // overhead.
+ if (Result.isOverdefined())
+- OverDefinedCache[BB].insert(Val);
+- else {
+- auto It = ValueCache.find_as(Val);
+- if (It == ValueCache.end()) {
+- ValueCache[Val] = std::make_unique<ValueCacheEntryTy>(Val, this);
+- It = ValueCache.find_as(Val);
+- assert(It != ValueCache.end() && "Val was just added to the map!");
+- }
+- It->second->BlockVals[BB] = Result;
+- }
+- }
+-
+- bool isOverdefined(Value *V, BasicBlock *BB) const {
+- auto ODI = OverDefinedCache.find(BB);
+-
+- if (ODI == OverDefinedCache.end())
+- return false;
++ CacheEntry.OverDefined.insert(Val);
++ else
++ CacheEntry.LatticeElements.insert({ Val, Result });
+
+- return ODI->second.count(V);
++ auto HandleIt = ValueHandles.find_as(Val);
++ if (HandleIt == ValueHandles.end())
++ ValueHandles.insert({ Val, this });
+ }
+
+ bool hasCachedValueInfo(Value *V, BasicBlock *BB) const {
+- if (isOverdefined(V, BB))
+- return true;
+-
+- auto I = ValueCache.find_as(V);
+- if (I == ValueCache.end())
++ auto It = BlockCache.find(BB);
++ if (It == BlockCache.end())
+ return false;
+
+- return I->second->BlockVals.count(BB);
++ return It->second.OverDefined.count(V) ||
++ It->second.LatticeElements.count(V);
+ }
+
+ ValueLatticeElement getCachedValueInfo(Value *V, BasicBlock *BB) const {
+- if (isOverdefined(V, BB))
++ auto It = BlockCache.find(BB);
++ if (It == BlockCache.end())
++ return ValueLatticeElement();
++
++ if (It->second.OverDefined.count(V))
+ return ValueLatticeElement::getOverdefined();
+
+- auto I = ValueCache.find_as(V);
+- if (I == ValueCache.end())
+- return ValueLatticeElement();
+- auto BBI = I->second->BlockVals.find(BB);
+- if (BBI == I->second->BlockVals.end())
++ auto LatticeIt = It->second.LatticeElements.find(V);
++ if (LatticeIt == It->second.LatticeElements.end())
+ return ValueLatticeElement();
+- return BBI->second;
++
++ return LatticeIt->second;
+ }
+
+ /// clear - Empty the cache.
+ void clear() {
+- SeenBlocks.clear();
+- ValueCache.clear();
+- OverDefinedCache.clear();
++ BlockCache.clear();
++ ValueHandles.clear();
+ }
+
+ /// Inform the cache that a given value has been deleted.
+@@ -251,23 +223,18 @@ namespace {
+ /// OldSucc might have (unless also overdefined in NewSucc). This just
+ /// flushes elements from the cache and does not add any.
+ void threadEdgeImpl(BasicBlock *OldSucc,BasicBlock *NewSucc);
+-
+- friend struct LVIValueHandle;
+ };
+ }
+
+ void LazyValueInfoCache::eraseValue(Value *V) {
+- for (auto I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E;) {
+- // Copy and increment the iterator immediately so we can erase behind
+- // ourselves.
+- auto Iter = I++;
+- SmallPtrSetImpl<Value *> &ValueSet = Iter->second;
+- ValueSet.erase(V);
+- if (ValueSet.empty())
+- OverDefinedCache.erase(Iter);
++ for (auto &Pair : BlockCache) {
++ Pair.second.LatticeElements.erase(V);
++ Pair.second.OverDefined.erase(V);
+ }
+
+- ValueCache.erase(V);
++ auto HandleIt = ValueHandles.find_as(V);
++ if (HandleIt != ValueHandles.end())
++ ValueHandles.erase(HandleIt);
+ }
+
+ void LVIValueHandle::deleted() {
+@@ -277,18 +244,7 @@ void LVIValueHandle::deleted() {
+ }
+
+ void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
+- // Shortcut if we have never seen this block.
+- DenseSet<PoisoningVH<BasicBlock> >::iterator I = SeenBlocks.find(BB);
+- if (I == SeenBlocks.end())
+- return;
+- SeenBlocks.erase(I);
+-
+- auto ODI = OverDefinedCache.find(BB);
+- if (ODI != OverDefinedCache.end())
+- OverDefinedCache.erase(ODI);
+-
+- for (auto &I : ValueCache)
+- I.second->BlockVals.erase(BB);
++ BlockCache.erase(BB);
+ }
+
+ void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
+@@ -306,10 +262,11 @@ void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
+ std::vector<BasicBlock*> worklist;
+ worklist.push_back(OldSucc);
+
+- auto I = OverDefinedCache.find(OldSucc);
+- if (I == OverDefinedCache.end())
++ auto I = BlockCache.find(OldSucc);
++ if (I == BlockCache.end() || I->second.OverDefined.empty())
+ return; // Nothing to process here.
+- SmallVector<Value *, 4> ValsToClear(I->second.begin(), I->second.end());
++ SmallVector<Value *, 4> ValsToClear(I->second.OverDefined.begin(),
++ I->second.OverDefined.end());
+
+ // Use a worklist to perform a depth-first search of OldSucc's successors.
+ // NOTE: We do not need a visited list since any blocks we have already
+@@ -323,10 +280,10 @@ void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
+ if (ToUpdate == NewSucc) continue;
+
+ // If a value was marked overdefined in OldSucc, and is here too...
+- auto OI = OverDefinedCache.find(ToUpdate);
+- if (OI == OverDefinedCache.end())
++ auto OI = BlockCache.find(ToUpdate);
++ if (OI == BlockCache.end() || OI->second.OverDefined.empty())
+ continue;
+- SmallPtrSetImpl<Value *> &ValueSet = OI->second;
++ auto &ValueSet = OI->second.OverDefined;
+
+ bool changed = false;
+ for (Value *V : ValsToClear) {
+@@ -336,11 +293,6 @@ void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
+ // If we removed anything, then we potentially need to update
+ // blocks successors too.
+ changed = true;
+-
+- if (ValueSet.empty()) {
+- OverDefinedCache.erase(OI);
+- break;
+- }
+ }
+
+ if (!changed) continue;
diff --git a/build/build-clang/rename_gcov_flush.patch b/build/build-clang/rename_gcov_flush.patch
new file mode 100644
index 0000000000..c707c4423f
--- /dev/null
+++ b/build/build-clang/rename_gcov_flush.patch
@@ -0,0 +1,40 @@
+Index: compiler-rt/lib/profile/GCDAProfiling.c
+===================================================================
+diff --git a/compiler-rt/lib/profile/GCDAProfiling.c b/compiler-rt/lib/profile/GCDAProfiling.c
+--- a/compiler-rt/lib/profile/GCDAProfiling.c
++++ b/compiler-rt/lib/profile/GCDAProfiling.c
+@@ -619,7 +619,7 @@
+ fn_list_insert(&flush_fn_list, fn);
+ }
+
+-void __gcov_flush() {
++void __custom_llvm_gcov_flush() {
+ struct fn_node* curr = flush_fn_list.head;
+
+ while (curr) {
+diff --git a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+index 9af64ed332c..bcebe303ff4 100644
+--- a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
++++ b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+@@ -647,7 +647,7 @@
+ for (auto I : ForkAndExecs) {
+ IRBuilder<> Builder(I);
+ FunctionType *FTy = FunctionType::get(Builder.getVoidTy(), {}, false);
+- FunctionCallee GCOVFlush = M->getOrInsertFunction("__gcov_flush", FTy);
++ FunctionCallee GCOVFlush = M->getOrInsertFunction("__custom_llvm_gcov_flush", FTy);
+ Builder.CreateCall(GCOVFlush);
+ I->getParent()->splitBasicBlock(I);
+ }
+diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
+index e113f9a679..b3a07b18c0 100644
+--- a/clang/lib/Driver/ToolChains/Darwin.cpp
++++ b/clang/lib/Driver/ToolChains/Darwin.cpp
+@@ -1122,7 +1122,7 @@
+ // runtime's functionality.
+ if (hasExportSymbolDirective(Args)) {
+ if (needsGCovInstrumentation(Args)) {
+- addExportedSymbol(CmdArgs, "___gcov_flush");
++ addExportedSymbol(CmdArgs, "___custom_llvm_gcov_flush");
+ addExportedSymbol(CmdArgs, "_flush_fn_list");
+ addExportedSymbol(CmdArgs, "_writeout_fn_list");
+ } else {
diff --git a/build/build-clang/rename_gcov_flush_7.patch b/build/build-clang/rename_gcov_flush_7.patch
new file mode 100644
index 0000000000..ae7b922716
--- /dev/null
+++ b/build/build-clang/rename_gcov_flush_7.patch
@@ -0,0 +1,14 @@
+Index: compiler-rt/lib/profile/GCDAProfiling.c
+===================================================================
+diff --git a/compiler-rt/lib/profile/GCDAProfiling.c b/compiler-rt/lib/profile/GCDAProfiling.c
+--- a/compiler-rt/lib/profile/GCDAProfiling.c (revision 336380)
++++ b/compiler-rt/lib/profile/GCDAProfiling.c (working copy)
+@@ -555,7 +555,7 @@
+ fn_list_insert(&flush_fn_list, fn);
+ }
+
+-void __gcov_flush() {
++void __custom_llvm_gcov_flush() {
+ struct fn_node* curr = flush_fn_list.head;
+
+ while (curr) {
diff --git a/build/build-clang/rename_gcov_flush_clang_10.patch b/build/build-clang/rename_gcov_flush_clang_10.patch
new file mode 100644
index 0000000000..1da3b653a5
--- /dev/null
+++ b/build/build-clang/rename_gcov_flush_clang_10.patch
@@ -0,0 +1,42 @@
+diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
+index 220bc8f9835..4f7ce485777 100644
+--- a/clang/lib/Driver/ToolChains/Darwin.cpp
++++ b/clang/lib/Driver/ToolChains/Darwin.cpp
+@@ -1143,7 +1143,7 @@ void Darwin::addProfileRTLibs(const ArgList &Args,
+ // runtime's functionality.
+ if (hasExportSymbolDirective(Args)) {
+ if (ForGCOV) {
+- addExportedSymbol(CmdArgs, "___gcov_flush");
++ addExportedSymbol(CmdArgs, "___custom_llvm_gcov_flush");
+ addExportedSymbol(CmdArgs, "_flush_fn_list");
+ addExportedSymbol(CmdArgs, "_writeout_fn_list");
+ } else {
+diff --git a/compiler-rt/lib/profile/GCDAProfiling.c b/compiler-rt/lib/profile/GCDAProfiling.c
+index 498c05900bf..b7257db10e7 100644
+--- a/compiler-rt/lib/profile/GCDAProfiling.c
++++ b/compiler-rt/lib/profile/GCDAProfiling.c
+@@ -619,7 +619,7 @@ void llvm_register_flush_function(fn_ptr fn) {
+ fn_list_insert(&flush_fn_list, fn);
+ }
+
+-void __gcov_flush() {
++void __custom_llvm_gcov_flush() {
+ struct fn_node* curr = flush_fn_list.head;
+
+ while (curr) {
+diff --git a/compiler-rt/test/tsan/pthread_atfork_deadlock2.c b/compiler-rt/test/tsan/pthread_atfork_deadlock2.c
+new file mode 100644
+index 00000000000..e69de29bb2d
+diff --git a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+index bf3e4ed3e31..37bdcfaeab8 100644
+--- a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
++++ b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+@@ -656,7 +656,7 @@ void GCOVProfiler::AddFlushBeforeForkAndExec() {
+ for (auto I : ForkAndExecs) {
+ IRBuilder<> Builder(I);
+ FunctionType *FTy = FunctionType::get(Builder.getVoidTy(), {}, false);
+- FunctionCallee GCOVFlush = M->getOrInsertFunction("__gcov_flush", FTy);
++ FunctionCallee GCOVFlush = M->getOrInsertFunction("__custom_llvm_gcov_flush", FTy);
+ Builder.CreateCall(GCOVFlush);
+ I->getParent()->splitBasicBlock(I);
+ }
diff --git a/build/build-clang/rename_gcov_flush_clang_11.patch b/build/build-clang/rename_gcov_flush_clang_11.patch
new file mode 100644
index 0000000000..bd76477fd5
--- /dev/null
+++ b/build/build-clang/rename_gcov_flush_clang_11.patch
@@ -0,0 +1,26 @@
+diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
+index 7b879f8cb65..3810a2ceec2 100644
+--- a/clang/lib/Driver/ToolChains/Darwin.cpp
++++ b/clang/lib/Driver/ToolChains/Darwin.cpp
+@@ -1196,7 +1196,7 @@ void Darwin::addProfileRTLibs(const ArgList &Args,
+ // runtime's functionality.
+ if (hasExportSymbolDirective(Args)) {
+ if (ForGCOV) {
+- addExportedSymbol(CmdArgs, "___gcov_flush");
++ addExportedSymbol(CmdArgs, "___custom_llvm_gcov_flush");
+ addExportedSymbol(CmdArgs, "_flush_fn_list");
+ addExportedSymbol(CmdArgs, "_writeout_fn_list");
+ addExportedSymbol(CmdArgs, "_reset_fn_list");
+diff --git a/compiler-rt/lib/profile/GCDAProfiling.c b/compiler-rt/lib/profile/GCDAProfiling.c
+index 57d8dec423c..2edfb6e19e9 100644
+--- a/compiler-rt/lib/profile/GCDAProfiling.c
++++ b/compiler-rt/lib/profile/GCDAProfiling.c
+@@ -644,7 +644,7 @@ void llvm_register_flush_function(fn_ptr fn) {
+ fn_list_insert(&flush_fn_list, fn);
+ }
+
+-void __gcov_flush() {
++void __custom_llvm_gcov_flush() {
+ struct fn_node* curr = flush_fn_list.head;
+
+ while (curr) {
diff --git a/build/build-clang/revert-r362047-and-r362065.patch b/build/build-clang/revert-r362047-and-r362065.patch
new file mode 100644
index 0000000000..c522c9ae02
--- /dev/null
+++ b/build/build-clang/revert-r362047-and-r362065.patch
@@ -0,0 +1,62 @@
+Bisection found that r362047 (and its followup build fix r362065) cause the
+build to install the android PGO library into the following location:
+stage2/clang/lib/linux/libclang_rt.profile-arm-android.a
+rather than the expected:
+stage2/clang/lib64/clang/$VERSION/lib/linux/libclang_rt.profile-arm-android.a
+
+For lack of any progress in debugging this, revert those two patches.
+
+--- a/llvm/runtimes/CMakeLists.txt
++++ b/llvm/runtimes/CMakeLists.txt
+@@ -60,12 +60,11 @@
+ project(Runtimes C CXX ASM)
+
+- find_package(LLVM PATHS "${LLVM_BINARY_DIR}" NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+-
+ # Add the root project's CMake modules, and the LLVM build's modules to the
+ # CMake module path.
+ list(INSERT CMAKE_MODULE_PATH 0
+ "${CMAKE_CURRENT_SOURCE_DIR}/../cmake"
+ "${CMAKE_CURRENT_SOURCE_DIR}/../cmake/modules"
++ "${LLVM_LIBRARY_DIR}/cmake/llvm"
+ )
+
+ # Some of the runtimes will conditionally use the compiler-rt sanitizers
+@@ -80,6 +79,11 @@
+ endif()
+ endif()
+
++ # LLVMConfig.cmake contains a bunch of CMake variables from the LLVM build.
++ # This file is installed as part of LLVM distributions, so this can be used
++ # either from a build directory or an installed LLVM.
++ include(LLVMConfig)
++
+ # Setting these variables will allow the sub-build to put their outputs into
+ # the library and bin directories of the top-level build.
+ set(LLVM_LIBRARY_OUTPUT_INTDIR ${LLVM_LIBRARY_DIR})
+@@ -89,9 +93,6 @@
+ set(LLVM_MAIN_SRC_DIR ${LLVM_BUILD_MAIN_SRC_DIR})
+ set(LLVM_CMAKE_PATH ${LLVM_MAIN_SRC_DIR}/cmake/modules)
+
+- # This variable is used by individual runtimes to locate LLVM files.
+- set(LLVM_PATH ${LLVM_BUILD_MAIN_SRC_DIR})
+-
+ if(APPLE)
+ set(LLVM_ENABLE_LIBCXX ON CACHE BOOL "")
+ endif()
+@@ -381,4 +382,6 @@
+ CMAKE_ARGS -DCOMPILER_RT_BUILD_BUILTINS=Off
+ -DLLVM_INCLUDE_TESTS=${LLVM_INCLUDE_TESTS}
++ -DLLVM_BINARY_DIR=${LLVM_BINARY_DIR}
++ -DLLVM_LIBRARY_DIR=${LLVM_LIBRARY_DIR}
+ -DLLVM_DEFAULT_TARGET_TRIPLE=${TARGET_TRIPLE}
+ -DLLVM_ENABLE_PROJECTS_USED=${LLVM_ENABLE_PROJECTS_USED}
+@@ -470,6 +473,8 @@
+ # Builtins were built separately above
+ CMAKE_ARGS -DCOMPILER_RT_BUILD_BUILTINS=Off
+ -DLLVM_INCLUDE_TESTS=${LLVM_INCLUDE_TESTS}
++ -DLLVM_BINARY_DIR=${LLVM_BINARY_DIR}
++ -DLLVM_LIBRARY_DIR=${LLVM_LIBRARY_DIR}
+ -DLLVM_DEFAULT_TARGET_TRIPLE=${target}
+ -DLLVM_ENABLE_PROJECTS_USED=${LLVM_ENABLE_PROJECTS_USED}
+ -DLLVM_ENABLE_PER_TARGET_RUNTIME_DIR=ON
diff --git a/build/build-clang/static-llvm-symbolizer.patch b/build/build-clang/static-llvm-symbolizer.patch
new file mode 100644
index 0000000000..ea8ebc322b
--- /dev/null
+++ b/build/build-clang/static-llvm-symbolizer.patch
@@ -0,0 +1,12 @@
+diff --git a/llvm/tools/llvm-symbolizer/CMakeLists.txt b/llvm/tools/llvm-symbolizer/CMakeLists.txt
+index 8185c296c50..13c7419fa47 100644
+--- a/llvm/tools/llvm-symbolizer/CMakeLists.txt
++++ b/llvm/tools/llvm-symbolizer/CMakeLists.txt
+@@ -13,6 +13,7 @@ set(LLVM_LINK_COMPONENTS
+ )
+
+ add_llvm_tool(llvm-symbolizer
++ DISABLE_LLVM_LINK_LLVM_DYLIB
+ llvm-symbolizer.cpp
+ )
+
diff --git a/build/build-clang/tsan-hang-be41a98ac222.patch b/build/build-clang/tsan-hang-be41a98ac222.patch
new file mode 100644
index 0000000000..3e148e52b3
--- /dev/null
+++ b/build/build-clang/tsan-hang-be41a98ac222.patch
@@ -0,0 +1,100 @@
+From be41a98ac222f33ed5558d86e1cede67249e99b5 Mon Sep 17 00:00:00 2001
+From: Dmitry Vyukov <dvyukov@google.com>
+Date: Sat, 21 Mar 2020 13:34:50 +0100
+Subject: [PATCH] tsan: fix deadlock with pthread_atfork callbacks
+
+This fixes the bug reported at:
+https://groups.google.com/forum/#!topic/thread-sanitizer/e_zB9gYqFHM
+
+A pthread_atfork callback triggers a data race
+and we deadlock on the report_mtx. Ignore memory access
+in the pthread_atfork callbacks to prevent the deadlock.
+---
+ compiler-rt/lib/tsan/rtl/tsan_rtl.cc | 9 ++++
+ .../test/tsan/pthread_atfork_deadlock2.c | 49 +++++++++++++++++++
+ 2 files changed, 58 insertions(+)
+ create mode 100644 compiler-rt/test/tsan/pthread_atfork_deadlock2.c
+
+diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl.cc
+index fe469faad2a2..13c9b770f50a 100644
+--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cc
++++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cc
+@@ -495,14 +495,23 @@ int Finalize(ThreadState *thr) {
+ void ForkBefore(ThreadState *thr, uptr pc) {
+ ctx->thread_registry->Lock();
+ ctx->report_mtx.Lock();
++ // Ignore memory accesses in the pthread_atfork callbacks.
++ // If any of them triggers a data race we will deadlock
++ // on the report_mtx.
++ // We could ignore interceptors and sync operations as well,
++ // but so far it's unclear if it will do more good or harm.
++ // Unnecessarily ignoring things can lead to false positives later.
++ ThreadIgnoreBegin(thr, pc);
+ }
+
+ void ForkParentAfter(ThreadState *thr, uptr pc) {
++ ThreadIgnoreEnd(thr, pc); // Begin is in ForkBefore.
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+ }
+
+ void ForkChildAfter(ThreadState *thr, uptr pc) {
++ ThreadIgnoreEnd(thr, pc); // Begin is in ForkBefore.
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+
+diff --git a/compiler-rt/test/tsan/pthread_atfork_deadlock2.c b/compiler-rt/test/tsan/pthread_atfork_deadlock2.c
+new file mode 100644
+index 000000000000..700507c1e637
+--- /dev/null
++++ b/compiler-rt/test/tsan/pthread_atfork_deadlock2.c
+@@ -0,0 +1,49 @@
++// RUN: %clang_tsan -O1 %s -o %t && %run %t 2>&1 | FileCheck %s
++// Regression test for
++// https://groups.google.com/d/msg/thread-sanitizer/e_zB9gYqFHM/DmAiTsrLAwAJ
++// pthread_atfork() callback triggers a data race and we deadlocked
++// on the report_mtx as we lock it around fork.
++#include "test.h"
++#include <sys/types.h>
++#include <sys/wait.h>
++#include <errno.h>
++
++int glob = 0;
++
++void *worker(void *unused) {
++ glob++;
++ barrier_wait(&barrier);
++ return NULL;
++}
++
++void atfork() {
++ glob++;
++}
++
++int main() {
++ barrier_init(&barrier, 2);
++ pthread_atfork(atfork, NULL, NULL);
++ pthread_t t;
++ pthread_create(&t, NULL, worker, NULL);
++ barrier_wait(&barrier);
++ pid_t pid = fork();
++ if (pid < 0) {
++ fprintf(stderr, "fork failed: %d\n", errno);
++ return 1;
++ }
++ if (pid == 0) {
++ fprintf(stderr, "CHILD\n");
++ return 0;
++ }
++ if (pid != waitpid(pid, NULL, 0)) {
++ fprintf(stderr, "waitpid failed: %d\n", errno);
++ return 1;
++ }
++ pthread_join(t, NULL);
++ fprintf(stderr, "PARENT\n");
++ return 0;
++}
++
++// CHECK-NOT: ThreadSanitizer: data race
++// CHECK: CHILD
++// CHECK: PARENT
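
The fix lives entirely in the TSan runtime, so instrumented programs need no source changes. For illustration only, one user-side mitigation that might have sidestepped the hang before this fix is to keep the atfork callback uninstrumented, so the racy access can never trigger a report while report_mtx is held (a hedged sketch, not part of the patch):

/* Hypothetical workaround sketch; the runtime fix above makes it unnecessary. */
#include <pthread.h>
#include <stddef.h>

static int glob;

__attribute__((no_sanitize("thread")))  /* callback stays uninstrumented */
static void atfork_cb(void) {
  glob++;  /* still racy, but invisible to TSan, so no report is generated */
}

static void install_atfork_handler(void) {
  pthread_atfork(atfork_cb, NULL, NULL);
}
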
diff --git a/build/build-clang/tsan-hang-be41a98ac222_clang_10.patch b/build/build-clang/tsan-hang-be41a98ac222_clang_10.patch
new file mode 100644
index 0000000000..e65335a1fd
--- /dev/null
+++ b/build/build-clang/tsan-hang-be41a98ac222_clang_10.patch
@@ -0,0 +1,100 @@
+From be41a98ac222f33ed5558d86e1cede67249e99b5 Mon Sep 17 00:00:00 2001
+From: Dmitry Vyukov <dvyukov@google.com>
+Date: Sat, 21 Mar 2020 13:34:50 +0100
+Subject: [PATCH] tsan: fix deadlock with pthread_atfork callbacks
+
+This fixes the bug reported at:
+https://groups.google.com/forum/#!topic/thread-sanitizer/e_zB9gYqFHM
+
+A pthread_atfork callback triggers a data race
+and we deadlock on the report_mtx. Ignore memory access
+in the pthread_atfork callbacks to prevent the deadlock.
+---
+ compiler-rt/lib/tsan/rtl/tsan_rtl.cc | 9 ++++
+ .../test/tsan/pthread_atfork_deadlock2.c | 49 +++++++++++++++++++
+ 2 files changed, 58 insertions(+)
+ create mode 100644 compiler-rt/test/tsan/pthread_atfork_deadlock2.c
+
+diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+index 3f3c0cce119..5e324a0a5fd 100644
+--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
++++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+@@ -494,14 +494,23 @@ int Finalize(ThreadState *thr) {
+ void ForkBefore(ThreadState *thr, uptr pc) {
+ ctx->thread_registry->Lock();
+ ctx->report_mtx.Lock();
++ // Ignore memory accesses in the pthread_atfork callbacks.
++ // If any of them triggers a data race we will deadlock
++ // on the report_mtx.
++ // We could ignore interceptors and sync operations as well,
++ // but so far it's unclear if it will do more good or harm.
++ // Unnecessarily ignoring things can lead to false positives later.
++ ThreadIgnoreBegin(thr, pc);
+ }
+
+ void ForkParentAfter(ThreadState *thr, uptr pc) {
++ ThreadIgnoreEnd(thr, pc); // Begin is in ForkBefore.
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+ }
+
+ void ForkChildAfter(ThreadState *thr, uptr pc) {
++ ThreadIgnoreEnd(thr, pc); // Begin is in ForkBefore.
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+
+diff --git a/compiler-rt/test/tsan/pthread_atfork_deadlock2.c b/compiler-rt/test/tsan/pthread_atfork_deadlock2.c
+new file mode 100644
+index 00000000000..700507c1e63
+--- /dev/null
++++ b/compiler-rt/test/tsan/pthread_atfork_deadlock2.c
+@@ -0,0 +1,49 @@
++// RUN: %clang_tsan -O1 %s -o %t && %run %t 2>&1 | FileCheck %s
++// Regression test for
++// https://groups.google.com/d/msg/thread-sanitizer/e_zB9gYqFHM/DmAiTsrLAwAJ
++// pthread_atfork() callback triggers a data race and we deadlocked
++// on the report_mtx as we lock it around fork.
++#include "test.h"
++#include <sys/types.h>
++#include <sys/wait.h>
++#include <errno.h>
++
++int glob = 0;
++
++void *worker(void *unused) {
++ glob++;
++ barrier_wait(&barrier);
++ return NULL;
++}
++
++void atfork() {
++ glob++;
++}
++
++int main() {
++ barrier_init(&barrier, 2);
++ pthread_atfork(atfork, NULL, NULL);
++ pthread_t t;
++ pthread_create(&t, NULL, worker, NULL);
++ barrier_wait(&barrier);
++ pid_t pid = fork();
++ if (pid < 0) {
++ fprintf(stderr, "fork failed: %d\n", errno);
++ return 1;
++ }
++ if (pid == 0) {
++ fprintf(stderr, "CHILD\n");
++ return 0;
++ }
++ if (pid != waitpid(pid, NULL, 0)) {
++ fprintf(stderr, "waitpid failed: %d\n", errno);
++ return 1;
++ }
++ pthread_join(t, NULL);
++ fprintf(stderr, "PARENT\n");
++ return 0;
++}
++
++// CHECK-NOT: ThreadSanitizer: data race
++// CHECK: CHILD
++// CHECK: PARENT
diff --git a/build/build-clang/unpoison-thread-stacks.patch b/build/build-clang/unpoison-thread-stacks.patch
new file mode 100644
index 0000000000..2fb7cafd90
--- /dev/null
+++ b/build/build-clang/unpoison-thread-stacks.patch
@@ -0,0 +1,62 @@
+[winasan] Unpoison the stack in NtTerminateThread
+
+In long-running builds we've seen some ASan complaints during thread creation
+that we suspect are due to leftover poisoning from previous threads whose stacks
+occupied that memory. This patch adds a hook that unpoisons the stack just
+before the NtTerminateThread syscall.
+
+Differential Revision: https://reviews.llvm.org/D52091
+
+** Update for clang 9 ** : After some backouts, this patch eventually landed
+upstream in a different form, as the TLS handler `asan_thread_exit`, but that
+variant causes failures in our test suite, so revert the TLS handler in favor of
+the interceptor approach from the first patch.
+
+--- a/compiler-rt/lib/asan/asan_win.cc
++++ b/compiler-rt/lib/asan/asan_win.cc
+@@ -154,6 +154,14 @@
+ thr_flags, tid);
+ }
+
++INTERCEPTOR_WINAPI(void, NtTerminateThread, void *rcx) {
++ // Unpoison the terminating thread's stack because the memory may be re-used.
++ NT_TIB *tib = (NT_TIB *)NtCurrentTeb();
++ uptr stackSize = (uptr)tib->StackBase - (uptr)tib->StackLimit;
++ __asan_unpoison_memory_region(tib->StackLimit, stackSize);
++ return REAL(NtTerminateThread(rcx));
++}
++
+ // }}}
+
+ namespace __asan {
+@@ -168,7 +176,9 @@
+
+ ASAN_INTERCEPT_FUNC(CreateThread);
+ ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter);
+-
++ CHECK(::__interception::OverrideFunction("NtTerminateThread",
++ (uptr)WRAP(NtTerminateThread),
++ (uptr *)&REAL(NtTerminateThread)));
+ #ifdef _WIN64
+ ASAN_INTERCEPT_FUNC(__C_specific_handler);
+ #else
+@@ -380,19 +390,6 @@
+ void *, unsigned long, void *) = asan_thread_init;
+ #endif
+
+-static void NTAPI asan_thread_exit(void *module, DWORD reason, void *reserved) {
+- if (reason == DLL_THREAD_DETACH) {
+- // Unpoison the thread's stack because the memory may be re-used.
+- NT_TIB *tib = (NT_TIB *)NtCurrentTeb();
+- uptr stackSize = (uptr)tib->StackBase - (uptr)tib->StackLimit;
+- __asan_unpoison_memory_region(tib->StackLimit, stackSize);
+- }
+-}
+-
+-#pragma section(".CRT$XLY", long, read) // NOLINT
+-__declspec(allocate(".CRT$XLY")) void(NTAPI *__asan_tls_exit)(
+- void *, unsigned long, void *) = asan_thread_exit;
+-
+ WIN_FORCE_LINK(__asan_dso_reg_hook)
+
+ // }}}
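
The interceptor relies only on the public sanitizer interface, so the same unpoisoning pattern is available to ordinary code that recycles stack-like memory. A minimal sketch under that assumption (the helper name is illustrative, not part of the patch):

/* Clears stale ASan shadow for a region about to be reused, mirroring the
   NT_TIB::StackLimit / StackBase computation in the interceptor above. */
#include <sanitizer/asan_interface.h>
#include <stddef.h>

static void unpoison_stack_range(void *limit, void *base) {
  /* limit is the low address of the region, base the high address */
  __asan_unpoison_memory_region(limit, (size_t)((char *)base - (char *)limit));
}
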
diff --git a/build/build-clang/unpoison-thread-stacks_clang_10.patch b/build/build-clang/unpoison-thread-stacks_clang_10.patch
new file mode 100644
index 0000000000..563fa1d7bf
--- /dev/null
+++ b/build/build-clang/unpoison-thread-stacks_clang_10.patch
@@ -0,0 +1,64 @@
+[winasan] Unpoison the stack in NtTerminateThread
+
+In long-running builds we've seen some ASan complaints during thread creation
+that we suspect are due to leftover poisoning from previous threads whose stacks
+occupied that memory. This patch adds a hook that unpoisons the stack just
+before the NtTerminateThread syscall.
+
+Differential Revision: https://reviews.llvm.org/D52091
+
+** Update for clang 9 ** : After some backouts, this patch eventually landed
+upstream in a different form, as the TLS handler `asan_thread_exit`, but that
+variant causes failures in our test suite, so revert the TLS handler in favor of
+the interceptor approach from the first patch.
+
+diff --git a/compiler-rt/lib/asan/asan_win.cpp b/compiler-rt/lib/asan/asan_win.cpp
+index 417892aaedd..5fe86db44f4 100644
+--- a/compiler-rt/lib/asan/asan_win.cpp
++++ b/compiler-rt/lib/asan/asan_win.cpp
+@@ -154,6 +154,14 @@ INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,
+ thr_flags, tid);
+ }
+
++INTERCEPTOR_WINAPI(void, NtTerminateThread, void *rcx) {
++ // Unpoison the terminating thread's stack because the memory may be re-used.
++ NT_TIB *tib = (NT_TIB *)NtCurrentTeb();
++ uptr stackSize = (uptr)tib->StackBase - (uptr)tib->StackLimit;
++ __asan_unpoison_memory_region(tib->StackLimit, stackSize);
++ return REAL(NtTerminateThread(rcx));
++}
++
+ // }}}
+
+ namespace __asan {
+@@ -168,7 +176,9 @@ void InitializePlatformInterceptors() {
+
+ ASAN_INTERCEPT_FUNC(CreateThread);
+ ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter);
+-
++ CHECK(::__interception::OverrideFunction("NtTerminateThread",
++ (uptr)WRAP(NtTerminateThread),
++ (uptr *)&REAL(NtTerminateThread)));
+ #ifdef _WIN64
+ ASAN_INTERCEPT_FUNC(__C_specific_handler);
+ #else
+@@ -380,19 +390,6 @@ __declspec(allocate(".CRT$XLAB")) void(NTAPI *__asan_tls_init)(
+ void *, unsigned long, void *) = asan_thread_init;
+ #endif
+
+-static void NTAPI asan_thread_exit(void *module, DWORD reason, void *reserved) {
+- if (reason == DLL_THREAD_DETACH) {
+- // Unpoison the thread's stack because the memory may be re-used.
+- NT_TIB *tib = (NT_TIB *)NtCurrentTeb();
+- uptr stackSize = (uptr)tib->StackBase - (uptr)tib->StackLimit;
+- __asan_unpoison_memory_region(tib->StackLimit, stackSize);
+- }
+-}
+-
+-#pragma section(".CRT$XLY", long, read)
+-__declspec(allocate(".CRT$XLY")) void(NTAPI *__asan_tls_exit)(
+- void *, unsigned long, void *) = asan_thread_exit;
+-
+ WIN_FORCE_LINK(__asan_dso_reg_hook)
+
+ // }}}