author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:47:29 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:47:29 +0000
commit     0ebf5bdf043a27fd3dfb7f92e0cb63d88954c44d (patch)
tree       a31f07c9bcca9d56ce61e9a1ffd30ef350d513aa /build/moz.configure/toolchain.configure
parent     Initial commit. (diff)
Adding upstream version 115.8.0esr. (upstream/115.8.0esr)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'build/moz.configure/toolchain.configure')
-rw-r--r--  build/moz.configure/toolchain.configure  3194
1 file changed, 3194 insertions, 0 deletions
diff --git a/build/moz.configure/toolchain.configure b/build/moz.configure/toolchain.configure
new file mode 100644
index 0000000000..3f91d71537
--- /dev/null
+++ b/build/moz.configure/toolchain.configure
@@ -0,0 +1,3194 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Code optimization
+# ==============================================================
+
+option("--disable-optimize", nargs="?", help="Disable optimizations via compiler flags")
+
+
+@depends("--enable-optimize")
+def moz_optimize(option):
+ flags = None
+
+ if len(option):
+ val = "2"
+ flags = option[0]
+ elif option:
+ val = "1"
+ else:
+ val = None
+
+ return namespace(
+ optimize=val,
+ flags=flags,
+ )
+
+
+set_config("MOZ_OPTIMIZE", moz_optimize.optimize)
+add_old_configure_assignment("MOZ_OPTIMIZE", moz_optimize.optimize)
+add_old_configure_assignment("MOZ_CONFIGURE_OPTIMIZE_FLAGS", moz_optimize.flags)
+
+# Android NDK
+# ==============================================================
+
+
+@depends("--disable-compile-environment", target)
+def compiling_android(compile_env, target):
+ return compile_env and target.os == "Android"
+
+
+include("android-ndk.configure", when=compiling_android)
+
+with only_when(target_is_osx):
+ # MacOS deployment target version
+ # ==============================================================
+ # This needs to happen before any compilation test is done.
+
+ option(
+ "--enable-macos-target",
+ env="MACOSX_DEPLOYMENT_TARGET",
+ nargs=1,
+ default=depends(target, developer_options)
+ # We continue to target 10.12 on Intel, but can target 11.0 for
+ # aarch64 since the earliest hardware was released alongside 11.0.
+ # For local builds, we want to target 10.12 regardless of the
+ # underlying platform to catch any errors or warnings that wouldn't
+ # show up when targeting 11.0, since these would later show up on
+ # CI for Intel builds.
+ (lambda t, d: "11.0" if (t.cpu == "aarch64" and not d) else "10.12"),
+ help="Set the minimum MacOS version needed at runtime{|}",
+ )
+
+ @depends_if("--enable-macos-target", developer_options)
+ def macos_target(value, _):
+ return value[0]
+
+
+with only_when(host_is_osx | target_is_osx):
+ # MacOS SDK
+ # =========
+ option(
+ "--with-macos-sdk",
+ env="MACOS_SDK_DIR",
+ nargs=1,
+ help="Location of platform SDK to use",
+ )
+
+ @imports("plistlib")
+ @imports(_from="__builtin__", _import="open")
+ @imports(_from="__builtin__", _import="Exception")
+ def get_sdk_version(sdk):
+ with open(os.path.join(sdk, "SDKSettings.plist"), "rb") as plist:
+ obj = plistlib.load(plist)
+ if not obj:
+ raise Exception(
+ "Error parsing SDKSettings.plist in the SDK directory: %s" % sdk
+ )
+ if "Version" not in obj:
+ raise Exception(
+ "Error finding Version information in SDKSettings.plist from the SDK: %s"
+ % sdk
+ )
+ return Version(obj["Version"])
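+
+ # Illustrative sketch (not taken from an actual SDK): an SDK directory such
+ # as MacOSX13.3.sdk contains an SDKSettings.plist whose "Version" key holds a
+ # string like "13.3"; get_sdk_version() returns it wrapped in Version() so it
+ # can be compared against sdk_min_version() below.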
+
+ def sdk_min_version():
+ return "13.3"
+
+ @depends(
+ "--with-macos-sdk",
+ host,
+ bootstrap_path(
+ "MacOSX{}.sdk".format(sdk_min_version()),
+ when=depends("--with-macos-sdk")(lambda x: not x),
+ ),
+ )
+ @imports(_from="__builtin__", _import="Exception")
+ @imports(_from="os.path", _import="isdir")
+ @imports(_from="os", _import="listdir")
+ def macos_sdk(sdk, host, bootstrapped):
+ if bootstrapped:
+ sdk = [bootstrapped]
+ if sdk:
+ sdk = sdk[0]
+ try:
+ version = get_sdk_version(sdk)
+ except Exception as e:
+ die(e)
+ elif host.os == "OSX":
+ sdk = check_cmd_output(
+ "xcrun", "--show-sdk-path", onerror=lambda: ""
+ ).rstrip()
+ if not sdk:
+ die(
+ "Could not find the macOS SDK. Please use --with-macos-sdk to give "
+ "the path to a macOS SDK."
+ )
+ # Scan the parent directory xcrun returns for the most recent SDK.
+ sdk_dir = os.path.dirname(sdk)
+ versions = []
+ for d in listdir(sdk_dir):
+ if d.lower().startswith("macos"):
+ try:
+ sdk = os.path.join(sdk_dir, d)
+ versions.append((get_sdk_version(sdk), sdk))
+ except Exception:
+ pass
+ version, sdk = max(versions)
+ else:
+ die(
+ "Need a macOS SDK when targeting macOS. Please use --with-macos-sdk "
+ "to give the path to a macOS SDK."
+ )
+
+ if not isdir(sdk):
+ die(
+ "SDK not found in %s. When using --with-macos-sdk, you must specify a "
+ "valid SDK. SDKs are installed when the optional cross-development "
+ "tools are selected during the Xcode/Developer Tools installation."
+ % sdk
+ )
+ if version < Version(sdk_min_version()):
+ die(
+ 'SDK version "%s" is too old. Please upgrade to at least %s. Try '
+ "updating your system Xcode." % (version, sdk_min_version())
+ )
+ return sdk
+
+ set_config("MACOS_SDK_DIR", macos_sdk)
+
+
+with only_when(target_is_osx):
+ with only_when(cross_compiling):
+ option(
+ "--with-macos-private-frameworks",
+ env="MACOS_PRIVATE_FRAMEWORKS_DIR",
+ nargs=1,
+ help="Location of private frameworks to use",
+ )
+
+ @depends_if("--with-macos-private-frameworks")
+ @imports(_from="os.path", _import="isdir")
+ def macos_private_frameworks(value):
+ if value and not isdir(value[0]):
+ die(
+ "PrivateFrameworks not found not found in %s. When using "
+ "--with-macos-private-frameworks, you must specify a valid "
+ "directory",
+ value[0],
+ )
+ return value[0]
+
+ @depends(macos_private_frameworks, macos_sdk)
+ def macos_private_frameworks(value, sdk):
+ if value:
+ return value
+ return os.path.join(sdk or "/", "System/Library/PrivateFrameworks")
+
+ set_config("MACOS_PRIVATE_FRAMEWORKS_DIR", macos_private_frameworks)
+
+
+# GC rooting and hazard analysis.
+# ==============================================================
+option(env="MOZ_HAZARD", help="Build for the GC rooting hazard analysis")
+
+
+@depends("MOZ_HAZARD")
+def hazard_analysis(value):
+ if value:
+ return True
+
+
+set_config("MOZ_HAZARD", hazard_analysis)
+
+
+# Cross-compilation related things.
+# ==============================================================
+option(
+ "--with-toolchain-prefix",
+ env="TOOLCHAIN_PREFIX",
+ nargs=1,
+ help="Prefix for the target toolchain",
+)
+
+
+@depends("--with-toolchain-prefix", host, target, cross_compiling)
+def toolchain_prefix(value, host, target, cross_compiling):
+ if value:
+ return tuple(value)
+ # We don't want a toolchain prefix by default when building on mac for mac.
+ if cross_compiling and not (target.os == "OSX" and host.os == "OSX"):
+ return ("%s-" % target.toolchain, "%s-" % target.alias)
+
+
+@depends(toolchain_prefix, target)
+def first_toolchain_prefix(toolchain_prefix, target):
+ # Pass TOOLCHAIN_PREFIX down to the build system if it was given from the
+ # command line/environment (in which case there's only one value in the tuple),
+ # or when cross-compiling for Android or OSX.
+ if toolchain_prefix and (
+ target.os in ("Android", "OSX") or len(toolchain_prefix) == 1
+ ):
+ return toolchain_prefix[0]
+
+
+set_config("TOOLCHAIN_PREFIX", first_toolchain_prefix)
+add_old_configure_assignment("TOOLCHAIN_PREFIX", first_toolchain_prefix)
+
+
+# Compilers
+# ==============================================================
+include("compilers-util.configure")
+
+
+def try_preprocess(
+ configure_cache, compiler, language, source, onerror=None, wrapper=[]
+):
+ return try_invoke_compiler(
+ configure_cache, compiler, language, source, ["-E"], onerror, wrapper
+ )
+
+
+@imports(_from="mozbuild.configure.constants", _import="CompilerType")
+@imports(_from="mozbuild.configure.constants", _import="CPU_preprocessor_checks")
+@imports(_from="mozbuild.configure.constants", _import="kernel_preprocessor_checks")
+@imports(_from="mozbuild.configure.constants", _import="OS_preprocessor_checks")
+@imports(_from="textwrap", _import="dedent")
+@imports(_from="__builtin__", _import="Exception")
+def get_compiler_info(configure_cache, compiler, language):
+ """Returns information about the given `compiler` (command line in the
+ form of a list or tuple), in the given `language`.
+
+ The returned information includes:
+ - the compiler type (clang-cl, clang or gcc)
+ - the compiler version
+ - the compiler supported language
+ - the compiler supported language version
+ """
+ # Xcode clang versions are different from the underlying llvm version (they
+ # instead are aligned with the Xcode version). Fortunately, we can tell
+ # apart plain clang from Xcode clang, and convert the Xcode clang version
+ # into the more or less corresponding plain clang version.
+ check = dedent(
+ """\
+ #if defined(_MSC_VER) && defined(__clang__) && defined(_MT)
+ %COMPILER "clang-cl"
+ %VERSION __clang_major__.__clang_minor__.__clang_patchlevel__
+ #elif defined(__clang__)
+ %COMPILER "clang"
+ %VERSION __clang_major__.__clang_minor__.__clang_patchlevel__
+ # ifdef __apple_build_version__
+ %XCODE 1
+ # endif
+ #elif defined(__GNUC__) && !defined(__MINGW32__)
+ %COMPILER "gcc"
+ %VERSION __GNUC__.__GNUC_MINOR__.__GNUC_PATCHLEVEL__
+ #endif
+
+ #if __cplusplus
+ %cplusplus __cplusplus
+ #elif __STDC_VERSION__
+ %STDC_VERSION __STDC_VERSION__
+ #endif
+ """
+ )
+
+ # While we're doing some preprocessing, we might as well do some more
+ # preprocessor-based tests at the same time, to check the toolchain
+ # matches what we want.
+ for name, preprocessor_checks in (
+ ("CPU", CPU_preprocessor_checks),
+ ("KERNEL", kernel_preprocessor_checks),
+ ("OS", OS_preprocessor_checks),
+ ):
+ for n, (value, condition) in enumerate(preprocessor_checks.items()):
+ check += dedent(
+ """\
+ #%(if)s %(condition)s
+ %%%(name)s "%(value)s"
+ """
+ % {
+ "if": "elif" if n else "if",
+ "condition": condition,
+ "name": name,
+ "value": value,
+ }
+ )
+ check += "#endif\n"
+
+ # Also check for endianness. The advantage of living in modern times is
+ # that all the modern compilers we support now have __BYTE_ORDER__ defined
+ # by the preprocessor.
+ check += dedent(
+ """\
+ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ %ENDIANNESS "little"
+ #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ %ENDIANNESS "big"
+ #endif
+ """
+ )
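+
+ # For illustration only: preprocessing the generated `check` source with, say,
+ # clang targeting x86_64 Linux would leave lines resembling
+ #   %COMPILER "clang"
+ #   %VERSION 15.0.7
+ #   %CPU "x86_64"
+ #   %KERNEL "Linux"
+ #   %ENDIANNESS "little"
+ # in the output; the loop below picks up the %-prefixed lines and strips
+ # spaces and quotes to build the `data` dict.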
+
+ result = try_preprocess(configure_cache, compiler, language, check)
+
+ if not result:
+ raise FatalCheckError("Unknown compiler or compiler not supported.")
+
+ # Metadata emitted by preprocessors such as GCC with LANG=ja_JP.utf-8 may
+ # have non-ASCII characters. Treat the output as bytearray.
+ data = {}
+ for line in result.splitlines():
+ if line.startswith("%"):
+ k, _, v = line.partition(" ")
+ k = k.lstrip("%")
+ data[k] = v.replace(" ", "").lstrip('"').rstrip('"')
+ log.debug("%s = %s", k, data[k])
+
+ try:
+ type = CompilerType(data["COMPILER"])
+ except Exception:
+ raise FatalCheckError("Unknown compiler or compiler not supported.")
+
+ cplusplus = int(data.get("cplusplus", "0L").rstrip("L"))
+ stdc_version = int(data.get("STDC_VERSION", "0L").rstrip("L"))
+
+ version = data.get("VERSION")
+ if version:
+ version = Version(version)
+ if data.get("XCODE"):
+ # Derived from https://en.wikipedia.org/wiki/Xcode#Toolchain_versions
+ # with enough granularity for major.minor version checks further
+ # down the line
+ if version < "9.1":
+ version = Version("4.0.0.or.less")
+ elif version < "10.0":
+ version = Version("5.0.2")
+ elif version < "10.0.1":
+ version = Version("6.0.1")
+ elif version < "11.0":
+ version = Version("7.0.0")
+ elif version < "11.0.3":
+ version = Version("8.0.0")
+ elif version < "12.0":
+ version = Version("9.0.0")
+ elif version < "12.0.5":
+ version = Version("10.0.0")
+ elif version < "13.0":
+ version = Version("11.1.0")
+ elif version < "13.0.1":
+ version = Version("12.0.0")
+ elif version < "14.0":
+ version = Version("13.0.0")
+ elif version < "15.0":
+ version = Version("14.0.0")
+ else:
+ version = Version("14.0.0.or.more")
+
+ return namespace(
+ type=type,
+ version=version,
+ cpu=data.get("CPU"),
+ kernel=data.get("KERNEL"),
+ endianness=data.get("ENDIANNESS"),
+ os=data.get("OS"),
+ language="C++" if cplusplus else "C",
+ language_version=cplusplus if cplusplus else stdc_version,
+ xcode=bool(data.get("XCODE")),
+ )
+
+
+def same_arch_different_bits():
+ return (
+ ("x86", "x86_64"),
+ ("ppc", "ppc64"),
+ ("sparc", "sparc64"),
+ )
+
+
+@imports(_from="mozbuild.shellutil", _import="quote")
+@imports(_from="mozbuild.configure.constants", _import="OS_preprocessor_checks")
+def check_compiler(configure_cache, compiler, language, target, android_version):
+ info = get_compiler_info(configure_cache, compiler, language)
+
+ flags = []
+
+ # Check language standards
+ # --------------------------------------------------------------------
+ if language != info.language:
+ raise FatalCheckError(
+ "`%s` is not a %s compiler." % (quote(*compiler), language)
+ )
+
+ # Note: We do a strict version check because there sometimes are backwards
+ # incompatible changes in the standard, and not all code that compiles as
+ # C99 compiles as e.g. C11 (as of writing, this is true of libnestegg, for
+ # example)
+ if info.language == "C" and info.language_version != 199901:
+ if info.type == "clang-cl":
+ flags.append("-Xclang")
+ flags.append("-std=gnu99")
+
+ cxx17_version = 201703
+ if info.language == "C++":
+ if info.language_version != cxx17_version:
+ # MSVC headers include C++17 features, but don't guard them
+ # with appropriate checks.
+ if info.type == "clang-cl":
+ flags.append("-Xclang")
+ flags.append("-std=c++17")
+ else:
+ flags.append("-std=gnu++17")
+
+ # Check compiler target
+ # --------------------------------------------------------------------
+ has_target = False
+ if target.os == "Android" and android_version:
+ # This makes clang define __ANDROID_API__ and use versioned library
+ # directories from the NDK.
+ toolchain = "%s%d" % (target.toolchain, android_version)
+ else:
+ toolchain = target.toolchain
+
+ if info.type == "clang":
+ # Add the target explicitly when the target is aarch64 macosx, because
+ # the Xcode clang target is named differently, and we need to work around
+ # https://github.com/rust-lang/rust-bindgen/issues/1871 and
+ # https://github.com/alexcrichton/cc-rs/issues/542 so we always want
+ # the target on the command line, even if the compiler would default to
+ # that.
+ if info.xcode and target.os == "OSX" and target.cpu == "aarch64":
+ if "--target=arm64-apple-darwin" not in compiler:
+ flags.append("--target=arm64-apple-darwin")
+ has_target = True
+
+ elif (
+ not info.kernel
+ or info.kernel != target.kernel
+ or not info.endianness
+ or info.endianness != target.endianness
+ ):
+ flags.append("--target=%s" % toolchain)
+ has_target = True
+
+ # Add target flag when there is an OS mismatch (e.g. building for Android on
+ # Linux). However, only do this if the target OS is in our whitelist, to
+ # keep things the same on other platforms.
+ elif target.os in OS_preprocessor_checks and (
+ not info.os or info.os != target.os
+ ):
+ flags.append("--target=%s" % toolchain)
+ has_target = True
+
+ if not has_target and (not info.cpu or info.cpu != target.cpu):
+ same_arch = same_arch_different_bits()
+ if (target.cpu, info.cpu) in same_arch:
+ flags.append("-m32")
+ elif (info.cpu, target.cpu) in same_arch:
+ flags.append("-m64")
+ elif info.type == "clang-cl" and target.cpu == "aarch64":
+ flags.append("--target=%s" % toolchain)
+ elif info.type == "clang":
+ flags.append("--target=%s" % toolchain)
+
+ return namespace(
+ type=info.type,
+ version=info.version,
+ target_cpu=info.cpu,
+ target_kernel=info.kernel,
+ target_endianness=info.endianness,
+ target_os=info.os,
+ flags=flags,
+ )
+
+
+@imports(_from="__builtin__", _import="open")
+@imports("json")
+@imports("os")
+def get_vc_paths(topsrcdir):
+ def vswhere(args):
+ program_files = os.environ.get("PROGRAMFILES(X86)") or os.environ.get(
+ "PROGRAMFILES"
+ )
+ if not program_files:
+ return []
+ vswhere = os.path.join(
+ program_files, "Microsoft Visual Studio", "Installer", "vswhere.exe"
+ )
+ if not os.path.exists(vswhere):
+ return []
+ return json.loads(check_cmd_output(vswhere, "-format", "json", *args))
+
+ for install in vswhere(
+ [
+ "-products",
+ "*",
+ "-requires",
+ "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
+ ]
+ ):
+ path = install["installationPath"]
+ tools_version = (
+ open(
+ os.path.join(
+ path, r"VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt"
+ ),
+ "r",
+ )
+ .read()
+ .strip()
+ )
+ tools_path = os.path.join(path, r"VC\Tools\MSVC", tools_version)
+ yield (Version(install["installationVersion"]), tools_path)
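+
+# Sketch of what get_vc_paths() yields (placeholders, not real values): one
+# tuple per Visual Studio installation reported by vswhere, e.g.
+#   (Version("<installationVersion>"), r"<installationPath>\VC\Tools\MSVC\<tools_version>")
+# so callers can sort by installation version and pick the newest tools.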
+
+
+@depends(target, host)
+def is_windows(target, host):
+ return host.kernel == "WINNT" or target.kernel == "WINNT"
+
+
+# Calling this a sysroot is a little weird, but it's the terminology clang went
+# with for its -winsysroot flag.
+option(
+ env="WINSYSROOT",
+ nargs=1,
+ when=is_windows,
+ help='Path to a Windows "sysroot" (directory containing MSVC, SDKs)',
+)
+
+
+@depends(
+ "WINSYSROOT",
+ bootstrap_path(
+ "vs",
+ when=depends("WINSYSROOT", when=is_windows)(lambda x: not x),
+ ),
+ when=is_windows,
+)
+def winsysroot(winsysroot, bootstrapped):
+ if bootstrapped:
+ return bootstrapped
+ if winsysroot:
+ return winsysroot[0]
+
+
+option(
+ env="VC_PATH",
+ nargs=1,
+ when=is_windows,
+ help="Path to the Microsoft Visual C/C++ compiler",
+)
+
+
+@depends(
+ host,
+ build_environment,
+ "VC_PATH",
+ winsysroot,
+ when=is_windows,
+)
+@imports("os")
+@imports(_from="operator", _import="itemgetter")
+def vc_compiler_paths_for_version(host, env, vc_path, winsysroot):
+ if winsysroot:
+ if vc_path:
+ die("WINSYSROOT and VC_PATH cannot be set together.")
+ base_vc_path = os.path.join(winsysroot, "VC", "Tools", "MSVC")
+ versions = os.listdir(base_vc_path)
+ vc_path = [os.path.join(base_vc_path, str(max(Version(v) for v in versions)))]
+ if vc_path:
+ # Use an arbitrary version, it doesn't matter.
+ all_versions = [(Version("15"), vc_path[0])]
+ elif host.kernel != "WINNT":
+ # Don't try to do anything when VC_PATH is not set on cross-compiles.
+ return
+ else:
+ all_versions = sorted(get_vc_paths(env.topsrcdir), key=itemgetter(0))
+ if not all_versions:
+ return
+ # Choose the newest version.
+ path = all_versions[-1][1]
+ host_dir = {
+ "x86_64": "Hostx64",
+ "x86": "Hostx86",
+ }.get(host.cpu)
+ if host_dir:
+ path = os.path.join(path, "bin", host_dir)
+ return {
+ "x64": [os.path.join(path, "x64")],
+ # The cross toolchains require DLLs from the native x64 toolchain.
+ "x86": [os.path.join(path, "x86"), os.path.join(path, "x64")],
+ "arm64": [os.path.join(path, "arm64"), os.path.join(path, "x64")],
+ }
+
+
+@depends(target, vc_compiler_paths_for_version, when=is_windows)
+def vc_compiler_path(target, paths):
+ vc_target = {
+ "x86": "x86",
+ "x86_64": "x64",
+ "arm": "arm",
+ "aarch64": "arm64",
+ }.get(target.cpu)
+ if not paths:
+ return
+ return paths.get(vc_target)
+
+
+@depends(vc_compiler_path, original_path)
+@imports("os")
+@imports(_from="os", _import="environ")
+def vc_toolchain_search_path(vc_compiler_path, original_path):
+ result = list(original_path)
+
+ if vc_compiler_path:
+ # The second item, if there is one, is necessary to have in $PATH for
+ # Windows to load the required DLLs from there.
+ if len(vc_compiler_path) > 1:
+ environ["PATH"] = os.pathsep.join(result + vc_compiler_path[1:])
+
+ # The first item is where the programs are going to be
+ result.append(vc_compiler_path[0])
+
+ return result
+
+
+@depends_if(vc_compiler_path, when=is_windows)
+def vc_compiler_version(vc_compiler_path):
+ version = Version(
+ os.path.basename(
+ os.path.dirname(os.path.dirname(os.path.dirname(vc_compiler_path[0])))
+ )
+ )
+ # MSVC path with version 14.x is actually version 19.x
+ if version.major == 14:
+ return Version(f"19.{version.minor}")
+
+
+@depends_if(vc_compiler_version)
+def is_vs2019_or_more(vc_compiler_version):
+ return vc_compiler_version >= Version("19.20")
+
+
+add_old_configure_assignment("IS_VS2019_OR_MORE", is_vs2019_or_more)
+
+
+@depends_if(vc_compiler_version)
+def msvs_version(vc_compiler_version):
+ # clang-cl emulates the same version scheme as cl. And MSVS_VERSION needs to
+ # be set for GYP on Windows.
+ if vc_compiler_version >= Version("19.30"):
+ return "2022"
+ if vc_compiler_version >= Version("19.20"):
+ return "2019"
+ if vc_compiler_version >= Version("19.10"):
+ return "2017"
+
+ return ""
+
+
+set_config("MSVS_VERSION", msvs_version)
+
+
+clang_search_path = bootstrap_search_path("clang/bin")
+
+
+@depends(
+ bootstrap_search_path("rustc/bin", when="MOZ_AUTOMATION"),
+ bootstrap_search_path_order,
+ original_path,
+)
+@imports("os")
+@imports(_from="os", _import="environ")
+def rust_search_path(rust_path, search_order, original_path):
+ result = list(rust_path or original_path)
+ # Also add the rustup install directory for cargo/rustc.
+ cargo_home = environ.get("CARGO_HOME", "")
+ if cargo_home:
+ cargo_home = os.path.abspath(cargo_home)
+ else:
+ cargo_home = os.path.expanduser(os.path.join("~", ".cargo"))
+ rustup_path = os.path.join(cargo_home, "bin")
+ if search_order == "prepend":
+ result.insert(0, rustup_path)
+ else:
+ result.append(rustup_path)
+ return result
+
+
+# Prepend the mozilla-build msys2 path, since otherwise we can get mismatched
+# cygwin dll errors during configure if we get called from another msys2
+# environment, see bug 1801826.
+@depends(mozillabuild_bin_paths, clang_search_path, target, original_path)
+@imports("os")
+def altered_path(mozillabuild_bin_paths, clang_search_path, target, original_path):
+ altered_path = mozillabuild_bin_paths
+ if target.kernel == "Darwin":
+ # The rust compiler wants to execute dsymutil, but it does so in a
+ # non-configurable way (https://github.com/rust-lang/rust/issues/52728)
+ # so we add the clang path.
+ path = clang_search_path
+ else:
+ path = original_path
+ for p in path:
+ if p not in altered_path:
+ altered_path.append(p)
+ return os.pathsep.join(altered_path)
+
+
+set_config("PATH", altered_path)
+
+
+# Compiler wrappers
+# ==============================================================
+option(
+ "--with-compiler-wrapper",
+ env="COMPILER_WRAPPER",
+ nargs=1,
+ help="Enable compiling with wrappers such as distcc and ccache",
+)
+
+option("--with-ccache", env="CCACHE", nargs="?", help="Enable compiling with ccache")
+
+
+@depends_if("--with-ccache")
+def ccache(value):
+ if len(value):
+ return value
+ # If --with-ccache was given without an explicit value, we default to
+ # 'ccache'.
+ return "ccache"
+
+
+ccache = check_prog(
+ "CCACHE",
+ progs=(),
+ input=ccache,
+ paths=bootstrap_search_path(
+ "sccache", when=depends("CCACHE")(lambda c: len(c) and c[0] == "sccache")
+ ),
+ allow_missing=True,
+)
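+
+# Usage sketch (mozconfig syntax; the values are examples): either of
+#   ac_add_options --with-ccache=/usr/bin/ccache
+#   ac_add_options --with-ccache=sccache
+# works; in the sccache case the bootstrapped sccache directory is added to the
+# search path above so check_prog can find it.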
+
+option(env="CCACHE_PREFIX", nargs=1, help="Compiler prefix to use when using ccache")
+
+ccache_prefix = depends_if("CCACHE_PREFIX")(lambda prefix: prefix[0])
+set_config("CCACHE_PREFIX", ccache_prefix)
+
+# Distinguish ccache from sccache.
+
+
+@depends_if(ccache)
+def ccache_is_sccache(ccache):
+ return check_cmd_output(ccache, "--version").startswith("sccache")
+
+
+@depends(ccache, ccache_is_sccache)
+def using_ccache(ccache, ccache_is_sccache):
+ return ccache and not ccache_is_sccache
+
+
+@depends_if(ccache, ccache_is_sccache)
+def using_sccache(ccache, ccache_is_sccache):
+ return ccache and ccache_is_sccache
+
+
+option(env="RUSTC_WRAPPER", nargs=1, help="Wrap rust compilation with given tool")
+
+
+@depends(ccache, ccache_is_sccache, "RUSTC_WRAPPER")
+@imports(_from="textwrap", _import="dedent")
+@imports("os")
+def check_sccache_version(ccache, ccache_is_sccache, rustc_wrapper):
+ sccache_min_version = Version("0.2.13")
+
+ def check_version(path):
+ out = check_cmd_output(path, "--version")
+ version = Version(out.rstrip().split()[-1])
+ if version < sccache_min_version:
+ die(
+ dedent(
+ """\
+ sccache %s or later is required. sccache in use at %s has
+ version %s.
+
+ Please upgrade or acquire a new version with |./mach bootstrap|.
+ """
+ ),
+ sccache_min_version,
+ path,
+ version,
+ )
+
+ if ccache and ccache_is_sccache:
+ check_version(ccache)
+
+ if rustc_wrapper and (
+ os.path.splitext(os.path.basename(rustc_wrapper[0]))[0].lower() == "sccache"
+ ):
+ check_version(rustc_wrapper[0])
+
+
+set_config("MOZ_USING_CCACHE", using_ccache)
+set_config("MOZ_USING_SCCACHE", using_sccache)
+
+option(env="SCCACHE_VERBOSE_STATS", help="Print verbose sccache stats after build")
+
+
+@depends(using_sccache, "SCCACHE_VERBOSE_STATS")
+def sccache_verbose_stats(using_sccache, verbose_stats):
+ return using_sccache and bool(verbose_stats)
+
+
+set_config("SCCACHE_VERBOSE_STATS", sccache_verbose_stats)
+
+
+@depends("--with-compiler-wrapper", ccache)
+@imports(_from="mozbuild.shellutil", _import="split", _as="shell_split")
+def compiler_wrapper(wrapper, ccache):
+ if wrapper:
+ raw_wrapper = wrapper[0]
+ wrapper = shell_split(raw_wrapper)
+ wrapper_program = find_program(wrapper[0])
+ if not wrapper_program:
+ die(
+ "Cannot find `%s` from the given compiler wrapper `%s`",
+ wrapper[0],
+ raw_wrapper,
+ )
+ wrapper[0] = wrapper_program
+
+ if ccache:
+ if wrapper:
+ return tuple([ccache] + wrapper)
+ else:
+ return (ccache,)
+ elif wrapper:
+ return tuple(wrapper)
+
+
+@depends_if(compiler_wrapper)
+def using_compiler_wrapper(compiler_wrapper):
+ return True
+
+
+set_config("MOZ_USING_COMPILER_WRAPPER", using_compiler_wrapper)
+
+
+@dependable
+def wasm():
+ return split_triplet("wasm32-wasi", allow_wasi=True)
+
+
+@template
+def default_c_compilers(host_or_target, other_c_compiler=None):
+ """Template defining the set of default C compilers for the host and
+ target platforms.
+ `host_or_target` is either `host` or `target` (the @depends functions
+ from init.configure).
+ `other_c_compiler` is the `target` C compiler when `host_or_target` is `host`.
+ """
+ assert host_or_target in {host, target, wasm}
+
+ other_c_compiler = () if other_c_compiler is None else (other_c_compiler,)
+
+ @depends(host_or_target, target, toolchain_prefix, *other_c_compiler)
+ def default_c_compilers(
+ host_or_target, target, toolchain_prefix, *other_c_compiler
+ ):
+ if host_or_target.kernel == "WINNT":
+ if host_or_target.abi:
+ if host_or_target.abi == "msvc":
+ supported = types = ("clang-cl",)
+ elif host_or_target.abi == "mingw":
+ supported = types = ("clang",)
+ else:
+ supported = types = ("clang-cl", "clang")
+ elif host_or_target.kernel == "Darwin":
+ types = ("clang",)
+ supported = ("clang", "gcc")
+ elif host_or_target.kernel == "WASI":
+ supported = types = ("clang",)
+ else:
+ supported = types = ("clang", "gcc")
+
+ info = other_c_compiler[0] if other_c_compiler else None
+ if info and info.type in supported:
+ # When getting default C compilers for the host, we prioritize the
+ # same compiler as the target C compiler.
+ prioritized = info.compiler
+ if info.type == "gcc":
+ same_arch = same_arch_different_bits()
+ if (
+ target.cpu != host_or_target.cpu
+ and (target.cpu, host_or_target.cpu) not in same_arch
+ and (host_or_target.cpu, target.cpu) not in same_arch
+ ):
+ # If the target C compiler is GCC, and it can't be used with
+ # -m32/-m64 for the host, it's probably toolchain-prefixed,
+ # so we prioritize a raw 'gcc' instead.
+ prioritized = info.type
+ if target.os != "WINNT" and host_or_target.os == "WINNT":
+ # When cross-compiling on Windows, don't prioritize. We'll fallback
+ # to checking for clang-cl first.
+ pass
+ else:
+ types = [prioritized] + [t for t in types if t != info.type]
+
+ gcc = ("gcc",)
+ if toolchain_prefix and host_or_target is target:
+ gcc = tuple("%sgcc" % p for p in toolchain_prefix) + gcc
+
+ result = []
+ for type in types:
+ if type == "gcc":
+ result.extend(gcc)
+ else:
+ result.append(type)
+
+ return tuple(result)
+
+ return default_c_compilers
+
+
+@template
+def default_cxx_compilers(c_compiler, other_c_compiler=None, other_cxx_compiler=None):
+ """Template defining the set of default C++ compilers for the host and
+ target platforms.
+ `c_compiler` is the @depends function returning a Compiler instance for
+ the desired platform.
+
+ Because the build system expects the C and C++ compilers to be from the
+ same compiler suite, we derive the default C++ compilers from the C
+ compiler that was found if none was provided.
+
+ We also factor in the target C++ compiler when getting the default host
+ C++ compiler, using the target C++ compiler if the host and target C
+ compilers are the same.
+ """
+
+ assert (other_c_compiler is None) == (other_cxx_compiler is None)
+ if other_c_compiler is not None:
+ other_compilers = (other_c_compiler, other_cxx_compiler)
+ else:
+ other_compilers = ()
+
+ @depends(c_compiler, *other_compilers)
+ def default_cxx_compilers(c_compiler, *other_compilers):
+ if other_compilers:
+ other_c_compiler, other_cxx_compiler = other_compilers
+ if other_c_compiler.compiler == c_compiler.compiler:
+ return (other_cxx_compiler.compiler,)
+
+ dir = os.path.dirname(c_compiler.compiler)
+ file = os.path.basename(c_compiler.compiler)
+
+ if c_compiler.type == "gcc":
+ return (os.path.join(dir, file.replace("gcc", "g++")),)
+
+ if c_compiler.type == "clang":
+ return (os.path.join(dir, file.replace("clang", "clang++")),)
+
+ return (c_compiler.compiler,)
+
+ return default_cxx_compilers
+
+
+@template
+def provided_program(env_var, when=None):
+ """Template handling cases where a program can be specified either as a
+ path or as a path with applicable arguments.
+ """
+
+ @depends_if(env_var, when=when)
+ @imports(_from="itertools", _import="takewhile")
+ @imports(_from="mozbuild.shellutil", _import="split", _as="shell_split")
+ def provided(cmd):
+ # Assume the first dash-prefixed item (and any subsequent items) are
+ # command-line options, the item before the dash-prefixed item is
+ # the program we're looking for, and anything before that is a wrapper
+ # of some kind (e.g. sccache).
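+ # For example (hypothetical value): CC="sccache clang -fcolor-diagnostics"
+ # splits into wrapper=["sccache"], program="clang",
+ # flags=["-fcolor-diagnostics"].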
+ cmd = shell_split(cmd[0])
+
+ without_flags = list(takewhile(lambda x: not x.startswith("-"), cmd))
+
+ return namespace(
+ wrapper=without_flags[:-1],
+ program=without_flags[-1],
+ flags=cmd[len(without_flags) :],
+ )
+
+ return provided
+
+
+@template
+def sysroot(host_or_target, target_sysroot=None):
+ assert target_sysroot or host_or_target is target
+ bootstrap_target_when = target_is_linux_or_wasi
+ if host_or_target is host:
+ host_or_target_str = "host"
+ opt = "--with-host-sysroot"
+ env = "HOST_SYSROOT"
+ when = depends(host)(lambda h: h.kernel == "Linux")
+ # Only bootstrap a host sysroot when using a bootstrapped target sysroot
+ # or when the target doesn't use a bootstrapped sysroot in the first place.
+ @depends(when, bootstrap_target_when, target_sysroot.bootstrapped)
+ def bootstrap_when(when, bootstrap_target_when, bootstrapped):
+ return when and (bootstrapped or not bootstrap_target_when)
+
+ else:
+ assert host_or_target is target
+ host_or_target_str = "target"
+ opt = "--with-sysroot"
+ env = "SYSROOT"
+ when = target_is_linux_or_wasi
+ bootstrap_when = bootstrap_target_when
+
+ option(
+ opt,
+ env=env,
+ nargs=1,
+ when=when,
+ help="Use the given sysroot directory for %s build" % host_or_target_str,
+ )
+
+ sysroot_input = depends(opt, when=when)(lambda x: x)
+ bootstrap_sysroot = depends(bootstrap_when, sysroot_input)(
+ # Only bootstrap when no flag was explicitly given (either --with or --without)
+ lambda bootstrap, input: bootstrap
+ and not input
+ and input.origin == "default"
+ )
+
+ @depends(
+ sysroot_input,
+ host_or_target,
+ macos_sdk,
+ bootstrap_path(
+ depends(host_or_target)(lambda t: "sysroot-{}".format(t.toolchain)),
+ when=bootstrap_sysroot,
+ ),
+ )
+ @imports("os")
+ def sysroot(sysroot_input, host_or_target, macos_sdk, path):
+ version = None
+ if sysroot_input:
+ path = sysroot_input[0]
+ elif host_or_target.kernel == "Darwin" and macos_sdk:
+ path = macos_sdk
+ if path:
+ # Find the version of libstdc++ headers in the sysroot
+ include = os.path.join(path, "usr/include/c++")
+ if os.path.isdir(include):
+ with os.scandir(include) as d:
+ version = max(Version(e.name) for e in d if e.is_dir())
+ log.info("Using %s sysroot in %s", host_or_target_str, path)
+ return namespace(
+ path=path,
+ bootstrapped=bool(path and not sysroot_input),
+ stdcxx_version=version,
+ )
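+
+ # Illustrative example (names are assumptions): with a bootstrapped sysroot at
+ # <state_dir>/sysroot-<target.toolchain> containing usr/include/c++/8, this
+ # returns that path with bootstrapped=True and stdcxx_version=Version("8");
+ # an explicit --with-sysroot path is returned with bootstrapped=False instead.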
+
+ return sysroot
+
+
+target_sysroot = sysroot(target)
+
+
+# Use `system_lib_option` instead of `option` for options that enable building
+# with a system library for which the development headers are not available in
+# the bootstrapped sysroots.
+@template
+def system_lib_option(name, *args, **kwargs):
+ option(name, *args, **kwargs)
+
+ @depends(name, target_sysroot.bootstrapped)
+ def no_system_lib_in_sysroot(value, bootstrapped):
+ if bootstrapped and value:
+ die(
+ "%s is not supported with bootstrapped sysroot. "
+ "Drop the option, or use --without-sysroot or --disable-bootstrap",
+ value.format(name),
+ )
+
+
+host_sysroot = sysroot(host, target_sysroot)
+
+
+@template
+def multiarch_dir(host_or_target):
+ sysroot = {
+ host: host_sysroot,
+ target: target_sysroot,
+ }[host_or_target]
+
+ @depends(host_or_target, when=sysroot.path)
+ def multiarch_dir(target):
+ if target.cpu == "x86":
+ # Turn e.g. i686-linux-gnu into i386-linux-gnu
+ return target.toolchain.replace(target.raw_cpu, "i386")
+ return target.toolchain
+
+ return multiarch_dir
+
+
+target_multiarch_dir = multiarch_dir(target)
+host_multiarch_dir = multiarch_dir(host)
+
+
+def minimum_gcc_version():
+ return Version("8.1.0")
+
+
+@template
+def compiler(
+ language,
+ host_or_target,
+ c_compiler=None,
+ other_compiler=None,
+ other_c_compiler=None,
+):
+ """Template handling the generic base checks for the compiler for the
+ given `language` on the given platform (`host_or_target`).
+ `host_or_target` is either `host` or `target` (the @depends functions
+ from init.configure).
+ When the language is 'C++', `c_compiler` is the result of the `compiler`
+ template for the language 'C' for the same `host_or_target`.
+ When `host_or_target` is `host`, `other_compiler` is the result of the
+ `compiler` template for the same `language` for `target`.
+ When `host_or_target` is `host` and the language is 'C++',
+ `other_c_compiler` is the result of the `compiler` template for the
+ language 'C' for `target`.
+ """
+ assert host_or_target in {host, target, wasm}
+ assert language in ("C", "C++")
+ assert language == "C" or c_compiler is not None
+ assert host_or_target is target or other_compiler is not None
+ assert language == "C" or host_or_target is target or other_c_compiler is not None
+
+ host_or_target_str = {
+ host: "host",
+ target: "target",
+ wasm: "wasm",
+ }[host_or_target]
+
+ sysroot = {
+ host: host_sysroot,
+ target: target_sysroot,
+ wasm: dependable(lambda: namespace(path=None)),
+ }[host_or_target]
+
+ multiarch_dir = {
+ host: host_multiarch_dir,
+ target: target_multiarch_dir,
+ wasm: never,
+ }[host_or_target]
+
+ var = {
+ ("C", target): "CC",
+ ("C++", target): "CXX",
+ ("C", host): "HOST_CC",
+ ("C++", host): "HOST_CXX",
+ ("C", wasm): "WASM_CC",
+ ("C++", wasm): "WASM_CXX",
+ }[language, host_or_target]
+
+ default_compilers = {
+ "C": lambda: default_c_compilers(host_or_target, other_compiler),
+ "C++": lambda: default_cxx_compilers(
+ c_compiler, other_c_compiler, other_compiler
+ ),
+ }[language]()
+
+ what = "the %s %s compiler" % (host_or_target_str, language)
+
+ option(env=var, nargs=1, help="Path to %s" % what)
+
+ # Handle the compiler given by the user through one of the CC/CXX/HOST_CC/
+ # HOST_CXX variables.
+ provided_compiler = provided_program(var)
+
+ # Normally, we'd use `var` instead of `_var`, but the interaction with
+ # old-configure complicates things, and for now, we a) can't take the plain
+ # result from check_prog as CC/CXX/HOST_CC/HOST_CXX and b) have to let
+ # old-configure AC_SUBST it (because it's autoconf doing it, not us)
+ compiler = check_prog(
+ "_%s" % var,
+ what=what,
+ progs=default_compilers,
+ input=provided_compiler.program,
+ paths=clang_search_path,
+ )
+
+ @depends(
+ configure_cache,
+ compiler,
+ provided_compiler,
+ compiler_wrapper,
+ host_or_target,
+ sysroot,
+ macos_target,
+ android_version,
+ vc_compiler_version,
+ multiarch_dir,
+ winsysroot,
+ host,
+ )
+ @checking("whether %s can be used" % what, lambda x: bool(x))
+ @imports(_from="mozbuild.shellutil", _import="quote")
+ @imports("os")
+ def valid_compiler(
+ configure_cache,
+ compiler,
+ provided_compiler,
+ compiler_wrapper,
+ host_or_target,
+ sysroot,
+ macos_target,
+ android_version,
+ vc_compiler_version,
+ multiarch_dir,
+ winsysroot,
+ host,
+ ):
+ wrapper = list(compiler_wrapper or ())
+ flags = []
+ if sysroot.path:
+ if host_or_target.kernel == "Darwin":
+ # While --sysroot and -isysroot are roughly equivalent, when not using
+ # -isysroot on mac, clang takes the SDKROOT environment variable into
+ # consideration, which may be set by python and break things.
+ flags.extend(("-isysroot", sysroot.path))
+ else:
+ flags.extend(("--sysroot", sysroot.path))
+ if host_or_target.os == "OSX" and macos_target:
+ flags.append("-mmacosx-version-min=%s" % macos_target)
+ if provided_compiler:
+ wrapper.extend(provided_compiler.wrapper)
+ flags.extend(provided_compiler.flags)
+
+ info = check_compiler(
+ configure_cache,
+ wrapper + [compiler] + flags,
+ language,
+ host_or_target,
+ android_version,
+ )
+
+ # When not given an explicit compatibility version, clang-cl tries
+ # to get one from MSVC, which might not even be the one used by the
+ # build. And when it can't find one, its default might also not match
+ # what the build is using. So if we were able to figure out the version
+ # we're building with, explicitly use that.
+ # This also means that, as a side effect, clang-cl will not try to find
+ # MSVC, which saves a little overhead.
+ if info.type == "clang-cl" and vc_compiler_version:
+ flags.append(f"-fms-compatibility-version={vc_compiler_version}")
+
+ if info.type == "clang" and language == "C++" and host_or_target.os == "OSX":
+ flags.append("-stdlib=libc++")
+
+ # Check that the additional flags we got are enough to not require any
+ # more flags. If we get an exception, just ignore it; it's liable to be
+ # invalid command-line flags, which means the compiler we're checking
+ # doesn't support those command-line flags and will fail one or more of
+ # the checks below.
+ try:
+ if info.flags:
+ flags += info.flags
+ info = check_compiler(
+ configure_cache,
+ wrapper + [compiler] + flags,
+ language,
+ host_or_target,
+ android_version,
+ )
+ except FatalCheckError:
+ pass
+
+ if not info.target_cpu or info.target_cpu != host_or_target.cpu:
+ raise FatalCheckError(
+ "%s %s compiler target CPU (%s) does not match --%s CPU (%s)"
+ % (
+ host_or_target_str.capitalize(),
+ language,
+ info.target_cpu or "unknown",
+ host_or_target_str,
+ host_or_target.raw_cpu,
+ )
+ )
+
+ if not info.target_kernel or (info.target_kernel != host_or_target.kernel):
+ raise FatalCheckError(
+ "%s %s compiler target kernel (%s) does not match --%s kernel (%s)"
+ % (
+ host_or_target_str.capitalize(),
+ language,
+ info.target_kernel or "unknown",
+ host_or_target_str,
+ host_or_target.kernel,
+ )
+ )
+
+ if not info.target_endianness or (
+ info.target_endianness != host_or_target.endianness
+ ):
+ raise FatalCheckError(
+ "%s %s compiler target endianness (%s) does not match --%s "
+ "endianness (%s)"
+ % (
+ host_or_target_str.capitalize(),
+ language,
+ info.target_endianness or "unknown",
+ host_or_target_str,
+ host_or_target.endianness,
+ )
+ )
+
+ # Compiler version checks
+ # ===================================================
+ # Check the compiler version here instead of in `compiler_version` so
+ # that the `checking` message doesn't pretend the compiler can be used
+ # to then bail out one line later.
+ if info.type == "gcc":
+ if host_or_target.os == "Android":
+ raise FatalCheckError(
+ "GCC is not supported on Android.\n"
+ "Please use clang from the Android NDK instead."
+ )
+ gcc_version = minimum_gcc_version()
+ if info.version < gcc_version:
+ raise FatalCheckError(
+ "Only GCC %d.%d or newer is supported (found version %s)."
+ % (gcc_version.major, gcc_version.minor, info.version)
+ )
+
+ # Force GCC to use the C++ headers from the sysroot, and to prefer the
+ # sysroot system headers to /usr/include.
+ # Non-Debian GCC also doesn't look at headers in the multiarch directory.
+ if sysroot.bootstrapped and sysroot.stdcxx_version:
+ version = sysroot.stdcxx_version
+ for path in (
+ "usr/include/c++/{}".format(version),
+ "usr/include/{}/c++/{}".format(multiarch_dir, version),
+ "usr/include/{}".format(multiarch_dir),
+ "usr/include",
+ ):
+ flags.extend(("-isystem", os.path.join(sysroot.path, path)))
+
+ if info.type == "clang-cl":
+ if info.version < "9.0.0":
+ raise FatalCheckError(
+ "Only clang-cl 9.0 or newer is supported (found version %s)"
+ % info.version
+ )
+ if winsysroot and host.os != "WINNT":
+ overlay = os.path.join(winsysroot, "overlay.yaml")
+ if os.path.exists(overlay):
+ overlay_flags = ["-Xclang", "-ivfsoverlay", "-Xclang", overlay]
+ if info.version >= "16.0" or (
+ # clang-cl 15 normally doesn't support the root-relative
+ # overlay we use, but the bootstrapped clang-cl 15 is patched
+ # to support it, so check we're using a patched version.
+ info.version >= "15.0"
+ and try_preprocess(
+ configure_cache,
+ [compiler] + flags + overlay_flags,
+ language,
+ "",
+ onerror=lambda: False,
+ wrapper=wrapper,
+ )
+ ):
+ flags.extend(overlay_flags)
+
+ if (info.type, host_or_target.abi) in (
+ ("clang", "msvc"),
+ ("clang-cl", "mingw"),
+ ):
+ raise FatalCheckError("Unknown compiler or compiler not supported.")
+
+ # If you want to bump the version check here ensure the version
+ # is known for Xcode in get_compiler_info.
+ if info.type == "clang" and info.version < "7.0":
+ raise FatalCheckError(
+ "Only clang/llvm 7.0 or newer is supported (found version %s)."
+ % info.version
+ )
+
+ if host_or_target.kernel == "WASI":
+ if info.type != "clang":
+ raise FatalCheckError(
+ "Only clang is supported for %s" % host_or_target.alias
+ )
+ if info.version < "8.0":
+ raise FatalCheckError(
+ "Only clang/llvm 8.0 or newer is supported for %s (found version %s)."
+ % (host_or_target.alias, info.version)
+ )
+
+ if host_or_target.os == "Android":
+ # Need at least clang 13 for compiler-rt/libunwind being the default.
+ if info.type == "clang" and info.version < "13.0":
+ raise FatalCheckError(
+ "Only clang/llvm 13.0 or newer is supported for %s (found version %s)."
+ % (host_or_target.alias, info.version)
+ )
+
+ if info.flags:
+ raise FatalCheckError("Unknown compiler or compiler not supported.")
+
+ return namespace(
+ wrapper=wrapper,
+ compiler=compiler,
+ flags=flags,
+ type=info.type,
+ version=info.version,
+ language=language,
+ )
+
+ @depends(valid_compiler)
+ @checking("%s version" % what)
+ def compiler_version(compiler):
+ return compiler.version
+
+ if language == "C++":
+
+ @depends(valid_compiler, c_compiler)
+ def valid_compiler(compiler, c_compiler):
+ if compiler.type != c_compiler.type:
+ die(
+ "The %s C compiler is %s, while the %s C++ compiler is "
+ "%s. Need to use the same compiler suite.",
+ host_or_target_str,
+ c_compiler.type,
+ host_or_target_str,
+ compiler.type,
+ )
+
+ if compiler.version != c_compiler.version:
+ die(
+ "The %s C compiler is version %s, while the %s C++ "
+ "compiler is version %s. Need to use the same compiler "
+ "version.",
+ host_or_target_str,
+ c_compiler.version,
+ host_or_target_str,
+ compiler.version,
+ )
+ return compiler
+
+ # Set CC/CXX/HOST_CC/HOST_CXX for old-configure, which needs the wrapper
+ # and the flags that were part of the user input for those variables to
+ # be provided.
+ add_old_configure_assignment(
+ var,
+ depends_if(valid_compiler)(
+ lambda x: list(x.wrapper) + [x.compiler] + list(x.flags)
+ ),
+ )
+
+ if host_or_target is target:
+ add_old_configure_assignment(
+ "ac_cv_prog_%s" % var,
+ depends_if(valid_compiler)(
+ lambda x: list(x.wrapper) + [x.compiler] + list(x.flags)
+ ),
+ )
+ # We check that it works in python configure already.
+ add_old_configure_assignment("ac_cv_prog_%s_works" % var.lower(), "yes")
+ add_old_configure_assignment(
+ "ac_cv_prog_%s_cross" % var.lower(),
+ depends(cross_compiling)(lambda x: "yes" if x else "no"),
+ )
+ gcc_like = depends(valid_compiler.type)(
+ lambda x: "yes" if x in ("gcc", "clang") else "no"
+ )
+ add_old_configure_assignment("ac_cv_prog_%s_g" % var.lower(), gcc_like)
+ if language == "C":
+ add_old_configure_assignment("ac_cv_prog_gcc", gcc_like)
+ if language == "C++":
+ add_old_configure_assignment("ac_cv_prog_gxx", gcc_like)
+
+ # Set CC_TYPE/CC_VERSION/HOST_CC_TYPE/HOST_CC_VERSION to allow
+ # old-configure to do some of its still existing checks.
+ if language == "C":
+ set_config("%s_TYPE" % var, valid_compiler.type)
+ add_old_configure_assignment("%s_TYPE" % var, valid_compiler.type)
+ set_config(
+ "%s_VERSION" % var, depends(valid_compiler.version)(lambda v: str(v))
+ )
+
+ valid_compiler = compiler_class(valid_compiler, host_or_target)
+
+ def compiler_error():
+ raise FatalCheckError(
+ "Failed compiling a simple %s source with %s" % (language, what)
+ )
+
+ valid_compiler.try_compile(check_msg="%s works" % what, onerror=compiler_error)
+
+ set_config("%s_BASE_FLAGS" % var, valid_compiler.flags)
+
+ # Set CPP/CXXCPP for both the build system and old-configure. We don't
+ # need to check this works for preprocessing, because we already relied
+ # on $CC -E/$CXX -E doing preprocessing work to validate the compiler
+ # in the first place.
+ if host_or_target is target:
+ pp_var = {
+ "C": "CPP",
+ "C++": "CXXCPP",
+ }[language]
+
+ preprocessor = depends_if(valid_compiler)(
+ lambda x: list(x.wrapper) + [x.compiler, "-E"] + list(x.flags)
+ )
+
+ set_config(pp_var, preprocessor)
+ add_old_configure_assignment(pp_var, preprocessor)
+
+ if language == "C":
+ linker_var = {
+ target: "LD",
+ host: "HOST_LD",
+ }.get(host_or_target)
+
+ if linker_var:
+
+ @deprecated_option(env=linker_var, nargs=1)
+ def linker(value):
+ if value:
+ return value[0]
+
+ @depends(linker)
+ def unused_linker(linker):
+ if linker:
+ log.warning(
+ "The value of %s is not used by this build system." % linker_var
+ )
+
+ return valid_compiler
+
+
+c_compiler = compiler("C", target)
+cxx_compiler = compiler("C++", target, c_compiler=c_compiler)
+host_c_compiler = compiler("C", host, other_compiler=c_compiler)
+host_cxx_compiler = compiler(
+ "C++",
+ host,
+ c_compiler=host_c_compiler,
+ other_compiler=cxx_compiler,
+ other_c_compiler=c_compiler,
+)
+
+
+@template
+def windows_abi(host_or_target, c_compiler):
+ @depends(host_or_target)
+ def windows_abi(host_or_target):
+ if host_or_target.os == "WINNT":
+ return host_or_target.abi
+
+ @depends(host_or_target, windows_abi)
+ def need_windows_abi_from_compiler(host_or_target, windows_abi):
+ return host_or_target.os == "WINNT" and windows_abi is None
+
+ @depends(host_or_target, c_compiler, when=need_windows_abi_from_compiler)
+ def windows_abi_from_compiler(host_or_target, c_compiler):
+ if host_or_target.os == "WINNT":
+ if c_compiler.type == "clang-cl":
+ return "msvc"
+ return "mingw"
+
+ return windows_abi | windows_abi_from_compiler
+
+
+target_windows_abi = windows_abi(target, c_compiler)
+host_windows_abi = windows_abi(host, host_c_compiler)
+
+
+# Generic compiler-based conditions.
+building_with_gcc = depends(c_compiler)(lambda info: info.type == "gcc")
+
+
+@depends(cxx_compiler, ccache_prefix)
+@imports("os")
+def cxx_is_icecream(info, ccache_prefix):
+ if (
+ os.path.islink(info.compiler)
+ and os.path.basename(os.readlink(info.compiler)) == "icecc"
+ ):
+ return True
+ if ccache_prefix and os.path.basename(ccache_prefix) == "icecc":
+ return True
+
+
+set_config("CXX_IS_ICECREAM", cxx_is_icecream)
+
+
+# Linker detection
+# ==============================================================
+# The policy is as follows:
+# For Windows:
+# - the linker is picked via the LINKER environment variable per windows.configure,
+# but ought to be llvm-lld in any case.
+# For macOS:
+# - the linker is lld if the clang used is >= 15 (per LLVM version, not Xcode version).
+# - the linker is also lld on local developer builds if the clang used is >= 13 (per LLVM
+# version, not Xcode version)
+# - otherwise the linker is ld64, either from XCode on macOS, or from cctools-ports when
+# cross-compiling.
+# For other OSes:
+# - on local developer builds: lld is used if present. Otherwise gold is used if present
+# otherwise, BFD ld is used.
+# - on release/official builds: whatever the compiler uses by default, except on Android
+# (see enable_linker_default below). Usually what the compiler uses by default is BFD
+# ld, except with the Android NDK compiler, where the default varies depending on the
+# NDK version. The default also varies by platform and clang version.
+# lld is not used by default on Linux and Android because it introduces layout changes
+# that prevent elfhack from working. See e.g.
+# https://bugzilla.mozilla.org/show_bug.cgi?id=1563654#c2.
+@template
+def is_not_winnt_or_sunos(host_or_target):
+ @depends(host_or_target)
+ def is_not_winnt_or_sunos(host_or_target):
+ if host_or_target.kernel not in ("WINNT", "SunOS"):
+ return True
+
+ return is_not_winnt_or_sunos
+
+
+is_linker_option_enabled = is_not_winnt_or_sunos(target)
+
+
+@deprecated_option("--enable-gold", env="MOZ_FORCE_GOLD", when=is_linker_option_enabled)
+def enable_gold(value):
+ if value:
+ die("--enable-gold is deprecated, use --enable-linker=gold instead")
+ else:
+ die("--disable-gold is deprecated, use --enable-linker=something_else instead")
+
+
+@depends(target, developer_options)
+def enable_linker_default(target, developer_options):
+ # Recent versions of clang default to lld when targeting Android, but we don't
+ # want that as the default for non-developer builds (see above).
+ # So we want to force the default to whatever it was with older versions of clang,
+ # but with caveats/workarounds:
+ # - x86-64 gold has bugs in how it lays out .note.* sections. See bug 1573820.
+ # - x86-32 gold has a bug when assembly files are built. See bug 1651699.
+ # That leaves us with aarch64 and armv7, which respectively defaulted to
+ # bfd and gold.
+ # On developer builds, select_linker will pick lld if it's not the default.
+ if target.os == "Android" and not developer_options:
+ return "bfd" if target.cpu in ("x86", "x86_64", "aarch64") else "gold"
+
+
+option(
+ "--enable-linker",
+ nargs=1,
+ help="Select the linker {bfd, gold, ld64, lld, lld-*, mold}{|}",
+ default=enable_linker_default,
+ when=is_linker_option_enabled,
+)
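+
+# Usage sketch (mozconfig syntax; lld is just an example value):
+#   ac_add_options --enable-linker=lld
+# forces a specific linker; select_linker below verifies that the compiler can
+# actually drive it via -fuse-ld before accepting it.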
+
+
+# No-op to enable depending on --enable-linker from default_elfhack in
+# toolkit/moz.configure.
+@depends("--enable-linker", when=is_linker_option_enabled)
+def enable_linker(linker):
+ return linker
+
+
+@template
+def select_linker_tmpl(host_or_target):
+ if host_or_target is target:
+ deps = depends(
+ "--enable-linker",
+ c_compiler,
+ developer_options,
+ extra_toolchain_flags,
+ target,
+ when=is_linker_option_enabled,
+ )
+ host_or_target_str = "target"
+ else:
+ deps = depends(
+ dependable(None),
+ host_c_compiler,
+ developer_options,
+ dependable(None),
+ host,
+ when=is_not_winnt_or_sunos(host_or_target),
+ )
+ host_or_target_str = "host"
+
+ @deps
+ @checking(f"for {host_or_target_str} linker", lambda x: x.KIND)
+ @imports("os")
+ @imports("shutil")
+ def select_linker(linker, c_compiler, developer_options, toolchain_flags, target):
+
+ if linker:
+ linker = linker[0]
+ else:
+ linker = None
+
+ def is_valid_linker(linker):
+ if target.kernel == "Darwin":
+ valid_linkers = ("ld64", "lld")
+ else:
+ valid_linkers = ("bfd", "gold", "lld", "mold")
+ if linker in valid_linkers:
+ return True
+ if "lld" in valid_linkers and linker.startswith("lld-"):
+ return True
+ return False
+
+ if linker and not is_valid_linker(linker):
+ # Check that we are trying to use a supported linker
+ die("Unsupported linker " + linker)
+
+ # Check the kind of linker
+ version_check = ["-Wl,--version"]
+ cmd_base = c_compiler.wrapper + [c_compiler.compiler] + c_compiler.flags
+
+ def try_linker(linker):
+ # Generate the compiler flag
+ if linker == "ld64":
+ linker_flag = ["-fuse-ld=ld"]
+ elif linker:
+ linker_flag = ["-fuse-ld=" + linker]
+ else:
+ linker_flag = []
+ cmd = cmd_base + linker_flag + version_check
+ if toolchain_flags:
+ cmd += toolchain_flags
+
+ # ld64 doesn't have anything to print out a version. It does print out
+ # "ld64: For information on command line options please use 'man ld'."
+ # but that would require doing two attempts, one with --version, that
+ # would fail, and another with --help.
+ # Instead, abuse its LD_PRINT_OPTIONS feature to detect a message
+ # specific to it on stderr when it fails to process --version.
+ env = dict(os.environ)
+ env["LD_PRINT_OPTIONS"] = "1"
+ # Some locales might not print out the strings we are looking for, so
+ # ensure consistent output.
+ env["LC_ALL"] = "C"
+ retcode, stdout, stderr = get_cmd_output(*cmd, env=env)
+ if retcode == 1 and "Logging ld64 options" in stderr:
+ kind = "ld64"
+
+ elif retcode != 0:
+ return None
+
+ elif "mold" in stdout:
+ kind = "mold"
+
+ elif "GNU ld" in stdout:
+ # We are using the normal linker
+ kind = "bfd"
+
+ elif "GNU gold" in stdout:
+ kind = "gold"
+
+ elif "LLD" in stdout:
+ kind = "lld"
+
+ else:
+ kind = "unknown"
+
+ if kind == "unknown" or is_valid_linker(kind):
+ return namespace(
+ KIND=kind,
+ LINKER_FLAG=linker_flag,
+ )
+
+ result = try_linker(linker)
+ if result is None and linker:
+ die("Could not use {} as linker".format(linker))
+
+ if (
+ linker is None
+ and target.kernel == "Darwin"
+ and c_compiler.type == "clang"
+ and (
+ (developer_options and c_compiler.version >= "13.0")
+ or c_compiler.version >= "15.0"
+ )
+ ):
+ result = try_linker("lld")
+ elif (
+ linker is None
+ and (
+ developer_options
+ or (host_or_target_str == "host" and c_compiler.type == "clang")
+ )
+ and (result is None or result.KIND in ("bfd", "gold"))
+ ):
+ # try and use lld if available.
+ tried = try_linker("lld")
+ if (result is None or result.KIND != "gold") and (
+ tried is None or tried.KIND != "lld"
+ ):
+ tried = try_linker("gold")
+ if tried is None or tried.KIND != "gold":
+ tried = None
+ if tried:
+ result = tried
+
+ if result is None:
+ die("Failed to find an adequate linker")
+
+ # If an explicit linker was given, error out if what we found is different.
+ if linker and not linker.startswith(result.KIND):
+ die("Could not use {} as linker".format(linker))
+
+ return result
+
+ return select_linker
+
+
+select_linker = select_linker_tmpl(target)
+set_config("LINKER_KIND", select_linker.KIND)
+
+
+@template
+def linker_ldflags_tmpl(host_or_target):
+ if host_or_target is target:
+ deps = depends_if(
+ select_linker,
+ target,
+ target_sysroot,
+ target_multiarch_dir,
+ android_sysroot,
+ android_version,
+ c_compiler,
+ developer_options,
+ )
+ else:
+ deps = depends_if(
+ select_linker_tmpl(host),
+ host,
+ host_sysroot,
+ host_multiarch_dir,
+ dependable(None),
+ dependable(None),
+ host_c_compiler,
+ developer_options,
+ )
+
+ @deps
+ @imports("os")
+ def linker_ldflags(
+ linker,
+ target,
+ sysroot,
+ multiarch_dir,
+ android_sysroot,
+ android_version,
+ c_compiler,
+ developer_options,
+ ):
+ flags = list((linker and linker.LINKER_FLAG) or [])
+ # rpath-link is irrelevant to wasm; for more info see https://github.com/emscripten-core/emscripten/issues/11076.
+ if sysroot.path and multiarch_dir and target.os != "WASI":
+ for d in ("lib", "usr/lib"):
+ multiarch_lib_dir = os.path.join(sysroot.path, d, multiarch_dir)
+ if os.path.exists(multiarch_lib_dir):
+ # Non-Debian-patched binutils linkers (both BFD and gold) don't look
+ # up in multi-arch directories.
+ flags.append("-Wl,-rpath-link,%s" % multiarch_lib_dir)
+ # GCC also needs -L.
+ if c_compiler.type == "gcc":
+ flags.append("-L%s" % multiarch_lib_dir)
+ if (
+ c_compiler.type == "gcc"
+ and sysroot.bootstrapped
+ and sysroot.stdcxx_version
+ ):
+ flags.append(
+ "-L{}/usr/lib/gcc/{}/{}".format(
+ sysroot.path, multiarch_dir, sysroot.stdcxx_version
+ )
+ )
+ if android_sysroot:
+ # BFD/gold linkers need a manual --rpath-link for indirect
+ # dependencies.
+ flags += [
+ "-Wl,--rpath-link={}/usr/lib/{}".format(
+ android_sysroot, target.toolchain
+ ),
+ "-Wl,--rpath-link={}/usr/lib/{}/{}".format(
+ android_sysroot, target.toolchain, android_version
+ ),
+ ]
+ if (
+ developer_options
+ and linker
+ and linker.KIND == "lld"
+ and target.kernel != "WINNT"
+ ):
+ flags.append("-Wl,-O0")
+ return flags
+
+ return linker_ldflags
+
+
+linker_ldflags = linker_ldflags_tmpl(target)
+add_old_configure_assignment("LINKER_LDFLAGS", linker_ldflags)
+
+add_old_configure_assignment("HOST_LINKER_LDFLAGS", linker_ldflags_tmpl(host))
+
+
+# There's a wrinkle with MinGW: linker configuration is not enabled, so
+# `select_linker` is never invoked. Hard-code around it.
+@depends(select_linker, target, c_compiler)
+def gcc_use_gnu_ld(select_linker, target, c_compiler):
+ if select_linker is not None and target.kernel != "Darwin":
+ return select_linker.KIND in ("bfd", "gold", "lld", "mold")
+ if target.kernel == "WINNT" and c_compiler.type == "clang":
+ return True
+ return None
+
+
+# GCC_USE_GNU_LD=1 means the linker is command line compatible with GNU ld.
+set_config("GCC_USE_GNU_LD", gcc_use_gnu_ld)
+add_old_configure_assignment("GCC_USE_GNU_LD", gcc_use_gnu_ld)
+
+
+include("compile-checks.configure")
+include("arm.configure", when=depends(target.cpu)(lambda cpu: cpu == "arm"))
+
+
+@depends(
+ have_64_bit,
+ try_compile(
+ body='static_assert(sizeof(void *) == 8, "")', check_msg="for 64-bit OS"
+ ),
+)
+def check_have_64_bit(have_64_bit, compiler_have_64_bit):
+ if have_64_bit != compiler_have_64_bit:
+ configure_error(
+ "The target compiler does not agree with configure "
+ "about the target bitness."
+ )
+
+
+@depends(cxx_compiler, target)
+def needs_libstdcxx_newness_check(cxx_compiler, target):
+ # We only have to care about this on Linux and MinGW.
+ if cxx_compiler.type == "clang-cl":
+ return
+
+ if target.kernel not in ("Linux", "WINNT"):
+ return
+
+ if target.os == "Android":
+ return
+
+ return True
+
+
+def die_on_old_libstdcxx():
+ die(
+ "The libstdc++ in use is not new enough. Please run "
+ "./mach bootstrap to update your compiler, or update your system "
+ "libstdc++ installation."
+ )
+
+
+try_compile(
+ includes=["cstddef"],
+ body="\n".join(
+ [
+ # _GLIBCXX_RELEASE showed up in libstdc++ 7.
+ "#if defined(__GLIBCXX__) && !defined(_GLIBCXX_RELEASE)",
+ "# error libstdc++ not new enough",
+ "#endif",
+ "#if defined(_GLIBCXX_RELEASE)",
+ "# if _GLIBCXX_RELEASE < %d" % minimum_gcc_version().major,
+ "# error libstdc++ not new enough",
+ "# else",
+ " (void) 0",
+ "# endif",
+ "#endif",
+ ]
+ ),
+ check_msg="for new enough STL headers from libstdc++",
+ when=needs_libstdcxx_newness_check,
+ onerror=die_on_old_libstdcxx,
+)
+
+
+@depends(c_compiler, target)
+def default_debug_flags(compiler_info, target):
+ # Debug info is ON by default.
+ if compiler_info.type == "clang-cl":
+ return "-Z7"
+ elif target.kernel == "WINNT" and compiler_info.type == "clang":
+ return "-g -gcodeview"
+ # The oldest versions of supported compilers default to DWARF-4, but
+ # newer versions may default to DWARF-5 or newer (e.g. clang 14), which
+ # Valgrind doesn't support. Force-use DWARF-4.
+ return "-gdwarf-4"
+
+
+option(env="MOZ_DEBUG_FLAGS", nargs=1, help="Debug compiler flags")
+
+imply_option("--enable-debug-symbols", depends_if("--enable-debug")(lambda v: v))
+
+option(
+ "--disable-debug-symbols",
+ nargs="?",
+ help="Disable debug symbols using the given compiler flags",
+)
+
+set_config("MOZ_DEBUG_SYMBOLS", depends_if("--enable-debug-symbols")(lambda _: True))
+
+
+@depends("MOZ_DEBUG_FLAGS", "--enable-debug-symbols", default_debug_flags)
+def debug_flags(env_debug_flags, enable_debug_flags, default_debug_flags):
+ # If MOZ_DEBUG_FLAGS is set, and --enable-debug-symbols is set to a value,
+ # --enable-debug-symbols takes precedence. Note that the value of
+ # --enable-debug-symbols may be implied by --enable-debug.
+ if len(enable_debug_flags):
+ return enable_debug_flags[0]
+ if env_debug_flags:
+ return env_debug_flags[0]
+ return default_debug_flags
+
+
+set_config("MOZ_DEBUG_FLAGS", debug_flags)
+add_old_configure_assignment("MOZ_DEBUG_FLAGS", debug_flags)
+
+
+@depends(c_compiler, host)
+@imports(
+ _from="mach.logging", _import="enable_blessed", _as="_enable_ansi_escape_codes"
+)
+def color_cflags(info, host):
+ # We could test compiling with flags. But why incur the overhead when
+ # color support should always be present in a specific toolchain
+ # version?
+
+ # Code that auto-adds this flag to compiler invocations needs to
+ # determine whether the flag is already present, most likely via exact
+ # string matching on the returned value. So if the return value changes
+ # to e.g. "<x>=always", the exact string match may fail and multiple
+ # color flags could be added. So examine downstream consumers before
+ # adding flags to return values.
+ if info.type == "gcc":
+ return "-fdiagnostics-color"
+ elif info.type in ["clang", "clang-cl"]:
+ if host.os == "WINNT" and _enable_ansi_escape_codes():
+ return "-fcolor-diagnostics -fansi-escape-codes"
+ else:
+ return "-fcolor-diagnostics"
+ else:
+ return ""
+
+
+set_config("COLOR_CFLAGS", color_cflags)
+
+# Some standard library headers (notably bionic on Android) declare standard
+# functions (e.g. getchar()) and also #define macros for those standard
+# functions. libc++ deals with this by doing something like the following
+# (explanatory comments added):
+#
+# #ifdef FUNC
+# // Capture the definition of FUNC.
+# inline _LIBCPP_INLINE_VISIBILITY int __libcpp_FUNC(...) { return FUNC(...); }
+# #undef FUNC
+# // Use a real inline definition.
+# inline _LIBCPP_INLINE_VISIBILITY int FUNC(...) { return __libcpp_FUNC(...); }
+# #endif
+#
+# _LIBCPP_INLINE_VISIBILITY is typically defined as:
+#
+# __attribute__((__visibility__("hidden"), __always_inline__))
+#
+# Unfortunately, this interacts badly with our system header wrappers, as the:
+#
+# #pragma GCC visibility push(default)
+#
+# that they do prior to including the actual system header is treated by the
+# compiler as an explicit declaration of visibility on every function declared
+# in the header. Therefore, when the libc++ code above is encountered, it is
+# as though the compiler has effectively seen:
+#
+# int FUNC(...) __attribute__((__visibility__("default")));
+# int FUNC(...) __attribute__((__visibility__("hidden")));
+#
+# and the compiler complains about the mismatched visibility declarations.
+#
+# However, libc++ will only define _LIBCPP_INLINE_VISIBILITY if there is no
+# existing definition. We can therefore define it to the empty string (since
+# we are properly managing visibility ourselves) and avoid this whole mess.
+# Note that we don't need to do this with gcc, as libc++ detects gcc and
+# effectively does the same thing we are doing here.
+#
+# _LIBCPP_ALWAYS_INLINE needs a similar workaround, since it too declares
+# hidden visibility.
+#
+# _LIBCPP_HIDE_FROM_ABI is a macro in libc++ versions in NDKs >=r19. It too
+# declares hidden visibility, but it also declares functions as excluded from
+# explicit instantiation (roughly: the function can be unused in the current
+# compilation, but does not then trigger an actual definition of the function;
+# it is assumed the real definition comes from elsewhere). We need to replicate
+# this setup.
+
+
+@depends(c_compiler, target)
+def libcxx_override_visibility(c_compiler, target):
+ if c_compiler.type == "clang" and target.os == "Android":
+ return namespace(
+ empty="",
+ hide_from_abi="__attribute__((__exclude_from_explicit_instantiation__))",
+ )
+
+
+set_define("_LIBCPP_INLINE_VISIBILITY", libcxx_override_visibility.empty)
+set_define("_LIBCPP_ALWAYS_INLINE", libcxx_override_visibility.empty)
+
+set_define("_LIBCPP_HIDE_FROM_ABI", libcxx_override_visibility.hide_from_abi)
+
+
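+# Symbol visibility: on non-Windows targets, Darwin gets explicit
+# -fvisibility flags, while other platforms get hidden visibility via the
+# system header wrappers and config/gcc_hidden.h.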
+@depends(target, build_environment)
+def visibility_flags(target, env):
+ if target.os != "WINNT":
+ if target.kernel == "Darwin":
+ return ("-fvisibility=hidden", "-fvisibility-inlines-hidden")
+ return (
+ "-I%s/system_wrappers" % os.path.join(env.dist),
+ "-include",
+ "%s/config/gcc_hidden.h" % env.topsrcdir,
+ )
+
+
+@depends(target, visibility_flags)
+def wrap_system_includes(target, visibility_flags):
+ if visibility_flags and target.kernel != "Darwin":
+ return True
+
+
+set_define(
+ "HAVE_VISIBILITY_HIDDEN_ATTRIBUTE",
+ depends(visibility_flags)(lambda v: bool(v) or None),
+)
+set_define(
+ "HAVE_VISIBILITY_ATTRIBUTE", depends(visibility_flags)(lambda v: bool(v) or None)
+)
+set_config("WRAP_SYSTEM_INCLUDES", wrap_system_includes)
+set_config("VISIBILITY_FLAGS", visibility_flags)
+
+
+@template
+def depend_cflags(host_or_target_c_compiler):
+ @depends(host_or_target_c_compiler)
+ def depend_cflags(host_or_target_c_compiler):
+ if host_or_target_c_compiler.type != "clang-cl":
+ return ["-MD", "-MP", "-MF $(MDDEPDIR)/$(@F).pp"]
+ else:
+ # clang-cl doesn't accept the normal -MD -MP -MF options that clang
+ # does, but the underlying cc1 binary understands how to generate
+ # dependency files. These options are based on analyzing what the
+ # normal clang driver sends to cc1 when given the "correct"
+ # dependency options.
+ return [
+ "-Xclang",
+ "-MP",
+ "-Xclang",
+ "-dependency-file",
+ "-Xclang",
+ "$(MDDEPDIR)/$(@F).pp",
+ "-Xclang",
+ "-MT",
+ "-Xclang",
+ "$@",
+ ]
+
+ return depend_cflags
+
+
+set_config("_DEPEND_CFLAGS", depend_cflags(c_compiler))
+set_config("_HOST_DEPEND_CFLAGS", depend_cflags(host_c_compiler))
+
+
+@depends(c_compiler)
+def preprocess_option(compiler):
+ # The uses of PREPROCESS_OPTION depend on the spacing for -o/-Fi.
+ if compiler.type in ("gcc", "clang"):
+ return "-E -o "
+ else:
+ return "-P -Fi"
+
+
+set_config("PREPROCESS_OPTION", preprocess_option)
+
+
+# We only want to include windows.configure when we are compiling on
+# Windows, or for Windows.
+include("windows.configure", when=is_windows)
+
+
+# On Power ISA, determine compiler flags for VMX, VSX and VSX-3.
+
+set_config(
+ "PPC_VMX_FLAGS",
+ ["-maltivec"],
+ when=depends(target.cpu)(lambda cpu: cpu.startswith("ppc")),
+)
+
+set_config(
+ "PPC_VSX_FLAGS",
+ ["-mvsx"],
+ when=depends(target.cpu)(lambda cpu: cpu.startswith("ppc")),
+)
+
+set_config(
+ "PPC_VSX3_FLAGS",
+ ["-mvsx", "-mcpu=power9"],
+ when=depends(target.cpu)(lambda cpu: cpu.startswith("ppc")),
+)
+
+# ASAN
+# ==============================================================
+
+option("--enable-address-sanitizer", help="Enable Address Sanitizer")
+
+
+@depends(when="--enable-address-sanitizer")
+def asan():
+ return True
+
+
+add_old_configure_assignment("MOZ_ASAN", asan)
+
+# MSAN
+# ==============================================================
+
+option("--enable-memory-sanitizer", help="Enable Memory Sanitizer")
+
+
+@depends(when="--enable-memory-sanitizer")
+def msan():
+ return True
+
+
+add_old_configure_assignment("MOZ_MSAN", msan)
+
+# TSAN
+# ==============================================================
+
+option("--enable-thread-sanitizer", help="Enable Thread Sanitizer")
+
+
+@depends(when="--enable-thread-sanitizer")
+def tsan():
+ return True
+
+
+add_old_configure_assignment("MOZ_TSAN", tsan)
+
+# UBSAN
+# ==============================================================
+
+option(
+ "--enable-undefined-sanitizer", nargs="*", help="Enable UndefinedBehavior Sanitizer"
+)
+
+
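+# With no explicit checks given, enable a conservative default set; an
+# explicit list of checks overrides it.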
+@depends_if("--enable-undefined-sanitizer")
+def ubsan(options):
+ default_checks = [
+ "bool",
+ "bounds",
+ "enum",
+ "function",
+ "integer-divide-by-zero",
+ "object-size",
+ "pointer-overflow",
+ "return",
+ "vla-bound",
+ ]
+
+ checks = options if len(options) else default_checks
+
+ return ",".join(checks)
+
+
+add_old_configure_assignment("MOZ_UBSAN_CHECKS", ubsan)
+
+
+option(
+ "--enable-signed-overflow-sanitizer",
+ help="Enable UndefinedBehavior Sanitizer (Signed Integer Overflow Parts)",
+)
+
+
+@depends(when="--enable-signed-overflow-sanitizer")
+def ub_signed_overflow_san():
+ return True
+
+
+add_old_configure_assignment("MOZ_SIGNED_OVERFLOW_SANITIZE", ub_signed_overflow_san)
+
+
+option(
+ "--enable-unsigned-overflow-sanitizer",
+ help="Enable UndefinedBehavior Sanitizer (Unsigned Integer Overflow Parts)",
+)
+
+
+@depends(when="--enable-unsigned-overflow-sanitizer")
+def ub_unsigned_overflow_san():
+ return True
+
+
+add_old_configure_assignment("MOZ_UNSIGNED_OVERFLOW_SANITIZE", ub_unsigned_overflow_san)
+
+# Security Hardening
+# ==============================================================
+
+option(
+ "--enable-hardening",
+ env="MOZ_SECURITY_HARDENING",
+ help="Enables security hardening compiler options",
+)
+
+
+# This function is a bit confusing. It adds or removes hardening flags in
+# three situations: if --enable-hardening is passed, if --disable-hardening
+# is passed, and if no flag is passed.
+#
+# At the time of this writing, all flags are actually added in the
+# default no-flag case, making --enable-hardening the same as omitting the
+# flag. --disable-hardening will omit the security flags. (However, not all
+# possible security flags will be omitted by --disable-hardening, as many are
+# compiler-default options we do not explicitly enable.)
+@depends(
+ "--enable-hardening",
+ "--enable-address-sanitizer",
+ "--enable-debug",
+ "--enable-optimize",
+ c_compiler,
+ target,
+)
+def security_hardening_cflags(
+ hardening_flag, asan, debug, optimize, c_compiler, target
+):
+ compiler_is_gccish = c_compiler.type in ("gcc", "clang")
+ mingw_clang = c_compiler.type == "clang" and target.os == "WINNT"
+
+ flags = []
+ ldflags = []
+ trivial_auto_var_init = []
+
+ # WASI compiler doesn't support security hardening cflags
+ if target.os == "WASI":
+ return
+
+ # ----------------------------------------------------------
+ # If hardening is explicitly enabled, or not explicitly disabled
+ if hardening_flag.origin == "default" or hardening_flag:
+ # FORTIFY_SOURCE ------------------------------------
+ # Require optimization for FORTIFY_SOURCE. See Bug 1417452
+ # Also, undefine it before defining it just in case a distro adds it, see Bug 1418398
+ if compiler_is_gccish and optimize and not asan:
+ flags.append("-U_FORTIFY_SOURCE")
+ flags.append("-D_FORTIFY_SOURCE=2")
+ if mingw_clang:
+ # mingw-clang needs to link in ssp, which is not done by default
+ ldflags.append("-lssp")
+
+ # fstack-protector ------------------------------------
+ # Enable only if hardening is not disabled and ASAN is
+ # not on as ASAN will catch the crashes for us
+ if compiler_is_gccish and not asan:
+ flags.append("-fstack-protector-strong")
+ ldflags.append("-fstack-protector-strong")
+
+ if (
+ c_compiler.type == "clang"
+ and c_compiler.version >= "11.0.1"
+ and target.os not in ("WINNT", "OSX")
+ and target.cpu in ("x86", "x86_64", "ppc64", "s390x")
+ ):
+ flags.append("-fstack-clash-protection")
+ ldflags.append("-fstack-clash-protection")
+
+ # ftrivial-auto-var-init ------------------------------
+ # Initialize local variables with a 0xAA pattern in clang builds.
+ # Linux32 fails some xpcshell tests with -ftrivial-auto-var-init
+ linux32 = target.kernel == "Linux" and target.cpu == "x86"
+ if (
+ (c_compiler.type == "clang" or c_compiler.type == "clang-cl")
+ and c_compiler.version >= "8"
+ and not linux32
+ ):
+ if c_compiler.type == "clang-cl":
+ trivial_auto_var_init.append("-Xclang")
+ trivial_auto_var_init.append("-ftrivial-auto-var-init=pattern")
+ # Always enable on debug builds.
+ if debug:
+ flags.extend(trivial_auto_var_init)
+
+ # ASLR ------------------------------------------------
+ # ASLR (dynamicbase) is enabled by default in clang-cl; but the
+ # mingw-clang build requires it to be explicitly enabled
+ if mingw_clang:
+ ldflags.append("-Wl,--dynamicbase")
+
+ # Control Flow Guard (CFG) ----------------------------
+ if (
+ c_compiler.type == "clang-cl"
+ and c_compiler.version >= "8"
+ and (target.cpu != "aarch64" or c_compiler.version >= "8.0.1")
+ ):
+ if target.cpu == "aarch64" and c_compiler.version >= "10.0.0":
+ # The added checks in clang 10 make arm64 builds crash. (Bug 1639318)
+ flags.append("-guard:cf,nochecks")
+ else:
+ flags.append("-guard:cf")
+ # nolongjmp is needed because clang doesn't emit the CFG tables of
+ # setjmp return addresses https://bugs.llvm.org/show_bug.cgi?id=40057
+ ldflags.append("-guard:cf,nolongjmp")
+
+ # ----------------------------------------------------------
+ # If ASAN _is_ on, disable FORTIFY_SOURCE just to be safe
+ if asan:
+ flags.append("-D_FORTIFY_SOURCE=0")
+
+ # fno-common -----------------------------------------
+ # Do not merge variables for ASAN; this can detect some subtle bugs
+ if asan:
+ # clang-cl does not recognize the flag, it must be passed down to clang
+ if c_compiler.type == "clang-cl":
+ flags.append("-Xclang")
+ flags.append("-fno-common")
+
+ return namespace(
+ flags=flags,
+ ldflags=ldflags,
+ trivial_auto_var_init=trivial_auto_var_init,
+ )
+
+
+set_config("MOZ_HARDENING_CFLAGS", security_hardening_cflags.flags)
+set_config("MOZ_HARDENING_LDFLAGS", security_hardening_cflags.ldflags)
+set_config(
+ "MOZ_TRIVIAL_AUTO_VAR_INIT",
+ security_hardening_cflags.trivial_auto_var_init,
+)
+
+
+# Intel Control-flow Enforcement Technology
+# ==============================================================
+# We keep this separate from the hardening flags above, because we want to be
+# able to easily remove the flags in the build files for certain executables.
+@depends(c_compiler, target)
+def cet_ldflags(c_compiler, target):
+ ldflags = []
+ if (
+ c_compiler.type == "clang-cl"
+ and c_compiler.version >= "11"
+ and target.cpu == "x86_64"
+ ):
+ ldflags.append("-CETCOMPAT")
+ return ldflags
+
+
+set_config("MOZ_CETCOMPAT_LDFLAGS", cet_ldflags)
+
+# Frame pointers
+# ==============================================================
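+# clang-cl uses the MSVC-style -Oy- / -Oy spellings to keep / omit frame
+# pointers; other compilers use the GCC-style -f(no-)omit-frame-pointer
+# flags together with -funwind-tables.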
+@depends(c_compiler)
+def frame_pointer_flags(compiler):
+ if compiler.type == "clang-cl":
+ return namespace(
+ enable=["-Oy-"],
+ disable=["-Oy"],
+ )
+ return namespace(
+ enable=["-fno-omit-frame-pointer", "-funwind-tables"],
+ disable=["-fomit-frame-pointer", "-funwind-tables"],
+ )
+
+
+@depends(
+ moz_optimize.optimize,
+ moz_debug,
+ target,
+ "--enable-memory-sanitizer",
+ "--enable-address-sanitizer",
+ "--enable-undefined-sanitizer",
+)
+def frame_pointer_default(optimize, debug, target, msan, asan, ubsan):
+ return bool(
+ not optimize
+ or debug
+ or msan
+ or asan
+ or ubsan
+ or (target.os == "WINNT" and target.cpu in ("x86", "aarch64"))
+ or target.os == "OSX"
+ )
+
+
+option(
+ "--enable-frame-pointers",
+ default=frame_pointer_default,
+ help="{Enable|Disable} frame pointers",
+)
+
+
+@depends("--enable-frame-pointers", frame_pointer_flags)
+def frame_pointer_flags(enable, flags):
+ if enable:
+ return flags.enable
+ return flags.disable
+
+
+set_config("MOZ_FRAMEPTR_FLAGS", frame_pointer_flags)
+
+
+# Code Coverage
+# ==============================================================
+
+option("--enable-coverage", env="MOZ_CODE_COVERAGE", help="Enable code coverage")
+
+
+@depends("--enable-coverage")
+def code_coverage(value):
+ if value:
+ return True
+
+
+set_config("MOZ_CODE_COVERAGE", code_coverage)
+set_define("MOZ_CODE_COVERAGE", code_coverage)
+
+
+@depends(target, c_compiler, build_environment, when=code_coverage)
+@imports("os")
+@imports("re")
+@imports(_from="__builtin__", _import="open")
+def coverage_cflags(target, c_compiler, build_env):
+ cflags = ["--coverage"]
+
+ # clang 11 no longer accepts this flag (its behavior became the default)
+ if c_compiler.type in ("clang", "clang-cl") and c_compiler.version < "11.0.0":
+ cflags += [
+ "-Xclang",
+ "-coverage-no-function-names-in-data",
+ ]
+
+ exclude = []
+ if target.os == "WINNT" and c_compiler.type == "clang-cl":
+ # VS files
+ exclude.append("^.*[vV][sS]20[0-9]{2}.*$")
+ # Files in fetches directory.
+ exclude.append("^.*[\\\\/]fetches[\\\\/].*$")
+ elif target.os == "OSX":
+ # Files in fetches directory.
+ exclude.append("^.*/fetches/.*$")
+ elif target.os == "GNU":
+ # Files in fetches directory.
+ exclude.append("^.*/fetches/.*$")
+ # Files in /usr/
+ exclude.append("^/usr/.*$")
+
+ if exclude:
+ exclude = ";".join(exclude)
+ cflags += [
+ f"-fprofile-exclude-files={exclude}",
+ ]
+
+ response_file_path = os.path.join(build_env.topobjdir, "code_coverage_cflags")
+
+ with open(response_file_path, "w") as f:
+ f.write(" ".join(cflags))
+
+ return ["@{}".format(response_file_path)]
+
+
+set_config("COVERAGE_CFLAGS", coverage_cflags)
+
+# Assembler detection
+# ==============================================================
+
+option(env="AS", nargs=1, help="Path to the assembler")
+
+
+@depends(target, c_compiler)
+def as_info(target, c_compiler):
+ if c_compiler.type == "clang-cl":
+ ml = {
+ "x86": "ml.exe",
+ "x86_64": "ml64.exe",
+ "aarch64": "armasm64.exe",
+ }.get(target.cpu)
+ return namespace(type="masm", names=(ml,))
+ # When building with anything but clang-cl, we just use the C compiler as the assembler.
+ return namespace(type="gcc", names=(c_compiler.compiler,))
+
+
+# One would expect the assembler to be specified merely as a program. But in
+# cases where the assembler is passed down into js/, it can be specified in
+# the same way as CC: a program + a list of argument flags. We might as well
+# permit the same behavior in general, even though it seems somewhat unusual.
+# So we have to do the same sort of dance as we did above with
+# `provided_compiler`.
+provided_assembler = provided_program("AS")
+assembler = check_prog(
+ "_AS",
+ input=provided_assembler.program,
+ what="the assembler",
+ progs=as_info.names,
+ paths=vc_toolchain_search_path,
+)
+
+
+@depends(as_info, assembler, provided_assembler, c_compiler)
+def as_with_flags(as_info, assembler, provided_assembler, c_compiler):
+ if provided_assembler:
+ return provided_assembler.wrapper + [assembler] + provided_assembler.flags
+
+ if as_info.type == "masm":
+ return assembler
+
+ assert as_info.type == "gcc"
+
+ # Need to add compiler wrappers and flags as appropriate.
+ return c_compiler.wrapper + [assembler] + c_compiler.flags
+
+
+set_config("AS", as_with_flags)
+
+
+@depends(assembler, c_compiler, extra_toolchain_flags)
+@imports("subprocess")
+@imports(_from="os", _import="devnull")
+def gnu_as(assembler, c_compiler, toolchain_flags):
+ # clang uses a compatible GNU assembler.
+ if c_compiler.type == "clang":
+ return True
+
+ if c_compiler.type == "gcc":
+ cmd = [assembler] + c_compiler.flags
+ if toolchain_flags:
+ cmd += toolchain_flags
+ cmd += ["-Wa,--version", "-c", "-o", devnull, "-x", "assembler", "-"]
+ # We don't actually have to provide any input on stdin; `Popen.communicate` will
+ # close the stdin pipe.
+ # clang will error if it uses its integrated assembler for this target,
+ # so handle failures gracefully.
+ if "GNU" in check_cmd_output(*cmd, stdin=subprocess.PIPE, onerror=lambda: ""):
+ return True
+
+
+set_config("GNU_AS", gnu_as)
+
+
+@depends(as_info, target)
+def as_dash_c_flag(as_info, target):
+ # armasm64 doesn't understand -c.
+ if as_info.type == "masm" and target.cpu == "aarch64":
+ return ""
+ else:
+ return "-c"
+
+
+set_config("AS_DASH_C_FLAG", as_dash_c_flag)
+
+
+@depends(as_info, target)
+def as_outoption(as_info, target):
+ # The uses of ASOUTOPTION depend on the spacing for -o/-Fo.
+ if as_info.type == "masm" and target.cpu != "aarch64":
+ return "-Fo"
+
+ return "-o "
+
+
+set_config("ASOUTOPTION", as_outoption)
+
+# clang plugin handling
+# ==============================================================
+
+option(
+ "--enable-clang-plugin",
+ env="ENABLE_CLANG_PLUGIN",
+ help="Enable building with the Clang plugin (gecko specific static analyzers)",
+)
+
+add_old_configure_assignment(
+ "ENABLE_CLANG_PLUGIN", depends_if("--enable-clang-plugin")(lambda _: True)
+)
+
+
+@depends(host_c_compiler, c_compiler, when="--enable-clang-plugin")
+def llvm_config(host_c_compiler, c_compiler):
+ clang = None
+ for compiler in (host_c_compiler, c_compiler):
+ if compiler and compiler.type == "clang":
+ clang = compiler.compiler
+ break
+ elif compiler and compiler.type == "clang-cl":
+ clang = os.path.join(os.path.dirname(compiler.compiler), "clang")
+ break
+
+ if not clang:
+ die("Cannot --enable-clang-plugin when not building with clang")
+ llvm_config = "llvm-config"
+ out = check_cmd_output(clang, "--print-prog-name=llvm-config", onerror=lambda: None)
+ if out:
+ llvm_config = out.rstrip()
+ return (llvm_config,)
+
+
+llvm_config = check_prog(
+ "LLVM_CONFIG",
+ llvm_config,
+ what="llvm-config",
+ when="--enable-clang-plugin",
+ paths=clang_search_path,
+)
+
+add_old_configure_assignment("LLVM_CONFIG", llvm_config)
+
+
+option(
+ "--enable-clang-plugin-alpha",
+ env="ENABLE_CLANG_PLUGIN_ALPHA",
+ help="Enable static analysis with clang-plugin alpha checks.",
+)
+
+
+@depends("--enable-clang-plugin", "--enable-clang-plugin-alpha")
+def check_clang_plugin_alpha(enable_clang_plugin, enable_clang_plugin_alpha):
+ if enable_clang_plugin_alpha:
+ if enable_clang_plugin:
+ return True
+ die("Cannot enable clang-plugin alpha checkers without --enable-clang-plugin.")
+
+
+add_old_configure_assignment("ENABLE_CLANG_PLUGIN_ALPHA", check_clang_plugin_alpha)
+set_define("MOZ_CLANG_PLUGIN_ALPHA", check_clang_plugin_alpha)
+
+option(
+ "--enable-mozsearch-plugin",
+ env="ENABLE_MOZSEARCH_PLUGIN",
+ help="Enable building with the mozsearch indexer plugin",
+)
+
+add_old_configure_assignment(
+ "ENABLE_MOZSEARCH_PLUGIN", depends_if("--enable-mozsearch-plugin")(lambda _: True)
+)
+
+# Libstdc++ compatibility hacks
+# ==============================================================
+#
+@depends(target, host)
+def target_or_host_is_linux(target, host):
+ return any(t.os == "GNU" and t.kernel == "Linux" for t in (target, host))
+
+
+option(
+ "--enable-stdcxx-compat",
+ env="MOZ_STDCXX_COMPAT",
+ help="Enable compatibility with older libstdc++",
+ when=target_or_host_is_linux,
+)
+
+
+@depends("--enable-stdcxx-compat", when=target_or_host_is_linux)
+def stdcxx_compat(value):
+ if value:
+ return True
+
+
+set_config("MOZ_STDCXX_COMPAT", True, when=stdcxx_compat)
+add_flag(
+ "-D_GLIBCXX_USE_CXX11_ABI=0",
+ cxx_compiler,
+ when=stdcxx_compat,
+)
+add_flag(
+ "-D_GLIBCXX_USE_CXX11_ABI=0",
+ host_cxx_compiler,
+ when=stdcxx_compat,
+)
+
+
+# Support various fuzzing options
+# ==============================================================
+option("--enable-fuzzing", help="Enable fuzzing support")
+
+
+@depends(build_project)
+def js_build(build_project):
+ return build_project == "js"
+
+
+option(
+ "--enable-js-fuzzilli",
+ when=js_build,
+ help="Enable fuzzilli support for the JS engine",
+)
+
+
+option(
+ "--enable-snapshot-fuzzing",
+ help="Enable experimental snapshot fuzzing support",
+)
+
+
+imply_option("--enable-fuzzing", True, when="--enable-snapshot-fuzzing")
+
+
+@depends("--enable-snapshot-fuzzing")
+def enable_snapshot_fuzzing(value):
+ if value:
+ return True
+
+
+@depends("--enable-fuzzing")
+def enable_fuzzing(value):
+ if value:
+ return True
+
+
+@depends("--enable-js-fuzzilli", when=js_build)
+def enable_js_fuzzilli(value):
+ if value:
+ return True
+
+
+@depends(enable_fuzzing, enable_snapshot_fuzzing)
+def check_aflfuzzer(fuzzing, snapshot_fuzzing):
+ if fuzzing and not snapshot_fuzzing:
+ return True
+
+
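+# AFL's compiler wrappers define __AFL_COMPILER, so this snippet only
+# compiles when building with an AFL-instrumented compiler.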
+@depends(
+ try_compile(
+ body="__AFL_COMPILER;", check_msg="for AFL compiler", when=check_aflfuzzer
+ )
+)
+def enable_aflfuzzer(afl):
+ if afl:
+ return True
+
+
+@depends(enable_fuzzing, enable_aflfuzzer, enable_snapshot_fuzzing, c_compiler, target)
+def enable_libfuzzer(fuzzing, afl, snapshot_fuzzing, c_compiler, target):
+ if (
+ fuzzing
+ and not afl
+ and not snapshot_fuzzing
+ and c_compiler.type == "clang"
+ and target.os != "Android"
+ ):
+ return True
+
+
+@depends(enable_fuzzing, enable_aflfuzzer, enable_libfuzzer, enable_js_fuzzilli)
+def enable_fuzzing_interfaces(fuzzing, afl, libfuzzer, enable_js_fuzzilli):
+ if fuzzing and (afl or libfuzzer) and not enable_js_fuzzilli:
+ return True
+
+
+set_config("FUZZING", enable_fuzzing)
+set_define("FUZZING", enable_fuzzing)
+
+set_config("LIBFUZZER", enable_libfuzzer)
+set_define("LIBFUZZER", enable_libfuzzer)
+add_old_configure_assignment("LIBFUZZER", enable_libfuzzer)
+
+set_config("AFLFUZZ", enable_aflfuzzer)
+set_define("AFLFUZZ", enable_aflfuzzer)
+
+set_config("FUZZING_INTERFACES", enable_fuzzing_interfaces)
+set_define("FUZZING_INTERFACES", enable_fuzzing_interfaces)
+add_old_configure_assignment("FUZZING_INTERFACES", enable_fuzzing_interfaces)
+
+set_config("FUZZING_JS_FUZZILLI", enable_js_fuzzilli)
+set_define("FUZZING_JS_FUZZILLI", enable_js_fuzzilli)
+
+set_config("FUZZING_SNAPSHOT", enable_snapshot_fuzzing)
+set_define("FUZZING_SNAPSHOT", enable_snapshot_fuzzing)
+
+
+@depends(
+ c_compiler.try_compile(
+ flags=["-fsanitize=fuzzer-no-link"],
+ when=enable_fuzzing,
+ check_msg="whether the C compiler supports -fsanitize=fuzzer-no-link",
+ ),
+ tsan,
+ enable_js_fuzzilli,
+)
+def libfuzzer_flags(value, tsan, enable_js_fuzzilli):
+ if tsan:
+ # With ThreadSanitizer, we should not use any libFuzzer instrumentation because
+ # it is incompatible (e.g. there are races on global sanitizer coverage counters).
+ # Instead we use an empty set of flags here but still build the fuzzing targets.
+ # With this setup, we can still run files through these targets in TSan builds,
+ # e.g. those obtained from regular fuzzing.
+ # This code can be removed once libFuzzer has been made compatible with TSan.
+ #
+ # Also, this code needs to be kept in sync with certain gyp files, currently:
+ # - dom/media/webrtc/transport/third_party/nICEr/nicer.gyp
+ return namespace(no_link_flag_supported=False, use_flags=[])
+
+ if enable_js_fuzzilli:
+ # Fuzzilli comes with its own trace-pc interceptors and flag requirements.
+ no_link_flag_supported = False
+ use_flags = ["-fsanitize-coverage=trace-pc-guard", "-g"]
+ elif value:
+ no_link_flag_supported = True
+ # recommended for (and only supported by) clang >= 6
+ use_flags = ["-fsanitize=fuzzer-no-link"]
+ else:
+ no_link_flag_supported = False
+ use_flags = ["-fsanitize-coverage=trace-pc-guard,trace-cmp"]
+
+ return namespace(
+ no_link_flag_supported=no_link_flag_supported,
+ use_flags=use_flags,
+ )
+
+
+set_config("HAVE_LIBFUZZER_FLAG_FUZZER_NO_LINK", libfuzzer_flags.no_link_flag_supported)
+set_config("LIBFUZZER_FLAGS", libfuzzer_flags.use_flags)
+add_old_configure_assignment("LIBFUZZER_FLAGS", libfuzzer_flags.use_flags)
+
+# Shared library building
+# ==============================================================
+
+# XXX: The use of makefile constructs in these variables is awful.
+@depends(target, c_compiler)
+def make_shared_library(target, compiler):
+ if target.os == "WINNT":
+ if compiler.type == "gcc":
+ return namespace(
+ mkshlib=["$(CXX)", "$(DSO_LDOPTS)", "-o", "$@"],
+ mkcshlib=["$(CC)", "$(DSO_LDOPTS)", "-o", "$@"],
+ )
+ elif compiler.type == "clang":
+ return namespace(
+ mkshlib=[
+ "$(CXX)",
+ "$(DSO_LDOPTS)",
+ "-Wl,-pdb,$(LINK_PDBFILE)",
+ "-o",
+ "$@",
+ ],
+ mkcshlib=[
+ "$(CC)",
+ "$(DSO_LDOPTS)",
+ "-Wl,-pdb,$(LINK_PDBFILE)",
+ "-o",
+ "$@",
+ ],
+ )
+ else:
+ linker = [
+ "$(LINKER)",
+ "-NOLOGO",
+ "-DLL",
+ "-OUT:$@",
+ "-PDB:$(LINK_PDBFILE)",
+ "$(DSO_LDOPTS)",
+ ]
+ return namespace(
+ mkshlib=linker,
+ mkcshlib=linker,
+ )
+
+ cc = ["$(CC)", "$(COMPUTED_C_LDFLAGS)"]
+ cxx = ["$(CXX)", "$(COMPUTED_CXX_LDFLAGS)"]
+ flags = ["$(PGO_CFLAGS)", "$(DSO_LDOPTS)"]
+ output = ["-o", "$@"]
+
+ if target.kernel == "Darwin":
+ soname = []
+ elif target.os == "NetBSD":
+ soname = ["-Wl,-soname,$(DSO_SONAME)"]
+ else:
+ assert compiler.type in ("gcc", "clang")
+
+ soname = ["-Wl,-h,$(DSO_SONAME)"]
+
+ return namespace(
+ mkshlib=cxx + flags + soname + output,
+ mkcshlib=cc + flags + soname + output,
+ )
+
+
+set_config("MKSHLIB", make_shared_library.mkshlib)
+set_config("MKCSHLIB", make_shared_library.mkcshlib)
+
+
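+# Resource compiler: mingw builds use (possibly prefixed) windres, while
+# clang-cl builds use llvm-rc.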
+@depends(c_compiler, toolchain_prefix, when=target_is_windows)
+def rc_names(c_compiler, toolchain_prefix):
+ if c_compiler.type in ("gcc", "clang"):
+ return tuple("%s%s" % (p, "windres") for p in ("",) + (toolchain_prefix or ()))
+ return ("llvm-rc",)
+
+
+check_prog("RC", rc_names, paths=clang_search_path, when=target_is_windows)
+
+
+@template
+def ar_config(c_compiler, toolchain_prefix=None):
+ if not toolchain_prefix:
+ toolchain_prefix = dependable(None)
+
+ @depends(toolchain_prefix, c_compiler)
+ def ar_config(toolchain_prefix, c_compiler):
+ if c_compiler.type == "clang-cl":
+ return namespace(
+ names=("llvm-lib",),
+ flags=("-llvmlibthin", "-out:$@"),
+ )
+
+ names = tuple("%s%s" % (p, "ar") for p in (toolchain_prefix or ()) + ("",))
+ if c_compiler.type == "clang":
+ # Get the llvm-ar path as per the output from clang --print-prog-name=llvm-ar
+ # so that we directly get the one under the clang directory, rather than one
+ # that might be in /usr/bin and that might point to one from a different version
+ # of clang.
+ out = check_cmd_output(
+ c_compiler.compiler, "--print-prog-name=llvm-ar", onerror=lambda: None
+ )
+ llvm_ar = out.rstrip() if out else "llvm-ar"
+ names = (llvm_ar,) + names
+
+ return namespace(
+ names=names,
+ flags=("crs", "$@"),
+ )
+
+ return ar_config
+
+
+target_ar_config = ar_config(c_compiler, toolchain_prefix)
+
+check_prog("AR", target_ar_config.names, paths=clang_search_path)
+
+set_config("AR_FLAGS", target_ar_config.flags)
+
+host_ar_config = ar_config(host_c_compiler)
+
+check_prog("HOST_AR", host_ar_config.names, paths=clang_search_path)
+
+
+@depends(toolchain_prefix, c_compiler)
+def nm_names(toolchain_prefix, c_compiler):
+ names = tuple("%s%s" % (p, "nm") for p in (toolchain_prefix or ()) + ("",))
+ if c_compiler.type == "clang":
+ # Get the llvm-nm path as per the output from clang --print-prog-name=llvm-nm
+ # so that we directly get the one under the clang directory, rather than one
+ # that might be in /usr/bin and that might point to one from a different version
+ # of clang.
+ out = check_cmd_output(
+ c_compiler.compiler, "--print-prog-name=llvm-nm", onerror=lambda: None
+ )
+ llvm_nm = out.rstrip() if out else "llvm-nm"
+ names = (llvm_nm,) + names
+
+ return names
+
+
+check_prog("NM", nm_names, paths=clang_search_path, when=target_has_linux_kernel)
+
+
+option("--enable-cpp-rtti", help="Enable C++ RTTI")
+
+add_old_configure_assignment("_MOZ_USE_RTTI", "1", when="--enable-cpp-rtti")
+
+
+option(
+ "--enable-path-remapping",
+ nargs="*",
+ choices=("c", "rust"),
+ help="Enable remapping source and object paths in compiled outputs.",
+)
+
+
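+# With no explicit values, --enable-path-remapping enables remapping for both
+# C and Rust; an explicit list restricts it to the given languages.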
+@depends("--enable-path-remapping")
+def path_remapping(value):
+ if len(value):
+ return value
+ if bool(value):
+ return ["c", "rust"]
+ return []
+
+
+@depends(
+ target,
+ build_environment,
+ target_sysroot.path,
+ valid_windows_sdk_dir,
+ vc_path,
+ when="--enable-path-remapping",
+)
+def path_remappings(target, build_env, sysroot_path, windows_sdk_dir, vc_path):
+ win = target.kernel == "WINNT"
+
+ # The prefix maps are processed in the order they're specified on the
+ # command line. Therefore, to accommodate object directories in the source
+ # directory, it's important that we map the topobjdir before the topsrcdir,
+ # 'cuz we might have /src/obj/=/o/ and /src/=/s/. The various other
+ # directories might be subdirectories of topsrcdir as well, so they come
+ # earlier still.
+
+ path_remappings = []
+
+ # We will have only one sysroot or SDK, so all can have the same mnemonic: K
+ # for "kit" (since S is taken for "source"). See
+ # https://blog.llvm.org/2019/11/deterministic-builds-with-clang-and-lld.html
+ # for how to use the Windows `subst` command to map these in debuggers and
+ # IDEs.
+ if sysroot_path:
+ path_remappings.append((sysroot_path, "k:/" if win else "/sysroot/"))
+ if windows_sdk_dir:
+ path_remappings.append(
+ (windows_sdk_dir.path, "k:/" if win else "/windows_sdk/")
+ )
+ if vc_path:
+ path_remappings.append((vc_path, "v:/" if win else "/vc/"))
+
+ path_remappings += [
+ (build_env.topobjdir, "o:/" if win else "/topobjdir/"),
+ (build_env.topsrcdir, "s:/" if win else "/topsrcdir/"),
+ ]
+
+ path_remappings = [
+ (normsep(old).rstrip("/") + "/", new) for old, new in path_remappings
+ ]
+
+ # It is tempting to sort these, but we want the order to be the same across
+ # machines so that we can share cache hits. Therefore we reject bad
+ # configurations rather than trying to make the configuration good.
+ for i in range(len(path_remappings) - 1):
+ p = path_remappings[i][0]
+ for q, _ in path_remappings[i + 1 :]:
+ if q.startswith(p):
+ die(f"Cannot remap paths because {p} is an ancestor of {q}")
+
+ return path_remappings
+
+
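+# SIMD compiler flags
+# ==============================================================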
+set_config("MMX_FLAGS", ["-mmmx"])
+set_config("SSE_FLAGS", ["-msse"])
+set_config("SSE2_FLAGS", ["-msse2"])
+set_config("SSSE3_FLAGS", ["-mssse3"])
+set_config("SSE4_2_FLAGS", ["-msse4.2"])
+set_config("FMA_FLAGS", ["-mfma"])
+set_config("AVX2_FLAGS", ["-mavx2"])