author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit    698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree      173a775858bd501c378080a10dca74132f05bc50 /compiler/rustc_codegen_gcc
parent    Initial commit. (diff)
Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_gcc')
-rw-r--r--  compiler/rustc_codegen_gcc/.github/workflows/ci.yml | 131
-rw-r--r--  compiler/rustc_codegen_gcc/.rustfmt.toml | 1
-rw-r--r--  compiler/rustc_codegen_gcc/Cargo.lock | 330
-rw-r--r--  compiler/rustc_codegen_gcc/Cargo.toml | 54
-rw-r--r--  compiler/rustc_codegen_gcc/LICENSE-APACHE | 176
-rw-r--r--  compiler/rustc_codegen_gcc/LICENSE-MIT | 23
-rw-r--r--  compiler/rustc_codegen_gcc/Readme.md | 147
-rwxr-xr-x  compiler/rustc_codegen_gcc/build.sh | 67
-rw-r--r--  compiler/rustc_codegen_gcc/build_sysroot/Cargo.toml | 19
-rwxr-xr-x  compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh | 30
-rwxr-xr-x  compiler/rustc_codegen_gcc/build_sysroot/prepare_sysroot_src.sh | 39
-rw-r--r--  compiler/rustc_codegen_gcc/build_sysroot/src/lib.rs | 1
-rwxr-xr-x  compiler/rustc_codegen_gcc/cargo.sh | 23
-rwxr-xr-x  compiler/rustc_codegen_gcc/clean_all.sh | 6
-rw-r--r--  compiler/rustc_codegen_gcc/config.sh | 52
-rw-r--r--  compiler/rustc_codegen_gcc/crate_patches/0002-rand-Disable-failing-test.patch | 32
-rw-r--r--  compiler/rustc_codegen_gcc/example/alloc_example.rs | 41
-rw-r--r--  compiler/rustc_codegen_gcc/example/alloc_system.rs | 212
-rw-r--r--  compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs | 69
-rw-r--r--  compiler/rustc_codegen_gcc/example/dst-field-align.rs | 67
-rw-r--r--  compiler/rustc_codegen_gcc/example/example.rs | 208
-rw-r--r--  compiler/rustc_codegen_gcc/example/mini_core.rs | 599
-rw-r--r--  compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs | 431
-rw-r--r--  compiler/rustc_codegen_gcc/example/mod_bench.rs | 37
-rw-r--r--  compiler/rustc_codegen_gcc/example/std_example.rs | 286
-rw-r--r--  compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs | 97
-rw-r--r--  compiler/rustc_codegen_gcc/example/track-caller-attribute.rs | 40
-rw-r--r--  compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch | 63
-rw-r--r--  compiler/rustc_codegen_gcc/patches/0023-core-Ignore-failing-tests.patch | 49
-rw-r--r--  compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch | 29
-rw-r--r--  compiler/rustc_codegen_gcc/patches/0028-core-Disable-long-running-tests.patch | 32
-rwxr-xr-x  compiler/rustc_codegen_gcc/prepare.sh | 30
-rwxr-xr-x  compiler/rustc_codegen_gcc/prepare_build.sh | 5
-rw-r--r--  compiler/rustc_codegen_gcc/rust-toolchain | 3
-rw-r--r--  compiler/rustc_codegen_gcc/rustc_patches/compile_test.patch | 14
-rwxr-xr-x  compiler/rustc_codegen_gcc/rustup.sh | 29
-rw-r--r--  compiler/rustc_codegen_gcc/src/abi.rs | 179
-rw-r--r--  compiler/rustc_codegen_gcc/src/allocator.rs | 123
-rw-r--r--  compiler/rustc_codegen_gcc/src/archive.rs | 189
-rw-r--r--  compiler/rustc_codegen_gcc/src/asm.rs | 817
-rw-r--r--  compiler/rustc_codegen_gcc/src/back/mod.rs | 1
-rw-r--r--  compiler/rustc_codegen_gcc/src/back/write.rs | 83
-rw-r--r--  compiler/rustc_codegen_gcc/src/base.rs | 154
-rw-r--r--  compiler/rustc_codegen_gcc/src/builder.rs | 1561
-rw-r--r--  compiler/rustc_codegen_gcc/src/callee.rs | 77
-rw-r--r--  compiler/rustc_codegen_gcc/src/common.rs | 479
-rw-r--r--  compiler/rustc_codegen_gcc/src/consts.rs | 405
-rw-r--r--  compiler/rustc_codegen_gcc/src/context.rs | 553
-rw-r--r--  compiler/rustc_codegen_gcc/src/coverageinfo.rs | 69
-rw-r--r--  compiler/rustc_codegen_gcc/src/debuginfo.rs | 62
-rw-r--r--  compiler/rustc_codegen_gcc/src/declare.rs | 145
-rw-r--r--  compiler/rustc_codegen_gcc/src/int.rs | 742
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/archs.rs | 5722
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs | 250
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/mod.rs | 1134
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/simd.rs | 751
-rw-r--r--  compiler/rustc_codegen_gcc/src/lib.rs | 331
-rw-r--r--  compiler/rustc_codegen_gcc/src/mono_item.rs | 38
-rw-r--r--  compiler/rustc_codegen_gcc/src/type_.rs | 303
-rw-r--r--  compiler/rustc_codegen_gcc/src/type_of.rs | 385
-rwxr-xr-x  compiler/rustc_codegen_gcc/test.sh | 291
-rw-r--r--  compiler/rustc_codegen_gcc/tests/lang_tests_common.rs | 68
-rw-r--r--  compiler/rustc_codegen_gcc/tests/lang_tests_debug.rs | 5
-rw-r--r--  compiler/rustc_codegen_gcc/tests/lang_tests_release.rs | 5
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/abort1.rs | 51
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/abort2.rs | 53
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/array.rs | 229
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/asm.rs | 172
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/assign.rs | 153
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/closure.rs | 230
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/condition.rs | 320
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/empty_main.rs | 39
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/exit.rs | 49
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/exit_code.rs | 39
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs | 223
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/int.rs | 340
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/int_overflow.rs | 140
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/mut_ref.rs | 165
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/operations.rs | 221
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs | 222
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/return-tuple.rs | 72
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/slice.rs | 128
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/static.rs | 112
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/structs.rs | 70
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/tuple.rs | 51
-rw-r--r--  compiler/rustc_codegen_gcc/tools/generate_intrinsics.py | 238
86 files changed, 21711 insertions, 0 deletions
diff --git a/compiler/rustc_codegen_gcc/.github/workflows/ci.yml b/compiler/rustc_codegen_gcc/.github/workflows/ci.yml
new file mode 100644
index 000000000..8ebdabe82
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/.github/workflows/ci.yml
@@ -0,0 +1,131 @@
+name: CI
+
+on:
+ - push
+ - pull_request
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ strategy:
+ fail-fast: false
+ matrix:
+ libgccjit_version: ["libgccjit.so", "libgccjit_without_int128.so", "libgccjit12.so"]
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - uses: actions/checkout@v2
+ with:
+ repository: llvm/llvm-project
+ path: llvm
+
+ - name: Install packages
+ run: sudo apt-get install ninja-build ripgrep
+
+ - name: Download artifact
+ uses: dawidd6/action-download-artifact@v2
+ with:
+ workflow: main.yml
+ name: ${{ matrix.libgccjit_version }}
+ path: gcc-build
+ repo: antoyo/gcc
+        search_artifacts: true # Otherwise the action only checks the last workflow run, and that won't work since we want artifacts from multiple runs.
+
+ - name: Setup path to libgccjit
+ run: |
+ echo $(readlink -f gcc-build) > gcc_path
+ # NOTE: the filename is still libgccjit.so even when the artifact name is different.
+ ln gcc-build/libgccjit.so gcc-build/libgccjit.so.0
+
+ - name: Set env
+ run: |
+ echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
+ echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
+ echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV
+
+ - name: Set RUST_COMPILER_RT_ROOT
+ run: echo "RUST_COMPILER_RT_ROOT="${{ env.workspace }}/llvm/compiler-rt >> $GITHUB_ENV
+
+ # https://github.com/actions/cache/issues/133
+ - name: Fixup owner of ~/.cargo/
+ # Don't remove the trailing /. It is necessary to follow the symlink.
+ run: sudo chown -R $(whoami):$(id -ng) ~/.cargo/
+
+ - name: Cache cargo installed crates
+ uses: actions/cache@v1.1.2
+ with:
+ path: ~/.cargo/bin
+ key: cargo-installed-crates2-ubuntu-latest
+
+ - name: Cache cargo registry
+ uses: actions/cache@v1
+ with:
+ path: ~/.cargo/registry
+ key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Cache cargo index
+ uses: actions/cache@v1
+ with:
+ path: ~/.cargo/git
+ key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Cache cargo target dir
+ uses: actions/cache@v1.1.2
+ with:
+ path: target
+ key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }}
+
+ - name: Build
+ if: matrix.libgccjit_version != 'libgccjit12.so'
+ run: |
+ ./prepare_build.sh
+ ./build.sh
+ cargo test
+ ./clean_all.sh
+
+ - name: Build
+ if: matrix.libgccjit_version == 'libgccjit12.so'
+ run: |
+ ./prepare_build.sh
+ ./build.sh --no-default-features
+ cargo test --no-default-features
+ ./clean_all.sh
+
+ - name: Prepare dependencies
+ run: |
+ git config --global user.email "user@example.com"
+ git config --global user.name "User"
+ ./prepare.sh
+
+ # Compile is a separate step, as the actions-rs/cargo action supports error annotations
+ - name: Compile
+ uses: actions-rs/cargo@v1.0.3
+ with:
+ command: build
+ args: --release
+
+ - name: Test
+ if: matrix.libgccjit_version != 'libgccjit12.so'
+ run: |
+ # Enable backtraces for easier debugging
+ export RUST_BACKTRACE=1
+
+ # Reduce amount of benchmark runs as they are slow
+ export COMPILE_RUNS=2
+ export RUN_RUNS=2
+
+ ./test.sh --release
+
+ - name: Test
+ if: matrix.libgccjit_version == 'libgccjit12.so'
+ run: |
+ # Enable backtraces for easier debugging
+ export RUST_BACKTRACE=1
+
+ # Reduce amount of benchmark runs as they are slow
+ export COMPILE_RUNS=2
+ export RUN_RUNS=2
+
+ ./test.sh --release --no-default-features
diff --git a/compiler/rustc_codegen_gcc/.rustfmt.toml b/compiler/rustc_codegen_gcc/.rustfmt.toml
new file mode 100644
index 000000000..c7ad93baf
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/.rustfmt.toml
@@ -0,0 +1 @@
+disable_all_formatting = true
diff --git a/compiler/rustc_codegen_gcc/Cargo.lock b/compiler/rustc_codegen_gcc/Cargo.lock
new file mode 100644
index 000000000..6df210247
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/Cargo.lock
@@ -0,0 +1,330 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "aho-corasick"
+version = "0.7.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "ar"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "450575f58f7bee32816abbff470cbc47797397c2a81e0eaced4b98436daf52e1"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "fm"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68fda3cff2cce84c19e5dfa5179a4b35d2c0f18b893f108002b8a6a54984acca"
+dependencies = [
+ "regex",
+]
+
+[[package]]
+name = "gccjit"
+version = "1.0.0"
+source = "git+https://github.com/antoyo/gccjit.rs#bdb86fb5092895ff5589726b33250010c64d93f6"
+dependencies = [
+ "gccjit_sys",
+]
+
+[[package]]
+name = "gccjit_sys"
+version = "0.0.1"
+source = "git+https://github.com/antoyo/gccjit.rs#bdb86fb5092895ff5589726b33250010c64d93f6"
+dependencies = [
+ "libc 0.1.12",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "unicode-width",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
+dependencies = [
+ "cfg-if",
+ "libc 0.2.112",
+ "wasi",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc 0.2.112",
+]
+
+[[package]]
+name = "lang_tester"
+version = "0.3.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96bd995a092cac79868250589869b5a5d656b02a02bd74c8ebdc566dc7203090"
+dependencies = [
+ "fm",
+ "getopts",
+ "libc 0.2.112",
+ "num_cpus",
+ "termcolor",
+ "threadpool",
+ "wait-timeout",
+ "walkdir",
+]
+
+[[package]]
+name = "libc"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e32a70cf75e5846d53a673923498228bbec6a8624708a9ea5645f075d6276122"
+
+[[package]]
+name = "libc"
+version = "0.2.112"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125"
+
+[[package]]
+name = "memchr"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
+
+[[package]]
+name = "num_cpus"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
+dependencies = [
+ "hermit-abi",
+ "libc 0.2.112",
+]
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba"
+
+[[package]]
+name = "rand"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
+dependencies = [
+ "libc 0.2.112",
+ "rand_chacha",
+ "rand_core",
+ "rand_hc",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
+dependencies = [
+ "rand_core",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.2.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "regex"
+version = "1.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
+
+[[package]]
+name = "remove_dir_all"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "rustc_codegen_gcc"
+version = "0.1.0"
+dependencies = [
+ "ar",
+ "gccjit",
+ "lang_tester",
+ "target-lexicon",
+ "tempfile",
+]
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "target-lexicon"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab0e7238dcc7b40a7be719a25365910f6807bd864f4cce6b2e6b873658e2b19d"
+
+[[package]]
+name = "tempfile"
+version = "3.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
+dependencies = [
+ "cfg-if",
+ "libc 0.2.112",
+ "rand",
+ "redox_syscall",
+ "remove_dir_all",
+ "winapi",
+]
+
+[[package]]
+name = "termcolor"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "threadpool"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
+dependencies = [
+ "num_cpus",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
+
+[[package]]
+name = "wait-timeout"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6"
+dependencies = [
+ "libc 0.2.112",
+]
+
+[[package]]
+name = "walkdir"
+version = "2.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56"
+dependencies = [
+ "same-file",
+ "winapi",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasi"
+version = "0.10.2+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/compiler/rustc_codegen_gcc/Cargo.toml b/compiler/rustc_codegen_gcc/Cargo.toml
new file mode 100644
index 000000000..211d19a8d
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/Cargo.toml
@@ -0,0 +1,54 @@
+[package]
+name = "rustc_codegen_gcc"
+version = "0.1.0"
+authors = ["Antoni Boucher <bouanto@zoho.com>"]
+edition = "2018"
+license = "MIT OR Apache-2.0"
+
+[lib]
+crate-type = ["dylib"]
+
+[[test]]
+name = "lang_tests_debug"
+path = "tests/lang_tests_debug.rs"
+harness = false
+[[test]]
+name = "lang_tests_release"
+path = "tests/lang_tests_release.rs"
+harness = false
+
+[features]
+default = ["master"]
+master = ["gccjit/master"]
+
+[dependencies]
+gccjit = { git = "https://github.com/antoyo/gccjit.rs" }
+
+# Local copy.
+#gccjit = { path = "../gccjit.rs" }
+
+target-lexicon = "0.10.0"
+
+ar = "0.8.0"
+
+[dev-dependencies]
+lang_tester = "0.3.9"
+tempfile = "3.1.0"
+
+[profile.dev]
+# Compiling dependencies with optimizations makes running the tests much faster.
+opt-level = 3
+
+[profile.dev.package.rustc_codegen_gcc]
+# Disabling optimizations for cg_gccjit itself makes compilation after a change faster.
+opt-level = 0
+
+# Disable optimizations and debuginfo of build scripts and some of the heavy build deps, as the
+# execution time of build scripts is so fast that optimizing them slows down the total build time.
+[profile.dev.build-override]
+opt-level = 0
+debug = false
+
+[profile.release.build-override]
+opt-level = 0
+debug = false
diff --git a/compiler/rustc_codegen_gcc/LICENSE-APACHE b/compiler/rustc_codegen_gcc/LICENSE-APACHE
new file mode 100644
index 000000000..1b5ec8b78
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/LICENSE-APACHE
@@ -0,0 +1,176 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git a/compiler/rustc_codegen_gcc/LICENSE-MIT b/compiler/rustc_codegen_gcc/LICENSE-MIT
new file mode 100644
index 000000000..31aa79387
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/compiler/rustc_codegen_gcc/Readme.md b/compiler/rustc_codegen_gcc/Readme.md
new file mode 100644
index 000000000..fe23a2676
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/Readme.md
@@ -0,0 +1,147 @@
+# WIP libgccjit codegen backend for Rust
+
+This is a GCC codegen for rustc, which means it can be loaded by the existing rustc frontend, but benefits from GCC: more architectures are supported and GCC's optimizations are used.
+
+**Despite its name, libgccjit can be used for ahead-of-time compilation, as it is used here.**
+
+## Motivation
+
+The primary goal of this project is to be able to compile Rust code on platforms unsupported by LLVM.
+A secondary goal is to check if using the gcc backend will provide any run-time speed improvement for the programs compiled using rustc.
+
+## Building
+
+**This requires a patched libgccjit in order to work.
+The patches in [this repository](https://github.com/antoyo/libgccjit-patches) need to be applied.
+(Those patches should work when applied on master, but if they don't, they are known to work when applied on 079c23cfe079f203d5df83fea8e92a60c7d7e878.)
+You can also use my [fork of gcc](https://github.com/antoyo/gcc) which already includes these patches.**
+
+**Put the path to your custom build of libgccjit in the file `gcc_path`.**
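+
+For example (the path is illustrative; it should be the directory containing your `libgccjit.so`):
+
+```bash
+$ echo /opt/gcc/gcc-build > gcc_path
+```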
+
+```bash
+$ git clone https://github.com/rust-lang/rustc_codegen_gcc.git
+$ cd rustc_codegen_gcc
+$ git clone https://github.com/llvm/llvm-project llvm --depth 1 --single-branch
+$ export RUST_COMPILER_RT_ROOT="$PWD/llvm/compiler-rt"
+$ ./prepare_build.sh # download and patch sysroot src
+$ ./build.sh --release
+```
+
+To run the tests:
+
+```bash
+$ ./prepare.sh # download and patch sysroot src and install hyperfine for benchmarking
+$ ./test.sh --release
+```
+
+## Usage
+
+In the following instructions, `$cg_gccjit_dir` is the directory into which you cloned this repo.
+
+### Cargo
+
+```bash
+$ CHANNEL="release" $cg_gccjit_dir/cargo.sh run
+```
+
+If you compiled cg_gccjit in debug mode (i.e., you didn't pass `--release` to `./test.sh`), use `CHANNEL="debug"` instead, or omit `CHANNEL="release"` entirely.
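+
+For example, the debug-channel equivalent of the command above:
+
+```bash
+$ CHANNEL="debug" $cg_gccjit_dir/cargo.sh run
+```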
+
+### Rustc
+
+> You should prefer using the Cargo method.
+
+```bash
+$ rustc +$(cat $cg_gccjit_dir/rust-toolchain) -Cpanic=abort -Zcodegen-backend=$cg_gccjit_dir/target/release/librustc_codegen_gcc.so --sysroot $cg_gccjit_dir/build_sysroot/sysroot my_crate.rs
+```
+
+## Env vars
+
+<dl>
+ <dt>CG_GCCJIT_INCR_CACHE_DISABLED</dt>
+    <dd>Don't cache object files in the incremental cache. Useful during development of cg_gccjit:
+    it makes it possible to use incremental mode for all analyses performed by rustc, without caching
+    object files whose content would have been changed by a change to cg_gccjit.</dd>
+    <dt>CG_GCCJIT_DISPLAY_CG_TIME</dt>
+    <dd>Display the time it took to perform codegen for a crate.</dd>
+</dl>
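+
+For example, a sketch that enables both while building through the Cargo wrapper (any non-empty value should work, assuming the checks are presence-based):
+
+```bash
+$ CG_GCCJIT_INCR_CACHE_DISABLED=1 CG_GCCJIT_DISPLAY_CG_TIME=1 CHANNEL="release" $cg_gccjit_dir/cargo.sh build
+```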
+
+## Debugging
+
+Sometimes, libgccjit will crash and output an error like this:
+
+```
+during RTL pass: expand
+libgccjit.so: error: in expmed_mode_index, at expmed.h:249
+0x7f0da2e61a35 expmed_mode_index
+ ../../../gcc/gcc/expmed.h:249
+0x7f0da2e61aa4 expmed_op_cost_ptr
+ ../../../gcc/gcc/expmed.h:271
+0x7f0da2e620dc sdiv_cost_ptr
+ ../../../gcc/gcc/expmed.h:540
+0x7f0da2e62129 sdiv_cost
+ ../../../gcc/gcc/expmed.h:558
+0x7f0da2e73c12 expand_divmod(int, tree_code, machine_mode, rtx_def*, rtx_def*, rtx_def*, int)
+ ../../../gcc/gcc/expmed.c:4335
+0x7f0da2ea1423 expand_expr_real_2(separate_ops*, rtx_def*, machine_mode, expand_modifier)
+ ../../../gcc/gcc/expr.c:9240
+0x7f0da2cd1a1e expand_gimple_stmt_1
+ ../../../gcc/gcc/cfgexpand.c:3796
+0x7f0da2cd1c30 expand_gimple_stmt
+ ../../../gcc/gcc/cfgexpand.c:3857
+0x7f0da2cd90a9 expand_gimple_basic_block
+ ../../../gcc/gcc/cfgexpand.c:5898
+0x7f0da2cdade8 execute
+ ../../../gcc/gcc/cfgexpand.c:6582
+```
+
+To see the code which causes this error, call the following function:
+
+```c
+gcc_jit_context_dump_to_file(ctxt, "/tmp/output.c", 1 /* update_locations */)
+```
+
+This will create a C-like file and add locations to the IR that point into this C file.
+Then rerun the program, and it will output the location on the second line:
+
+```
+libgccjit.so: /tmp/something.c:61322:0: error: in expmed_mode_index, at expmed.h:249
+```
+
+Or add a breakpoint to `add_error` in gdb and print the line number using:
+
+```
+p loc->m_line
+p loc->m_filename->m_buffer
+```
+
+To print a debug representation of a tree:
+
+```c
+debug_tree(expr);
+```
+
+To get the `rustc` command to run in `gdb`, add the `--verbose` flag to `cargo build`.
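+
+Putting it together, a hypothetical gdb session (substitute the actual rustc invocation printed by `cargo build --verbose`):
+
+```
+$ gdb --args rustc <flags printed by cargo build --verbose> my_crate.rs
+(gdb) break add_error
+(gdb) run
+(gdb) p loc->m_line
+(gdb) p loc->m_filename->m_buffer
+```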
+
+### How to use a custom-built rustc
+
+ * Build the stage2 compiler (`rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage2`).
+ * Clean and rebuild the codegen with `debug-current` in the file `rust-toolchain`, as sketched below.
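+
+A minimal sketch of those steps (the rustc build command and the stage2 path are illustrative; adjust them to your checkout and configuration):
+
+```bash
+# In your rustc checkout: build the compiler, then register stage2 as a toolchain.
+./x.py build
+rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage2
+
+# In the cg_gccjit checkout: make rust-toolchain select it, then rebuild.
+# (Assumes rust-toolchain contains a `channel = "..."` line, as read by cargo.sh.)
+sed -i 's/^channel = .*/channel = "debug-current"/' rust-toolchain
+./clean_all.sh && ./build.sh
+```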
+
+### How to build a cross-compiling libgccjit
+
+#### Building libgccjit
+
+ * Follow these instructions: https://preshing.com/20141119/how-to-build-a-gcc-cross-compiler/ with the following changes:
+ * Configure gcc with `../gcc/configure --enable-host-shared --disable-multilib --enable-languages=c,jit,c++ --disable-bootstrap --enable-checking=release --prefix=/opt/m68k-gcc/ --target=m68k-linux --without-headers`.
+ * Some shells, like fish, don't define the environment variable `$MACHTYPE`.
+ * Add `CFLAGS="-Wno-error=attributes -g -O2"` at the end of the configure command for building glibc (`CFLAGS="-Wno-error=attributes -Wno-error=array-parameter -Wno-error=stringop-overflow -Wno-error=array-bounds -g -O2"` for glibc 2.31, which is useful for Debian).
+
+#### Configuring rustc_codegen_gcc
+
+ * Set `TARGET_TRIPLE="m68k-unknown-linux-gnu"` in config.sh.
+ * Since rustc doesn't support this architecture yet, set it back to `TARGET_TRIPLE="mips-unknown-linux-gnu"` (or another target having the same attributes). Alternatively, create a [target specification file](https://book.avr-rust.com/005.1-the-target-specification-json-file.html) (note that the `arch` specified in this file must be supported by the rust compiler).
+ * Set `linker='-Clinker=m68k-linux-gcc'`.
+ * Set the path to the cross-compiling libgccjit in `gcc_path`.
+ * Disable the 128-bit integer types if the target doesn't support them by using `let i128_type = context.new_type::<i64>();` in `context.rs` (same for u128_type).
+ * Comment out the line `context.add_command_line_option("-masm=intel");` in src/base.rs.
+ * (might not be necessary) Disable the compilation of libstd.so (and possibly libcore.so?).
diff --git a/compiler/rustc_codegen_gcc/build.sh b/compiler/rustc_codegen_gcc/build.sh
new file mode 100755
index 000000000..ba0d0d049
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/build.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+
+#set -x
+set -e
+
+codegen_channel=debug
+sysroot_channel=debug
+
+flags=
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --release)
+ codegen_channel=release
+ shift
+ ;;
+ --release-sysroot)
+ sysroot_channel=release
+ shift
+ ;;
+ --no-default-features)
+ flags="$flags --no-default-features"
+ shift
+ ;;
+ --features)
+ shift
+ flags="$flags --features $1"
+ shift
+ ;;
+ *)
+ echo "Unknown option $1"
+ exit 1
+ ;;
+ esac
+done
+
+if [ -f ./gcc_path ]; then
+ export GCC_PATH=$(cat gcc_path)
+else
+    echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`; see Readme.md for details'
+ exit 1
+fi
+
+export LD_LIBRARY_PATH="$GCC_PATH"
+export LIBRARY_PATH="$GCC_PATH"
+
+if [[ "$codegen_channel" == "release" ]]; then
+ export CHANNEL='release'
+ CARGO_INCREMENTAL=1 cargo rustc --release $flags
+else
+ echo $LD_LIBRARY_PATH
+ export CHANNEL='debug'
+ cargo rustc $flags
+fi
+
+source config.sh
+
+rm -r target/out || true
+mkdir -p target/out/gccjit
+
+echo "[BUILD] sysroot"
+if [[ "$sysroot_channel" == "release" ]]; then
+ time ./build_sysroot/build_sysroot.sh --release
+else
+ time ./build_sysroot/build_sysroot.sh
+fi
+
diff --git a/compiler/rustc_codegen_gcc/build_sysroot/Cargo.toml b/compiler/rustc_codegen_gcc/build_sysroot/Cargo.toml
new file mode 100644
index 000000000..cfadf47cc
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/build_sysroot/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+authors = ["bjorn3 <bjorn3@users.noreply.github.com>"]
+name = "sysroot"
+version = "0.0.0"
+
+[dependencies]
+core = { path = "./sysroot_src/library/core" }
+compiler_builtins = "0.1"
+alloc = { path = "./sysroot_src/library/alloc" }
+std = { path = "./sysroot_src/library/std", features = ["panic_unwind", "backtrace"] }
+test = { path = "./sysroot_src/library/test" }
+
+[patch.crates-io]
+rustc-std-workspace-core = { path = "./sysroot_src/library/rustc-std-workspace-core" }
+rustc-std-workspace-alloc = { path = "./sysroot_src/library/rustc-std-workspace-alloc" }
+rustc-std-workspace-std = { path = "./sysroot_src/library/rustc-std-workspace-std" }
+
+[profile.release]
+debug = true
diff --git a/compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh b/compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh
new file mode 100755
index 000000000..f293192a0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+# Requires the CHANNEL env var to be set to `debug` or `release`.
+
+set -e
+cd $(dirname "$0")
+
+pushd ../ >/dev/null
+source ./config.sh
+popd >/dev/null
+
+# Cleanup for previous run
+# v Clean target dir except for build scripts and incremental cache
+rm -r target/*/{debug,release}/{build,deps,examples,libsysroot*,native} 2>/dev/null || true
+rm Cargo.lock test_target/Cargo.lock 2>/dev/null || true
+rm -r sysroot/ 2>/dev/null || true
+
+# Build libs
+export RUSTFLAGS="$RUSTFLAGS -Z force-unstable-if-unmarked -Cpanic=abort"
+if [[ "$1" == "--release" ]]; then
+ sysroot_channel='release'
+ RUSTFLAGS="$RUSTFLAGS -Zmir-opt-level=3" cargo build --target $TARGET_TRIPLE --release
+else
+ sysroot_channel='debug'
+ cargo build --target $TARGET_TRIPLE --features compiler_builtins/c
+fi
+
+# Copy files to sysroot
+mkdir -p sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
+cp -r target/$TARGET_TRIPLE/$sysroot_channel/deps/* sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
diff --git a/compiler/rustc_codegen_gcc/build_sysroot/prepare_sysroot_src.sh b/compiler/rustc_codegen_gcc/build_sysroot/prepare_sysroot_src.sh
new file mode 100755
index 000000000..56768bbf1
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/build_sysroot/prepare_sysroot_src.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+set -e
+cd $(dirname "$0")
+
+SRC_DIR=$(dirname $(rustup which rustc))"/../lib/rustlib/src/rust/"
+DST_DIR="sysroot_src"
+
+if [ ! -e $SRC_DIR ]; then
+ echo "Please install rust-src component"
+ exit 1
+fi
+
+rm -rf $DST_DIR
+mkdir -p $DST_DIR/library
+cp -r $SRC_DIR/library $DST_DIR/
+
+pushd $DST_DIR
+echo "[GIT] init"
+git init
+echo "[GIT] add"
+git add .
+echo "[GIT] commit"
+
+# This is needed on systems where nothing is configured.
+# git really needs something here, or it will fail.
+# Even using --author is not enough.
+git config user.email || git config user.email "none@example.com"
+git config user.name || git config user.name "None"
+
+git commit -m "Initial commit" -q
+for file in $(ls ../../patches/ | grep -v patcha); do
+echo "[GIT] apply" $file
+git apply ../../patches/$file
+git add -A
+git commit --no-gpg-sign -m "Patch $file"
+done
+popd
+
+echo "Successfully prepared libcore for building"
diff --git a/compiler/rustc_codegen_gcc/build_sysroot/src/lib.rs b/compiler/rustc_codegen_gcc/build_sysroot/src/lib.rs
new file mode 100644
index 000000000..0c9ac1ac8
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/build_sysroot/src/lib.rs
@@ -0,0 +1 @@
+#![no_std]
diff --git a/compiler/rustc_codegen_gcc/cargo.sh b/compiler/rustc_codegen_gcc/cargo.sh
new file mode 100755
index 000000000..16e49b204
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/cargo.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+if [ -z $CHANNEL ]; then
+export CHANNEL='debug'
+fi
+
+pushd $(dirname "$0") >/dev/null
+source config.sh
+
+# read nightly compiler from rust-toolchain file
+TOOLCHAIN=$(cat rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/')
+
+popd >/dev/null
+
+if [[ $(rustc -V) != $(rustc +${TOOLCHAIN} -V) ]]; then
+    echo "rustc_codegen_gcc is built for $(rustc +${TOOLCHAIN} -V) but the default rustc version is $(rustc -V)."
+ echo "Using $(rustc +${TOOLCHAIN} -V)."
+fi
+
+cmd=$1
+shift
+
+RUSTDOCFLAGS="$RUSTFLAGS" cargo +${TOOLCHAIN} $cmd $@
diff --git a/compiler/rustc_codegen_gcc/clean_all.sh b/compiler/rustc_codegen_gcc/clean_all.sh
new file mode 100755
index 000000000..782bd3e50
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/clean_all.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+set -e
+set -v
+
+rm -rf target/ build_sysroot/{sysroot/,sysroot_src/,target/,Cargo.lock} perf.data{,.old}
+rm -rf regex/ simple-raytracer/
diff --git a/compiler/rustc_codegen_gcc/config.sh b/compiler/rustc_codegen_gcc/config.sh
new file mode 100644
index 000000000..b25e215fb
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/config.sh
@@ -0,0 +1,52 @@
+set -e
+
+export CARGO_INCREMENTAL=0
+
+if [ -f ./gcc_path ]; then
+ export GCC_PATH=$(cat gcc_path)
+else
+    echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`; see Readme.md for details'
+ exit 1
+fi
+
+unamestr=`uname`
+if [[ "$unamestr" == 'Linux' ]]; then
+ dylib_ext='so'
+elif [[ "$unamestr" == 'Darwin' ]]; then
+ dylib_ext='dylib'
+else
+ echo "Unsupported os"
+ exit 1
+fi
+
+HOST_TRIPLE=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
+TARGET_TRIPLE=$HOST_TRIPLE
+#TARGET_TRIPLE="m68k-unknown-linux-gnu"
+
+linker=''
+RUN_WRAPPER=''
+if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
+ if [[ "$TARGET_TRIPLE" == "m68k-unknown-linux-gnu" ]]; then
+ TARGET_TRIPLE="mips-unknown-linux-gnu"
+ linker='-Clinker=m68k-linux-gcc'
+ elif [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then
+ # We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
+ linker='-Clinker=aarch64-linux-gnu-gcc'
+ RUN_WRAPPER='qemu-aarch64 -L /usr/aarch64-linux-gnu'
+ else
+ echo "Unknown non-native platform"
+ fi
+fi
+
+export RUSTFLAGS="$CG_RUSTFLAGS $linker -Cpanic=abort -Csymbol-mangling-version=v0 -Cdebuginfo=2 -Clto=off -Zpanic-abort-tests -Zcodegen-backend=$(pwd)/target/${CHANNEL:-debug}/librustc_codegen_gcc.$dylib_ext --sysroot $(pwd)/build_sysroot/sysroot"
+
+# FIXME(antoyo): remove once the atomic shim is gone
+if [[ `uname` == 'Darwin' ]]; then
+ export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
+fi
+
+RUSTC="rustc $RUSTFLAGS -L crate=target/out --out-dir target/out"
+export RUSTC_LOG=warn # display metadata load errors
+
+export LD_LIBRARY_PATH="$(pwd)/target/out:$(pwd)/build_sysroot/sysroot/lib/rustlib/$TARGET_TRIPLE/lib:$GCC_PATH"
+export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
diff --git a/compiler/rustc_codegen_gcc/crate_patches/0002-rand-Disable-failing-test.patch b/compiler/rustc_codegen_gcc/crate_patches/0002-rand-Disable-failing-test.patch
new file mode 100644
index 000000000..449ca5f6e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/crate_patches/0002-rand-Disable-failing-test.patch
@@ -0,0 +1,32 @@
+From a8fb97120d71252538b6b026695df40d02696bdb Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sat, 15 Aug 2020 20:04:38 +0200
+Subject: [PATCH] [rand] Disable failing test
+
+---
+ src/distributions/uniform.rs | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/src/distributions/uniform.rs b/src/distributions/uniform.rs
+index 480b859..c80bb6f 100644
+--- a/src/distributions/uniform.rs
++++ b/src/distributions/uniform.rs
+@@ -1085,7 +1085,7 @@ mod tests {
+ _ => panic!("`UniformDurationMode` was not serialized/deserialized correctly")
+ }
+ }
+-
++
+ #[test]
+ #[cfg(feature = "serde1")]
+ fn test_uniform_serialization() {
+@@ -1314,6 +1314,7 @@ mod tests {
+ not(target_arch = "wasm32"),
+ not(target_arch = "asmjs")
+ ))]
++ #[ignore] // FIXME
+ fn test_float_assertions() {
+ use super::SampleUniform;
+ use std::panic::catch_unwind;
+--
+2.20.1
diff --git a/compiler/rustc_codegen_gcc/example/alloc_example.rs b/compiler/rustc_codegen_gcc/example/alloc_example.rs
new file mode 100644
index 000000000..74ea7ec4e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/alloc_example.rs
@@ -0,0 +1,41 @@
+#![feature(start, box_syntax, core_intrinsics, alloc_error_handler)]
+#![no_std]
+
+extern crate alloc;
+extern crate alloc_system;
+
+use alloc::boxed::Box;
+
+use alloc_system::System;
+
+#[global_allocator]
+static ALLOC: System = System;
+
+#[link(name = "c")]
+extern "C" {
+ fn puts(s: *const u8) -> i32;
+}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+ unsafe {
+ core::intrinsics::abort();
+ }
+}
+
+#[alloc_error_handler]
+fn alloc_error_handler(_: alloc::alloc::Layout) -> ! {
+ unsafe {
+ core::intrinsics::abort();
+ }
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ let world: Box<&str> = box "Hello World!\0";
+ unsafe {
+ puts(*world as *const str as *const u8);
+ }
+
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/example/alloc_system.rs b/compiler/rustc_codegen_gcc/example/alloc_system.rs
new file mode 100644
index 000000000..5f66ca67f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/alloc_system.rs
@@ -0,0 +1,212 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![no_std]
+#![feature(allocator_api, rustc_private)]
+#![cfg_attr(any(unix, target_os = "redox"), feature(libc))]
+
+// The minimum alignment guaranteed by the architecture. This value is used to
+// add fast paths for low alignment values.
+#[cfg(all(any(target_arch = "x86",
+ target_arch = "arm",
+ target_arch = "mips",
+ target_arch = "powerpc",
+ target_arch = "powerpc64")))]
+const MIN_ALIGN: usize = 8;
+#[cfg(all(any(target_arch = "x86_64",
+ target_arch = "aarch64",
+ target_arch = "mips64",
+ target_arch = "s390x",
+ target_arch = "sparc64")))]
+const MIN_ALIGN: usize = 16;
+
+pub struct System;
+#[cfg(any(windows, unix, target_os = "redox"))]
+mod realloc_fallback {
+ use core::alloc::{GlobalAlloc, Layout};
+ use core::cmp;
+ use core::ptr;
+ impl super::System {
+ pub(crate) unsafe fn realloc_fallback(&self, ptr: *mut u8, old_layout: Layout,
+ new_size: usize) -> *mut u8 {
+ // Docs for GlobalAlloc::realloc require this to be valid:
+ let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
+ let new_ptr = GlobalAlloc::alloc(self, new_layout);
+ if !new_ptr.is_null() {
+ let size = cmp::min(old_layout.size(), new_size);
+ ptr::copy_nonoverlapping(ptr, new_ptr, size);
+ GlobalAlloc::dealloc(self, ptr, old_layout);
+ }
+ new_ptr
+ }
+ }
+}
+#[cfg(any(unix, target_os = "redox"))]
+mod platform {
+ extern crate libc;
+ use core::ptr;
+ use MIN_ALIGN;
+ use System;
+ use core::alloc::{GlobalAlloc, Layout};
+ unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+ libc::malloc(layout.size()) as *mut u8
+ } else {
+ #[cfg(target_os = "macos")]
+ {
+ if layout.align() > (1 << 31) {
+ return ptr::null_mut()
+ }
+ }
+ aligned_malloc(&layout)
+ }
+ }
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+ libc::calloc(layout.size(), 1) as *mut u8
+ } else {
+ let ptr = self.alloc(layout.clone());
+ if !ptr.is_null() {
+ ptr::write_bytes(ptr, 0, layout.size());
+ }
+ ptr
+ }
+ }
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+ libc::free(ptr as *mut libc::c_void)
+ }
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
+ libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
+ } else {
+ self.realloc_fallback(ptr, layout, new_size)
+ }
+ }
+ }
+ #[cfg(any(target_os = "android",
+ target_os = "hermit",
+ target_os = "redox",
+ target_os = "solaris"))]
+ #[inline]
+ unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+ // On android we currently target API level 9 which unfortunately
+ // doesn't have the `posix_memalign` API used below. Instead we use
+ // `memalign`, but this unfortunately has the property on some systems
+ // where the memory returned cannot be deallocated by `free`!
+ //
+ // Upon closer inspection, however, this appears to work just fine with
+ // Android, so for this platform we should be fine to call `memalign`
+ // (which is present in API level 9). Some helpful references could
+ // possibly be chromium using memalign [1], attempts at documenting that
+ // memalign + free is ok [2] [3], or the current source of chromium
+ // which still uses memalign on android [4].
+ //
+ // [1]: https://codereview.chromium.org/10796020/
+ // [2]: https://code.google.com/p/android/issues/detail?id=35391
+ // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
+ // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
+ // /memory/aligned_memory.cc
+ libc::memalign(layout.align(), layout.size()) as *mut u8
+ }
+ #[cfg(not(any(target_os = "android",
+ target_os = "hermit",
+ target_os = "redox",
+ target_os = "solaris")))]
+ #[inline]
+ unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+ let mut out = ptr::null_mut();
+ let ret = libc::posix_memalign(&mut out, layout.align(), layout.size());
+ if ret != 0 {
+ ptr::null_mut()
+ } else {
+ out as *mut u8
+ }
+ }
+}
+#[cfg(windows)]
+#[allow(nonstandard_style)]
+mod platform {
+ use MIN_ALIGN;
+ use System;
+ use core::alloc::{GlobalAlloc, Layout};
+ type LPVOID = *mut u8;
+ type HANDLE = LPVOID;
+ type SIZE_T = usize;
+ type DWORD = u32;
+ type BOOL = i32;
+ extern "system" {
+ fn GetProcessHeap() -> HANDLE;
+ fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
+ fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
+ fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
+ fn GetLastError() -> DWORD;
+ }
+ #[repr(C)]
+ struct Header(*mut u8);
+ const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
+ unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
+ &mut *(ptr as *mut Header).offset(-1)
+ }
+ unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
+ let aligned = ptr.add(align - (ptr as usize & (align - 1)));
+ *get_header(aligned) = Header(ptr);
+ aligned
+ }
+ #[inline]
+ unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 {
+ let ptr = if layout.align() <= MIN_ALIGN {
+ HeapAlloc(GetProcessHeap(), flags, layout.size())
+ } else {
+ let size = layout.size() + layout.align();
+ let ptr = HeapAlloc(GetProcessHeap(), flags, size);
+ if ptr.is_null() {
+ ptr
+ } else {
+ align_ptr(ptr, layout.align())
+ }
+ };
+ ptr as *mut u8
+ }
+ unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ allocate_with_flags(layout, 0)
+ }
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ allocate_with_flags(layout, HEAP_ZERO_MEMORY)
+ }
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ if layout.align() <= MIN_ALIGN {
+ let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
+ debug_assert!(err != 0, "Failed to free heap memory: {}",
+ GetLastError());
+ } else {
+ let header = get_header(ptr);
+ let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
+ debug_assert!(err != 0, "Failed to free heap memory: {}",
+ GetLastError());
+ }
+ }
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN {
+ HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8
+ } else {
+ self.realloc_fallback(ptr, layout, new_size)
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs b/compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs
new file mode 100644
index 000000000..3af0ba09e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs
@@ -0,0 +1,69 @@
+// Adapted from rustc run-pass test suite
+
+#![feature(arbitrary_self_types, unsize, coerce_unsized, dispatch_from_dyn)]
+#![feature(rustc_attrs)]
+
+use std::{
+ ops::{Deref, CoerceUnsized, DispatchFromDyn},
+ marker::Unsize,
+};
+
+struct Ptr<T: ?Sized>(Box<T>);
+
+impl<T: ?Sized> Deref for Ptr<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &*self.0
+ }
+}
+
+impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {}
+impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {}
+
+struct Wrapper<T: ?Sized>(T);
+
+impl<T: ?Sized> Deref for Wrapper<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
+impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
+
+
+trait Trait {
+ // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
+ // without unsized_locals), but wrappers around `Self` currently are not.
+ // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
+ // fn wrapper(self: Wrapper<Self>) -> i32;
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;
+}
+
+impl Trait for i32 {
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 {
+ ***self
+ }
+}
+
+fn main() {
+ let pw = Ptr(Box::new(Wrapper(5))) as Ptr<Wrapper<dyn Trait>>;
+ assert_eq!(pw.ptr_wrapper(), 5);
+
+ let wp = Wrapper(Ptr(Box::new(6))) as Wrapper<Ptr<dyn Trait>>;
+ assert_eq!(wp.wrapper_ptr(), 6);
+
+ let wpw = Wrapper(Ptr(Box::new(Wrapper(7)))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
+ assert_eq!(wpw.wrapper_ptr_wrapper(), 7);
+}
diff --git a/compiler/rustc_codegen_gcc/example/dst-field-align.rs b/compiler/rustc_codegen_gcc/example/dst-field-align.rs
new file mode 100644
index 000000000..6c338e999
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/dst-field-align.rs
@@ -0,0 +1,67 @@
+// run-pass
+#![allow(dead_code)]
+struct Foo<T: ?Sized> {
+ a: u16,
+ b: T
+}
+
+trait Bar {
+ fn get(&self) -> usize;
+}
+
+impl Bar for usize {
+ fn get(&self) -> usize { *self }
+}
+
+struct Baz<T: ?Sized> {
+ a: T
+}
+
+struct HasDrop<T: ?Sized> {
+ ptr: Box<usize>,
+ data: T
+}
+
+fn main() {
+ // Test that zero-offset works properly
+ let b : Baz<usize> = Baz { a: 7 };
+ assert_eq!(b.a.get(), 7);
+ let b : &Baz<dyn Bar> = &b;
+ assert_eq!(b.a.get(), 7);
+
+ // Test that the field is aligned properly
+ let f : Foo<usize> = Foo { a: 0, b: 11 };
+ assert_eq!(f.b.get(), 11);
+ let ptr1 : *const u8 = &f.b as *const _ as *const u8;
+
+ let f : &Foo<dyn Bar> = &f;
+ let ptr2 : *const u8 = &f.b as *const _ as *const u8;
+ assert_eq!(f.b.get(), 11);
+
+ // The pointers should be the same
+ assert_eq!(ptr1, ptr2);
+
+ // Test that nested DSTs work properly
+ let f : Foo<Foo<usize>> = Foo { a: 0, b: Foo { a: 1, b: 17 }};
+ assert_eq!(f.b.b.get(), 17);
+ let f : &Foo<Foo<dyn Bar>> = &f;
+ assert_eq!(f.b.b.get(), 17);
+
+ // Test that getting the pointer via destructuring works
+
+ let f : Foo<usize> = Foo { a: 0, b: 11 };
+ let f : &Foo<dyn Bar> = &f;
+ let &Foo { a: _, b: ref bar } = f;
+ assert_eq!(bar.get(), 11);
+
+ // Make sure that drop flags don't screw things up
+
+ let d : HasDrop<Baz<[i32; 4]>> = HasDrop {
+ ptr: Box::new(0),
+ data: Baz { a: [1,2,3,4] }
+ };
+ assert_eq!([1,2,3,4], d.data.a);
+
+ let d : &HasDrop<Baz<[i32]>> = &d;
+ assert_eq!(&[1,2,3,4], &d.data.a);
+}
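
A companion sketch of the invariant this test leans on (plain std Rust, not part of the test): the size and alignment recovered dynamically through the vtable agree with the static layout, which is why `&f.b` is the same address through both the sized and unsized views.

    use std::mem;

    trait Bar { fn get(&self) -> usize; }
    impl Bar for usize { fn get(&self) -> usize { *self } }

    #[allow(dead_code)]
    struct Foo<T: ?Sized> { a: u16, b: T }

    fn main() {
        let f: Foo<usize> = Foo { a: 0, b: 11 };
        let g: &Foo<dyn Bar> = &f; // unsize the last field
        // Vtable-supplied size/align match the static layout exactly.
        assert_eq!(mem::size_of_val(g), mem::size_of::<Foo<usize>>());
        assert_eq!(mem::align_of_val(g), mem::align_of::<Foo<usize>>());
        assert_eq!(g.b.get(), 11);
    }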
diff --git a/compiler/rustc_codegen_gcc/example/example.rs b/compiler/rustc_codegen_gcc/example/example.rs
new file mode 100644
index 000000000..5878e8548
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/example.rs
@@ -0,0 +1,208 @@
+#![feature(no_core, unboxed_closures)]
+#![no_core]
+#![allow(dead_code)]
+
+extern crate mini_core;
+
+use mini_core::*;
+
+fn abc(a: u8) -> u8 {
+ a * 2
+}
+
+fn bcd(b: bool, a: u8) -> u8 {
+ if b {
+ a * 2
+ } else {
+ a * 3
+ }
+}
+
+fn call() {
+ abc(42);
+}
+
+fn indirect_call() {
+ let f: fn() = call;
+ f();
+}
+
+enum BoolOption {
+ Some(bool),
+ None,
+}
+
+fn option_unwrap_or(o: BoolOption, d: bool) -> bool {
+ match o {
+ BoolOption::Some(b) => b,
+ BoolOption::None => d,
+ }
+}
+
+fn ret_42() -> u8 {
+ 42
+}
+
+fn return_str() -> &'static str {
+ "hello world"
+}
+
+fn promoted_val() -> &'static u8 {
+ &(1 * 2)
+}
+
+fn cast_ref_to_raw_ptr(abc: &u8) -> *const u8 {
+ abc as *const u8
+}
+
+fn cmp_raw_ptr(a: *const u8, b: *const u8) -> bool {
+ a == b
+}
+
+fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) {
+ (
+ a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8,
+ b as u32,
+ )
+}
+
+fn char_cast(c: char) -> u8 {
+ c as u8
+}
+
+pub struct DebugTuple(());
+
+fn debug_tuple() -> DebugTuple {
+ DebugTuple(())
+}
+
+fn size_of<T>() -> usize {
+ intrinsics::size_of::<T>()
+}
+
+fn use_size_of() -> usize {
+ size_of::<u64>()
+}
+
+unsafe fn use_copy_intrinsic(src: *const u8, dst: *mut u8) {
+ intrinsics::copy::<u8>(src, dst, 1);
+}
+
+unsafe fn use_copy_intrinsic_ref(src: *const u8, dst: *mut u8) {
+ let copy2 = &intrinsics::copy::<u8>;
+ copy2(src, dst, 1);
+}
+
+const ABC: u8 = 6 * 7;
+
+fn use_const() -> u8 {
+ ABC
+}
+
+pub fn call_closure_3arg() {
+ (|_, _, _| {})(0u8, 42u16, 0u8)
+}
+
+pub fn call_closure_2arg() {
+ (|_, _| {})(0u8, 42u16)
+}
+
+struct IsNotEmpty;
+
+impl<'a, 'b> FnOnce<(&'a &'b [u16],)> for IsNotEmpty {
+ type Output = (u8, u8);
+
+ #[inline]
+ extern "rust-call" fn call_once(mut self, arg: (&'a &'b [u16],)) -> (u8, u8) {
+ self.call_mut(arg)
+ }
+}
+
+impl<'a, 'b> FnMut<(&'a &'b [u16],)> for IsNotEmpty {
+ #[inline]
+ extern "rust-call" fn call_mut(&mut self, _arg: (&'a &'b [u16],)) -> (u8, u8) {
+ (0, 42)
+ }
+}
+
+pub fn call_is_not_empty() {
+ IsNotEmpty.call_once((&(&[0u16] as &[_]),));
+}
+
+fn eq_char(a: char, b: char) -> bool {
+ a == b
+}
+
+unsafe fn transmute(c: char) -> u32 {
+ intrinsics::transmute(c)
+}
+
+unsafe fn deref_str_ptr(s: *const str) -> &'static str {
+ &*s
+}
+
+fn use_array(arr: [u8; 3]) -> u8 {
+ arr[1]
+}
+
+fn repeat_array() -> [u8; 3] {
+ [0; 3]
+}
+
+fn array_as_slice(arr: &[u8; 3]) -> &[u8] {
+ arr
+}
+
+unsafe fn use_ctlz_nonzero(a: u16) -> u16 {
+ intrinsics::ctlz_nonzero(a)
+}
+
+fn ptr_as_usize(ptr: *const u8) -> usize {
+ ptr as usize
+}
+
+fn float_cast(a: f32, b: f64) -> (f64, f32) {
+ (a as f64, b as f32)
+}
+
+fn int_to_float(a: u8, b: i32) -> (f64, f32) {
+ (a as f64, b as f32)
+}
+
+fn make_array() -> [u8; 3] {
+ [42, 0, 5]
+}
+
+fn some_promoted_tuple() -> &'static (&'static str, &'static str) {
+ &("abc", "some")
+}
+
+fn index_slice(s: &[u8]) -> u8 {
+ s[2]
+}
+
+pub struct StrWrapper {
+ s: str,
+}
+
+fn str_wrapper_get(w: &StrWrapper) -> &str {
+ &w.s
+}
+
+fn i16_as_i8(a: i16) -> i8 {
+ a as i8
+}
+
+struct Unsized(u8, str);
+
+fn get_sized_field_ref_from_unsized_type(u: &Unsized) -> &u8 {
+ &u.0
+}
+
+fn get_unsized_field_ref_from_unsized_type(u: &Unsized) -> &str {
+ &u.1
+}
+
+pub fn reuse_byref_argument_storage(a: (u8, u16, u32)) -> u8 {
+ a.0
+}
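
The `IsNotEmpty` block above demonstrates implementing the closure traits by hand, which is also how mini_core's `FnOnce`/`FnMut` get exercised. A minimal nightly-only sketch of the same pattern against the real std traits (the `Adder` type is illustrative; the `unboxed_closures` and `fn_traits` feature gates are required):

    #![feature(unboxed_closures, fn_traits)]

    struct Adder(i32);

    impl FnOnce<(i32,)> for Adder {
        type Output = i32;

        // `extern "rust-call"` is the calling convention the compiler
        // uses behind the `adder(x)` call sugar.
        extern "rust-call" fn call_once(self, args: (i32,)) -> i32 {
            self.0 + args.0
        }
    }

    fn main() {
        let add = Adder(40);
        assert_eq!(add(2), 42); // desugars to FnOnce::call_once(add, (2,))
    }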
diff --git a/compiler/rustc_codegen_gcc/example/mini_core.rs b/compiler/rustc_codegen_gcc/example/mini_core.rs
new file mode 100644
index 000000000..ddcbb0d9f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/mini_core.rs
@@ -0,0 +1,599 @@
+#![feature(
+ no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types,
+ untagged_unions, decl_macro, rustc_attrs, transparent_unions, auto_traits,
+ thread_local
+)]
+#![no_core]
+#![allow(dead_code)]
+
+#[no_mangle]
+unsafe extern "C" fn _Unwind_Resume() {
+ intrinsics::unreachable();
+}
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "destruct"]
+pub trait Destruct {}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "dispatch_from_dyn"]
+pub trait DispatchFromDyn<T> {}
+
+// &T -> &U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
+// &mut T -> &mut U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
+// *const T -> *const U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
+// *mut T -> *mut U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
+
+#[lang = "receiver"]
+pub trait Receiver {}
+
+impl<T: ?Sized> Receiver for &T {}
+impl<T: ?Sized> Receiver for &mut T {}
+impl<T: ?Sized> Receiver for Box<T> {}
+
+#[lang = "copy"]
+pub unsafe trait Copy {}
+
+unsafe impl Copy for bool {}
+unsafe impl Copy for u8 {}
+unsafe impl Copy for u16 {}
+unsafe impl Copy for u32 {}
+unsafe impl Copy for u64 {}
+unsafe impl Copy for usize {}
+unsafe impl Copy for i8 {}
+unsafe impl Copy for i16 {}
+unsafe impl Copy for i32 {}
+unsafe impl Copy for isize {}
+unsafe impl Copy for f32 {}
+unsafe impl Copy for f64 {}
+unsafe impl Copy for char {}
+unsafe impl<'a, T: ?Sized> Copy for &'a T {}
+unsafe impl<T: ?Sized> Copy for *const T {}
+unsafe impl<T: ?Sized> Copy for *mut T {}
+
+#[lang = "sync"]
+pub unsafe trait Sync {}
+
+unsafe impl Sync for bool {}
+unsafe impl Sync for u8 {}
+unsafe impl Sync for u16 {}
+unsafe impl Sync for u32 {}
+unsafe impl Sync for u64 {}
+unsafe impl Sync for usize {}
+unsafe impl Sync for i8 {}
+unsafe impl Sync for i16 {}
+unsafe impl Sync for i32 {}
+unsafe impl Sync for isize {}
+unsafe impl Sync for char {}
+unsafe impl<'a, T: ?Sized> Sync for &'a T {}
+unsafe impl Sync for [u8; 16] {}
+
+#[lang = "freeze"]
+unsafe auto trait Freeze {}
+
+unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
+unsafe impl<T: ?Sized> Freeze for *const T {}
+unsafe impl<T: ?Sized> Freeze for *mut T {}
+unsafe impl<T: ?Sized> Freeze for &T {}
+unsafe impl<T: ?Sized> Freeze for &mut T {}
+
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {}
+
+#[lang = "structural_teq"]
+pub trait StructuralEq {}
+
+#[lang = "not"]
+pub trait Not {
+ type Output;
+
+ fn not(self) -> Self::Output;
+}
+
+impl Not for bool {
+ type Output = bool;
+
+ fn not(self) -> bool {
+ !self
+ }
+}
+
+#[lang = "mul"]
+pub trait Mul<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn mul(self, rhs: RHS) -> Self::Output;
+}
+
+impl Mul for u8 {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+impl Mul for usize {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+#[lang = "rem"]
+pub trait Rem<RHS = Self> {
+ type Output;
+
+ fn rem(self, rhs: RHS) -> Self::Output;
+}
+
+impl Rem for usize {
+ type Output = Self;
+
+ fn rem(self, rhs: Self) -> Self {
+ self % rhs
+ }
+}
+
+#[lang = "bitor"]
+pub trait BitOr<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn bitor(self, rhs: RHS) -> Self::Output;
+}
+
+impl BitOr for bool {
+ type Output = bool;
+
+ fn bitor(self, rhs: bool) -> bool {
+ self | rhs
+ }
+}
+
+impl<'a> BitOr<bool> for &'a bool {
+ type Output = bool;
+
+ fn bitor(self, rhs: bool) -> bool {
+ *self | rhs
+ }
+}
+
+#[lang = "eq"]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+ fn eq(&self, other: &Rhs) -> bool;
+ fn ne(&self, other: &Rhs) -> bool;
+}
+
+impl PartialEq for u8 {
+ fn eq(&self, other: &u8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u16 {
+ fn eq(&self, other: &u16) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u16) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u32 {
+ fn eq(&self, other: &u32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+
+impl PartialEq for u64 {
+ fn eq(&self, other: &u64) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u64) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for usize {
+ fn eq(&self, other: &usize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &usize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i8 {
+ fn eq(&self, other: &i8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i32 {
+ fn eq(&self, other: &i32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for isize {
+ fn eq(&self, other: &isize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &isize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for char {
+ fn eq(&self, other: &char) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &char) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl<T: ?Sized> PartialEq for *const T {
+ fn eq(&self, other: &*const T) -> bool {
+ *self == *other
+ }
+ fn ne(&self, other: &*const T) -> bool {
+ *self != *other
+ }
+}
+
+#[lang = "neg"]
+pub trait Neg {
+ type Output;
+
+ fn neg(self) -> Self::Output;
+}
+
+impl Neg for i8 {
+ type Output = i8;
+
+ fn neg(self) -> i8 {
+ -self
+ }
+}
+
+impl Neg for i16 {
+ type Output = i16;
+
+ fn neg(self) -> i16 {
+ -self
+ }
+}
+
+impl Neg for isize {
+ type Output = isize;
+
+ fn neg(self) -> isize {
+ -self
+ }
+}
+
+impl Neg for f32 {
+ type Output = f32;
+
+ fn neg(self) -> f32 {
+ -self
+ }
+}
+
+pub enum Option<T> {
+ Some(T),
+ None,
+}
+
+pub use Option::*;
+
+#[lang = "phantom_data"]
+pub struct PhantomData<T: ?Sized>;
+
+#[lang = "fn_once"]
+#[rustc_paren_sugar]
+pub trait FnOnce<Args> {
+ #[lang = "fn_once_output"]
+ type Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+#[lang = "fn_mut"]
+#[rustc_paren_sugar]
+pub trait FnMut<Args>: FnOnce<Args> {
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+#[lang = "panic"]
+#[track_caller]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\n\0" as *const str as *const u8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "eh_personality"]
+fn eh_personality() -> ! {
+ loop {}
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+#[lang = "deref"]
+pub trait Deref {
+ type Target: ?Sized;
+
+ fn deref(&self) -> &Self::Target;
+}
+
+pub trait Allocator {
+}
+
+pub struct Global;
+
+impl Allocator for Global {}
+
+#[lang = "owned_box"]
+pub struct Box<
+ T: ?Sized,
+ A: Allocator = Global,
+>(*mut T, A);
+
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+
+impl<T: ?Sized, A: Allocator> Drop for Box<T, A> {
+ fn drop(&mut self) {
+ // Drop is currently performed by the compiler.
+ }
+}
+
+impl<T> Deref for Box<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &**self
+ }
+}
+
+#[lang = "exchange_malloc"]
+unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
+ libc::malloc(size)
+}
+
+#[lang = "box_free"]
+unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: *mut T, alloc: A) {
+ libc::free(ptr as *mut u8);
+}
+
+#[lang = "drop"]
+pub trait Drop {
+ fn drop(&mut self);
+}
+
+#[lang = "manually_drop"]
+#[repr(transparent)]
+pub struct ManuallyDrop<T: ?Sized> {
+ pub value: T,
+}
+
+#[lang = "maybe_uninit"]
+#[repr(transparent)]
+pub union MaybeUninit<T> {
+ pub uninit: (),
+ pub value: ManuallyDrop<T>,
+}
+
+pub mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ pub fn size_of<T>() -> usize;
+ pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize;
+ pub fn min_align_of<T>() -> usize;
+ pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize;
+ pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
+ pub fn transmute<T, U>(e: T) -> U;
+ pub fn ctlz_nonzero<T>(x: T) -> T;
+ pub fn needs_drop<T: ?::Sized>() -> bool;
+ pub fn bitreverse<T>(x: T) -> T;
+ pub fn bswap<T>(x: T) -> T;
+ pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
+ pub fn unreachable() -> !;
+ }
+}
+
+pub mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn puts(s: *const u8) -> i32;
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn malloc(size: usize) -> *mut u8;
+ pub fn free(ptr: *mut u8);
+ pub fn memcpy(dst: *mut u8, src: *const u8, size: usize);
+ pub fn memmove(dst: *mut u8, src: *const u8, size: usize);
+ pub fn strncpy(dst: *mut u8, src: *const u8, size: usize);
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+extern {
+ type VaListImpl;
+}
+
+#[lang = "va_list"]
+#[repr(transparent)]
+pub struct VaList<'a>(&'a mut VaListImpl);
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro stringify($($t:tt)*) { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro file() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro line() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro cfg() { /* compiler built-in */ }
+
+pub static A_STATIC: u8 = 42;
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[no_mangle]
+pub fn get_tls() -> u8 {
+ #[thread_local]
+ static A: u8 = 42;
+
+ A
+}
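
For orientation: the `exchange_malloc` and `box_free` lang items above are the hooks the compiler calls when it lowers `box` expressions and `Box` drops; mini_core routes them to `libc::malloc`/`libc::free`. A conceptual model of that contract in stable std Rust (the function names echo the lang items, but the signatures here are simplified and this is not the compiler's actual lowering):

    use std::alloc::{alloc, dealloc, Layout};

    unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
        alloc(Layout::from_size_align(size, align).unwrap())
    }

    unsafe fn box_free(ptr: *mut u8, size: usize, align: usize) {
        dealloc(ptr, Layout::from_size_align(size, align).unwrap())
    }

    fn main() {
        unsafe {
            // Roughly what `let b = box 42u8; drop(b);` turns into:
            let p = exchange_malloc(1, 1);
            assert!(!p.is_null());
            p.write(42u8);
            assert_eq!(*p, 42);
            box_free(p, 1, 1);
        }
    }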
diff --git a/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs b/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs
new file mode 100644
index 000000000..14fd9eeff
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs
@@ -0,0 +1,431 @@
+// Adapted from https://github.com/sunfishcode/mir2cranelift/blob/master/rust-examples/nocore-hello-world.rs
+
+#![feature(
+ no_core, unboxed_closures, start, lang_items, box_syntax, never_type, linkage,
+ extern_types, thread_local
+)]
+#![no_core]
+#![allow(dead_code, non_camel_case_types)]
+
+extern crate mini_core;
+
+use mini_core::*;
+use mini_core::libc::*;
+
+unsafe extern "C" fn my_puts(s: *const u8) {
+ puts(s);
+}
+
+#[lang = "termination"]
+trait Termination {
+ fn report(self) -> i32;
+}
+
+impl Termination for () {
+ fn report(self) -> i32 {
+ unsafe {
+ NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
+ *NUM_REF as i32
+ }
+ }
+}
+
+trait SomeTrait {
+ fn object_safe(&self);
+}
+
+impl SomeTrait for &'static str {
+ fn object_safe(&self) {
+ unsafe {
+ puts(*self as *const str as *const u8);
+ }
+ }
+}
+
+struct NoisyDrop {
+ text: &'static str,
+ inner: NoisyDropInner,
+}
+
+struct NoisyDropUnsized {
+ inner: NoisyDropInner,
+ text: str,
+}
+
+struct NoisyDropInner;
+
+impl Drop for NoisyDrop {
+ fn drop(&mut self) {
+ unsafe {
+ puts(self.text as *const str as *const u8);
+ }
+ }
+}
+
+impl Drop for NoisyDropInner {
+ fn drop(&mut self) {
+ unsafe {
+ puts("Inner got dropped!\0" as *const str as *const u8);
+ }
+ }
+}
+
+impl SomeTrait for NoisyDrop {
+ fn object_safe(&self) {}
+}
+
+enum Ordering {
+ Less = -1,
+ Equal = 0,
+ Greater = 1,
+}
+
+#[lang = "start"]
+fn start<T: Termination + 'static>(
+ main: fn() -> T,
+ argc: isize,
+ argv: *const *const u8,
+) -> isize {
+ if argc == 3 {
+ unsafe { puts(*argv); }
+ unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const u8)); }
+ unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const u8)); }
+ }
+
+ main().report();
+ 0
+}
+
+static mut NUM: u8 = 6 * 7;
+static NUM_REF: &'static u8 = unsafe { &NUM };
+
+macro_rules! assert {
+ ($e:expr) => {
+ if !$e {
+ panic(stringify!(! $e));
+ }
+ };
+}
+
+macro_rules! assert_eq {
+ ($l:expr, $r: expr) => {
+ if $l != $r {
+ panic(stringify!($l != $r));
+ }
+ }
+}
+
+struct Unique<T: ?Sized> {
+ pointer: *const T,
+ _marker: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
+unsafe fn zeroed<T>() -> T {
+ let mut uninit = MaybeUninit { uninit: () };
+ intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
+ uninit.value.value
+}
+
+fn take_f32(_f: f32) {}
+fn take_unique(_u: Unique<()>) {}
+
+fn return_u128_pair() -> (u128, u128) {
+ (0, 0)
+}
+
+fn call_return_u128_pair() {
+ return_u128_pair();
+}
+
+fn main() {
+ take_unique(Unique {
+ pointer: 0 as *const (),
+ _marker: PhantomData,
+ });
+ take_f32(0.1);
+
+ //call_return_u128_pair();
+
+ let slice = &[0, 1] as &[i32];
+ let slice_ptr = slice as *const [i32] as *const i32;
+
+ assert_eq!(slice_ptr as usize % 4, 0);
+
+ //return;
+
+ unsafe {
+ printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);
+
+ let hello: &[u8] = b"Hello\0" as &[u8; 6];
+ let ptr: *const u8 = hello as *const [u8] as *const u8;
+ puts(ptr);
+
+ let world: Box<&str> = box "World!\0";
+ puts(*world as *const str as *const u8);
+ world as Box<dyn SomeTrait>;
+
+ assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
+
+ assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
+ assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
+ assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
+ assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);
+
+ assert_eq!(intrinsics::size_of_val(hello) as u8, 6);
+
+ let chars = &['C', 'h', 'a', 'r', 's'];
+ let chars = chars as &[char];
+ assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);
+
+ let a: &dyn SomeTrait = &"abc\0";
+ a.object_safe();
+
+ assert_eq!(intrinsics::size_of_val(a) as u8, 16);
+ assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);
+
+ assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2);
+ assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8);
+
+ assert!(!intrinsics::needs_drop::<u8>());
+ assert!(!intrinsics::needs_drop::<[u8]>());
+ assert!(intrinsics::needs_drop::<NoisyDrop>());
+ assert!(intrinsics::needs_drop::<NoisyDropUnsized>());
+
+ Unique {
+ pointer: 0 as *const &str,
+ _marker: PhantomData,
+ } as Unique<dyn SomeTrait>;
+
+ struct MyDst<T: ?Sized>(T);
+
+ intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);
+
+ struct Foo {
+ x: u8,
+ y: !,
+ }
+
+ unsafe fn uninitialized<T>() -> T {
+ MaybeUninit { uninit: () }.value.value
+ }
+
+ zeroed::<(u8, u8)>();
+ #[allow(unreachable_code)]
+ {
+ if false {
+ zeroed::<!>();
+ zeroed::<Foo>();
+ uninitialized::<Foo>();
+ }
+ }
+ }
+
+ let _ = box NoisyDrop {
+ text: "Boxed outer got dropped!\0",
+ inner: NoisyDropInner,
+ } as Box<dyn SomeTrait>;
+
+ const FUNC_REF: Option<fn()> = Some(main);
+ match FUNC_REF {
+ Some(_) => {},
+ None => assert!(false),
+ }
+
+ match Ordering::Less {
+ Ordering::Less => {},
+ _ => assert!(false),
+ }
+
+ [NoisyDropInner, NoisyDropInner];
+
+ let x = &[0u32, 42u32] as &[u32];
+ match x {
+ [] => assert_eq!(0u32, 1),
+ [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
+ }
+
+ assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);
+
+ extern {
+ #[linkage = "weak"]
+ static ABC: *const u8;
+ }
+
+ {
+ extern {
+ #[linkage = "weak"]
+ static ABC: *const u8;
+ }
+ }
+
+ // TODO(antoyo): to make this work, support weak linkage.
+ //unsafe { assert_eq!(ABC as usize, 0); }
+
+ &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
+
+ let f = 1000.0;
+ assert_eq!(f as u8, 255);
+ let f2 = -1000.0;
+ assert_eq!(f2 as i8, -128);
+ assert_eq!(f2 as u8, 0);
+
+ static ANOTHER_STATIC: &u8 = &A_STATIC;
+ assert_eq!(*ANOTHER_STATIC, 42);
+
+ check_niche_behavior();
+
+ extern "C" {
+ type ExternType;
+ }
+
+ struct ExternTypeWrapper {
+ _a: ExternType,
+ }
+
+ let nullptr = 0 as *const ();
+ let extern_nullptr = nullptr as *const ExternTypeWrapper;
+ extern_nullptr as *const ();
+ let slice_ptr = &[] as *const [u8];
+ slice_ptr as *const u8;
+
+ #[cfg(not(jit))]
+ test_tls();
+}
+
+#[repr(C)]
+enum c_void {
+ _1,
+ _2,
+}
+
+type c_int = i32;
+type c_ulong = u64;
+
+type pthread_t = c_ulong;
+
+#[repr(C)]
+struct pthread_attr_t {
+ __size: [u64; 7],
+}
+
+#[link(name = "pthread")]
+extern "C" {
+ fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
+
+ fn pthread_create(
+ native: *mut pthread_t,
+ attr: *const pthread_attr_t,
+ f: extern "C" fn(_: *mut c_void) -> *mut c_void,
+ value: *mut c_void
+ ) -> c_int;
+
+ fn pthread_join(
+ native: pthread_t,
+ value: *mut *mut c_void
+ ) -> c_int;
+}
+
+#[thread_local]
+#[cfg(not(jit))]
+static mut TLS: u8 = 42;
+
+#[cfg(not(jit))]
+extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
+ unsafe { TLS = 0; }
+ 0 as *mut c_void
+}
+
+#[cfg(not(jit))]
+fn test_tls() {
+ unsafe {
+ let mut attr: pthread_attr_t = zeroed();
+ let mut thread: pthread_t = 0;
+
+ assert_eq!(TLS, 42);
+
+ if pthread_attr_init(&mut attr) != 0 {
+ assert!(false);
+ }
+
+ if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 {
+ assert!(false);
+ }
+
+ let mut res = 0 as *mut c_void;
+ pthread_join(thread, &mut res);
+
+ // TLS of main thread must not have been changed by the other thread.
+ assert_eq!(TLS, 42);
+
+ puts("TLS works!\n\0" as *const str as *const u8);
+ }
+}
+
+// Copied from ui/issues/issue-61696.rs
+
+pub enum Infallible {}
+
+// The check that the `bool` field of `V1` is encoding a "niche variant"
+// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
+// causing valid `V1` values to be interpreted as other variants.
+pub enum E1 {
+ V1 { f: bool },
+ V2 { f: Infallible },
+ V3,
+ V4,
+}
+
+// Computing the discriminant used to be done using the niche type (here `u8`,
+// from the `bool` field of `V1`), overflowing for variants with large enough
+// indices (`V3` and `V4`), causing them to be interpreted as other variants.
+pub enum E2<X> {
+ V1 { f: bool },
+
+ /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
+ _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
+ _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
+ _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
+ _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
+ _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
+ _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
+ _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
+ _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
+ _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
+ _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
+ _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
+ _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
+ _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
+ _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
+ _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
+ _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
+ _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
+ _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
+ _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
+ _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
+ _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
+ _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
+ _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
+ _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
+ _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
+ _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
+ _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
+ _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
+ _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
+ _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
+ _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),
+
+ V3,
+ V4,
+}
+
+fn check_niche_behavior() {
+ if let E1::V2 { .. } = (E1::V1 { f: true }) {
+ intrinsics::abort();
+ }
+
+ if let E2::V1 { .. } = E2::V3::<Infallible> {
+ intrinsics::abort();
+ }
+}
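
The comments before `E1` and `E2` describe niche encoding: dataless variants are stored as bit patterns the niche field (`bool` in `V1`) can never legally hold, so the enum needs no separate tag byte. The same layout trick, illustrated with std (the `Option<&T>` size equality is guaranteed; the single-byte `E1` size reflects current rustc rather than a stable promise):

    use std::mem::size_of;

    #[allow(dead_code)]
    enum E1 {
        V1 { f: bool }, // bool uses only 0 and 1; 2..=255 are the niche
        V3,
        V4,
    }

    fn main() {
        // Option<&T> reuses the null niche of the reference: no overhead.
        assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
        // V3 and V4 are encoded in the bool's spare values, so E1 stays
        // one byte on current compilers.
        println!("size_of::<E1>() = {}", size_of::<E1>());
        assert!(matches!(E1::V1 { f: true }, E1::V1 { .. }));
    }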
diff --git a/compiler/rustc_codegen_gcc/example/mod_bench.rs b/compiler/rustc_codegen_gcc/example/mod_bench.rs
new file mode 100644
index 000000000..2e2b0052d
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/mod_bench.rs
@@ -0,0 +1,37 @@
+#![feature(start, box_syntax, core_intrinsics, lang_items)]
+#![no_std]
+
+#[link(name = "c")]
+extern {}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+ unsafe {
+ core::intrinsics::abort();
+ }
+}
+
+#[lang="eh_personality"]
+fn eh_personality(){}
+
+// Required for rustc_codegen_llvm
+#[no_mangle]
+unsafe extern "C" fn _Unwind_Resume() {
+ core::intrinsics::unreachable();
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ for i in 2..100_000_000 {
+ black_box((i + 1) % i);
+ }
+
+ 0
+}
+
+#[inline(never)]
+fn black_box(i: u32) {
+ if i != 1 {
+ unsafe { core::intrinsics::abort(); }
+ }
+}
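
The hand-rolled `black_box` above exists to keep the `(i + 1) % i` computation (always 1 for i >= 2) from being optimized out of the loop; the `#[inline(never)]` boundary is what makes the value opaque. Later Rust versions stabilized `std::hint::black_box` for exactly this job; a sketch of the equivalent (stable since Rust 1.66, so newer than the toolchain pinned in this tree):

    use std::hint::black_box;

    fn main() {
        let mut acc = 0u64;
        for i in 2..1_000u64 {
            // Always adds 1, but the optimizer must not assume so.
            acc += black_box((i + 1) % i);
        }
        assert_eq!(acc, 998);
    }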
diff --git a/compiler/rustc_codegen_gcc/example/std_example.rs b/compiler/rustc_codegen_gcc/example/std_example.rs
new file mode 100644
index 000000000..31069058a
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/std_example.rs
@@ -0,0 +1,286 @@
+#![feature(core_intrinsics, generators, generator_trait, is_sorted)]
+
+use std::arch::x86_64::*;
+use std::io::Write;
+use std::ops::Generator;
+
+extern {
+ pub fn printf(format: *const i8, ...) -> i32;
+}
+
+fn main() {
+ let mutex = std::sync::Mutex::new(());
+ let _guard = mutex.lock().unwrap();
+
+ let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();
+ let stderr = ::std::io::stderr();
+ let mut stderr = stderr.lock();
+
+ std::thread::spawn(move || {
+ println!("Hello from another thread!");
+ });
+
+ writeln!(stderr, "some {} text", "<unknown>").unwrap();
+
+ let _ = std::process::Command::new("true").env("c", "d").spawn();
+
+ println!("cargo:rustc-link-lib=z");
+
+ static ONCE: std::sync::Once = std::sync::Once::new();
+ ONCE.call_once(|| {});
+
+ let _eq = LoopState::Continue(()) == LoopState::Break(());
+
+ // Make sure ByValPair values with differently sized components are correctly passed
+ map(None::<(u8, Box<Instruction>)>);
+
+ println!("{}", 2.3f32.exp());
+ println!("{}", 2.3f32.exp2());
+ println!("{}", 2.3f32.abs());
+ println!("{}", 2.3f32.sqrt());
+ println!("{}", 2.3f32.floor());
+ println!("{}", 2.3f32.ceil());
+ println!("{}", 2.3f32.min(1.0));
+ println!("{}", 2.3f32.max(1.0));
+ println!("{}", 2.3f32.powi(2));
+ println!("{}", 2.3f32.log2());
+ assert_eq!(2.3f32.copysign(-1.0), -2.3f32);
+ println!("{}", 2.3f32.powf(2.0));
+
+ assert_eq!(-128i8, (-128i8).saturating_sub(1));
+ assert_eq!(127i8, 127i8.saturating_sub(-128));
+ assert_eq!(-128i8, (-128i8).saturating_add(-128));
+ assert_eq!(127i8, 127i8.saturating_add(1));
+
+ assert_eq!(-32768i16, (-32768i16).saturating_add(-32768));
+ assert_eq!(32767i16, 32767i16.saturating_add(1));
+
+ assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
+ assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
+
+ let _d = 0i128.checked_div(2i128);
+ let _d = 0u128.checked_div(2u128);
+ assert_eq!(1u128 + 2, 3);
+
+ assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);
+ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);
+ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);
+
+ let tmp = 353985398u128;
+ assert_eq!(tmp * 932490u128, 330087843781020u128);
+
+ let tmp = -0x1234_5678_9ABC_DEF0i64;
+ assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);
+
+ // Check that all u/i128 <-> float casts work correctly.
+ let hundred_u128 = 100u128;
+ let hundred_i128 = 100i128;
+ let hundred_f32 = 100.0f32;
+ let hundred_f64 = 100.0f64;
+ assert_eq!(hundred_u128 as f32, 100.0);
+ assert_eq!(hundred_u128 as f64, 100.0);
+ assert_eq!(hundred_f32 as u128, 100);
+ assert_eq!(hundred_f64 as u128, 100);
+ assert_eq!(hundred_i128 as f32, 100.0);
+ assert_eq!(hundred_i128 as f64, 100.0);
+ assert_eq!(hundred_f32 as i128, 100);
+ assert_eq!(hundred_f64 as i128, 100);
+
+ let _a = 1u32 << 2u8;
+
+ let empty: [i32; 0] = [];
+ assert!(empty.is_sorted());
+
+ println!("{:?}", std::intrinsics::caller_location());
+
+ #[cfg(feature="master")]
+ unsafe {
+ test_simd();
+ }
+
+ Box::pin(move |mut _task_context| {
+ yield ();
+ }).as_mut().resume(0);
+
+ println!("End");
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_simd() {
+ let x = _mm_setzero_si128();
+ let y = _mm_set1_epi16(7);
+ let or = _mm_or_si128(x, y);
+ let cmp_eq = _mm_cmpeq_epi8(y, y);
+ let cmp_lt = _mm_cmplt_epi8(y, y);
+
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]);
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);
+
+ test_mm_slli_si128();
+ test_mm_movemask_epi8();
+ test_mm256_movemask_epi8();
+ test_mm_add_epi8();
+ test_mm_add_pd();
+ test_mm_cvtepi8_epi16();
+ test_mm_cvtsi128_si64();
+
+ test_mm_extract_epi8();
+ test_mm_insert_epi16();
+
+ let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
+ assert_eq!(mask1, 1);
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_slli_si128() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 1);
+ let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ assert_eq_m128i(r, e);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 15);
+ let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
+ assert_eq_m128i(r, e);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 16);
+ assert_eq_m128i(r, _mm_set1_epi8(0));
+}
+
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_movemask_epi8() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,
+ 0b0101, 0b1111_0000u8 as i8, 0, 0,
+ 0, 0, 0b1111_0000u8 as i8, 0b0101,
+ 0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,
+ );
+ let r = _mm_movemask_epi8(a);
+ assert_eq!(r, 0b10100100_00100101);
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "avx2")]
+unsafe fn test_mm256_movemask_epi8() {
+ let a = _mm256_set1_epi8(-1);
+ let r = _mm256_movemask_epi8(a);
+ let e = -1;
+ assert_eq!(r, e);
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_epi8() {
+ let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ #[rustfmt::skip]
+ let b = _mm_setr_epi8(
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ );
+ let r = _mm_add_epi8(a, b);
+ #[rustfmt::skip]
+ let e = _mm_setr_epi8(
+ 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,
+ );
+ assert_eq_m128i(r, e);
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_pd() {
+ let a = _mm_setr_pd(1.0, 2.0);
+ let b = _mm_setr_pd(5.0, 10.0);
+ let r = _mm_add_pd(a, b);
+ assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
+}
+
+#[cfg(feature="master")]
+fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
+ unsafe {
+ assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
+ }
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
+ if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
+ panic!("{:?} != {:?}", a, b);
+ }
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_cvtsi128_si64() {
+ let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
+ assert_eq!(r, 5);
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_cvtepi8_epi16() {
+ let a = _mm_set1_epi8(10);
+ let r = _mm_cvtepi8_epi16(a);
+ let e = _mm_set1_epi16(10);
+ assert_eq_m128i(r, e);
+ let a = _mm_set1_epi8(-10);
+ let r = _mm_cvtepi8_epi16(a);
+ let e = _mm_set1_epi16(-10);
+ assert_eq_m128i(r, e);
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_extract_epi8() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ -1, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15
+ );
+ let r1 = _mm_extract_epi8(a, 0);
+ let r2 = _mm_extract_epi8(a, 3);
+ assert_eq!(r1, 0xFF);
+ assert_eq!(r2, 3);
+}
+
+#[cfg(all(feature="master", target_arch = "x86_64"))]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_insert_epi16() {
+ let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
+ let r = _mm_insert_epi16(a, 9, 0);
+ let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7);
+ assert_eq_m128i(r, e);
+}
+
+#[derive(PartialEq)]
+enum LoopState {
+ Continue(()),
+ Break(())
+}
+
+pub enum Instruction {
+ Increment,
+ Loop,
+}
+
+fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {
+ match a {
+ None => None,
+ Some((_, instr)) => Some(instr),
+ }
+}
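
Several of the SSE checks above hinge on `_mm_movemask_epi8` packing each byte lane's sign (top) bit into one bit of the result; the expected `0b10100100_00100101` in `test_mm_movemask_epi8` is just the map of which `_mm_setr_epi8` arguments have their high bit set. A scalar model of that intrinsic (illustrative, not the vendor definition):

    // Bit i of the result is the top bit of byte lane i.
    fn movemask_epi8(lanes: [u8; 16]) -> u16 {
        let mut mask = 0u16;
        for (i, &b) in lanes.iter().enumerate() {
            mask |= (((b >> 7) & 1) as u16) << i;
        }
        mask
    }

    fn main() {
        let mut lanes = [0u8; 16];
        lanes[0] = 0b1000_0000; // -> result bit 0
        lanes[2] = 0b1000_0000; // -> result bit 2
        lanes[5] = 0b1111_0000; // -> result bit 5
        assert_eq!(movemask_epi8(lanes), 0b10_0101);
    }

This matches the low byte (`0b00100101`) of the expected mask in the test above, where lanes 0, 2, and 5 carry a set sign bit.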
diff --git a/compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs b/compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs
new file mode 100644
index 000000000..2cb84786f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs
@@ -0,0 +1,97 @@
+// Based on https://github.com/rust-lang/rust/blob/c5840f9d252c2f5cc16698dbf385a29c5de3ca07/src/test/ui/array-slice-vec/subslice-patterns-const-eval-match.rs
+
+// Test that array subslice patterns are correctly handled in const evaluation.
+
+// run-pass
+
+#[derive(PartialEq, Debug, Clone)]
+struct N(u8);
+
+#[derive(PartialEq, Debug, Clone)]
+struct Z;
+
+macro_rules! n {
+ ($($e:expr),* $(,)?) => {
+ [$(N($e)),*]
+ }
+}
+
+// This macro has an unused variable so that it can be repeated based on the
+// number of times a repeated variable (`$e` in `z`) occurs.
+macro_rules! zed {
+ ($e:expr) => { Z }
+}
+
+macro_rules! z {
+ ($($e:expr),* $(,)?) => {
+ [$(zed!($e)),*]
+ }
+}
+
+// Compare constant evaluation and runtime evaluation of a given expression.
+macro_rules! compare_evaluation {
+ ($e:expr, $t:ty $(,)?) => {{
+ const CONST_EVAL: $t = $e;
+ const fn const_eval() -> $t { $e }
+ static CONST_EVAL2: $t = const_eval();
+ let runtime_eval = $e;
+ assert_eq!(CONST_EVAL, runtime_eval);
+ assert_eq!(CONST_EVAL2, runtime_eval);
+ }}
+}
+
+// Repeat `$test`, substituting the given macro variables with the given
+// identifiers.
+//
+// For example:
+//
+// repeat! {
+// ($name); X; Y:
+// struct $name;
+// }
+//
+// Expands to:
+//
+// struct X; struct Y;
+//
+// This is used to repeat the tests using both the `N` and `Z`
+// types.
+macro_rules! repeat {
+ (($($dollar:tt $placeholder:ident)*); $($($values:ident),+);*: $($test:tt)*) => {
+ macro_rules! single {
+ ($($dollar $placeholder:ident),*) => { $($test)* }
+ }
+ $(single!($($values),+);)*
+ }
+}
+
+fn main() {
+ repeat! {
+ ($arr $Ty); n, N; z, Z:
+ compare_evaluation!({ let [_, x @ .., _] = $arr!(1, 2, 3, 4); x }, [$Ty; 2]);
+ compare_evaluation!({ let [_, ref x @ .., _] = $arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+ compare_evaluation!({ let [_, x @ .., _] = &$arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+
+ compare_evaluation!({ let [_, _, x @ .., _, _] = $arr!(1, 2, 3, 4); x }, [$Ty; 0]);
+ compare_evaluation!(
+ { let [_, _, ref x @ .., _, _] = $arr!(1, 2, 3, 4); x },
+ &'static [$Ty; 0],
+ );
+ compare_evaluation!(
+ { let [_, _, x @ .., _, _] = &$arr!(1, 2, 3, 4); x },
+ &'static [$Ty; 0],
+ );
+
+ compare_evaluation!({ let [_, .., x] = $arr!(1, 2, 3, 4); x }, $Ty);
+ compare_evaluation!({ let [_, .., ref x] = $arr!(1, 2, 3, 4); x }, &'static $Ty);
+ compare_evaluation!({ let [_, _y @ .., x] = &$arr!(1, 2, 3, 4); x }, &'static $Ty);
+ }
+
+ compare_evaluation!({ let [_, .., N(x)] = n!(1, 2, 3, 4); x }, u8);
+ compare_evaluation!({ let [_, .., N(ref x)] = n!(1, 2, 3, 4); x }, &'static u8);
+ compare_evaluation!({ let [_, .., N(x)] = &n!(1, 2, 3, 4); x }, &'static u8);
+
+ compare_evaluation!({ let [N(x), .., _] = n!(1, 2, 3, 4); x }, u8);
+ compare_evaluation!({ let [N(ref x), .., _] = n!(1, 2, 3, 4); x }, &'static u8);
+ compare_evaluation!({ let [N(x), .., _] = &n!(1, 2, 3, 4); x }, &'static u8);
+}
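
The `repeat!` macro relies on passing `$` into the macro as an ordinary token, so that the generated inner macro `single!` can declare its own metavariables. A minimal runnable distillation of that trick (simplified to one placeholder and a comma-separated value list):

    macro_rules! repeat {
        (($dollar:tt $placeholder:ident); $($value:ident),+: $($test:tt)*) => {
            macro_rules! single {
                // Expands to e.g. `($name:ident) => { ... }`.
                ($dollar $placeholder:ident) => { $($test)* }
            }
            $(single!($value);)+
        }
    }

    fn main() {
        let mut seen = Vec::new();
        repeat! {
            ($name); X, Y:
            seen.push(stringify!($name));
        }
        assert_eq!(seen, ["X", "Y"]);
    }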
diff --git a/compiler/rustc_codegen_gcc/example/track-caller-attribute.rs b/compiler/rustc_codegen_gcc/example/track-caller-attribute.rs
new file mode 100644
index 000000000..93bab17e4
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/track-caller-attribute.rs
@@ -0,0 +1,40 @@
+// Based on https://github.com/anp/rust/blob/175631311716d7dfeceec40d2587cde7142ffa8c/src/test/ui/rfc-2091-track-caller/track-caller-attribute.rs
+
+// run-pass
+
+use std::panic::Location;
+
+#[track_caller]
+fn tracked() -> &'static Location<'static> {
+ Location::caller()
+}
+
+fn nested_intrinsic() -> &'static Location<'static> {
+ Location::caller()
+}
+
+fn nested_tracked() -> &'static Location<'static> {
+ tracked()
+}
+
+fn main() {
+ let location = Location::caller();
+ assert_eq!(location.file(), file!());
+ assert_eq!(location.line(), 21);
+ assert_eq!(location.column(), 20);
+
+ let tracked = tracked();
+ assert_eq!(tracked.file(), file!());
+ assert_eq!(tracked.line(), 26);
+ assert_eq!(tracked.column(), 19);
+
+ let nested = nested_intrinsic();
+ assert_eq!(nested.file(), file!());
+ assert_eq!(nested.line(), 13);
+ assert_eq!(nested.column(), 5);
+
+ let contained = nested_tracked();
+ assert_eq!(contained.file(), file!());
+ assert_eq!(contained.line(), 17);
+ assert_eq!(contained.column(), 5);
+}
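
For readers decoding the asserted line numbers: `Location::caller()` inside a plain function reports the position of that call itself (hence line 13 for `nested_intrinsic`), while a call to a `#[track_caller]` function reports the call site instead (hence line 17, the `tracked()` call inside `nested_tracked`). A freestanding illustration in stable Rust, independent of the test:

    use std::panic::Location;

    fn plain() -> &'static Location<'static> {
        Location::caller() // reports this very line
    }

    #[track_caller]
    fn tracked() -> &'static Location<'static> {
        Location::caller() // reports the caller's line instead
    }

    fn main() {
        let a = plain();
        let b = tracked();
        println!("plain -> line {}, tracked -> line {}", a.line(), b.line());
        assert_ne!(a.line(), b.line());
    }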
diff --git a/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch b/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch
new file mode 100644
index 000000000..301b3f9bd
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch
@@ -0,0 +1,63 @@
+From f6befc4bb51d84f5f1cf35938a168c953d421350 Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sun, 24 Nov 2019 15:10:23 +0100
+Subject: [PATCH] [core] Disable not compiling tests
+
+---
+ library/core/tests/Cargo.toml | 8 ++++++++
+ library/core/tests/num/flt2dec/mod.rs | 1 -
+ library/core/tests/num/int_macros.rs | 2 ++
+ library/core/tests/num/uint_macros.rs | 2 ++
+ library/core/tests/ptr.rs | 2 ++
+ library/core/tests/slice.rs | 2 ++
+ 6 files changed, 16 insertions(+), 1 deletion(-)
+ create mode 100644 library/core/tests/Cargo.toml
+
+diff --git a/library/core/tests/Cargo.toml b/library/core/tests/Cargo.toml
+new file mode 100644
+index 0000000..46fd999
+--- /dev/null
++++ b/library/core/tests/Cargo.toml
+@@ -0,0 +1,8 @@
++[package]
++name = "core"
++version = "0.0.0"
++edition = "2021"
++
++[lib]
++name = "coretests"
++path = "lib.rs"
+diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
+index a35897e..f0bf645 100644
+--- a/library/core/tests/num/flt2dec/mod.rs
++++ b/library/core/tests/num/flt2dec/mod.rs
+@@ -13,7 +13,6 @@ mod strategy {
+ mod dragon;
+ mod grisu;
+ }
+-mod random;
+
+ pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+ match decode(v).1 {
+diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
+index 6609bc3..241b497 100644
+--- a/library/core/tests/slice.rs
++++ b/library/core/tests/slice.rs
+@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() {
+ }
+ }
+
++/*
+ #[test]
+ #[cfg(not(target_arch = "wasm32"))]
+ fn sort_unstable() {
+@@ -1394,6 +1395,7 @@ fn partition_at_index() {
+ v.select_nth_unstable(0);
+ assert!(v == [0xDEADBEEF]);
+ }
++*/
+
+ #[test]
+ #[should_panic(expected = "index 0 greater than length of slice")]
+--
+2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_gcc/patches/0023-core-Ignore-failing-tests.patch b/compiler/rustc_codegen_gcc/patches/0023-core-Ignore-failing-tests.patch
new file mode 100644
index 000000000..ee5ba449f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/patches/0023-core-Ignore-failing-tests.patch
@@ -0,0 +1,49 @@
+From dd82e95c9de212524e14fc60155de1ae40156dfc Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sun, 24 Nov 2019 15:34:06 +0100
+Subject: [PATCH] [core] Ignore failing tests
+
+---
+ library/core/tests/iter.rs | 4 ++++
+ library/core/tests/num/bignum.rs | 10 ++++++++++
+ library/core/tests/num/mod.rs | 5 +++--
+ library/core/tests/time.rs | 1 +
+ 4 files changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
+index 4bc44e9..8e3c7a4 100644
+--- a/library/core/tests/array.rs
++++ b/library/core/tests/array.rs
+@@ -242,6 +242,7 @@ fn iterator_drops() {
+ assert_eq!(i.get(), 5);
+ }
+
++/*
+ // This test does not work on targets without panic=unwind support.
+ // To work around this problem, test is marked is should_panic, so it will
+ // be automagically skipped on unsuitable targets, such as
+@@ -283,6 +284,7 @@ fn array_default_impl_avoids_leaks_on_panic() {
+ assert_eq!(COUNTER.load(Relaxed), 0);
+ panic!("test succeeded")
+ }
++*/
+
+ #[test]
+ fn empty_array_is_always_default() {
+@@ -304,6 +304,7 @@ fn array_map() {
+ assert_eq!(b, [1, 2, 3]);
+ }
+
++/*
+ // See note on above test for why `should_panic` is used.
+ #[test]
+ #[should_panic(expected = "test succeeded")]
+@@ -332,6 +333,7 @@ fn array_map_drop_safety() {
+ assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create);
+ panic!("test succeeded")
+ }
++*/
+
+ #[test]
+ fn cell_allows_array_cycle() {
+-- 2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch b/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch
new file mode 100644
index 000000000..d5fa1cec0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch
@@ -0,0 +1,29 @@
+From b1ae000f6da1abd3b8e9b80c40bc11c89b8ae93c Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Thu, 30 Dec 2021 16:54:40 +0100
+Subject: [PATCH] [core] Disable portable-simd test
+
+---
+ library/core/tests/lib.rs | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
+index 06c7be0..359e2e7 100644
+--- a/library/core/tests/lib.rs
++++ b/library/core/tests/lib.rs
+@@ -75,7 +75,6 @@
+ #![feature(never_type)]
+ #![feature(unwrap_infallible)]
+ #![feature(result_into_ok_or_err)]
+-#![feature(portable_simd)]
+ #![feature(ptr_metadata)]
+ #![feature(once_cell)]
+ #![feature(option_result_contains)]
+@@ -127,7 +126,6 @@ mod pin;
+ mod pin_macro;
+ mod ptr;
+ mod result;
+-mod simd;
+ mod slice;
+ mod str;
+ mod str_lossy;
diff --git a/compiler/rustc_codegen_gcc/patches/0028-core-Disable-long-running-tests.patch b/compiler/rustc_codegen_gcc/patches/0028-core-Disable-long-running-tests.patch
new file mode 100644
index 000000000..dc1beae6d
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/patches/0028-core-Disable-long-running-tests.patch
@@ -0,0 +1,32 @@
+From eb703e627e7a84f1cd8d0d87f0f69da1f0acf765 Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Fri, 3 Dec 2021 12:16:30 +0100
+Subject: [PATCH] Disable long running tests
+
+---
+ library/core/tests/slice.rs | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
+index 8402833..84592e0 100644
+--- a/library/core/tests/slice.rs
++++ b/library/core/tests/slice.rs
+@@ -2462,6 +2462,7 @@ take_tests! {
+ #[cfg(not(miri))] // unused in Miri
+ const EMPTY_MAX: &'static [()] = &[(); usize::MAX];
+
++/*
+ // can't be a constant due to const mutability rules
+ #[cfg(not(miri))] // unused in Miri
+ macro_rules! empty_max_mut {
+@@ -2485,6 +2486,7 @@ take_tests! {
+ (take_mut_oob_max_range_to_inclusive, (..=usize::MAX), None, empty_max_mut!()),
+ (take_mut_in_bounds_max_range_from, (usize::MAX..), Some(&mut [] as _), empty_max_mut!()),
+ }
++*/
+
+ #[test]
+ fn test_slice_from_ptr_range() {
+--
+2.26.2.7.g19db9cfb68
+
diff --git a/compiler/rustc_codegen_gcc/prepare.sh b/compiler/rustc_codegen_gcc/prepare.sh
new file mode 100755
index 000000000..e98f24c6e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/prepare.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+set -e
+set -v
+
+source prepare_build.sh
+
+cargo install hyperfine || echo "Skipping hyperfine install"
+
+git clone https://github.com/rust-random/rand.git || echo "rust-random/rand has already been cloned"
+pushd rand
+git checkout -- .
+git checkout 0f933f9c7176e53b2a3c7952ded484e1783f0bf1
+git am ../crate_patches/*-rand-*.patch
+popd
+
+git clone https://github.com/rust-lang/regex.git || echo "rust-lang/regex has already been cloned"
+pushd regex
+git checkout -- .
+git checkout 341f207c1071f7290e3f228c710817c280c8dca1
+popd
+
+git clone https://github.com/ebobby/simple-raytracer || echo "ebobby/simple-raytracer has already been cloned"
+pushd simple-raytracer
+git checkout -- .
+git checkout 804a7a21b9e673a482797aa289a18ed480e4d813
+
+# build with cg_llvm for perf comparison
+cargo build
+mv target/debug/main raytracer_cg_llvm
+popd
diff --git a/compiler/rustc_codegen_gcc/prepare_build.sh b/compiler/rustc_codegen_gcc/prepare_build.sh
new file mode 100755
index 000000000..8194360da
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/prepare_build.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+set -e
+set -v
+
+./build_sysroot/prepare_sysroot_src.sh
diff --git a/compiler/rustc_codegen_gcc/rust-toolchain b/compiler/rustc_codegen_gcc/rust-toolchain
new file mode 100644
index 000000000..b20aeb979
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/rust-toolchain
@@ -0,0 +1,3 @@
+[toolchain]
+channel = "nightly-2022-06-06"
+components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
diff --git a/compiler/rustc_codegen_gcc/rustc_patches/compile_test.patch b/compiler/rustc_codegen_gcc/rustc_patches/compile_test.patch
new file mode 100644
index 000000000..59143eac3
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/rustc_patches/compile_test.patch
@@ -0,0 +1,14 @@
+diff --git a/src/tools/compiletest/src/header.rs b/src/tools/compiletest/src/header.rs
+index 887d27fd6dca4..2c2239f2b83d1 100644
+--- a/src/tools/compiletest/src/header.rs
++++ b/src/tools/compiletest/src/header.rs
+@@ -806,8 +806,8 @@ pub fn make_test_description<R: Read>(
+ cfg: Option<&str>,
+ ) -> test::TestDesc {
+ let mut ignore = false;
+ #[cfg(not(bootstrap))]
+- let ignore_message: Option<String> = None;
++ let ignore_message: Option<&str> = None;
+ let mut should_fail = false;
+
+ let rustc_has_profiler_support = env::var_os("RUSTC_PROFILER_SUPPORT").is_some();
diff --git a/compiler/rustc_codegen_gcc/rustup.sh b/compiler/rustc_codegen_gcc/rustup.sh
new file mode 100755
index 000000000..041079bc9
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/rustup.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+set -e
+
+case $1 in
+ "prepare")
+ TOOLCHAIN=$(date +%Y-%m-%d)
+
+ echo "=> Installing new nightly"
+ rustup toolchain install --profile minimal nightly-${TOOLCHAIN} # Sanity check to see if the nightly exists
+ echo nightly-${TOOLCHAIN} > rust-toolchain
+
+ echo "=> Uninstalling all old nightlies"
+ for nightly in $(rustup toolchain list | grep nightly | grep -v $TOOLCHAIN | grep -v nightly-x86_64); do
+ rustup toolchain uninstall $nightly
+ done
+
+ ./clean_all.sh
+ ./prepare.sh
+ ;;
+ "commit")
+ git add rust-toolchain
+ git commit -m "Rustup to $(rustc -V)"
+ ;;
+ *)
+ echo "Unknown command '$1'"
+ echo "Usage: ./rustup.sh prepare|commit"
+ ;;
+esac
diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs
new file mode 100644
index 000000000..0ed3e1fbe
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/abi.rs
@@ -0,0 +1,179 @@
+use gccjit::{ToLValue, ToRValue, Type};
+use rustc_codegen_ssa::traits::{AbiBuilderMethods, BaseTypeMethods};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::bug;
+use rustc_middle::ty::Ty;
+use rustc_target::abi::call::{CastTarget, FnAbi, PassMode, Reg, RegKind};
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::intrinsic::ArgAbiExt;
+use crate::type_of::LayoutGccExt;
+
+impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn apply_attrs_callsite(&mut self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _callsite: Self::Value) {
+ // TODO(antoyo)
+ }
+
+ fn get_param(&mut self, index: usize) -> Self::Value {
+ let func = self.current_func();
+ let param = func.get_param(index as i32);
+ let on_stack =
+ if let Some(on_stack_param_indices) = self.on_stack_function_params.borrow().get(&func) {
+ on_stack_param_indices.contains(&index)
+ }
+ else {
+ false
+ };
+ if on_stack {
+ param.to_lvalue().get_address(None)
+ }
+ else {
+ param.to_rvalue()
+ }
+ }
+}
+
+impl GccType for CastTarget {
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
+ let rest_gcc_unit = self.rest.unit.gcc_type(cx);
+ let (rest_count, rem_bytes) =
+ if self.rest.unit.size.bytes() == 0 {
+ (0, 0)
+ }
+ else {
+ (self.rest.total.bytes() / self.rest.unit.size.bytes(), self.rest.total.bytes() % self.rest.unit.size.bytes())
+ };
+
+ if self.prefix.iter().all(|x| x.is_none()) {
+ // Simplify to a single unit when there is no prefix and size <= unit size
+ if self.rest.total <= self.rest.unit.size {
+ return rest_gcc_unit;
+ }
+
+ // Simplify to array when all chunks are the same size and type
+ if rem_bytes == 0 {
+ return cx.type_array(rest_gcc_unit, rest_count);
+ }
+ }
+
+ // Create list of fields in the main structure
+ let mut args: Vec<_> = self
+ .prefix
+ .iter()
+ .flat_map(|option_reg| {
+ option_reg.map(|reg| reg.gcc_type(cx))
+ })
+ .chain((0..rest_count).map(|_| rest_gcc_unit))
+ .collect();
+
+ // Append final integer
+ if rem_bytes != 0 {
+ // Only integers can really be split further.
+ assert_eq!(self.rest.unit.kind, RegKind::Integer);
+ args.push(cx.type_ix(rem_bytes * 8));
+ }
+
+ cx.type_struct(&args, false)
+ }
+}
+
+pub trait GccType {
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc>;
+}
+
+impl GccType for Reg {
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
+ match self.kind {
+ RegKind::Integer => cx.type_ix(self.size.bits()),
+ RegKind::Float => {
+ match self.size.bits() {
+ 32 => cx.type_f32(),
+ 64 => cx.type_f64(),
+ _ => bug!("unsupported float: {:?}", self),
+ }
+ },
+ RegKind::Vector => unimplemented!(), //cx.type_vector(cx.type_i8(), self.size.bytes()),
+ }
+ }
+}
+
+pub trait FnAbiGccExt<'gcc, 'tcx> {
+ // TODO(antoyo): return a function pointer type instead?
+ fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>);
+ fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+}
+
+impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
+ fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>) {
+ let mut on_stack_param_indices = FxHashSet::default();
+ let args_capacity: usize = self.args.iter().map(|arg|
+ if arg.pad.is_some() {
+ 1
+ }
+ else {
+ 0
+ } +
+ if let PassMode::Pair(_, _) = arg.mode {
+ 2
+ } else {
+ 1
+ }
+ ).sum();
+ let mut argument_tys = Vec::with_capacity(
+ if let PassMode::Indirect { .. } = self.ret.mode {
+ 1
+ }
+ else {
+ 0
+ } + args_capacity,
+ );
+
+ let return_ty =
+ match self.ret.mode {
+ PassMode::Ignore => cx.type_void(),
+ PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
+ PassMode::Cast(cast) => cast.gcc_type(cx),
+ PassMode::Indirect { .. } => {
+ argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+ cx.type_void()
+ }
+ };
+
+ for arg in &self.args {
+ // add padding
+ if let Some(ty) = arg.pad {
+ argument_tys.push(ty.gcc_type(cx));
+ }
+
+ let arg_ty = match arg.mode {
+ PassMode::Ignore => continue,
+ PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx),
+ PassMode::Pair(..) => {
+ argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0, true));
+ argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1, true));
+ continue;
+ }
+ PassMode::Indirect { extra_attrs: Some(_), .. } => {
+ unimplemented!();
+ }
+ PassMode::Cast(cast) => cast.gcc_type(cx),
+ PassMode::Indirect { extra_attrs: None, on_stack: true, .. } => {
+ on_stack_param_indices.insert(argument_tys.len());
+ arg.memory_ty(cx)
+ },
+ PassMode::Indirect { extra_attrs: None, on_stack: false, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
+ };
+ argument_tys.push(arg_ty);
+ }
+
+ (return_ty, argument_tys, self.c_variadic, on_stack_param_indices)
+ }
+
+ fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ let (return_type, params, variadic, on_stack_param_indices) = self.gcc_type(cx);
+ let pointer_type = cx.context.new_function_pointer_type(None, return_type, &params, variadic);
+ cx.on_stack_params.borrow_mut().insert(pointer_type.dyncast_function_ptr_type().expect("function ptr type"), on_stack_param_indices);
+ pointer_type
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/allocator.rs b/compiler/rustc_codegen_gcc/src/allocator.rs
new file mode 100644
index 000000000..58efb81e8
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/allocator.rs
@@ -0,0 +1,123 @@
+use gccjit::{FunctionType, GlobalKind, ToRValue};
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::OomStrategy;
+use rustc_span::symbol::sym;
+
+use crate::GccContext;
+
+pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) {
+ let context = &mods.context;
+ let usize =
+ match tcx.sess.target.pointer_width {
+ 16 => context.new_type::<u16>(),
+ 32 => context.new_type::<u32>(),
+ 64 => context.new_type::<u64>(),
+ tws => bug!("Unsupported target word size for int: {}", tws),
+ };
+ let i8 = context.new_type::<i8>();
+ let i8p = i8.make_pointer();
+ let void = context.new_type::<()>();
+
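+ // For each allocator method, a forwarding shim is emitted; e.g. for `alloc` (inputs:
+ // Layout, output: ResultPtr), the generated function is roughly equivalent to this C
+ // sketch:
+ //     unsigned char *__rust_alloc(size_t size, size_t align) {
+ //         return __rg_alloc(size, align); // or __rdl_alloc for the default allocator
+ //     }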
+ for method in ALLOCATOR_METHODS {
+ let mut types = Vec::with_capacity(method.inputs.len());
+ for ty in method.inputs.iter() {
+ match *ty {
+ AllocatorTy::Layout => {
+ types.push(usize);
+ types.push(usize);
+ }
+ AllocatorTy::Ptr => types.push(i8p),
+ AllocatorTy::Usize => types.push(usize),
+
+ AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+ }
+ }
+ let output = match method.output {
+ AllocatorTy::ResultPtr => Some(i8p),
+ AllocatorTy::Unit => None,
+
+ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+ panic!("invalid allocator output")
+ }
+ };
+ let name = format!("__rust_{}", method.name);
+
+ let args: Vec<_> = types.iter().enumerate()
+ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+ .collect();
+ let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false);
+
+ if tcx.sess.target.options.default_hidden_visibility {
+ // TODO(antoyo): set visibility.
+ }
+ if tcx.sess.must_emit_unwind_tables() {
+ // TODO(antoyo): emit unwind tables.
+ }
+
+ let callee = kind.fn_name(method.name);
+ let args: Vec<_> = types.iter().enumerate()
+ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+ .collect();
+ let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false);
+ // TODO(antoyo): set visibility.
+
+ let block = func.new_block("entry");
+
+ let args = args
+ .iter()
+ .enumerate()
+ .map(|(i, _)| func.get_param(i as i32).to_rvalue())
+ .collect::<Vec<_>>();
+ let ret = context.new_call(None, callee, &args);
+ //llvm::LLVMSetTailCall(ret, True);
+ if output.is_some() {
+ block.end_with_return(None, ret);
+ }
+ else {
+ block.end_with_void_return(None);
+ }
+
+ // TODO(@Commeownist): Check if we need to emit some extra debugging info in certain circumstances
+ // as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643
+ }
+
+ let types = [usize, usize];
+ let name = "__rust_alloc_error_handler".to_string();
+ let args: Vec<_> = types.iter().enumerate()
+ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+ .collect();
+ let func = context.new_function(None, FunctionType::Exported, void, &args, name, false);
+
+ let kind =
+ if has_alloc_error_handler {
+ AllocatorKind::Global
+ }
+ else {
+ AllocatorKind::Default
+ };
+ let callee = kind.fn_name(sym::oom);
+ let args: Vec<_> = types.iter().enumerate()
+ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+ .collect();
+ let callee = context.new_function(None, FunctionType::Extern, void, &args, callee, false);
+ //llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+ let block = func.new_block("entry");
+
+ let args = args
+ .iter()
+ .enumerate()
+ .map(|(i, _)| func.get_param(i as i32).to_rvalue())
+ .collect::<Vec<_>>();
+ let _ret = context.new_call(None, callee, &args);
+ //llvm::LLVMSetTailCall(ret, True);
+ block.end_with_void_return(None);
+
+ let name = OomStrategy::SYMBOL.to_string();
+ let global = context.new_global(None, GlobalKind::Exported, i8, name);
+ let value = tcx.sess.opts.unstable_opts.oom.should_panic();
+ let value = context.new_rvalue_from_int(i8, value as i32);
+ global.global_set_initializer_rvalue(value);
+}
diff --git a/compiler/rustc_codegen_gcc/src/archive.rs b/compiler/rustc_codegen_gcc/src/archive.rs
new file mode 100644
index 000000000..f863abdcc
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/archive.rs
@@ -0,0 +1,189 @@
+use std::fs::File;
+use std::path::{Path, PathBuf};
+
+use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder};
+use rustc_session::Session;
+
+use rustc_session::cstore::DllImport;
+
+struct ArchiveConfig<'a> {
+ sess: &'a Session,
+ use_native_ar: bool,
+ use_gnu_style_archive: bool,
+}
+
+#[derive(Debug)]
+enum ArchiveEntry {
+ FromArchive {
+ archive_index: usize,
+ entry_index: usize,
+ },
+ File(PathBuf),
+}
+
+pub struct ArArchiveBuilderBuilder;
+
+impl ArchiveBuilderBuilder for ArArchiveBuilderBuilder {
+ fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a> {
+ let config = ArchiveConfig {
+ sess,
+ use_native_ar: false,
+ // FIXME: test for Linux and System V derivatives instead
+ use_gnu_style_archive: sess.target.options.archive_format == "gnu",
+ };
+
+ Box::new(ArArchiveBuilder {
+ config,
+ src_archives: vec![],
+ entries: vec![],
+ })
+ }
+
+ fn create_dll_import_lib(
+ &self,
+ _sess: &Session,
+ _lib_name: &str,
+ _dll_imports: &[DllImport],
+ _tmpdir: &Path,
+ ) -> PathBuf {
+ unimplemented!();
+ }
+}
+
+pub struct ArArchiveBuilder<'a> {
+ config: ArchiveConfig<'a>,
+ src_archives: Vec<(PathBuf, ar::Archive<File>)>,
+ // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at
+ // the end of an archive for linkers to not get confused.
+ entries: Vec<(String, ArchiveEntry)>,
+}
+
+impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
+ fn add_file(&mut self, file: &Path) {
+ self.entries.push((
+ file.file_name().unwrap().to_str().unwrap().to_string(),
+ ArchiveEntry::File(file.to_owned()),
+ ));
+ }
+
+ fn add_archive(
+ &mut self,
+ archive_path: &Path,
+ mut skip: Box<dyn FnMut(&str) -> bool + 'static>,
+ ) -> std::io::Result<()> {
+ let mut archive = ar::Archive::new(std::fs::File::open(&archive_path)?);
+ let archive_index = self.src_archives.len();
+
+ let mut i = 0;
+ while let Some(entry) = archive.next_entry() {
+ let entry = entry?;
+ let file_name = String::from_utf8(entry.header().identifier().to_vec())
+ .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?;
+ if !skip(&file_name) {
+ self.entries
+ .push((file_name, ArchiveEntry::FromArchive { archive_index, entry_index: i }));
+ }
+ i += 1;
+ }
+
+ self.src_archives.push((archive_path.to_owned(), archive));
+ Ok(())
+ }
+
+ fn build(mut self: Box<Self>, output: &Path) -> bool {
+ use std::process::Command;
+
+ fn add_file_using_ar(archive: &Path, file: &Path) {
+ Command::new("ar")
+ .arg("r") // add or replace file
+ .arg("-c") // silence created file message
+ .arg(archive)
+ .arg(&file)
+ .status()
+ .unwrap();
+ }
+
+ enum BuilderKind<'a> {
+ Bsd(ar::Builder<File>),
+ Gnu(ar::GnuBuilder<File>),
+ NativeAr(&'a Path),
+ }
+
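+ // NOTE: the GNU archive format requires every member name up front to build its
+ // name table, which is why `GnuBuilder` is given the full list of entry names here.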
+ let mut builder = if self.config.use_native_ar {
+ BuilderKind::NativeAr(output)
+ } else if self.config.use_gnu_style_archive {
+ BuilderKind::Gnu(ar::GnuBuilder::new(
+ File::create(output).unwrap(),
+ self.entries
+ .iter()
+ .map(|(name, _)| name.as_bytes().to_vec())
+ .collect(),
+ ))
+ } else {
+ BuilderKind::Bsd(ar::Builder::new(File::create(output).unwrap()))
+ };
+
+ let any_members = !self.entries.is_empty();
+
+ // Add all files
+ for (entry_name, entry) in self.entries.into_iter() {
+ match entry {
+ ArchiveEntry::FromArchive {
+ archive_index,
+ entry_index,
+ } => {
+ let (ref src_archive_path, ref mut src_archive) =
+ self.src_archives[archive_index];
+ let entry = src_archive.jump_to_entry(entry_index).unwrap();
+ let header = entry.header().clone();
+
+ match builder {
+ BuilderKind::Bsd(ref mut builder) => {
+ builder.append(&header, entry).unwrap()
+ }
+ BuilderKind::Gnu(ref mut builder) => {
+ builder.append(&header, entry).unwrap()
+ }
+ BuilderKind::NativeAr(archive_file) => {
+ Command::new("ar")
+ .arg("x")
+ .arg(src_archive_path)
+ .arg(&entry_name)
+ .status()
+ .unwrap();
+ add_file_using_ar(archive_file, Path::new(&entry_name));
+ std::fs::remove_file(entry_name).unwrap();
+ }
+ }
+ }
+ ArchiveEntry::File(file) =>
+ match builder {
+ BuilderKind::Bsd(ref mut builder) => {
+ builder
+ .append_file(entry_name.as_bytes(), &mut File::open(file).expect("file for bsd builder"))
+ .unwrap()
+ },
+ BuilderKind::Gnu(ref mut builder) => {
+ builder
+ .append_file(entry_name.as_bytes(), &mut File::open(&file).expect(&format!("file {:?} for gnu builder", file)))
+ .unwrap()
+ },
+ BuilderKind::NativeAr(archive_file) => add_file_using_ar(archive_file, &file),
+ },
+ }
+ }
+
+ // Finalize archive
+ std::mem::drop(builder);
+
+ // Run ranlib to be able to link the archive
+ let status =
+ std::process::Command::new("ranlib").arg(output).status().expect("Couldn't run ranlib");
+
+ if !status.success() {
+ self.config.sess.fatal(&format!("Ranlib exited with code {:?}", status.code()));
+ }
+
+ any_members
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs
new file mode 100644
index 000000000..52fd66af0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/asm.rs
@@ -0,0 +1,817 @@
+use gccjit::{LValue, RValue, ToRValue, Type};
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{AsmBuilderMethods, AsmMethods, BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
+
+use rustc_middle::{bug, ty::Instance};
+use rustc_span::Span;
+use rustc_target::asm::*;
+
+use std::borrow::Cow;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+use crate::callee::get_fn;
+
+
+// Rust asm! and GCC Extended Asm semantics differ substantially.
+//
+// 1. Rust asm operands go along as one list of operands. Operands themselves indicate
+// if they're "in" or "out". "In" and "out" operands can interleave. One operand can be
+// both "in" and "out" (`inout(reg)`).
+//
+// GCC asm has two different lists for "in" and "out" operands. In terms of gccjit,
+// this means that all "out" operands must go before "in" operands. "In" and "out" operands
+// cannot interleave.
+//
+// 2. Operand lists in both Rust and GCC are indexed. Index starts from 0. Indexes are important
+// because the asm template refers to operands by index.
+//
+// Mapping from Rust to GCC index would be 1-1 if it weren't for...
+//
+// 3. Clobbers. GCC has a separate list of clobbers, and clobbers don't have indexes.
+// In contrast, Rust expresses clobbers through "out" operands that aren't tied to
+// a variable (`_`), and such "clobbers" do have an index.
+//
+// 4. Furthermore, GCC Extended Asm does not support explicit register constraints
+// (like `out("eax")`) directly, offering so-called "local register variables"
+// as a workaround. These variables need to be declared and initialized *before*
+// the Extended Asm block but *after* normal local variables
+// (see comment in `codegen_inline_asm` for explanation).
+//
+// With that in mind, let's see how we translate Rust syntax to GCC
+// (from now on, `CC` stands for "constraint code"):
+//
+// * `out(reg_class) var` -> translated to output operand: `"=CC"(var)`
+// * `inout(reg_class) var` -> translated to output operand: `"+CC"(var)`
+// * `in(reg_class) var` -> translated to input operand: `"CC"(var)`
+//
+// * `out(reg_class) _` -> translated to one `=r(tmp)`, where "tmp" is a temporary unused variable
+//
+// * `out("explicit register") _` -> not translated to any operands, register is simply added to clobbers list
+//
+// * `inout(reg_class) in_var => out_var` -> translated to two operands:
+// output: `"=CC"(out_var)`
+// input: `"num"(in_var)` where num is the GCC index
+// of the corresponding output operand
+//
+// * `inout(reg_class) in_var => _` -> same as `inout(reg_class) in_var => tmp`,
+// where "tmp" is a temporary unused variable
+//
+// * `out/in/inout("explicit register") var` -> translated to one or two operands as described above
+// with `"r"(var)` constraint,
+// and one register variable assigned to the desired register.
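+//
+// For example (an illustrative sketch, not the exact codegen), a Rust block like
+//     asm!("add {0}, {1}", out(reg) x, in(reg) y);
+// is lowered to GCC Extended Asm along the lines of
+//     asm("add %0, %1" : "=r"(tmp) : "r"(y));  // followed by `x = tmp;`
+// with the actual constraint codes coming from `reg_to_gcc` below.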
+
+const ATT_SYNTAX_INS: &str = ".att_syntax noprefix\n\t";
+const INTEL_SYNTAX_INS: &str = "\n\t.intel_syntax noprefix";
+
+
+struct AsmOutOperand<'a, 'tcx, 'gcc> {
+ rust_idx: usize,
+ constraint: &'a str,
+ late: bool,
+ readwrite: bool,
+
+ tmp_var: LValue<'gcc>,
+ out_place: Option<PlaceRef<'tcx, RValue<'gcc>>>
+}
+
+struct AsmInOperand<'a, 'tcx> {
+ rust_idx: usize,
+ constraint: Cow<'a, str>,
+ val: RValue<'tcx>
+}
+
+impl AsmOutOperand<'_, '_, '_> {
+ fn to_constraint(&self) -> String {
+ let mut res = String::with_capacity(self.constraint.len() + self.late as usize + 1);
+
+ let sign = if self.readwrite { '+' } else { '=' };
+ res.push(sign);
+ if !self.late {
+ res.push('&');
+ }
+
+ res.push_str(&self.constraint);
+ res
+ }
+}
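+
+// For example (illustrative), `AsmOutOperand { constraint: "r", late: false,
+// readwrite: true, .. }` yields the GCC constraint string "+&r": a read-write,
+// early-clobber general register operand.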
+
+enum ConstraintOrRegister {
+ Constraint(&'static str),
+ Register(&'static str)
+}
+
+
+impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], _instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) {
+ if options.contains(InlineAsmOptions::MAY_UNWIND) {
+ self.sess()
+ .struct_span_err(span[0], "GCC backend does not support unwinding from inline asm")
+ .emit();
+ return;
+ }
+
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+ let is_x86 = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);
+ let att_dialect = is_x86 && options.contains(InlineAsmOptions::ATT_SYNTAX);
+
+ // GCC index of an output operand equals its position in the array
+ let mut outputs = vec![];
+
+ // GCC index of an input operand equals its position in the array
+ // added to `outputs.len()`
+ let mut inputs = vec![];
+
+ // Clobbers collected from `out("explicit register") _` and `inout("expl_reg") var => _`
+ let mut clobbers = vec![];
+
+ // We're trying to preallocate space for the template
+ let mut constants_len = 0;
+
+ // There are rules we must adhere to if we want GCC to do the right thing:
+ //
+ // * Every local variable that the asm block uses as an output must be declared *before*
+ // the asm block.
+ // * There must be no instructions whatsoever between the register variables and the asm.
+ //
+ // Therefore, the backend must generate the instructions strictly in this order:
+ //
+ // 1. Output variables.
+ // 2. Register variables.
+ // 3. The asm block.
+ //
+ // We also must make sure that no input operands are emitted before output operands.
+ //
+ // This is why we work in passes, first emitting local vars, then local register vars.
+ // Also, we don't emit any asm operands immediately; we save them to
+ // one of the buffers to be emitted later.
+
+ // 1. Normal variables (and saving operands to buffers).
+ for (rust_idx, op) in rust_operands.iter().enumerate() {
+ match *op {
+ InlineAsmOperandRef::Out { reg, late, place } => {
+ use ConstraintOrRegister::*;
+
+ let (constraint, ty) = match (reg_to_gcc(reg), place) {
+ (Constraint(constraint), Some(place)) => (constraint, place.layout.gcc_type(self.cx, false)),
+ // When `reg` is a class and not an explicit register but the out place is not specified,
+ // we need to create an unused output variable to assign the output to. This var
+ // needs to be of a type that's "compatible" with the register class, but the specific type
+ // doesn't matter.
+ (Constraint(constraint), None) => (constraint, dummy_output_type(self.cx, reg.reg_class())),
+ (Register(_), Some(_)) => {
+ // left for the next pass
+ continue
+ },
+ (Register(reg_name), None) => {
+ // `clobber_abi` can add lots of clobbers that are not supported by the target,
+ // such as AVX-512 registers, so we just ignore unsupported registers
+ let is_target_supported = reg.reg_class().supported_types(asm_arch).iter()
+ .any(|&(_, feature)| {
+ if let Some(feature) = feature {
+ self.tcx.sess.target_features.contains(&feature)
+ } else {
+ true // Register class is unconditionally supported
+ }
+ });
+
+ if is_target_supported && !clobbers.contains(&reg_name) {
+ clobbers.push(reg_name);
+ }
+ continue
+ }
+ };
+
+ let tmp_var = self.current_func().new_local(None, ty, "output_register");
+ outputs.push(AsmOutOperand {
+ constraint,
+ rust_idx,
+ late,
+ readwrite: false,
+ tmp_var,
+ out_place: place
+ });
+ }
+
+ InlineAsmOperandRef::In { reg, value } => {
+ if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
+ inputs.push(AsmInOperand {
+ constraint: Cow::Borrowed(constraint),
+ rust_idx,
+ val: value.immediate()
+ });
+ }
+ else {
+ // left for the next pass
+ continue
+ }
+ }
+
+ InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+ let constraint = if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
+ constraint
+ }
+ else {
+ // left for the next pass
+ continue
+ };
+
+ // Rustc frontend guarantees that input and output types are "compatible",
+ // so we can just use input var's type for the output variable.
+ //
+ // This decision is also backed by the fact that LLVM needs in and out
+ // values to be of *exactly the same type*, not just "compatible".
+ // I'm not sure whether GCC is as picky, but better safe than sorry.
+ let ty = in_value.layout.gcc_type(self.cx, false);
+ let tmp_var = self.current_func().new_local(None, ty, "output_register");
+
+ // If the out_place is None (i.e. `inout(reg) _` syntax was used), we translate
+ // it to one "readwrite (+) output variable", otherwise we translate it to two
+ // "out and tied in" vars as described above.
+ let readwrite = out_place.is_none();
+ outputs.push(AsmOutOperand {
+ constraint,
+ rust_idx,
+ late,
+ readwrite,
+ tmp_var,
+ out_place,
+ });
+
+ if !readwrite {
+ let out_gcc_idx = outputs.len() - 1;
+ let constraint = Cow::Owned(out_gcc_idx.to_string());
+
+ inputs.push(AsmInOperand {
+ constraint,
+ rust_idx,
+ val: in_value.immediate()
+ });
+ }
+ }
+
+ InlineAsmOperandRef::Const { ref string } => {
+ constants_len += string.len() + att_dialect as usize;
+ }
+
+ InlineAsmOperandRef::SymFn { instance } => {
+ // TODO(@Amanieu): Additional mangling is needed on
+ // some targets to add a leading underscore (Mach-O)
+ // or byte count suffixes (x86 Windows).
+ constants_len += self.tcx.symbol_name(instance).name.len();
+ }
+ InlineAsmOperandRef::SymStatic { def_id } => {
+ // TODO(@Amanieu): Additional mangling is needed on
+ // some targets to add a leading underscore (Mach-O).
+ constants_len += self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len();
+ }
+ }
+ }
+
+ // 2. Register variables.
+ for (rust_idx, op) in rust_operands.iter().enumerate() {
+ match *op {
+ // `out("explicit register") var`
+ InlineAsmOperandRef::Out { reg, late, place } => {
+ if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
+ let out_place = if let Some(place) = place {
+ place
+ }
+ else {
+ // processed in the previous pass
+ continue
+ };
+
+ let ty = out_place.layout.gcc_type(self.cx, false);
+ let tmp_var = self.current_func().new_local(None, ty, "output_register");
+ tmp_var.set_register_name(reg_name);
+
+ outputs.push(AsmOutOperand {
+ constraint: "r".into(),
+ rust_idx,
+ late,
+ readwrite: false,
+ tmp_var,
+ out_place: Some(out_place)
+ });
+ }
+
+ // processed in the previous pass
+ }
+
+ // `in("explicit register") var`
+ InlineAsmOperandRef::In { reg, value } => {
+ if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
+ let ty = value.layout.gcc_type(self.cx, false);
+ let reg_var = self.current_func().new_local(None, ty, "input_register");
+ reg_var.set_register_name(reg_name);
+ self.llbb().add_assignment(None, reg_var, value.immediate());
+
+ inputs.push(AsmInOperand {
+ constraint: "r".into(),
+ rust_idx,
+ val: reg_var.to_rvalue()
+ });
+ }
+
+ // processed in the previous pass
+ }
+
+ // `inout("explicit register") in_var => out_var`
+ InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+ if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
+ // See explanation in the first pass.
+ let ty = in_value.layout.gcc_type(self.cx, false);
+ let tmp_var = self.current_func().new_local(None, ty, "output_register");
+ tmp_var.set_register_name(reg_name);
+
+ outputs.push(AsmOutOperand {
+ constraint: "r".into(),
+ rust_idx,
+ late,
+ readwrite: false,
+ tmp_var,
+ out_place,
+ });
+
+ let constraint = Cow::Owned((outputs.len() - 1).to_string());
+ inputs.push(AsmInOperand {
+ constraint,
+ rust_idx,
+ val: in_value.immediate()
+ });
+ }
+
+ // processed in the previous pass
+ }
+
+ InlineAsmOperandRef::SymFn { instance } => {
+ inputs.push(AsmInOperand {
+ constraint: "X".into(),
+ rust_idx,
+ val: self.cx.rvalue_as_function(get_fn(self.cx, instance))
+ .get_address(None),
+ });
+ }
+
+ InlineAsmOperandRef::SymStatic { def_id } => {
+ inputs.push(AsmInOperand {
+ constraint: "X".into(),
+ rust_idx,
+ val: self.cx.get_static(def_id).get_address(None),
+ });
+ }
+
+ InlineAsmOperandRef::Const { .. } => {
+ // processed in the previous pass
+ }
+ }
+ }
+
+ // 3. Build the template string
+
+ let mut template_str = String::with_capacity(estimate_template_length(template, constants_len, att_dialect));
+ if att_dialect {
+ template_str.push_str(ATT_SYNTAX_INS);
+ }
+
+ for piece in template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref string) => {
+ // TODO(@Commeownist): switch to `Iterator::intersperse` once it's stable
+ let mut iter = string.split('%');
+ if let Some(s) = iter.next() {
+ template_str.push_str(s);
+ }
+
+ for s in iter {
+ template_str.push_str("%%");
+ template_str.push_str(s);
+ }
+ }
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
+ let mut push_to_template = |modifier, gcc_idx| {
+ use std::fmt::Write;
+
+ template_str.push('%');
+ if let Some(modifier) = modifier {
+ template_str.push(modifier);
+ }
+ write!(template_str, "{}", gcc_idx).expect("pushing to string failed");
+ };
+
+ match rust_operands[operand_idx] {
+ InlineAsmOperandRef::Out { reg, .. } => {
+ let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+ let gcc_index = outputs.iter()
+ .position(|op| operand_idx == op.rust_idx)
+ .expect("wrong rust index");
+ push_to_template(modifier, gcc_index);
+ }
+
+ InlineAsmOperandRef::In { reg, .. } => {
+ let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+ let in_gcc_index = inputs.iter()
+ .position(|op| operand_idx == op.rust_idx)
+ .expect("wrong rust index");
+ let gcc_index = in_gcc_index + outputs.len();
+ push_to_template(modifier, gcc_index);
+ }
+
+ InlineAsmOperandRef::InOut { reg, .. } => {
+ let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+
+ // The input register is tied to the output, so we can just use the index of the output register
+ let gcc_index = outputs.iter()
+ .position(|op| operand_idx == op.rust_idx)
+ .expect("wrong rust index");
+ push_to_template(modifier, gcc_index);
+ }
+
+ InlineAsmOperandRef::SymFn { instance } => {
+ // TODO(@Amanieu): Additional mangling is needed on
+ // some targets to add a leading underscore (Mach-O)
+ // or byte count suffixes (x86 Windows).
+ let name = self.tcx.symbol_name(instance).name;
+ template_str.push_str(name);
+ }
+
+ InlineAsmOperandRef::SymStatic { def_id } => {
+ // TODO(@Amanieu): Additional mangling is needed on
+ // some targets to add a leading underscore (Mach-O).
+ let instance = Instance::mono(self.tcx, def_id);
+ let name = self.tcx.symbol_name(instance).name;
+ template_str.push_str(name);
+ }
+
+ InlineAsmOperandRef::Const { ref string } => {
+ // Const operands get injected directly into the template
+ if att_dialect {
+ template_str.push('$');
+ }
+ template_str.push_str(string);
+ }
+ }
+ }
+ }
+ }
+
+ if att_dialect {
+ template_str.push_str(INTEL_SYNTAX_INS);
+ }
+
+ // 4. Generate Extended Asm block
+
+ let block = self.llbb();
+ let extended_asm = block.add_extended_asm(None, &template_str);
+
+ for op in &outputs {
+ extended_asm.add_output_operand(None, &op.to_constraint(), op.tmp_var);
+ }
+
+ for op in &inputs {
+ extended_asm.add_input_operand(None, &op.constraint, op.val);
+ }
+
+ for clobber in clobbers.iter() {
+ extended_asm.add_clobber(clobber);
+ }
+
+ if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
+ // TODO(@Commeownist): I'm not 100% sure this one clobber is sufficient
+ // on all architectures. For instance, what about FP stack?
+ extended_asm.add_clobber("cc");
+ }
+ if !options.contains(InlineAsmOptions::NOMEM) {
+ extended_asm.add_clobber("memory");
+ }
+ if !options.contains(InlineAsmOptions::PURE) {
+ extended_asm.set_volatile_flag(true);
+ }
+ if !options.contains(InlineAsmOptions::NOSTACK) {
+ // TODO(@Commeownist): figure out how to align stack
+ }
+ if options.contains(InlineAsmOptions::NORETURN) {
+ let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable");
+ let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) };
+ self.call(self.type_void(), builtin_unreachable, &[], None);
+ }
+
+ // Write results to outputs.
+ //
+ // We need to do this because:
+ // 1. Turning `PlaceRef` into `RValue` is error-prone and has nasty edge cases
+ // (especially with current `rustc_backend_ssa` API).
+ // 2. Not every output operand has an `out_place`, and it's required by `add_output_operand`.
+ //
+ // Instead, we generate a temporary output variable for each output operand, and then this loop
+ // generates `out_place = tmp_var;` assignments if `out_place` exists.
+ for op in &outputs {
+ if let Some(place) = op.out_place {
+ OperandValue::Immediate(op.tmp_var.to_rvalue()).store(self, place);
+ }
+ }
+
+ }
+}
+
+fn estimate_template_length(template: &[InlineAsmTemplatePiece], constants_len: usize, att_dialect: bool) -> usize {
+ let len: usize = template.iter().map(|piece| {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref string) => {
+ string.len()
+ }
+ InlineAsmTemplatePiece::Placeholder { .. } => {
+ // '%' + 1 char modifier + 1 char index
+ 3
+ }
+ }
+ })
+ .sum();
+
+ // Increase it by 5% to account for possible '%' signs that'll be duplicated.
+ // The factor is an arbitrary guess, but it should be a fair upper bound.
+ let mut res = (len as f32 * 1.05) as usize + constants_len;
+
+ if att_dialect {
+ res += INTEL_SYNTAX_INS.len() + ATT_SYNTAX_INS.len();
+ }
+ res
+}
+
+/// Converts a register class to a GCC constraint code.
+fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
+ let constraint = match reg {
+ // For vector registers LLVM wants the register name to match the type size.
+ InlineAsmRegOrRegClass::Reg(reg) => {
+ match reg {
+ InlineAsmReg::X86(_) => {
+ // TODO(antoyo): add support for vector register.
+ //
+ // // For explicit registers, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
+ return ConstraintOrRegister::Register(match reg.name() {
+ // Some register names do not map 1-1 from Rust to GCC
+ "st(0)" => "st",
+
+ name => name,
+ });
+ }
+
+ _ => unimplemented!(),
+ }
+ },
+ InlineAsmRegOrRegClass::RegClass(reg) => match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => unimplemented!(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => unimplemented!(),
+ InlineAsmRegClass::Avr(_) => unimplemented!(),
+ InlineAsmRegClass::Bpf(_) => unimplemented!(),
+ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::Msp430(_) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+ | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+ unreachable!("clobber-only")
+ },
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "Yk",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => unimplemented!(),
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
+ InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::tmm_reg,
+ ) => unreachable!("clobber-only"),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("GCC backend does not support SPIR-V")
+ }
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+ };
+
+ ConstraintOrRegister::Constraint(constraint)
+}
+
+/// Type to use for outputs that are discarded. It doesn't really matter what
+/// the type is, as long as it is valid for the constraint code.
+fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegClass) -> Type<'gcc> {
+ match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+ unimplemented!()
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)=> cx.type_i32(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+ unimplemented!()
+ }
+ InlineAsmRegClass::Avr(_) => unimplemented!(),
+ InlineAsmRegClass::Bpf(_) => unimplemented!(),
+ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
+ InlineAsmRegClass::Msp430(_) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+ | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+ unreachable!("clobber-only")
+ },
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => cx.type_f32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => cx.type_i16(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::tmm_reg) => unimplemented!(),
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("GCC backend does not support SPIR-V")
+ },
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+}
+
+impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn codegen_global_asm(&self, template: &[InlineAsmTemplatePiece], operands: &[GlobalAsmOperandRef<'tcx>], options: InlineAsmOptions, _line_spans: &[Span]) {
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+
+ // Default to Intel syntax on x86
+ let att_dialect = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
+ && options.contains(InlineAsmOptions::ATT_SYNTAX);
+
+ // Build the template string
+ let mut template_str = String::new();
+ for piece in template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref string) => {
+ for line in string.lines() {
+ // NOTE: gcc does not allow inline comments, so remove them.
+ let line =
+ if let Some(index) = line.rfind("//") {
+ &line[..index]
+ }
+ else {
+ line
+ };
+ template_str.push_str(line);
+ template_str.push('\n');
+ }
+ },
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
+ match operands[operand_idx] {
+ GlobalAsmOperandRef::Const { ref string } => {
+ // Const operands get injected directly into the
+ // template. Note that we don't need to escape %
+ // here unlike normal inline assembly.
+ template_str.push_str(string);
+ }
+
+ GlobalAsmOperandRef::SymFn { instance } => {
+ // TODO(@Amanieu): Additional mangling is needed on
+ // some targets to add a leading underscore (Mach-O)
+ // or byte count suffixes (x86 Windows).
+ let name = self.tcx.symbol_name(instance).name;
+ template_str.push_str(name);
+ }
+
+ GlobalAsmOperandRef::SymStatic { def_id } => {
+ // TODO(@Amanieu): Additional mangling is needed on
+ // some targets to add a leading underscore (Mach-O).
+ let instance = Instance::mono(self.tcx, def_id);
+ let name = self.tcx.symbol_name(instance).name;
+ template_str.push_str(name);
+ }
+ }
+ }
+ }
+ }
+
+ let template_str =
+ if att_dialect {
+ format!(".att_syntax\n\t{}\n\t.intel_syntax noprefix", template_str)
+ }
+ else {
+ template_str
+ };
+ // NOTE: seems like gcc will put the asm in the wrong section, so set it to .text manually.
+ let template_str = format!(".pushsection .text\n{}\n.popsection", template_str);
+ self.context.add_top_level_asm(None, &template_str);
+ }
+}
+
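+/// Converts a Rust inline asm modifier (e.g. the `e` in `{0:e}`) to the equivalent
+/// GCC operand modifier. For instance (on x86-64), a `reg` operand with no modifier
+/// becomes `%q0` in the template, selecting the full 64-bit register name.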
+fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option<char>) -> Option<char> {
+ match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => modifier,
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+ unimplemented!()
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+ unimplemented!()
+ }
+ InlineAsmRegClass::Avr(_) => unimplemented!(),
+ InlineAsmRegClass::Bpf(_) => unimplemented!(),
+ InlineAsmRegClass::Hexagon(_) => unimplemented!(),
+ InlineAsmRegClass::Mips(_) => unimplemented!(),
+ InlineAsmRegClass::Msp430(_) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(_) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(_) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
+ | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
+ None => if arch == InlineAsmArch::X86_64 { Some('q') } else { Some('k') },
+ Some('l') => Some('b'),
+ Some('h') => Some('h'),
+ Some('x') => Some('w'),
+ Some('e') => Some('k'),
+ Some('r') => Some('q'),
+ _ => unreachable!(),
+ },
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
+ InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
+ | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
+ (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
+ (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
+ (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
+ (_, Some('x')) => Some('x'),
+ (_, Some('y')) => Some('t'),
+ (_, Some('z')) => Some('g'),
+ _ => unreachable!(),
+ },
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => None,
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::tmm_reg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("GCC backend does not support SPIR-V")
+ },
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/back/mod.rs b/compiler/rustc_codegen_gcc/src/back/mod.rs
new file mode 100644
index 000000000..d692799d7
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/back/mod.rs
@@ -0,0 +1 @@
+pub mod write;
diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs
new file mode 100644
index 000000000..efcf18d31
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/back/write.rs
@@ -0,0 +1,83 @@
+use std::{env, fs};
+
+use gccjit::OutputKind;
+use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
+use rustc_codegen_ssa::back::write::{CodegenContext, EmitObj, ModuleConfig};
+use rustc_errors::Handler;
+use rustc_session::config::OutputType;
+use rustc_span::fatal_error::FatalError;
+use rustc_target::spec::SplitDebuginfo;
+
+use crate::{GccCodegenBackend, GccContext};
+
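+// Environment variables read below as debugging aids, e.g.:
+// CG_GCCJIT_DUMP_MODULE_NAMES=1 prints each module name as it is compiled,
+// CG_GCCJIT_DUMP_MODULE=<name> dumps a libgccjit reproducer for that module, and
+// CG_GCCJIT_DUMP_TO_FILE=1 dumps the generated code to /tmp/gccjit_dumps/<name>.c.
+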
+pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, module: ModuleCodegen<GccContext>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
+ {
+ let context = &module.module_llvm.context;
+
+ let module_name = module.name.clone();
+ let module_name = Some(&module_name[..]);
+
+ let _bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
+ let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
+
+ if config.bitcode_needed() {
+ // TODO(antoyo)
+ }
+
+ if config.emit_ir {
+ unimplemented!();
+ }
+
+ if config.emit_asm {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module.name);
+ let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
+ context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
+ }
+
+ match config.emit_obj {
+ EmitObj::ObjectCode(_) => {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &*module.name);
+ if env::var("CG_GCCJIT_DUMP_MODULE_NAMES").as_deref() == Ok("1") {
+ println!("Module {}", module.name);
+ }
+ if env::var("CG_GCCJIT_DUMP_ALL_MODULES").as_deref() == Ok("1") || env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name) {
+ println!("Dumping reproducer {}", module.name);
+ let _ = fs::create_dir("/tmp/reproducers");
+ // FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
+ // transmuting an rvalue to an lvalue.
+ // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue
+ context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
+ println!("Dumped reproducer {}", module.name);
+ }
+ if env::var("CG_GCCJIT_DUMP_TO_FILE").as_deref() == Ok("1") {
+ let _ = fs::create_dir("/tmp/gccjit_dumps");
+ let path = &format!("/tmp/gccjit_dumps/{}.c", module.name);
+ context.dump_to_file(path, true);
+ }
+ context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
+ }
+
+ EmitObj::Bitcode => {
+ // TODO(antoyo)
+ }
+
+ EmitObj::None => {}
+ }
+ }
+
+ Ok(module.into_compiled_module(
+ config.emit_obj != EmitObj::None,
+ cgcx.target_can_use_split_dwarf && cgcx.split_debuginfo == SplitDebuginfo::Unpacked,
+ config.emit_bc,
+ &cgcx.output_filenames,
+ ))
+}
+
+pub(crate) fn link(_cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, mut _modules: Vec<ModuleCodegen<GccContext>>) -> Result<ModuleCodegen<GccContext>, FatalError> {
+ unimplemented!();
+}
diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs
new file mode 100644
index 000000000..8f9f6f98f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/base.rs
@@ -0,0 +1,154 @@
+use std::env;
+use std::time::Instant;
+
+use gccjit::{
+ Context,
+ FunctionType,
+ GlobalKind,
+};
+use rustc_middle::dep_graph;
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::mir::mono::Linkage;
+use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
+use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
+use rustc_codegen_ssa::mono_item::MonoItemExt;
+use rustc_codegen_ssa::traits::DebugInfoMethods;
+use rustc_session::config::DebugInfo;
+use rustc_span::Symbol;
+
+use crate::GccContext;
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind {
+ match linkage {
+ Linkage::External => GlobalKind::Imported,
+ Linkage::AvailableExternally => GlobalKind::Imported,
+ Linkage::LinkOnceAny => unimplemented!(),
+ Linkage::LinkOnceODR => unimplemented!(),
+ Linkage::WeakAny => unimplemented!(),
+ Linkage::WeakODR => unimplemented!(),
+ Linkage::Appending => unimplemented!(),
+ Linkage::Internal => GlobalKind::Internal,
+ Linkage::Private => GlobalKind::Internal,
+ Linkage::ExternalWeak => GlobalKind::Imported, // TODO(antoyo): should be weak linkage.
+ Linkage::Common => unimplemented!(),
+ }
+}
+
+pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
+ match linkage {
+ Linkage::External => FunctionType::Exported,
+ Linkage::AvailableExternally => FunctionType::Extern,
+ Linkage::LinkOnceAny => unimplemented!(),
+ Linkage::LinkOnceODR => unimplemented!(),
+ Linkage::WeakAny => FunctionType::Exported, // FIXME(antoyo): should be similar to linkonce.
+ Linkage::WeakODR => unimplemented!(),
+ Linkage::Appending => unimplemented!(),
+ Linkage::Internal => FunctionType::Internal,
+ Linkage::Private => FunctionType::Internal,
+ Linkage::ExternalWeak => unimplemented!(),
+ Linkage::Common => unimplemented!(),
+ }
+}
+
+pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol, supports_128bit_integers: bool) -> (ModuleCodegen<GccContext>, u64) {
+ let prof_timer = tcx.prof.generic_activity("codegen_module");
+ let start_time = Instant::now();
+
+ let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
+ let (module, _) = tcx.dep_graph.with_task(
+ dep_node,
+ tcx,
+ (cgu_name, supports_128bit_integers),
+ module_codegen,
+ Some(dep_graph::hash_result),
+ );
+ let time_to_codegen = start_time.elapsed();
+ drop(prof_timer);
+
+ // We assume that the cost to run GCC on a CGU is proportional to
+ // the time it took to codegen it.
+ let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
+
+ fn module_codegen(tcx: TyCtxt<'_>, (cgu_name, supports_128bit_integers): (Symbol, bool)) -> ModuleCodegen<GccContext> {
+ let cgu = tcx.codegen_unit(cgu_name);
+ // Instantiate monomorphizations without filling out definitions yet...
+ //let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
+ let context = Context::default();
+ // TODO(antoyo): only set on x86 platforms.
+ context.add_command_line_option("-masm=intel");
+ // TODO(antoyo): only add the following cli argument if the feature is supported.
+ context.add_command_line_option("-msse2");
+ context.add_command_line_option("-mavx2");
+ context.add_command_line_option("-msha");
+ context.add_command_line_option("-mpclmul");
+ // FIXME(antoyo): the following causes an illegal instruction on vmovdqu64 in std_example on my CPU.
+ // Only add if the CPU supports it.
+ //context.add_command_line_option("-mavx512f");
+ for arg in &tcx.sess.opts.cg.llvm_args {
+ context.add_command_line_option(arg);
+ }
+ // NOTE: This is needed to compile the file src/intrinsic/archs.rs during a bootstrap of rustc.
+ context.add_command_line_option("-fno-var-tracking-assignments");
+ // NOTE: an optimization (https://github.com/rust-lang/rustc_codegen_gcc/issues/53).
+ context.add_command_line_option("-fno-semantic-interposition");
+ // NOTE: Rust relies on LLVM not doing TBAA (https://github.com/rust-lang/unsafe-code-guidelines/issues/292).
+ context.add_command_line_option("-fno-strict-aliasing");
+
+ if tcx.sess.opts.unstable_opts.function_sections.unwrap_or(tcx.sess.target.function_sections) {
+ context.add_command_line_option("-ffunction-sections");
+ context.add_command_line_option("-fdata-sections");
+ }
+
+ if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") {
+ context.set_dump_code_on_compile(true);
+ }
+ if env::var("CG_GCCJIT_DUMP_GIMPLE").as_deref() == Ok("1") {
+ context.set_dump_initial_gimple(true);
+ }
+ context.set_debug_info(true);
+ if env::var("CG_GCCJIT_DUMP_EVERYTHING").as_deref() == Ok("1") {
+ context.set_dump_everything(true);
+ }
+ if env::var("CG_GCCJIT_KEEP_INTERMEDIATES").as_deref() == Ok("1") {
+ context.set_keep_intermediates(true);
+ }
+
+ // TODO(bjorn3): Remove once unwinding is properly implemented
+ context.set_allow_unreachable_blocks(true);
+
+ {
+ let cx = CodegenCx::new(&context, cgu, tcx, supports_128bit_integers);
+
+ let mono_items = cgu.items_in_deterministic_order(tcx);
+ for &(mono_item, (linkage, visibility)) in &mono_items {
+ mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
+ }
+
+ // ... and now that we have everything pre-defined, fill out those definitions.
+ for &(mono_item, _) in &mono_items {
+ mono_item.define::<Builder<'_, '_, '_>>(&cx);
+ }
+
+ // If this codegen unit contains the main function, also create the
+ // wrapper here
+ maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx);
+
+ // Finalize debuginfo
+ if cx.sess().opts.debuginfo != DebugInfo::None {
+ cx.debuginfo_finalize();
+ }
+ }
+
+ ModuleCodegen {
+ name: cgu_name.to_string(),
+ module_llvm: GccContext {
+ context
+ },
+ kind: ModuleKind::Regular,
+ }
+ }
+
+ (module, cost)
+}
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
new file mode 100644
index 000000000..4d40dd099
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -0,0 +1,1561 @@
+use std::borrow::Cow;
+use std::cell::Cell;
+use std::convert::TryFrom;
+use std::ops::Deref;
+
+use gccjit::{
+ BinaryOp,
+ Block,
+ ComparisonOp,
+ Context,
+ Function,
+ LValue,
+ RValue,
+ ToRValue,
+ Type,
+ UnaryOp,
+};
+use rustc_codegen_ssa::MemFlags;
+use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{
+ BackendTypes,
+ BaseTypeMethods,
+ BuilderMethods,
+ ConstMethods,
+ DerivedTypeMethods,
+ LayoutTypeMethods,
+ HasCodegen,
+ OverflowOp,
+ StaticBuilderMethods,
+};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
+use rustc_span::Span;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::{
+ self,
+ call::FnAbi,
+ Align,
+ HasDataLayout,
+ Size,
+ TargetDataLayout,
+ WrappingRange,
+};
+use rustc_target::spec::{HasTargetSpec, Target};
+
+use crate::common::{SignType, TypeReflection, type_is_pointer};
+use crate::context::CodegenCx;
+use crate::intrinsic::llvm;
+use crate::type_of::LayoutGccExt;
+
+// TODO(antoyo)
+type Funclet = ();
+
+// TODO(antoyo): remove this variable.
+static mut RETURN_VALUE_COUNT: usize = 0;
+
+enum ExtremumOperation {
+ Max,
+ Min,
+}
+
+pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
+ pub cx: &'a CodegenCx<'gcc, 'tcx>,
+ pub block: Block<'gcc>,
+ stack_var_count: Cell<usize>,
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+ fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
+ Builder {
+ cx,
+ block,
+ stack_var_count: Cell::new(0),
+ }
+ }
+
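+ // Lowers atomic max/min to a compare-exchange loop; as a rough sketch (for max):
+ //     previous = atomic_load(dst);
+ //     while (previous < src && !atomic_compare_exchange(dst, &previous, src)) {}
+ //     return previous; // a failed compare_exchange refreshes `previous`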
+ fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+ let size = src.get_type().get_size();
+
+ let func = self.current_func();
+
+ let load_ordering =
+ match order {
+ // TODO(antoyo): does this make sense?
+ AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
+ _ => order,
+ };
+ let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering, Size::from_bytes(size));
+ let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
+ let return_value = func.new_local(None, previous_value.get_type(), "return_value");
+ self.llbb().add_assignment(None, previous_var, previous_value);
+ self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
+
+ let while_block = func.new_block("while");
+ let after_block = func.new_block("after_while");
+ self.llbb().end_with_jump(None, while_block);
+
+ // NOTE: since jumps were added and compare_exchange doesn't expect this, the current block in the
+ // state needs to be updated.
+ self.switch_to_block(while_block);
+
+ let comparison_operator =
+ match operation {
+ ExtremumOperation::Max => ComparisonOp::LessThan,
+ ExtremumOperation::Min => ComparisonOp::GreaterThan,
+ };
+
+ let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
+ let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
+ let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
+ let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
+
+ while_block.end_with_conditional(None, cond, while_block, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not expect, the current block in the
+ // state needs to be updated.
+ self.switch_to_block(after_block);
+
+ return_value.to_rvalue()
+ }
+
+ fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
+ let size = src.get_type().get_size();
+ let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size));
+ let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+ let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
+ let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
+
+ let void_ptr_type = self.context.new_type::<*mut ()>();
+ let volatile_void_ptr_type = void_ptr_type.make_volatile();
+ let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
+ let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);
+
+ // NOTE: not sure why, but we have the wrong type here.
+ let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
+ let src = self.context.new_cast(None, src, int_type);
+ self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
+ }
+
+ pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
+ self.llbb().add_assignment(None, lvalue, value);
+ }
+
+ fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
+ let mut all_args_match = true;
+ let mut param_types = vec![];
+ let param_count = func.get_param_count();
+ for (index, arg) in args.iter().enumerate().take(param_count) {
+ let param = func.get_param(index as i32);
+ let param = param.to_rvalue().get_type();
+ if param != arg.get_type() {
+ all_args_match = false;
+ }
+ param_types.push(param);
+ }
+
+ if all_args_match {
+ return Cow::Borrowed(args);
+ }
+
+ let casted_args: Vec<_> = param_types
+ .into_iter()
+ .zip(args.iter())
+ .map(|(expected_ty, &actual_val)| {
+ let actual_ty = actual_val.get_type();
+ if expected_ty != actual_ty {
+ self.bitcast(actual_val, expected_ty)
+ }
+ else {
+ actual_val
+ }
+ })
+ .collect();
+
+ Cow::Owned(casted_args)
+ }
+
+ fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
+ let mut all_args_match = true;
+ let mut param_types = vec![];
+ let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
+ for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) {
+ let param = gcc_func.get_param_type(index);
+ if param != arg.get_type() {
+ all_args_match = false;
+ }
+ param_types.push(param);
+ }
+
+ let mut on_stack_param_indices = FxHashSet::default();
+ if let Some(indices) = self.on_stack_params.borrow().get(&gcc_func) {
+ on_stack_param_indices = indices.clone();
+ }
+
+ if all_args_match {
+ return Cow::Borrowed(args);
+ }
+
+ let func_name = format!("{:?}", func_ptr);
+
+ let casted_args: Vec<_> = param_types
+ .into_iter()
+ .zip(args.iter())
+ .enumerate()
+ .map(|(index, (expected_ty, &actual_val))| {
+ if llvm::ignore_arg_cast(&func_name, index, args.len()) {
+ return actual_val;
+ }
+
+ let actual_ty = actual_val.get_type();
+ if expected_ty != actual_ty {
+ if !actual_ty.is_vector() && !expected_ty.is_vector() && actual_ty.is_integral() && expected_ty.is_integral() && actual_ty.get_size() != expected_ty.get_size() {
+ self.context.new_cast(None, actual_val, expected_ty)
+ }
+ else if on_stack_param_indices.contains(&index) {
+ actual_val.dereference(None).to_rvalue()
+ }
+ else {
+ assert!(!((actual_ty.is_vector() && !expected_ty.is_vector()) || (!actual_ty.is_vector() && expected_ty.is_vector())), "{:?} ({}) -> {:?} ({}), index: {:?}[{}]", actual_ty, actual_ty.is_vector(), expected_ty, expected_ty.is_vector(), func_ptr, index);
+ // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+ self.bitcast(actual_val, expected_ty)
+ }
+ }
+ else {
+ actual_val
+ }
+ })
+ .collect();
+
+ Cow::Owned(casted_args)
+ }
+
+ fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
+ let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here.
+ let stored_ty = self.cx.val_ty(val);
+ let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
+
+ if dest_ptr_ty == stored_ptr_ty {
+ ptr
+ }
+ else {
+ self.bitcast(ptr, stored_ptr_ty)
+ }
+ }
+
+ pub fn current_func(&self) -> Function<'gcc> {
+ self.block.get_function()
+ }
+
+ fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ // TODO(antoyo): remove when the API supports a different type for functions.
+ let func: Function<'gcc> = self.cx.rvalue_as_function(func);
+ let args = self.check_call("call", func, args);
+
+ // gccjit requires the result of a call to be used, even when rustc ignores it.
+ // That's why we assign the result to a local or call add_eval().
+ let return_type = func.get_return_type();
+ let void_type = self.context.new_type::<()>();
+ let current_func = self.block.get_function();
+ if return_type != void_type {
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
+ self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+ result.to_rvalue()
+ }
+ else {
+ self.block.add_eval(None, self.cx.context.new_call(None, func, &args));
+ // Return a dummy value when there is no return value.
+ self.context.new_rvalue_from_long(self.isize_type, 0)
+ }
+ }
+
+ fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ let args = self.check_ptr_call("call", func_ptr, args);
+
+ // gccjit requires the result of a call to be used, even when rustc ignores it.
+ // That's why we assign the result to a local or call add_eval().
+ let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
+ let return_type = gcc_func.get_return_type();
+ let void_type = self.context.new_type::<()>();
+ let current_func = self.block.get_function();
+
+ if return_type != void_type {
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let result = current_func.new_local(None, return_type, &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
+ let func_name = format!("{:?}", func_ptr);
+ let args = llvm::adjust_intrinsic_arguments(&self, gcc_func, args, &func_name);
+ self.block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+ result.to_rvalue()
+ }
+ else {
+ #[cfg(not(feature="master"))]
+ if gcc_func.get_param_count() == 0 {
+ // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
+ self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
+ }
+ else {
+ self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+ }
+ #[cfg(feature="master")]
+ self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+ // Return a dummy value when there is no return value.
+ let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
+ self.block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
+ result.to_rvalue()
+ }
+ }
+
+ pub fn overflow_call(&self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ // gccjit requires the result of a call to be used, even when rustc ignores it.
+ // That's why we assign the result to a local.
+ let return_type = self.context.new_type::<bool>();
+ let current_func = self.block.get_function();
+ // TODO(antoyo): return the new_call() directly, since the overflow function has no side effects?
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let result = current_func.new_local(None, return_type, &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
+ self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+ result.to_rvalue()
+ }
+}
+
+impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
+ type CodegenCx = CodegenCx<'gcc, 'tcx>;
+}
+
+impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.cx.tcx()
+ }
+}
+
+impl HasDataLayout for Builder<'_, '_, '_> {
+ fn data_layout(&self) -> &TargetDataLayout {
+ self.cx.data_layout()
+ }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ self.cx.handle_layout_err(err, span, ty)
+ }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ self.cx.handle_fn_abi_err(err, span, fn_abi_request)
+ }
+}
+
+impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> {
+ type Target = CodegenCx<'gcc, 'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ self.cx
+ }
+}
+
+impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
+ type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
+ type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
+ type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
+ type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
+ type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;
+
+ type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
+ type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
+ type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
+}
+
+impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
+ Builder::with_cx(cx, block)
+ }
+
+ fn llbb(&self) -> Block<'gcc> {
+ self.block
+ }
+
+ fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
+ let func = cx.rvalue_as_function(func);
+ func.new_block(name)
+ }
+
+ fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
+ let func = self.current_func();
+ func.new_block(name)
+ }
+
+ fn switch_to_block(&mut self, block: Self::BasicBlock) {
+ self.block = block;
+ }
+
+ fn ret_void(&mut self) {
+ self.llbb().end_with_void_return(None)
+ }
+
+ fn ret(&mut self, value: RValue<'gcc>) {
+ let value =
+ if self.structs_as_pointer.borrow().contains(&value) {
+ // NOTE: hack to workaround a limitation of the rustc API: see comment on
+ // CodegenCx.structs_as_pointer
+ value.dereference(None).to_rvalue()
+ }
+ else {
+ value
+ };
+ self.llbb().end_with_return(None, value);
+ }
+
+ fn br(&mut self, dest: Block<'gcc>) {
+ self.llbb().end_with_jump(None, dest)
+ }
+
+ fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
+ self.llbb().end_with_conditional(None, cond, then_block, else_block)
+ }
+
+ fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
+ let mut gcc_cases = vec![];
+ let typ = self.val_ty(value);
+ for (on_val, dest) in cases {
+ let on_val = self.const_uint_big(typ, on_val);
+ gcc_cases.push(self.context.new_case(on_val, on_val, dest));
+ }
+ self.block.end_with_switch(None, value, default_block, &gcc_cases);
+ }
+
+ fn invoke(&mut self, typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ // TODO(bjorn3): Properly implement unwinding.
+ let call_site = self.call(typ, func, args, None);
+ let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
+ self.llbb().end_with_conditional(None, condition, then, catch);
+ call_site
+ }
+
+ fn unreachable(&mut self) {
+ let func = self.context.get_builtin_function("__builtin_unreachable");
+ self.block.add_eval(None, self.context.new_call(None, func, &[]));
+ let return_type = self.block.get_function().get_return_type();
+ let void_type = self.context.new_type::<()>();
+ if return_type == void_type {
+ self.block.end_with_void_return(None)
+ }
+ else {
+ let return_value = self.current_func()
+ .new_local(None, return_type, "unreachableReturn");
+ self.block.end_with_return(None, return_value)
+ }
+ }
+
+ fn add(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_add(a, b)
+ }
+
+ fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a + b
+ }
+
+ fn sub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_sub(a, b)
+ }
+
+ fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a - b
+ }
+
+ fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_mul(a, b)
+ }
+
+ fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a * b
+ }
+
+ fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_udiv(a, b)
+ }
+
+ fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): poison if not exact.
+ let a_type = a.get_type().to_unsigned(self);
+ let a = self.gcc_int_cast(a, a_type);
+ let b_type = b.get_type().to_unsigned(self);
+ let b = self.gcc_int_cast(b, b_type);
+ a / b
+ }
+
+ fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_sdiv(a, b)
+ }
+
+ fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): poison if not exact.
+ // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
+ // should be the same.
+ let typ = a.get_type().to_signed(self);
+ let b = self.context.new_cast(None, b, typ);
+ a / b
+ }
+
+ fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a / b
+ }
+
+ fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_urem(a, b)
+ }
+
+ fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_srem(a, b)
+ }
+
+ fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ if a.get_type().is_compatible_with(self.cx.float_type) {
+ let fmodf = self.context.get_builtin_function("fmodf");
+ // FIXME(antoyo): this seems to produce the wrong result.
+ return self.context.new_call(None, fmodf, &[a, b]);
+ }
+ assert_eq!(a.get_type().unqualified(), self.cx.double_type);
+
+ let fmod = self.context.get_builtin_function("fmod");
+ return self.context.new_call(None, fmod, &[a, b]);
+ }
+
+ fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_shl(a, b)
+ }
+
+ fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_lshr(a, b)
+ }
+
+ fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): check whether >> behaves as an arithmetic shift here.
+ // It seems to if the value is signed.
+ self.gcc_lshr(a, b)
+ }
+
+ fn and(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_and(a, b)
+ }
+
+ fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.cx.gcc_or(a, b)
+ }
+
+ fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_xor(a, b)
+ }
+
+ fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_neg(a)
+ }
+
+ fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+ self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
+ }
+
+ fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_not(a)
+ }
+
+ fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a + b
+ }
+
+ fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_add(a, b)
+ }
+
+ fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a - b
+ }
+
+ fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): should generate poison value?
+ self.gcc_sub(a, b)
+ }
+
+ fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a * b
+ }
+
+ fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a * b
+ }
+
+ fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
+ self.gcc_checked_binop(oop, typ, lhs, rhs)
+ }
+
+ fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
+ // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
+ // Ideally, we shouldn't need to do this check.
+ let aligned_type =
+ if ty == self.cx.u128_type || ty == self.cx.i128_type {
+ ty
+ }
+ else {
+ ty.get_aligned(align.bytes())
+ };
+ // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
+ self.stack_var_count.set(self.stack_var_count.get() + 1);
+ self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
+ }
+
+ fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
+ let block = self.llbb();
+ let function = block.get_function();
+ // NOTE: instead of returning the dereference here, we have to assign it to a variable in
+ // the current basic block. Otherwise, it could be used in another basic block, causing a
+ // dereference after a drop, for instance.
+ // TODO(antoyo): handle align of the load instruction.
+ let ptr = self.context.new_cast(None, ptr, pointee_ty.make_pointer());
+ let deref = ptr.dereference(None).to_rvalue();
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let loaded_value = function.new_local(None, pointee_ty, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
+ block.add_assignment(None, loaded_value, deref);
+ loaded_value.to_rvalue()
+ }
+
+ fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): use ty.
+ let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
+ ptr.dereference(None).to_rvalue()
+ }
+
+ fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
+ // TODO(antoyo): use ty.
+ // TODO(antoyo): handle alignment.
+ let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
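+ // NOTE: the builtin has roughly the C prototype (shown for size 4):
+ //     unsigned int __atomic_load_4(const volatile void* ptr, int memorder);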
+ let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+
+ let volatile_const_void_ptr_type = self.context.new_type::<()>()
+ .make_const()
+ .make_volatile()
+ .make_pointer();
+ let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
+ self.context.new_call(None, atomic_load, &[ptr, ordering])
+ }
+
+ fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
+ assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+
+ if place.layout.is_zst() {
+ return OperandRef::new_zst(self, place.layout);
+ }
+
+ fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
+ let vr = scalar.valid_range(bx);
+ match scalar.primitive() {
+ abi::Int(..) => {
+ if !scalar.is_always_valid(bx) {
+ bx.range_metadata(load, vr);
+ }
+ }
+ abi::Pointer if vr.start < vr.end && !vr.contains(0) => {
+ bx.nonnull_metadata(load);
+ }
+ _ => {}
+ }
+ }
+
+ let val =
+ if let Some(llextra) = place.llextra {
+ OperandValue::Ref(place.llval, Some(llextra), place.align)
+ }
+ else if place.layout.is_gcc_immediate() {
+ let load = self.load(
+ place.layout.gcc_type(self, false),
+ place.llval,
+ place.align,
+ );
+ if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+ scalar_load_metadata(self, load, scalar);
+ }
+ OperandValue::Immediate(self.to_immediate(load, place.layout))
+ }
+ else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+ let b_offset = a.size(self).align_to(b.align(self).abi);
+ let pair_type = place.layout.gcc_type(self, false);
+
+ let mut load = |i, scalar: &abi::Scalar, align| {
+ let llptr = self.struct_gep(pair_type, place.llval, i as u64);
+ let llty = place.layout.scalar_pair_element_gcc_type(self, i, false);
+ let load = self.load(llty, llptr, align);
+ scalar_load_metadata(self, load, scalar);
+ if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
+ };
+
+ OperandValue::Pair(
+ load(0, a, place.align),
+ load(1, b, place.align.restrict_for_offset(b_offset)),
+ )
+ }
+ else {
+ OperandValue::Ref(place.llval, None, place.align)
+ };
+
+ OperandRef { val, layout: place.layout }
+ }
+
+ fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
+ let zero = self.const_usize(0);
+ let count = self.const_usize(count);
+ let start = dest.project_index(&mut self, zero).llval;
+ let end = dest.project_index(&mut self, count).llval;
+
+ let header_bb = self.append_sibling_block("repeat_loop_header");
+ let body_bb = self.append_sibling_block("repeat_loop_body");
+ let next_bb = self.append_sibling_block("repeat_loop_next");
+
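+ // NOTE: the three blocks below emit, roughly, the following fill loop:
+ //
+ //     current = start;
+ //     while current != end {    // repeat_loop_header
+ //         *current = cg_elem;   // repeat_loop_body
+ //         current += 1;
+ //     }
+ //     // fall through to repeat_loop_next
+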
+ let ptr_type = start.get_type();
+ let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
+ let current_val = current.to_rvalue();
+ self.assign(current, start);
+
+ self.br(header_bb);
+
+ self.switch_to_block(header_bb);
+ let keep_going = self.icmp(IntPredicate::IntNE, current_val, end);
+ self.cond_br(keep_going, body_bb, next_bb);
+
+ self.switch_to_block(body_bb);
+ let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+ cg_elem.val.store(&mut self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
+
+ let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
+ self.llbb().add_assignment(None, current, next);
+ self.br(header_bb);
+
+ self.switch_to_block(next_bb);
+ self
+ }
+
+ fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {
+ // TODO(antoyo)
+ }
+
+ fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
+ // TODO(antoyo)
+ }
+
+ fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
+ self.store_with_flags(val, ptr, align, MemFlags::empty())
+ }
+
+ fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align, _flags: MemFlags) -> RValue<'gcc> {
+ let ptr = self.check_store(val, ptr);
+ let destination = ptr.dereference(None);
+ // NOTE: libgccjit does not support specifying the alignment on the assignment, so we
+ // cast the pointer to a pointer type with the proper alignment.
+ let destination_type = destination.to_rvalue().get_type().unqualified();
+ let aligned_type = destination_type.get_aligned(align.bytes()).make_pointer();
+ let aligned_destination = self.cx.context.new_bitcast(None, ptr, aligned_type);
+ let aligned_destination = aligned_destination.dereference(None);
+ self.llbb().add_assignment(None, aligned_destination, val);
+ // TODO(antoyo): handle align and flags.
+ // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
+ self.cx.context.new_rvalue_zero(self.type_i32())
+ }
+
+ fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
+ // TODO(antoyo): handle alignment.
+ let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
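+ // NOTE: the builtin has roughly the C prototype (shown for size 4):
+ //     void __atomic_store_4(volatile void* ptr, unsigned int value, int memorder);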
+ let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+ let volatile_const_void_ptr_type = self.context.new_type::<()>()
+ .make_volatile()
+ .make_pointer();
+ let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
+
+ // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
+ // the following cast is required to avoid this error:
+ // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4))))
+ let int_type = atomic_store.get_param(1).to_rvalue().get_type();
+ let value = self.context.new_cast(None, value, int_type);
+ self.llbb()
+ .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
+ }
+
+ fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+ let mut result = ptr;
+ for index in indices {
+ result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue();
+ }
+ result
+ }
+
+ fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+ // FIXME(antoyo): it would be safer to do the same thing (a loop) as gep.
+ // TODO(antoyo): specify inbounds somehow.
+ match indices.len() {
+ 1 => {
+ self.context.new_array_access(None, ptr, indices[0]).get_address(None)
+ },
+ 2 => {
+ let array = ptr.dereference(None); // TODO(antoyo): assert that first index is 0?
+ self.context.new_array_access(None, array, indices[1]).get_address(None)
+ },
+ _ => unimplemented!(),
+ }
+ }
+
+ fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+ // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
+ assert_eq!(idx as usize as u64, idx);
+ let value = ptr.dereference(None).to_rvalue();
+
+ if value_type.dyncast_array().is_some() {
+ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+ let element = self.context.new_array_access(None, value, index);
+ element.get_address(None)
+ }
+ else if let Some(vector_type) = value_type.dyncast_vector() {
+ let array_type = vector_type.get_element_type().make_pointer();
+ let array = self.bitcast(ptr, array_type);
+ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+ let element = self.context.new_array_access(None, array, index);
+ element.get_address(None)
+ }
+ else if let Some(struct_type) = value_type.is_struct() {
+ ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ }
+ }
+
+ /* Casts */
+ fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): check that it indeed truncates the value.
+ self.gcc_int_cast(value, dest_ty)
+ }
+
+ fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): check that it indeed sign extends the value.
+ if dest_ty.dyncast_vector().is_some() {
+ // TODO(antoyo): nothing to do as it is only for LLVM?
+ return value;
+ }
+ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.gcc_float_to_uint_cast(value, dest_ty)
+ }
+
+ fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.gcc_float_to_int_cast(value, dest_ty)
+ }
+
+ fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.gcc_uint_to_float_cast(value, dest_ty)
+ }
+
+ fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.gcc_int_to_float_cast(value, dest_ty)
+ }
+
+ fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): make sure it truncates.
+ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ let usize_value = self.cx.const_bitcast(value, self.cx.type_isize());
+ self.intcast(usize_value, dest_ty, false)
+ }
+
+ fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ let usize_value = self.intcast(value, self.cx.type_isize(), false);
+ self.cx.const_bitcast(usize_value, dest_ty)
+ }
+
+ fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.cx.const_bitcast(value, dest_ty)
+ }
+
+ fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
+ // NOTE: is_signed is for value, not dest_typ.
+ self.gcc_int_cast(value, dest_typ)
+ }
+
+ fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ let val_type = value.get_type();
+ match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
+ (false, true) => {
+ // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
+ // a pointer, which is not supported by gccjit.
+ return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
+ },
+ (false, false) => {
+ // When they are not pointers, we want a transmute (or reinterpret_cast).
+ self.bitcast(value, dest_ty)
+ },
+ (true, true) => self.cx.context.new_cast(None, value, dest_ty),
+ (true, false) => unimplemented!(),
+ }
+ }
+
+ /* Comparisons */
+ fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_icmp(op, lhs, rhs)
+ }
+
+ fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+ self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+ }
+
+ /* Miscellaneous instructions */
+ fn memcpy(&mut self, dst: RValue<'gcc>, _dst_align: Align, src: RValue<'gcc>, _src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+ assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
+ let size = self.intcast(size, self.type_size_t(), false);
+ let _is_volatile = flags.contains(MemFlags::VOLATILE);
+ let dst = self.pointercast(dst, self.type_i8p());
+ let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
+ let memcpy = self.context.get_builtin_function("memcpy");
+ // TODO(antoyo): handle aligns and is_volatile.
+ self.block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
+ }
+
+ fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+ if flags.contains(MemFlags::NONTEMPORAL) {
+ // HACK(nox): This is inefficient but there is no nontemporal memmove.
+ let val = self.load(src.get_type().get_pointee().expect("get_pointee"), src, src_align);
+ let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
+ self.store_with_flags(val, ptr, dst_align, flags);
+ return;
+ }
+ let size = self.intcast(size, self.type_size_t(), false);
+ let _is_volatile = flags.contains(MemFlags::VOLATILE);
+ let dst = self.pointercast(dst, self.type_i8p());
+ let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
+
+ let memmove = self.context.get_builtin_function("memmove");
+ // TODO(antoyo): handle is_volatile.
+ self.block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
+ }
+
+ fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
+ let _is_volatile = flags.contains(MemFlags::VOLATILE);
+ let ptr = self.pointercast(ptr, self.type_i8p());
+ let memset = self.context.get_builtin_function("memset");
+ // TODO(antoyo): handle align and is_volatile.
+ let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
+ let size = self.intcast(size, self.type_size_t(), false);
+ self.block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
+ }
+
+ fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
+ let func = self.current_func();
+ let variable = func.new_local(None, then_val.get_type(), "selectVar");
+ let then_block = func.new_block("then");
+ let else_block = func.new_block("else");
+ let after_block = func.new_block("after");
+ self.llbb().end_with_conditional(None, cond, then_block, else_block);
+
+ then_block.add_assignment(None, variable, then_val);
+ then_block.end_with_jump(None, after_block);
+
+ if !then_val.get_type().is_compatible_with(else_val.get_type()) {
+ else_val = self.context.new_cast(None, else_val, then_val.get_type());
+ }
+ else_block.add_assignment(None, variable, else_val);
+ else_block.end_with_jump(None, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not expect, the current block in
+ // the state needs to be updated.
+ self.switch_to_block(after_block);
+
+ variable.to_rvalue()
+ }
+
+ #[allow(dead_code)]
+ fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+ // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
+ assert_eq!(idx as usize as u64, idx);
+ let value_type = aggregate_value.get_type();
+
+ if value_type.dyncast_array().is_some() {
+ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+ let element = self.context.new_array_access(None, aggregate_value, index);
+ element.get_address(None)
+ }
+ else if value_type.dyncast_vector().is_some() {
+ panic!();
+ }
+ else if let Some(pointer_type) = value_type.get_pointee() {
+ if let Some(struct_type) = pointer_type.is_struct() {
+ // NOTE: hack to workaround a limitation of the rustc API: see comment on
+ // CodegenCx.structs_as_pointer
+ aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ }
+ }
+ else if let Some(struct_type) = value_type.is_struct() {
+ aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ }
+ }
+
+ fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+ // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
+ assert_eq!(idx as usize as u64, idx);
+ let value_type = aggregate_value.get_type();
+
+ let lvalue =
+ if value_type.dyncast_array().is_some() {
+ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+ self.context.new_array_access(None, aggregate_value, index)
+ }
+ else if value_type.dyncast_vector().is_some() {
+ panic!();
+ }
+ else if let Some(pointer_type) = value_type.get_pointee() {
+ if let Some(struct_type) = pointer_type.is_struct() {
+ // NOTE: hack to workaround a limitation of the rustc API: see comment on
+ // CodegenCx.structs_as_pointer
+ aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ }
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ };
+
+ let lvalue_type = lvalue.to_rvalue().get_type();
+ let value =
+ // NOTE: sometimes, rustc will create a value with the wrong type.
+ if lvalue_type != value.get_type() {
+ self.context.new_cast(None, value, lvalue_type)
+ }
+ else {
+ value
+ };
+
+ self.llbb().add_assignment(None, lvalue, value);
+
+ aggregate_value
+ }
+
+ fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
+ // TODO(antoyo)
+ }
+
+ fn cleanup_landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>) -> RValue<'gcc> {
+ let field1 = self.context.new_field(None, self.u8_type.make_pointer(), "landing_pad_field_1");
+ let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_1");
+ let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]);
+ self.current_func().new_local(None, struct_type.as_type(), "landing_pad")
+ .to_rvalue()
+ // TODO(antoyo): Properly implement unwinding.
+ // The code above only makes compilation work: rustc_codegen_ssa now calls the
+ // unwinding builder methods even with panic=abort.
+ }
+
+ fn resume(&mut self, _exn: RValue<'gcc>) {
+ // TODO(bjorn3): Properly implement unwinding.
+ self.unreachable();
+ }
+
+ fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
+ unimplemented!();
+ }
+
+ fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) {
+ unimplemented!();
+ }
+
+ fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
+ unimplemented!();
+ }
+
+ fn catch_switch(
+ &mut self,
+ _parent: Option<RValue<'gcc>>,
+ _unwind: Option<Block<'gcc>>,
+ _handlers: &[Block<'gcc>],
+ ) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ // Atomic Operations
+ fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
+ let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
+ self.llbb().add_assignment(None, expected, cmp);
+ let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);
+
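+ // NOTE: rustc expects the same { value, success } pair as LLVM's cmpxchg, so the
+ // builtin's bool result and the (possibly updated) expected value are packed into a
+ // two-field struct below.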
+ let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
+ let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
+ let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use good align.
+
+ let value_type = result.to_rvalue().get_type();
+ if let Some(struct_type) = value_type.is_struct() {
+ self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align);
+ // NOTE: since success contains the call to the builtin, it must be stored before
+ // expected, so that expected is read after the call has (possibly) updated it.
+ self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
+ }
+ // TODO(antoyo): handle when value is not a struct.
+
+ result.to_rvalue()
+ }
+
+ fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+ let size = src.get_type().get_size();
+ let name =
+ match op {
+ AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
+ AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
+ AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
+ AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
+ AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
+ AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
+ AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
+ AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+ AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+ AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+ AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+ };
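+
+ // NOTE: e.g. AtomicAdd on a 4-byte operand lowers to a call roughly equivalent to the
+ // C expression __atomic_fetch_add_4(dst, src, order), which returns the previous value.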
+
+ let atomic_function = self.context.get_builtin_function(name);
+ let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+
+ let void_ptr_type = self.context.new_type::<*mut ()>();
+ let volatile_void_ptr_type = void_ptr_type.make_volatile();
+ let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
+ // FIXME(antoyo): for an unknown reason, the type of src does not match the builtin's
+ // value parameter, so cast to the parameter's type.
+ let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
+ let src = self.context.new_cast(None, src, new_src_type);
+ let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
+ self.context.new_cast(None, res, src.get_type())
+ }
+
+ fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
+ let name =
+ match scope {
+ SynchronizationScope::SingleThread => "__atomic_signal_fence",
+ SynchronizationScope::CrossThread => "__atomic_thread_fence",
+ };
+ let thread_fence = self.context.get_builtin_function(name);
+ let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+ self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
+ }
+
+ fn set_invariant_load(&mut self, load: RValue<'gcc>) {
+ // NOTE: hack to treat a vtable function pointer as a non-global-variable function pointer.
+ self.normal_function_addresses.borrow_mut().insert(load);
+ // TODO(antoyo)
+ }
+
+ fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
+ // TODO(antoyo)
+ }
+
+ fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
+ // TODO(antoyo)
+ }
+
+ fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
+ // FIXME(antoyo): remove when having a proper API.
+ let gcc_func = unsafe { std::mem::transmute(func) };
+ if self.functions.borrow().values().any(|value| *value == gcc_func) {
+ self.function_call(func, args, funclet)
+ }
+ else {
+ // If it's not a function that was defined, it's a function pointer.
+ self.function_ptr_call(func, args, funclet)
+ }
+ }
+
+ fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ // FIXME(antoyo): this does not zero-extend.
+ if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
+ // FIXME(antoyo): hack because base::from_immediate converts i1 to i8.
+ // Fix the code in codegen_ssa::base::from_immediate.
+ return value;
+ }
+ self.gcc_int_cast(value, dest_typ)
+ }
+
+ fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
+ self.cx
+ }
+
+ fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
+ // FIXME(bjorn3): implement
+ }
+
+ fn set_span(&mut self, _span: Span) {}
+
+ fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
+ if self.cx().val_ty(val) == self.cx().type_i1() {
+ self.zext(val, self.cx().type_i8())
+ }
+ else {
+ val
+ }
+ }
+
+ fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
+ if scalar.is_bool() {
+ return self.trunc(val, self.cx().type_i1());
+ }
+ val
+ }
+
+ fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
+ None
+ }
+
+ fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
+ None
+ }
+
+ fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
+ unimplemented!();
+ }
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+ #[cfg(feature="master")]
+ pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
+ let struct_type = mask.get_type().is_struct().expect("mask of struct type");
+
+ // TODO(antoyo): use a recursive unqualified() here.
+ let vector_type = v1.get_type().unqualified().dyncast_vector().expect("vector type");
+ let element_type = vector_type.get_element_type();
+ let vec_num_units = vector_type.get_num_units();
+
+ let mask_num_units = struct_type.get_field_count();
+ let mut vector_elements = vec![];
+ let mask_element_type =
+ if element_type.is_integral() {
+ element_type
+ }
+ else {
+ #[cfg(feature="master")]
+ {
+ self.cx.type_ix(element_type.get_size() as u64 * 8)
+ }
+ #[cfg(not(feature="master"))]
+ self.int_type
+ };
+ for i in 0..mask_num_units {
+ let field = struct_type.get_field(i as i32);
+ vector_elements.push(self.context.new_cast(None, mask.access_field(None, field).to_rvalue(), mask_element_type));
+ }
+
+ // NOTE: the mask needs to be the same length as the input vectors, so add the missing
+ // elements in the mask if needed.
+ for _ in mask_num_units..vec_num_units {
+ vector_elements.push(self.context.new_rvalue_zero(mask_element_type));
+ }
+
+ let array_type = self.context.new_array_type(None, element_type, vec_num_units as i32);
+ let result_type = self.context.new_vector_type(element_type, mask_num_units as u64);
+ let (v1, v2) =
+ if vec_num_units < mask_num_units {
+ // NOTE: the mask needs to be the same length as the input vectors, so join the 2
+ // vectors and create a dummy second vector.
+ // TODO(antoyo): switch to using new_vector_access.
+ let array = self.context.new_bitcast(None, v1, array_type);
+ let mut elements = vec![];
+ for i in 0..vec_num_units {
+ elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
+ }
+ // TODO(antoyo): switch to using new_vector_access.
+ let array = self.context.new_bitcast(None, v2, array_type);
+ for i in 0..(mask_num_units - vec_num_units) {
+ elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
+ }
+ let v1 = self.context.new_rvalue_from_vector(None, result_type, &elements);
+ let zero = self.context.new_rvalue_zero(element_type);
+ let v2 = self.context.new_rvalue_from_vector(None, result_type, &vec![zero; mask_num_units]);
+ (v1, v2)
+ }
+ else {
+ (v1, v2)
+ };
+
+ let new_mask_num_units = std::cmp::max(mask_num_units, vec_num_units);
+ let mask_type = self.context.new_vector_type(mask_element_type, new_mask_num_units as u64);
+ let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements);
+ let result = self.context.new_rvalue_vector_perm(None, v1, v2, mask);
+
+ if vec_num_units != mask_num_units {
+ // NOTE: if padding was added, only select the number of elements of the masks to
+ // remove that padding in the result.
+ let mut elements = vec![];
+ // TODO(antoyo): switch to using new_vector_access.
+ let array = self.context.new_bitcast(None, result, array_type);
+ for i in 0..mask_num_units {
+ elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
+ }
+ self.context.new_rvalue_from_vector(None, result_type, &elements)
+ }
+ else {
+ result
+ }
+ }
+
+ #[cfg(not(feature="master"))]
+ pub fn shuffle_vector(&mut self, _v1: RValue<'gcc>, _v2: RValue<'gcc>, _mask: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ #[cfg(feature="master")]
+ pub fn vector_reduce<F>(&mut self, src: RValue<'gcc>, op: F) -> RValue<'gcc>
+ where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>
+ {
+ let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
+ let element_count = vector_type.get_num_units();
+ let mut vector_elements = vec![];
+ for i in 0..element_count {
+ vector_elements.push(i);
+ }
+ let mask_type = self.context.new_vector_type(self.int_type, element_count as u64);
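+ // NOTE: this is a log2(N) rotate-and-combine reduction; for a 4-lane vector it
+ // computes, roughly:
+ //
+ //     res = op(src, rotate_lanes(src, 1));
+ //     res = op(res, rotate_lanes(res, 2));
+ //     res[0]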
+ let mut shift = 1;
+ let mut res = src;
+ while shift < element_count {
+ let vector_elements: Vec<_> =
+ vector_elements.iter()
+ .map(|i| self.context.new_rvalue_from_int(self.int_type, ((i + shift) % element_count) as i32))
+ .collect();
+ let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements);
+ let shifted = self.context.new_rvalue_vector_perm(None, res, res, mask);
+ shift *= 2;
+ res = op(res, shifted, &self.context);
+ }
+ self.context.new_vector_access(None, res, self.context.new_rvalue_zero(self.int_type))
+ .to_rvalue()
+ }
+
+ #[cfg(not(feature="master"))]
+ pub fn vector_reduce<F>(&mut self, src: RValue<'gcc>, op: F) -> RValue<'gcc>
+ where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>
+ {
+ unimplemented!();
+ }
+
+ pub fn vector_reduce_op(&mut self, src: RValue<'gcc>, op: BinaryOp) -> RValue<'gcc> {
+ self.vector_reduce(src, |a, b, context| context.new_binary_op(None, op, a.get_type(), a, b))
+ }
+
+ pub fn vector_reduce_fadd_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ pub fn vector_reduce_fmul_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ // Inspired by Hacker's Delight min implementation.
+ pub fn vector_reduce_min(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
+ self.vector_reduce(src, |a, b, context| {
+ let differences_or_zeros = difference_or_zero(a, b, context);
+ context.new_binary_op(None, BinaryOp::Minus, a.get_type(), a, differences_or_zeros)
+ })
+ }
+
+ // Inspired by Hacker's Delight max implementation.
+ pub fn vector_reduce_max(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
+ self.vector_reduce(src, |a, b, context| {
+ let differences_or_zeros = difference_or_zero(a, b, context);
+ context.new_binary_op(None, BinaryOp::Plus, b.get_type(), b, differences_or_zeros)
+ })
+ }
+
+ pub fn vector_select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, else_val: RValue<'gcc>) -> RValue<'gcc> {
+ // cond is a vector of integers, not of bools.
+ let cond_type = cond.get_type();
+ let vector_type = cond_type.unqualified().dyncast_vector().expect("vector type");
+ let num_units = vector_type.get_num_units();
+ let element_type = vector_type.get_element_type();
+ let zeros = vec![self.context.new_rvalue_zero(element_type); num_units];
+ let zeros = self.context.new_rvalue_from_vector(None, cond_type, &zeros);
+
+ let masks = self.context.new_comparison(None, ComparisonOp::NotEquals, cond, zeros);
+ let then_vals = masks & then_val;
+
+ let ones = vec![self.context.new_rvalue_one(element_type); num_units];
+ let ones = self.context.new_rvalue_from_vector(None, cond_type, &ones);
+ let inverted_masks = masks + ones;
+ // NOTE: sometimes, the type of else_val can be different from the type of then_val in
+ // libgccjit (vector of int vs vector of int32_t), but they should be the same for the AND
+ // operation to work.
+ let else_val = self.context.new_bitcast(None, else_val, then_val.get_type());
+ let else_vals = inverted_masks & else_val;
+
+ then_vals | else_vals
+ }
+}
+
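+// NOTE: branchless select from Hacker's Delight: the vector comparison yields an
+// all-ones lane mask where b >= a, so `difference & masks` keeps a - b in those lanes
+// and 0 elsewhere, letting the reductions above pick an extremum per lane with a
+// single add or subtract and no branches.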
+fn difference_or_zero<'gcc>(a: RValue<'gcc>, b: RValue<'gcc>, context: &'gcc Context<'gcc>) -> RValue<'gcc> {
+ let difference = a - b;
+ let masks = context.new_comparison(None, ComparisonOp::GreaterThanEquals, b, a);
+ difference & masks
+}
+
+impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
+ fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
+ // Forward to the `get_static` method of `CodegenCx`
+ self.cx().get_static(def_id).get_address(None)
+ }
+}
+
+impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ self.cx.param_env()
+ }
+}
+
+impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.cx.target_spec()
+ }
+}
+
+pub trait ToGccComp {
+ fn to_gcc_comparison(&self) -> ComparisonOp;
+}
+
+impl ToGccComp for IntPredicate {
+ fn to_gcc_comparison(&self) -> ComparisonOp {
+ match *self {
+ IntPredicate::IntEQ => ComparisonOp::Equals,
+ IntPredicate::IntNE => ComparisonOp::NotEquals,
+ IntPredicate::IntUGT => ComparisonOp::GreaterThan,
+ IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
+ IntPredicate::IntULT => ComparisonOp::LessThan,
+ IntPredicate::IntULE => ComparisonOp::LessThanEquals,
+ IntPredicate::IntSGT => ComparisonOp::GreaterThan,
+ IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
+ IntPredicate::IntSLT => ComparisonOp::LessThan,
+ IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
+ }
+ }
+}
+
+impl ToGccComp for RealPredicate {
+ fn to_gcc_comparison(&self) -> ComparisonOp {
+ // TODO(antoyo): check that ordered vs non-ordered is respected.
+ match *self {
+ RealPredicate::RealPredicateFalse => unreachable!(),
+ RealPredicate::RealOEQ => ComparisonOp::Equals,
+ RealPredicate::RealOGT => ComparisonOp::GreaterThan,
+ RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
+ RealPredicate::RealOLT => ComparisonOp::LessThan,
+ RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
+ RealPredicate::RealONE => ComparisonOp::NotEquals,
+ RealPredicate::RealORD => unreachable!(),
+ RealPredicate::RealUNO => unreachable!(),
+ RealPredicate::RealUEQ => ComparisonOp::Equals,
+ RealPredicate::RealUGT => ComparisonOp::GreaterThan,
+ RealPredicate::RealUGE => ComparisonOp::GreaterThanEquals,
+ RealPredicate::RealULT => ComparisonOp::LessThan,
+ RealPredicate::RealULE => ComparisonOp::LessThanEquals,
+ RealPredicate::RealUNE => ComparisonOp::NotEquals,
+ RealPredicate::RealPredicateTrue => unreachable!(),
+ }
+ }
+}
+
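+// NOTE: the discriminants below mirror GCC's __ATOMIC_* memory-order constants
+// (__ATOMIC_RELAXED = 0 through __ATOMIC_SEQ_CST = 5), so casting with `as i32` yields
+// the values the __atomic_* builtins expect.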
+#[repr(C)]
+#[allow(non_camel_case_types)]
+enum MemOrdering {
+ __ATOMIC_RELAXED,
+ __ATOMIC_CONSUME,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_RELEASE,
+ __ATOMIC_ACQ_REL,
+ __ATOMIC_SEQ_CST,
+}
+
+trait ToGccOrdering {
+ fn to_gcc(self) -> i32;
+}
+
+impl ToGccOrdering for AtomicOrdering {
+ fn to_gcc(self) -> i32 {
+ use MemOrdering::*;
+
+ let ordering =
+ match self {
+ AtomicOrdering::Unordered => __ATOMIC_RELAXED,
+ AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
+ AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
+ AtomicOrdering::Release => __ATOMIC_RELEASE,
+ AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
+ AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
+ };
+ ordering as i32
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs
new file mode 100644
index 000000000..c1041125e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/callee.rs
@@ -0,0 +1,77 @@
+use gccjit::{FunctionType, RValue};
+use rustc_codegen_ssa::traits::BaseTypeMethods;
+use rustc_middle::ty::{self, Instance, TypeVisitable};
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
+
+use crate::abi::FnAbiGccExt;
+use crate::context::CodegenCx;
+
+/// Codegens a reference to a fn/method item, monomorphizing and
+/// inlining as it goes.
+///
+/// # Parameters
+///
+/// - `cx`: the crate context
+/// - `instance`: the instance to be instantiated
+pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> RValue<'gcc> {
+ let tcx = cx.tcx();
+
+ assert!(!instance.substs.needs_infer());
+ assert!(!instance.substs.has_escaping_bound_vars());
+
+ if let Some(&func) = cx.function_instances.borrow().get(&instance) {
+ return func;
+ }
+
+ let sym = tcx.symbol_name(instance).name;
+
+ let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
+
+ let func =
+ if let Some(func) = cx.get_declared_value(&sym) {
+ // Create a fn pointer with the new signature.
+ let ptrty = fn_abi.ptr_to_gcc_type(cx);
+
+ // This is subtle and surprising, but sometimes we have to bitcast
+ // the resulting fn pointer. The reason has to do with external
+ // functions. If you have two crates that both bind the same C
+ // library, they may not use precisely the same types: for
+ // example, they will probably each declare their own structs,
+ // which are distinct types from LLVM's point of view (nominal
+ // types).
+ //
+ // Now, if those two crates are linked into an application, and
+ // they contain inlined code, you can wind up with a situation
+ // where both of those functions wind up being loaded into this
+ // application simultaneously. In that case, the same function
+ // (from LLVM's point of view) requires two types. But of course
+ // LLVM won't allow one function to have two types.
+ //
+ // What we currently do, therefore, is declare the function with
+ // one of the two types (whichever happens to come first) and then
+ // bitcast as needed when the function is referenced to make sure
+ // it has the type we expect.
+ //
+ // This can occur on either a crate-local or crate-external
+ // reference. It also occurs when testing libcore and in some
+ // other weird situations. Annoying.
+ if cx.val_ty(func) != ptrty {
+ // TODO(antoyo): cast the pointer.
+ func
+ }
+ else {
+ func
+ }
+ }
+ else {
+ cx.linkage.set(FunctionType::Extern);
+ let func = cx.declare_fn(&sym, &fn_abi);
+
+ // TODO(antoyo): set linkage and attributes.
+ func
+ };
+
+ cx.function_instances.borrow_mut().insert(instance, func);
+
+ func
+}
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
new file mode 100644
index 000000000..ccb6cbbc2
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -0,0 +1,479 @@
+use gccjit::LValue;
+use gccjit::{RValue, Type, ToRValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{
+ BaseTypeMethods,
+ ConstMethods,
+ DerivedTypeMethods,
+ MiscMethods,
+ StaticMethods,
+};
+use rustc_middle::mir::Mutability;
+use rustc_middle::ty::layout::{TyAndLayout, LayoutOf};
+use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
+use rustc_target::abi::{self, HasDataLayout, Pointer, Size};
+
+use crate::consts::const_alloc_to_gcc;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn const_bytes(&self, bytes: &[u8]) -> RValue<'gcc> {
+ bytes_in_context(self, bytes)
+ }
+
+ fn global_string(&self, string: &str) -> LValue<'gcc> {
+ // TODO(antoyo): handle non-null-terminated strings.
+ let string = self.context.new_string_literal(&*string);
+ let sym = self.generate_local_symbol_name("str");
+ let global = self.declare_private_global(&sym, self.val_ty(string));
+ global.global_set_initializer_rvalue(string);
+ global
+ // TODO(antoyo): set linkage.
+ }
+}
+
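+// NOTE: e.g. bytes_in_context(cx, b"abc") builds the gccjit equivalent of the C array
+// `unsigned char bytes[3] = {97, 98, 99}`.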
+pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> {
+ let context = &cx.context;
+ let byte_type = context.new_type::<u8>();
+ let typ = context.new_array_type(None, byte_type, bytes.len() as i32);
+ let elements: Vec<_> =
+ bytes.iter()
+ .map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32))
+ .collect();
+ context.new_array_constructor(None, typ, &elements)
+}
+
+pub fn type_is_pointer<'gcc>(typ: Type<'gcc>) -> bool {
+ typ.get_pointee().is_some()
+}
+
+impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn const_null(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+ if type_is_pointer(typ) {
+ self.context.new_null(typ)
+ }
+ else {
+ self.const_int(typ, 0)
+ }
+ }
+
+ fn const_undef(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+ let local = self.current_func.borrow().expect("func")
+ .new_local(None, typ, "undefined");
+ if typ.is_struct().is_some() {
+ // NOTE: hack to work around a limitation of the rustc API: see the comment on
+ // CodegenCx.structs_as_pointer
+ let pointer = local.get_address(None);
+ self.structs_as_pointer.borrow_mut().insert(pointer);
+ pointer
+ }
+ else {
+ local.to_rvalue()
+ }
+ }
+
+ fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
+ self.gcc_int(typ, int)
+ }
+
+ fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
+ self.gcc_uint(typ, int)
+ }
+
+ fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
+ self.gcc_uint_big(typ, num)
+ }
+
+ fn const_bool(&self, val: bool) -> RValue<'gcc> {
+ self.const_uint(self.type_i1(), val as u64)
+ }
+
+ fn const_i16(&self, i: i16) -> RValue<'gcc> {
+ self.const_int(self.type_i16(), i as i64)
+ }
+
+ fn const_i32(&self, i: i32) -> RValue<'gcc> {
+ self.const_int(self.type_i32(), i as i64)
+ }
+
+ fn const_u32(&self, i: u32) -> RValue<'gcc> {
+ self.const_uint(self.type_u32(), i as u64)
+ }
+
+ fn const_u64(&self, i: u64) -> RValue<'gcc> {
+ self.const_uint(self.type_u64(), i)
+ }
+
+ fn const_usize(&self, i: u64) -> RValue<'gcc> {
+ let bit_size = self.data_layout().pointer_size.bits();
+ if bit_size < 64 {
+ // make sure it doesn't overflow
+ assert!(i < (1 << bit_size));
+ }
+
+ self.const_uint(self.usize_type, i)
+ }
+
+ fn const_u8(&self, _i: u8) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn const_real(&self, typ: Type<'gcc>, val: f64) -> RValue<'gcc> {
+ self.context.new_rvalue_from_double(typ, val)
+ }
+
+ fn const_str(&self, s: &str) -> (RValue<'gcc>, RValue<'gcc>) {
+ let str_global = *self
+ .const_str_cache
+ .borrow_mut()
+ .raw_entry_mut()
+ .from_key(s)
+ .or_insert_with(|| (s.to_owned(), self.global_string(s)))
+ .1;
+ let len = s.len();
+ let cs = self.const_ptrcast(str_global.get_address(None),
+ self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self, true)),
+ );
+ (cs, self.const_usize(len as u64))
+ }
+
+ fn const_struct(&self, values: &[RValue<'gcc>], packed: bool) -> RValue<'gcc> {
+ let fields: Vec<_> = values.iter()
+ .map(|value| value.get_type())
+ .collect();
+ // TODO(antoyo): cache the type? It's anonymous, so probably not.
+ let typ = self.type_struct(&fields, packed);
+ let struct_type = typ.is_struct().expect("struct type");
+ self.context.new_struct_constructor(None, struct_type.as_type(), None, values)
+ }
+
+ fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> {
+ // TODO(antoyo)
+ None
+ }
+
+ fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> {
+ // TODO(antoyo)
+ None
+ }
+
+ fn zst_to_backend(&self, _ty: Type<'gcc>) -> RValue<'gcc> {
+ self.const_undef(self.type_ix(0))
+ }
+
+ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> {
+ let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
+ match cv {
+ Scalar::Int(int) => {
+ let data = int.assert_bits(layout.size(self));
+
+ // FIXME(antoyo): there are some issues with using the u128 code that follows, so hard-code
+ // the paths for floating-point values.
+ if ty == self.float_type {
+ return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
+ }
+ else if ty == self.double_type {
+ return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64));
+ }
+
+ let value = self.const_uint_big(self.type_ix(bitsize), data);
+ // TODO(bjorn3): assert size is correct
+ self.const_bitcast(value, ty)
+ }
+ Scalar::Ptr(ptr, _size) => {
+ let (alloc_id, offset) = ptr.into_parts();
+ let base_addr =
+ match self.tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Memory(alloc) => {
+ let init = const_alloc_to_gcc(self, alloc);
+ let alloc = alloc.inner();
+ let value =
+ match alloc.mutability {
+ Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
+ _ => self.static_addr_of(init, alloc.align, None),
+ };
+ if !self.sess().fewer_names() {
+ // TODO(antoyo): set value name.
+ }
+ value
+ },
+ GlobalAlloc::Function(fn_instance) => {
+ self.get_fn_addr(fn_instance)
+ },
+ GlobalAlloc::VTable(ty, trait_ref) => {
+ let alloc = self.tcx.global_alloc(self.tcx.vtable_allocation((ty, trait_ref))).unwrap_memory();
+ let init = const_alloc_to_gcc(self, alloc);
+ self.static_addr_of(init, alloc.inner().align, None)
+ }
+ GlobalAlloc::Static(def_id) => {
+ assert!(self.tcx.is_static(def_id));
+ self.get_static(def_id).get_address(None)
+ },
+ };
+ let ptr_type = base_addr.get_type();
+ let base_addr = self.const_bitcast(base_addr, self.usize_type);
+ let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
+ let ptr = self.const_bitcast(base_addr + offset, ptr_type);
+ if layout.primitive() != Pointer {
+ self.const_bitcast(ptr.dereference(None).to_rvalue(), ty)
+ }
+ else {
+ self.const_bitcast(ptr, ty)
+ }
+ }
+ }
+ }
+
+ fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value {
+ const_alloc_to_gcc(self, alloc)
+ }
+
+ fn from_const_alloc(&self, layout: TyAndLayout<'tcx>, alloc: ConstAllocation<'tcx>, offset: Size) -> PlaceRef<'tcx, RValue<'gcc>> {
+ assert_eq!(alloc.inner().align, layout.align.abi);
+ let ty = self.type_ptr_to(layout.gcc_type(self, true));
+ let value =
+ if layout.size == Size::ZERO {
+ let value = self.const_usize(alloc.inner().align.bytes());
+ self.context.new_cast(None, value, ty)
+ }
+ else {
+ let init = const_alloc_to_gcc(self, alloc);
+ let base_addr = self.static_addr_of(init, alloc.inner().align, None);
+
+ let array = self.const_bitcast(base_addr, self.type_i8p());
+ let value = self.context.new_array_access(None, array, self.const_usize(offset.bytes())).get_address(None);
+ self.const_bitcast(value, ty)
+ };
+ PlaceRef::new_sized(value, layout)
+ }
+
+ fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> {
+ self.context.new_cast(None, val, ty)
+ }
+}
+
+pub trait SignType<'gcc, 'tcx> {
+ fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+ fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+}
+
+impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
+ fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.is_i8(cx) || self.is_i16(cx) || self.is_i32(cx) || self.is_i64(cx) || self.is_i128(cx)
+ }
+
+ fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.is_u8(cx) || self.is_u16(cx) || self.is_u32(cx) || self.is_u64(cx) || self.is_u128(cx)
+ }
+
+ fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ if self.is_u8(cx) {
+ cx.i8_type
+ }
+ else if self.is_u16(cx) {
+ cx.i16_type
+ }
+ else if self.is_u32(cx) {
+ cx.i32_type
+ }
+ else if self.is_u64(cx) {
+ cx.i64_type
+ }
+ else if self.is_u128(cx) {
+ cx.i128_type
+ }
+ else if self.is_uchar(cx) {
+ cx.char_type
+ }
+ else if self.is_ushort(cx) {
+ cx.short_type
+ }
+ else if self.is_uint(cx) {
+ cx.int_type
+ }
+ else if self.is_ulong(cx) {
+ cx.long_type
+ }
+ else if self.is_ulonglong(cx) {
+ cx.longlong_type
+ }
+ else {
+ self.clone()
+ }
+ }
+
+ fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ if self.is_i8(cx) {
+ cx.u8_type
+ }
+ else if self.is_i16(cx) {
+ cx.u16_type
+ }
+ else if self.is_i32(cx) {
+ cx.u32_type
+ }
+ else if self.is_i64(cx) {
+ cx.u64_type
+ }
+ else if self.is_i128(cx) {
+ cx.u128_type
+ }
+ else if self.is_char(cx) {
+ cx.uchar_type
+ }
+ else if self.is_short(cx) {
+ cx.ushort_type
+ }
+ else if self.is_int(cx) {
+ cx.uint_type
+ }
+ else if self.is_long(cx) {
+ cx.ulong_type
+ }
+ else if self.is_longlong(cx) {
+ cx.ulonglong_type
+ }
+ else {
+ self.clone()
+ }
+ }
+}
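+
+// A usage sketch of the conversions above (hypothetical code; `cx` is any
+// `CodegenCx`): `to_signed`/`to_unsigned` swap a type for its counterpart
+// and return already-signed/already-unsigned types unchanged.
+#[cfg(test)]
+fn _sign_conversion_sketch<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>) {
+ assert!(cx.u32_type.to_signed(cx).is_i32(cx));
+ assert!(cx.i64_type.to_unsigned(cx).is_u64(cx));
+ assert!(cx.i8_type.to_signed(cx).is_i8(cx));
+}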
+
+pub trait TypeReflection<'gcc, 'tcx> {
+ fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_char(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_short(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_int(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_long(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_longlong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+ fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+ fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+ fn is_vector(&self) -> bool;
+}
+
+impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> {
+ fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.uchar_type
+ }
+
+ fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.ushort_type
+ }
+
+ fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.uint_type
+ }
+
+ fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.ulong_type
+ }
+
+ fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.ulonglong_type
+ }
+
+ fn is_char(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.char_type
+ }
+
+ fn is_short(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.short_type
+ }
+
+ fn is_int(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.int_type
+ }
+
+ fn is_long(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.long_type
+ }
+
+ fn is_longlong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.longlong_type
+ }
+
+ fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i8_type
+ }
+
+ fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u8_type
+ }
+
+ fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i16_type
+ }
+
+ fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u16_type
+ }
+
+ fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i32_type
+ }
+
+ fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u32_type
+ }
+
+ fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i64_type
+ }
+
+ fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u64_type
+ }
+
+ fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i128_type.unqualified()
+ }
+
+ fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u128_type.unqualified()
+ }
+
+ fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.context.new_type::<f32>()
+ }
+
+ fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.context.new_type::<f64>()
+ }
+
+ fn is_vector(&self) -> bool {
+ let mut typ = self.clone();
+ loop {
+ if typ.dyncast_vector().is_some() {
+ return true;
+ }
+
+ let old_type = typ;
+ typ = typ.unqualified();
+ if old_type == typ {
+ break;
+ }
+ }
+
+ false
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs
new file mode 100644
index 000000000..c0b8d2181
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/consts.rs
@@ -0,0 +1,405 @@
+use gccjit::{GlobalKind, LValue, RValue, ToRValue, Type};
+use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods};
+use rustc_hir as hir;
+use rustc_hir::Node;
+use rustc_middle::{bug, span_bug};
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::mir::interpret::{self, ConstAllocation, ErrorHandled, Scalar as InterpScalar, read_target_uint};
+use rustc_span::Span;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRange};
+
+use crate::base;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> {
+ if value.get_type() == self.bool_type.make_pointer() {
+ if let Some(pointee) = typ.get_pointee() {
+ if pointee.dyncast_vector().is_some() {
+ panic!()
+ }
+ }
+ }
+ // NOTE: since bitcast makes a value non-constant, don't bitcast if not necessary as some
+ // SIMD builtins require a constant value.
+ self.bitcast_if_needed(value, typ)
+ }
+}
+
+impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
+ fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
+ // TODO(antoyo): implement a proper rvalue comparison in libgccjit instead of doing the
+ // following:
+ for (value, variable) in &*self.const_globals.borrow() {
+ if format!("{:?}", value) == format!("{:?}", cv) {
+ if let Some(global_variable) = self.global_lvalues.borrow().get(variable) {
+ let alignment = align.bits() as i32;
+ if alignment > global_variable.get_alignment() {
+ global_variable.set_alignment(alignment);
+ }
+ }
+ return *variable;
+ }
+ }
+ let global_value = self.static_addr_of_mut(cv, align, kind);
+ #[cfg(feature = "master")]
+ self.global_lvalues.borrow().get(&global_value)
+ .expect("`static_addr_of_mut` did not add the global to `self.global_lvalues`")
+ .global_set_readonly();
+ self.const_globals.borrow_mut().insert(cv, global_value);
+ global_value
+ }
+
+ fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
+ let attrs = self.tcx.codegen_fn_attrs(def_id);
+
+ let value =
+ match codegen_static_initializer(&self, def_id) {
+ Ok((value, _)) => value,
+ // Error has already been reported
+ Err(_) => return,
+ };
+
+ let global = self.get_static(def_id);
+
+ // boolean SSA values are i1, but they have to be stored in i8 slots,
+ // otherwise some LLVM optimization passes don't work as expected
+ let val_llty = self.val_ty(value);
+ let value =
+ if val_llty == self.type_i1() {
+ unimplemented!();
+ }
+ else {
+ value
+ };
+
+ let instance = Instance::mono(self.tcx, def_id);
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let gcc_type = self.layout_of(ty).gcc_type(self, true);
+
+ // TODO(antoyo): set alignment.
+
+ let value = self.bitcast_if_needed(value, gcc_type);
+ global.global_set_initializer_rvalue(value);
+
+ // As an optimization, all shared statics which do not have interior
+ // mutability are placed into read-only memory.
+ if !is_mutable {
+ if self.type_is_freeze(ty) {
+ #[cfg(feature = "master")]
+ global.global_set_readonly();
+ }
+ }
+
+ if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+ // Do not allow LLVM to change the alignment of a TLS on macOS.
+ //
+ // By default a global's alignment can be freely increased.
+ // This allows LLVM to generate more performant instructions
+ // e.g., using load-aligned into a SIMD register.
+ //
+ // However, on macOS 10.10 or below, the dynamic linker does not
+ // respect any alignment given on the TLS (radar 24221680).
+ // This violates the alignment assumption, causing a segfault at runtime.
+ //
+ // This bug is very easy to trigger. In `println!` and `panic!`,
+ // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
+ // whose values are `mem::replace`d on initialization.
+ // The implementation of `mem::replace` will use SIMD
+ // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
+ // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
+ // which macOS's dyld disregards, causing crashes
+ // (see issues #51794, #51758, #50867, #48866 and #44056).
+ //
+ // To work around the bug, we trick LLVM into not increasing
+ // the global's alignment by explicitly assigning a section to it
+ // (equivalent to automatically generating a `#[link_section]` attribute).
+ // See the comment in the `GlobalValue::canIncreaseAlignment()` function
+ // of `lib/IR/Globals.cpp` for why this works.
+ //
+ // When the alignment is not increased, the optimized `mem::replace`
+ // will use load-unaligned instructions instead, thus avoiding the crash.
+ //
+ // We could remove this hack whenever we decide to drop macOS 10.10 support.
+ if self.tcx.sess.target.options.is_like_osx {
+ // The `inspect` method is okay here because we checked relocations, and
+ // because we are doing this access to inspect the final interpreter state
+ // (not as part of the interpreter execution).
+ //
+ // FIXME: This check requires that the (arbitrary) value of undefined bytes
+ // happens to be zero. Instead, we should only check the value of defined bytes
+ // and set all undefined bytes to zero if this allocation is headed for the
+ // BSS.
+ unimplemented!();
+ }
+ }
+
+ // Wasm statics with custom link sections get special treatment as they
+ // go into custom sections of the wasm executable.
+ if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
+ if let Some(_section) = attrs.link_section {
+ unimplemented!();
+ }
+ } else {
+ // TODO(antoyo): set link section.
+ }
+
+ if attrs.flags.contains(CodegenFnAttrFlags::USED) || attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) {
+ self.add_used_global(global.to_rvalue());
+ }
+ }
+
+ /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
+ fn add_used_global(&self, _global: RValue<'gcc>) {
+ // TODO(antoyo)
+ }
+
+ fn add_compiler_used_global(&self, _global: RValue<'gcc>) {
+ // TODO(antoyo)
+ }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn static_addr_of_mut(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
+ let global =
+ match kind {
+ Some(kind) if !self.tcx.sess.fewer_names() => {
+ let name = self.generate_local_symbol_name(kind);
+ // TODO(antoyo): check if it's okay that no link_section is set.
+
+ let typ = self.val_ty(cv).get_aligned(align.bytes());
+ let global = self.declare_private_global(&name[..], typ);
+ global
+ }
+ _ => {
+ let typ = self.val_ty(cv).get_aligned(align.bytes());
+ let global = self.declare_unnamed_global(typ);
+ global
+ },
+ };
+ global.global_set_initializer_rvalue(cv);
+ // TODO(antoyo): set unnamed address.
+ let rvalue = global.get_address(None);
+ self.global_lvalues.borrow_mut().insert(rvalue, global);
+ rvalue
+ }
+
+ pub fn get_static(&self, def_id: DefId) -> LValue<'gcc> {
+ let instance = Instance::mono(self.tcx, def_id);
+ let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
+ if let Some(&global) = self.instances.borrow().get(&instance) {
+ return global;
+ }
+
+ let defined_in_current_codegen_unit =
+ self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
+ assert!(
+ !defined_in_current_codegen_unit,
+ "consts::get_static() should always hit the cache for \
+ statics defined in the same CGU, but did not for `{:?}`",
+ def_id
+ );
+
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let sym = self.tcx.symbol_name(instance).name;
+
+ let global =
+ if let Some(def_id) = def_id.as_local() {
+ let id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ let llty = self.layout_of(ty).gcc_type(self, true);
+ // FIXME: refactor this to work without accessing the HIR
+ let global = match self.tcx.hir().get(id) {
+ Node::Item(&hir::Item { span, kind: hir::ItemKind::Static(..), .. }) => {
+ if let Some(global) = self.get_declared_value(&sym) {
+ if self.val_ty(global) != self.type_ptr_to(llty) {
+ span_bug!(span, "Conflicting types for static");
+ }
+ }
+
+ let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+ let global = self.declare_global(
+ &sym,
+ llty,
+ GlobalKind::Exported,
+ is_tls,
+ fn_attrs.link_section,
+ );
+
+ if !self.tcx.is_reachable_non_generic(def_id) {
+ // TODO(antoyo): set visibility.
+ }
+
+ global
+ }
+
+ Node::ForeignItem(&hir::ForeignItem {
+ span,
+ kind: hir::ForeignItemKind::Static(..),
+ ..
+ }) => {
+ let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
+ check_and_apply_linkage(&self, &fn_attrs, ty, sym, span)
+ }
+
+ item => bug!("get_static: expected static, found {:?}", item),
+ };
+
+ global
+ }
+ else {
+ // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
+ //debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id));
+
+ let attrs = self.tcx.codegen_fn_attrs(def_id);
+ let span = self.tcx.def_span(def_id);
+ let global = check_and_apply_linkage(&self, &attrs, ty, sym, span);
+
+ let needs_dll_storage_attr = false; // TODO(antoyo)
+
+ // If this assertion triggers, there's something wrong with command-line
+ // argument validation.
+ debug_assert!(
+ !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
+ && self.tcx.sess.target.options.is_like_msvc
+ && self.tcx.sess.opts.cg.prefer_dynamic)
+ );
+
+ if needs_dll_storage_attr {
+ // This item is external but not foreign, i.e., it originates from an external Rust
+ // crate. Since we don't know whether this crate will be linked dynamically or
+ // statically in the final application, we always mark such symbols as 'dllimport'.
+ // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
+ // to make things work.
+ //
+ // However, in some scenarios we defer emission of statics to downstream
+ // crates, so there are cases where a static with an upstream DefId
+ // is actually present in the current crate. We can find out via the
+ // is_codegened_item query.
+ if !self.tcx.is_codegened_item(def_id) {
+ unimplemented!();
+ }
+ }
+ global
+ };
+
+ // TODO(antoyo): set dll storage class.
+
+ self.instances.borrow_mut().insert(instance, global);
+ global
+ }
+}
+
+pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAllocation<'tcx>) -> RValue<'gcc> {
+ let alloc = alloc.inner();
+ let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
+ let dl = cx.data_layout();
+ let pointer_size = dl.pointer_size.bytes() as usize;
+
+ let mut next_offset = 0;
+ for &(offset, alloc_id) in alloc.relocations().iter() {
+ let offset = offset.bytes();
+ assert_eq!(offset as usize as u64, offset);
+ let offset = offset as usize;
+ if offset > next_offset {
+ // This `inspect` is okay since we have checked that it is not within a relocation, it
+ // is within the bounds of the allocation, and it doesn't affect interpreter execution
+ // (we inspect the result after interpreter execution). Any undef byte is replaced with
+ // some arbitrary byte value.
+ //
+ // FIXME: relay undef bytes to codegen as undef const bytes
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
+ llvals.push(cx.const_bytes(bytes));
+ }
+ let ptr_offset =
+ read_target_uint(dl.endian,
+ // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
+ // affect interpreter execution (we inspect the result after interpreter execution),
+ // and we properly interpret the relocation as a relocation pointer offset.
+ alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+ )
+ .expect("const_alloc_to_llvm: could not read relocation pointer")
+ as u64;
+ llvals.push(cx.scalar_to_backend(
+ InterpScalar::from_pointer(
+ interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
+ &cx.tcx,
+ ),
+ abi::Scalar::Initialized { value: Primitive::Pointer, valid_range: WrappingRange::full(dl.pointer_size) },
+ cx.type_i8p(),
+ ));
+ next_offset = offset + pointer_size;
+ }
+ if alloc.len() >= next_offset {
+ let range = next_offset..alloc.len();
+ // This `inspect` is okay since we have checked that it is after all relocations, it is
+ // within the bounds of the allocation, and it doesn't affect interpreter execution (we
+ // inspect the result after interpreter execution). Any undef byte is replaced with some
+ // arbitrary byte value.
+ //
+ // FIXME: relay undef bytes to codegen as undef const bytes
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+ llvals.push(cx.const_bytes(bytes));
+ }
+
+ cx.const_struct(&llvals, true)
+}
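+
+// A worked sketch of the layout produced above (assuming a 64-bit target):
+// an allocation made of 4 plain bytes, one 8-byte relocated pointer, then
+// 2 plain bytes becomes a packed constant struct of three fields -- a 4-byte
+// array, an i8* built by `scalar_to_backend` from the relocation target, and
+// a 2-byte array.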
+
+pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id: DefId) -> Result<(RValue<'gcc>, ConstAllocation<'tcx>), ErrorHandled> {
+ let alloc = cx.tcx.eval_static_initializer(def_id)?;
+ Ok((const_alloc_to_gcc(cx, alloc), alloc))
+}
+
+fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str, span: Span) -> LValue<'gcc> {
+ let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+ let llty = cx.layout_of(ty).gcc_type(cx, true);
+ if let Some(linkage) = attrs.linkage {
+ // If this is a static with a linkage specified, then we need to handle
+ // it a little specially. The typesystem prevents things like &T and
+ // extern "C" fn() from being non-null, so we can't just declare a
+ // static and call it a day. Some linkages (like weak) will make it such
+ // that the static actually has a null value.
+ let llty2 =
+ if let ty::RawPtr(ref mt) = ty.kind() {
+ cx.layout_of(mt.ty).gcc_type(cx, true)
+ }
+ else {
+ cx.sess().span_fatal(
+ span,
+ "must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
+ )
+ };
+ // Declare a symbol `foo` with the desired linkage.
+ let global1 = cx.declare_global_with_linkage(&sym, llty2, base::global_linkage_to_gcc(linkage));
+
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
+ let mut real_name = "_rust_extern_with_linkage_".to_string();
+ real_name.push_str(&sym);
+ let global2 = cx.define_global(&real_name, llty, is_tls, attrs.link_section);
+ // TODO(antoyo): set linkage.
+ global2.global_set_initializer_rvalue(global1.get_address(None));
+ // TODO(antoyo): use global_set_initializer() when it will work.
+ global2
+ }
+ else {
+ // Generate an external declaration.
+ // FIXME(nagisa): investigate whether it can be changed into define_global
+
+ // Thread-local statics in some other crate need to *always* be linked
+ // against in a thread-local fashion, so we need to be sure to apply the
+ // thread-local attribute locally if it was present remotely. If we
+ // don't do this then linker errors can be generated where the linker
+ // complains that one object files has a thread local version of the
+ // symbol and another one doesn't.
+ cx.declare_global(&sym, llty, GlobalKind::Imported, is_tls, attrs.link_section)
+ }
+}
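+
+// In C terms, the `#[linkage]` branch above roughly corresponds to this
+// sketch (names hypothetical, `T` standing for the pointee type):
+//
+// extern T foo __attribute__((weak)); // global1
+// static T *_rust_extern_with_linkage_foo = &foo; // global2
+//
+// so `foo` may resolve to null while the Rust-visible symbol remains a valid
+// static holding that (possibly null) address.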
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
new file mode 100644
index 000000000..478f6d893
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/context.rs
@@ -0,0 +1,553 @@
+use std::cell::{Cell, RefCell};
+
+use gccjit::{Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, RValue, Struct, Type};
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::traits::{
+ BackendTypes,
+ MiscMethods,
+};
+use rustc_data_structures::base_n;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::span_bug;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
+use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers};
+use rustc_session::Session;
+use rustc_span::Span;
+use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx};
+use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
+
+use crate::callee::get_fn;
+
+#[derive(Clone)]
+pub struct FuncSig<'gcc> {
+ pub params: Vec<Type<'gcc>>,
+ pub return_type: Type<'gcc>,
+}
+
+pub struct CodegenCx<'gcc, 'tcx> {
+ pub check_overflow: bool,
+ pub codegen_unit: &'tcx CodegenUnit<'tcx>,
+ pub context: &'gcc Context<'gcc>,
+
+ // TODO(bjorn3): Can this field be removed?
+ pub current_func: RefCell<Option<Function<'gcc>>>,
+ pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,
+
+ pub functions: RefCell<FxHashMap<String, Function<'gcc>>>,
+ pub intrinsics: RefCell<FxHashMap<String, Function<'gcc>>>,
+
+ pub tls_model: gccjit::TlsModel,
+
+ pub bool_type: Type<'gcc>,
+ pub i8_type: Type<'gcc>,
+ pub i16_type: Type<'gcc>,
+ pub i32_type: Type<'gcc>,
+ pub i64_type: Type<'gcc>,
+ pub i128_type: Type<'gcc>,
+ pub isize_type: Type<'gcc>,
+
+ pub u8_type: Type<'gcc>,
+ pub u16_type: Type<'gcc>,
+ pub u32_type: Type<'gcc>,
+ pub u64_type: Type<'gcc>,
+ pub u128_type: Type<'gcc>,
+ pub usize_type: Type<'gcc>,
+
+ pub char_type: Type<'gcc>,
+ pub uchar_type: Type<'gcc>,
+ pub short_type: Type<'gcc>,
+ pub ushort_type: Type<'gcc>,
+ pub int_type: Type<'gcc>,
+ pub uint_type: Type<'gcc>,
+ pub long_type: Type<'gcc>,
+ pub ulong_type: Type<'gcc>,
+ pub longlong_type: Type<'gcc>,
+ pub ulonglong_type: Type<'gcc>,
+ pub sizet_type: Type<'gcc>,
+
+ pub supports_128bit_integers: bool,
+
+ pub float_type: Type<'gcc>,
+ pub double_type: Type<'gcc>,
+
+ pub linkage: Cell<FunctionType>,
+ pub scalar_types: RefCell<FxHashMap<Ty<'tcx>, Type<'gcc>>>,
+ pub types: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), Type<'gcc>>>,
+ pub tcx: TyCtxt<'tcx>,
+
+ pub struct_types: RefCell<FxHashMap<Vec<Type<'gcc>>, Type<'gcc>>>,
+
+ pub types_with_fields_to_set: RefCell<FxHashMap<Type<'gcc>, (Struct<'gcc>, TyAndLayout<'tcx>)>>,
+
+ /// Cache instances of monomorphic and polymorphic items
+ pub instances: RefCell<FxHashMap<Instance<'tcx>, LValue<'gcc>>>,
+ /// Cache function instances of monomorphic and polymorphic items
+ pub function_instances: RefCell<FxHashMap<Instance<'tcx>, RValue<'gcc>>>,
+ /// Cache generated vtables
+ pub vtables: RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
+
+ // TODO(antoyo): improve the SSA API to not require those.
+ // Mapping from function pointer type to indexes of on stack parameters.
+ pub on_stack_params: RefCell<FxHashMap<FunctionPtrType<'gcc>, FxHashSet<usize>>>,
+ // Mapping from function to indexes of on stack parameters.
+ pub on_stack_function_params: RefCell<FxHashMap<Function<'gcc>, FxHashSet<usize>>>,
+
+ /// Cache of emitted const globals (value -> global)
+ pub const_globals: RefCell<FxHashMap<RValue<'gcc>, RValue<'gcc>>>,
+
+ /// Map from the address of a global variable (rvalue) to the global variable itself (lvalue).
+ /// TODO(antoyo): remove when the rustc API is fixed.
+ pub global_lvalues: RefCell<FxHashMap<RValue<'gcc>, LValue<'gcc>>>,
+
+ /// Cache of constant strings.
+ pub const_str_cache: RefCell<FxHashMap<String, LValue<'gcc>>>,
+
+ /// Cache of globals.
+ pub globals: RefCell<FxHashMap<String, RValue<'gcc>>>,
+
+ /// A counter that is used for generating local symbol names
+ local_gen_sym_counter: Cell<usize>,
+
+ eh_personality: Cell<Option<RValue<'gcc>>>,
+
+ pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
+
+ /// NOTE: a hack is used because the rustc API is not well suited to libgccjit: as such,
+ /// `const_undef()` returns structs as pointers so that they can later be assigned a value.
+ /// This set remembers which of these pointers were returned by `const_undef()` so that
+ /// they can be dereferenced later.
+ /// FIXME(antoyo): fix the rustc API to avoid having this hack.
+ pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>,
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>, supports_128bit_integers: bool) -> Self {
+ let check_overflow = tcx.sess.overflow_checks();
+
+ let i8_type = context.new_c_type(CType::Int8t);
+ let i16_type = context.new_c_type(CType::Int16t);
+ let i32_type = context.new_c_type(CType::Int32t);
+ let i64_type = context.new_c_type(CType::Int64t);
+ let u8_type = context.new_c_type(CType::UInt8t);
+ let u16_type = context.new_c_type(CType::UInt16t);
+ let u32_type = context.new_c_type(CType::UInt32t);
+ let u64_type = context.new_c_type(CType::UInt64t);
+
+ let (i128_type, u128_type) =
+ if supports_128bit_integers {
+ let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
+ let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
+ (i128_type, u128_type)
+ }
+ else {
+ let i128_type = context.new_array_type(None, i64_type, 2);
+ let u128_type = context.new_array_type(None, u64_type, 2);
+ (i128_type, u128_type)
+ };
+
+ let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
+
+ let float_type = context.new_type::<f32>();
+ let double_type = context.new_type::<f64>();
+
+ let char_type = context.new_c_type(CType::Char);
+ let uchar_type = context.new_c_type(CType::UChar);
+ let short_type = context.new_c_type(CType::Short);
+ let ushort_type = context.new_c_type(CType::UShort);
+ let int_type = context.new_c_type(CType::Int);
+ let uint_type = context.new_c_type(CType::UInt);
+ let long_type = context.new_c_type(CType::Long);
+ let ulong_type = context.new_c_type(CType::ULong);
+ let longlong_type = context.new_c_type(CType::LongLong);
+ let ulonglong_type = context.new_c_type(CType::ULongLong);
+ let sizet_type = context.new_c_type(CType::SizeT);
+
+ let isize_type = context.new_c_type(CType::LongLong);
+ let usize_type = context.new_c_type(CType::ULongLong);
+ let bool_type = context.new_type::<bool>();
+
+ // TODO(antoyo): only have those assertions on x86_64.
+ assert_eq!(isize_type.get_size(), i64_type.get_size());
+ assert_eq!(usize_type.get_size(), u64_type.get_size());
+
+ let mut functions = FxHashMap::default();
+ let builtins = [
+ "__builtin_unreachable", "abort", "__builtin_expect", "__builtin_add_overflow", "__builtin_mul_overflow",
+ "__builtin_saddll_overflow", /*"__builtin_sadd_overflow",*/ "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
+ "__builtin_ssubll_overflow", /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow", "__builtin_uaddll_overflow",
+ "__builtin_uadd_overflow", "__builtin_umulll_overflow", "__builtin_umul_overflow", "__builtin_usubll_overflow",
+ "__builtin_usub_overflow", "sqrtf", "sqrt", "__builtin_powif", "__builtin_powi", "sinf", "sin", "cosf", "cos",
+ "powf", "pow", "expf", "exp", "exp2f", "exp2", "logf", "log", "log10f", "log10", "log2f", "log2", "fmaf",
+ "fma", "fabsf", "fabs", "fminf", "fmin", "fmaxf", "fmax", "copysignf", "copysign", "floorf", "floor", "ceilf",
+ "ceil", "truncf", "trunc", "rintf", "rint", "nearbyintf", "nearbyint", "roundf", "round",
+ "__builtin_expect_with_probability",
+ ];
+
+ for builtin in builtins.iter() {
+ functions.insert(builtin.to_string(), context.get_builtin_function(builtin));
+ }
+
+ Self {
+ check_overflow,
+ codegen_unit,
+ context,
+ current_func: RefCell::new(None),
+ normal_function_addresses: Default::default(),
+ functions: RefCell::new(functions),
+ intrinsics: RefCell::new(FxHashMap::default()),
+
+ tls_model,
+
+ bool_type,
+ i8_type,
+ i16_type,
+ i32_type,
+ i64_type,
+ i128_type,
+ isize_type,
+ usize_type,
+ u8_type,
+ u16_type,
+ u32_type,
+ u64_type,
+ u128_type,
+ char_type,
+ uchar_type,
+ short_type,
+ ushort_type,
+ int_type,
+ uint_type,
+ long_type,
+ ulong_type,
+ longlong_type,
+ ulonglong_type,
+ sizet_type,
+
+ supports_128bit_integers,
+
+ float_type,
+ double_type,
+
+ linkage: Cell::new(FunctionType::Internal),
+ instances: Default::default(),
+ function_instances: Default::default(),
+ on_stack_params: Default::default(),
+ on_stack_function_params: Default::default(),
+ vtables: Default::default(),
+ const_globals: Default::default(),
+ global_lvalues: Default::default(),
+ const_str_cache: Default::default(),
+ globals: Default::default(),
+ scalar_types: Default::default(),
+ types: Default::default(),
+ tcx,
+ struct_types: Default::default(),
+ types_with_fields_to_set: Default::default(),
+ local_gen_sym_counter: Cell::new(0),
+ eh_personality: Cell::new(None),
+ pointee_infos: Default::default(),
+ structs_as_pointer: Default::default(),
+ }
+ }
+
+ pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> {
+ let function: Function<'gcc> = unsafe { std::mem::transmute(value) };
+ debug_assert!(self.functions.borrow().values().any(|value| *value == function),
+ "{:?} ({:?}) is not a function", value, value.get_type());
+ function
+ }
+
+ pub fn is_native_int_type(&self, typ: Type<'gcc>) -> bool {
+ let types = [
+ self.u8_type,
+ self.u16_type,
+ self.u32_type,
+ self.u64_type,
+ self.i8_type,
+ self.i16_type,
+ self.i32_type,
+ self.i64_type,
+ ];
+
+ for native_type in types {
+ if native_type.is_compatible_with(typ) {
+ return true;
+ }
+ }
+
+ self.supports_128bit_integers &&
+ (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
+ }
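+
+ // For example (a sketch): on a target without native 128-bit integers,
+ // `is_native_int_type(cx.i64_type)` is true while
+ // `is_native_int_type(cx.i128_type)` is false, since `i128_type` is then
+ // represented as a two-element `i64` array (see `CodegenCx::new`).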
+
+ pub fn is_non_native_int_type(&self, typ: Type<'gcc>) -> bool {
+ !self.supports_128bit_integers &&
+ (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
+ }
+
+ pub fn is_native_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
+ self.is_native_int_type(typ) || typ.is_compatible_with(self.bool_type)
+ }
+
+ pub fn is_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
+ self.is_native_int_type(typ) || self.is_non_native_int_type(typ) || typ.is_compatible_with(self.bool_type)
+ }
+
+ pub fn sess(&self) -> &Session {
+ &self.tcx.sess
+ }
+
+ pub fn bitcast_if_needed(&self, value: RValue<'gcc>, expected_type: Type<'gcc>) -> RValue<'gcc> {
+ if value.get_type() != expected_type {
+ self.context.new_bitcast(None, value, expected_type)
+ }
+ else {
+ value
+ }
+ }
+}
+
+impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> {
+ type Value = RValue<'gcc>;
+ type Function = RValue<'gcc>;
+
+ type BasicBlock = Block<'gcc>;
+ type Type = Type<'gcc>;
+ type Funclet = (); // TODO(antoyo)
+
+ type DIScope = (); // TODO(antoyo)
+ type DILocation = (); // TODO(antoyo)
+ type DIVariable = (); // TODO(antoyo)
+}
+
+impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn vtables(&self) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>> {
+ &self.vtables
+ }
+
+ fn get_fn(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
+ let func = get_fn(self, instance);
+ *self.current_func.borrow_mut() = Some(self.rvalue_as_function(func));
+ func
+ }
+
+ fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
+ let func_name = self.tcx.symbol_name(instance).name;
+
+ let func =
+ if self.intrinsics.borrow().contains_key(func_name) {
+ self.intrinsics.borrow()[func_name].clone()
+ }
+ else {
+ let func = get_fn(self, instance);
+ self.rvalue_as_function(func)
+ };
+ let ptr = func.get_address(None);
+
+ // TODO(antoyo): don't do this twice: i.e. in declare_fn and here.
+ // FIXME(antoyo): the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
+
+ self.normal_function_addresses.borrow_mut().insert(ptr);
+
+ ptr
+ }
+
+ fn eh_personality(&self) -> RValue<'gcc> {
+ // The exception handling personality function.
+ //
+ // If our compilation unit has the `eh_personality` lang item somewhere
+ // within it, then we just need to codegen that. Otherwise, we're
+ // building an rlib which will depend on some upstream implementation of
+ // this function, so we just codegen a generic reference to it. We don't
+ // specify any of the types for the function, we just make it a symbol
+ // that LLVM can later use.
+ //
+ // Note that MSVC is a little special here in that we don't use the
+ // `eh_personality` lang item at all. Currently LLVM has support for
+ // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
+ // *name of the personality function* to decide what kind of unwind side
+ // tables/landing pads to emit. It looks like Dwarf is used by default,
+ // injecting a dependency on the `_Unwind_Resume` symbol for resuming
+ // an "exception", but for MSVC we want to force SEH. This means that we
+ // can't actually have the personality function be our standard
+ // `rust_eh_personality` function, but rather we wired it up to the
+ // CRT's custom personality function, which forces LLVM to consider
+ // landing pads as "landing pads for SEH".
+ if let Some(llpersonality) = self.eh_personality.get() {
+ return llpersonality;
+ }
+ let tcx = self.tcx;
+ let llfn = match tcx.lang_items().eh_personality() {
+ Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
+ ty::Instance::resolve(
+ tcx,
+ ty::ParamEnv::reveal_all(),
+ def_id,
+ tcx.intern_substs(&[]),
+ )
+ .unwrap().unwrap(),
+ ),
+ _ => {
+ let _name = if wants_msvc_seh(self.sess()) {
+ "__CxxFrameHandler3"
+ } else {
+ "rust_eh_personality"
+ };
+ //let func = self.declare_func(name, self.type_i32(), &[], true);
+ // FIXME(antoyo): this hack should not be needed. That will probably be removed when
+ // unwinding support is added.
+ self.context.new_rvalue_from_int(self.int_type, 0)
+ }
+ };
+ // TODO(antoyo): apply target cpu attributes.
+ self.eh_personality.set(Some(llfn));
+ llfn
+ }
+
+ fn sess(&self) -> &Session {
+ &self.tcx.sess
+ }
+
+ fn check_overflow(&self) -> bool {
+ self.check_overflow
+ }
+
+ fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
+ self.codegen_unit
+ }
+
+ fn used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
+ unimplemented!();
+ }
+
+ fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) {
+ // TODO(antoyo)
+ }
+
+ fn apply_target_cpu_attr(&self, _llfn: RValue<'gcc>) {
+ // TODO(antoyo)
+ }
+
+ fn create_used_variable(&self) {
+ unimplemented!();
+ }
+
+ fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
+ if self.get_declared_value("main").is_none() {
+ Some(self.declare_cfn("main", fn_type))
+ }
+ else {
+ // If the symbol already exists, it is an error: for example, the user wrote
+ // #[no_mangle] extern "C" fn main(..) {..}
+ // instead of #[start]
+ None
+ }
+ }
+
+ fn compiler_used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
+ unimplemented!()
+ }
+
+ fn create_compiler_used_variable(&self) {
+ unimplemented!()
+ }
+}
+
+impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
+impl<'gcc, 'tcx> HasDataLayout for CodegenCx<'gcc, 'tcx> {
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl<'gcc, 'tcx> HasTargetSpec for CodegenCx<'gcc, 'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.tcx.sess.target
+ }
+}
+
+impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ if let LayoutError::SizeOverflow(_) = err {
+ self.sess().span_fatal(span, &err.to_string())
+ } else {
+ span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+ }
+ }
+}
+
+impl<'gcc, 'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
+ self.sess().span_fatal(span, &err.to_string())
+ } else {
+ match fn_abi_request {
+ FnAbiRequest::OfFnPtr { sig, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
+ sig,
+ extra_args,
+ err
+ );
+ }
+ FnAbiRequest::OfInstance { instance, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_instance({}, {:?})` failed: {}",
+ instance,
+ extra_args,
+ err
+ );
+ }
+ }
+ }
+ }
+}
+
+impl<'tcx, 'gcc> HasParamEnv<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ ParamEnv::reveal_all()
+ }
+}
+
+impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
+ /// Generates a new symbol name with the given prefix. This symbol name must
+ /// only be used for definitions with `internal` or `private` linkage.
+ pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
+ let idx = self.local_gen_sym_counter.get();
+ self.local_gen_sym_counter.set(idx + 1);
+ // Include a '.' character, so there can be no accidental conflicts with
+ // user-defined names.
+ let mut name = String::with_capacity(prefix.len() + 6);
+ name.push_str(prefix);
+ name.push_str(".");
+ base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
+ name
+ }
+}
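+
+// A usage sketch (hypothetical; assumes a freshly created `CodegenCx` and
+// rustc's standard base-62 digit set):
+#[cfg(test)]
+fn _local_symbol_name_sketch(cx: &CodegenCx<'_, '_>) {
+ // Successive calls yield "str.0", "str.1", ..., "str.9", "str.a", ...
+ assert_eq!(cx.generate_local_symbol_name("str"), "str.0");
+ assert_eq!(cx.generate_local_symbol_name("str"), "str.1");
+}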
+
+fn to_gcc_tls_mode(tls_model: TlsModel) -> gccjit::TlsModel {
+ match tls_model {
+ TlsModel::GeneralDynamic => gccjit::TlsModel::GlobalDynamic,
+ TlsModel::LocalDynamic => gccjit::TlsModel::LocalDynamic,
+ TlsModel::InitialExec => gccjit::TlsModel::InitialExec,
+ TlsModel::LocalExec => gccjit::TlsModel::LocalExec,
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/coverageinfo.rs b/compiler/rustc_codegen_gcc/src/coverageinfo.rs
new file mode 100644
index 000000000..872fc2472
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/coverageinfo.rs
@@ -0,0 +1,69 @@
+use gccjit::RValue;
+use rustc_codegen_ssa::traits::{CoverageInfoBuilderMethods, CoverageInfoMethods};
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::coverage::{
+ CodeRegion,
+ CounterValueReference,
+ ExpressionOperandId,
+ InjectedExpressionId,
+ Op,
+};
+use rustc_middle::ty::Instance;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+impl<'a, 'gcc, 'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn set_function_source_hash(
+ &mut self,
+ _instance: Instance<'tcx>,
+ _function_source_hash: u64,
+ ) -> bool {
+ unimplemented!();
+ }
+
+ fn add_coverage_counter(&mut self, _instance: Instance<'tcx>, _id: CounterValueReference, _region: CodeRegion) -> bool {
+ // TODO(antoyo)
+ false
+ }
+
+ fn add_coverage_counter_expression(&mut self, _instance: Instance<'tcx>, _id: InjectedExpressionId, _lhs: ExpressionOperandId, _op: Op, _rhs: ExpressionOperandId, _region: Option<CodeRegion>) -> bool {
+ // TODO(antoyo)
+ false
+ }
+
+ fn add_coverage_unreachable(&mut self, _instance: Instance<'tcx>, _region: CodeRegion) -> bool {
+ // TODO(antoyo)
+ false
+ }
+}
+
+impl<'gcc, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn coverageinfo_finalize(&self) {
+ // TODO(antoyo)
+ }
+
+ fn get_pgo_func_name_var(&self, _instance: Instance<'tcx>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ /// Functions with MIR-based coverage are normally codegenned _only_ if
+ /// called. LLVM coverage tools typically expect every function to be
+ /// defined (even if unused), with at least one call to LLVM intrinsic
+ /// `instrprof.increment`.
+ ///
+ /// Codegen a small function that will never be called, with one counter
+ /// that will never be incremented.
+ ///
+ /// For used/called functions, the coverageinfo was already added to the
+ /// `function_coverage_map` (keyed by function `Instance`) during codegen.
+ /// But in this case, since the unused function was _not_ previously
+ /// codegenned, collect the coverage `CodeRegion`s from the MIR and add
+ /// them. The first `CodeRegion` is used to add a single counter, with the
+ /// same counter ID used in the injected `instrprof.increment` intrinsic
+ /// call. Since the function is never called, all other `CodeRegion`s can be
+ /// added as `unreachable_region`s.
+ fn define_unused_fn(&self, _def_id: DefId) {
+ unimplemented!();
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/debuginfo.rs b/compiler/rustc_codegen_gcc/src/debuginfo.rs
new file mode 100644
index 000000000..266759ed6
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/debuginfo.rs
@@ -0,0 +1,62 @@
+use gccjit::RValue;
+use rustc_codegen_ssa::mir::debuginfo::{FunctionDebugContext, VariableKind};
+use rustc_codegen_ssa::traits::{DebugInfoBuilderMethods, DebugInfoMethods};
+use rustc_middle::mir;
+use rustc_middle::ty::{Instance, PolyExistentialTraitRef, Ty};
+use rustc_span::{SourceFile, Span, Symbol};
+use rustc_target::abi::Size;
+use rustc_target::abi::call::FnAbi;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> {
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn dbg_var_addr(&mut self, _dbg_var: Self::DIVariable, _scope_metadata: Self::DIScope, _variable_alloca: Self::Value, _direct_offset: Size, _indirect_offsets: &[Size]) {
+ unimplemented!();
+ }
+
+ fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
+ // TODO(antoyo): insert reference to gdb debug scripts section global.
+ }
+
+ fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) {
+ unimplemented!();
+ }
+
+ fn set_dbg_loc(&mut self, _dbg_loc: Self::DILocation) {
+ unimplemented!();
+ }
+}
+
+impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn create_vtable_debuginfo(&self, _ty: Ty<'tcx>, _trait_ref: Option<PolyExistentialTraitRef<'tcx>>, _vtable: Self::Value) {
+ // TODO(antoyo)
+ }
+
+ fn create_function_debug_context(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _llfn: RValue<'gcc>, _mir: &mir::Body<'tcx>) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> {
+ // TODO(antoyo)
+ None
+ }
+
+ fn extend_scope_to_file(&self, _scope_metadata: Self::DIScope, _file: &SourceFile) -> Self::DIScope {
+ unimplemented!();
+ }
+
+ fn debuginfo_finalize(&self) {
+ // TODO(antoyo)
+ }
+
+ fn create_dbg_var(&self, _variable_name: Symbol, _variable_type: Ty<'tcx>, _scope_metadata: Self::DIScope, _variable_kind: VariableKind, _span: Span) -> Self::DIVariable {
+ unimplemented!();
+ }
+
+ fn dbg_scope_fn(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _maybe_definition_llfn: Option<RValue<'gcc>>) -> Self::DIScope {
+ unimplemented!();
+ }
+
+ fn dbg_loc(&self, _scope: Self::DIScope, _inlined_at: Option<Self::DILocation>, _span: Span) -> Self::DILocation {
+ unimplemented!();
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/declare.rs b/compiler/rustc_codegen_gcc/src/declare.rs
new file mode 100644
index 000000000..a619e2f77
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/declare.rs
@@ -0,0 +1,145 @@
+use gccjit::{Function, FunctionType, GlobalKind, LValue, RValue, Type};
+use rustc_codegen_ssa::traits::BaseTypeMethods;
+use rustc_middle::ty::Ty;
+use rustc_span::Symbol;
+use rustc_target::abi::call::FnAbi;
+
+use crate::abi::FnAbiGccExt;
+use crate::context::CodegenCx;
+use crate::intrinsic::llvm;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn get_or_insert_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
+ if self.globals.borrow().contains_key(name) {
+ let typ = self.globals.borrow()[name].get_type();
+ let global = self.context.new_global(None, GlobalKind::Imported, typ, name);
+ if is_tls {
+ global.set_tls_model(self.tls_model);
+ }
+ if let Some(link_section) = link_section {
+ global.set_link_section(link_section.as_str());
+ }
+ global
+ }
+ else {
+ self.declare_global(name, ty, GlobalKind::Exported, is_tls, link_section)
+ }
+ }
+
+ pub fn declare_unnamed_global(&self, ty: Type<'gcc>) -> LValue<'gcc> {
+ let name = self.generate_local_symbol_name("global");
+ self.context.new_global(None, GlobalKind::Internal, ty, &name)
+ }
+
+ pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> LValue<'gcc> {
+ let global = self.context.new_global(None, linkage, ty, name);
+ let global_address = global.get_address(None);
+ self.globals.borrow_mut().insert(name.to_string(), global_address);
+ global
+ }
+
+ /*pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> {
+ self.linkage.set(FunctionType::Exported);
+ let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic);
+ // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+ unsafe { std::mem::transmute(func) }
+ }*/
+
+ pub fn declare_global(&self, name: &str, ty: Type<'gcc>, global_kind: GlobalKind, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
+ let global = self.context.new_global(None, global_kind, ty, name);
+ if is_tls {
+ global.set_tls_model(self.tls_model);
+ }
+ if let Some(link_section) = link_section {
+ global.set_link_section(link_section.as_str());
+ }
+ let global_address = global.get_address(None);
+ self.globals.borrow_mut().insert(name.to_string(), global_address);
+ global
+ }
+
+ pub fn declare_private_global(&self, name: &str, ty: Type<'gcc>) -> LValue<'gcc> {
+ let global = self.context.new_global(None, GlobalKind::Internal, ty, name);
+ let global_address = global.get_address(None);
+ self.globals.borrow_mut().insert(name.to_string(), global_address);
+ global
+ }
+
+ pub fn declare_cfn(&self, name: &str, _fn_type: Type<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): use the fn_type parameter.
+ let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
+ let return_type = self.type_i32();
+ let variadic = false;
+ self.linkage.set(FunctionType::Exported);
+ let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, &[self.type_i32(), const_string], variadic);
+ // NOTE: it is needed to set the current_func here as well, because get_fn() is not called
+ // for the main function.
+ *self.current_func.borrow_mut() = Some(func);
+ // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+ unsafe { std::mem::transmute(func) }
+ }
+
+ pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> RValue<'gcc> {
+ let (return_type, params, variadic, on_stack_param_indices) = fn_abi.gcc_type(self);
+ let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &params, variadic);
+ self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices);
+ // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+ unsafe { std::mem::transmute(func) }
+ }
+
+ pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
+ self.get_or_insert_global(name, ty, is_tls, link_section)
+ }
+
+ pub fn get_declared_value(&self, name: &str) -> Option<RValue<'gcc>> {
+ // TODO(antoyo): use a different field than globals, because this seems to return a function?
+ self.globals.borrow().get(name).cloned()
+ }
+}
+
+/// Declare a function.
+///
+/// If there’s a value with the same name already declared, the function will
+/// update the declaration and return the existing value instead.
+fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
+ if name.starts_with("llvm.") {
+ let intrinsic = llvm::intrinsic(name, cx);
+ cx.intrinsics.borrow_mut().insert(name.to_string(), intrinsic);
+ return intrinsic;
+ }
+ let func =
+ if cx.functions.borrow().contains_key(name) {
+ cx.functions.borrow()[name]
+ }
+ else {
+ let params: Vec<_> = param_types.into_iter().enumerate()
+ .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name.
+ .collect();
+ let func = cx.context.new_function(None, cx.linkage.get(), return_type, &params, mangle_name(name), variadic);
+ cx.functions.borrow_mut().insert(name.to_string(), func);
+ func
+ };
+
+ // TODO(antoyo): set function calling convention.
+ // TODO(antoyo): set unnamed address.
+ // TODO(antoyo): set no red zone function attribute.
+ // TODO(antoyo): set attributes for optimisation.
+ // TODO(antoyo): set attributes for non lazy bind.
+
+ // FIXME(antoyo): invalid cast.
+ func
+}
+
+// FIXME(antoyo): this is a hack because libgccjit currently only supports alphanumeric characters
+// and `_` in symbol names. The unsupported characters seen in practice are `$` and `.`.
+pub fn mangle_name(name: &str) -> String {
+ name.replace(|char: char| {
+ if !char.is_alphanumeric() && char != '_' {
+ debug_assert!("$.".contains(char), "Unsupported char in function name: {}", char);
+ true
+ }
+ else {
+ false
+ }
+ }, "_")
+}
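+
+// A sketch of the replacement rule above: every character outside
+// [A-Za-z0-9_] maps to '_' (only `$` and `.` are expected in practice).
+#[cfg(test)]
+mod mangle_name_tests {
+ use super::mangle_name;
+
+ #[test]
+ fn replaces_unsupported_chars() {
+ assert_eq!(mangle_name("llvm.ctpop.i32"), "llvm_ctpop_i32");
+ assert_eq!(mangle_name("a$b_c"), "a_b_c");
+ assert_eq!(mangle_name("already_ok123"), "already_ok123");
+ }
+}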
diff --git a/compiler/rustc_codegen_gcc/src/int.rs b/compiler/rustc_codegen_gcc/src/int.rs
new file mode 100644
index 000000000..0c5dab004
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/int.rs
@@ -0,0 +1,742 @@
+//! Module to handle integer operations.
+//! This module exists because some integer types are not supported on some gcc platforms,
+//! e.g. 128-bit integers on 32-bit platforms, and thus need to be handled manually.
+
+use std::convert::TryFrom;
+
+use gccjit::{ComparisonOp, FunctionType, RValue, ToRValue, Type, UnaryOp, BinaryOp};
+use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
+use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp};
+use rustc_middle::ty::Ty;
+
+use crate::builder::ToGccComp;
+use crate::{builder::Builder, common::{SignType, TypeReflection}, context::CodegenCx};
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+ pub fn gcc_urem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // 128-bit unsigned %: __umodti3
+ self.multiplicative_operation(BinaryOp::Modulo, "mod", false, a, b)
+ }
+
+ pub fn gcc_srem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // 128-bit signed %: __modti3
+ self.multiplicative_operation(BinaryOp::Modulo, "mod", true, a, b)
+ }
+
+ pub fn gcc_not(&self, a: RValue<'gcc>) -> RValue<'gcc> {
+ let typ = a.get_type();
+ if self.is_native_int_type_or_bool(typ) {
+ let operation =
+ if typ.is_bool() {
+ UnaryOp::LogicalNegate
+ }
+ else {
+ UnaryOp::BitwiseNegate
+ };
+ self.cx.context.new_unary_op(None, operation, typ, a)
+ }
+ else {
+ // TODO(antoyo): use __negdi2 and __negti2 instead?
+ let element_type = typ.dyncast_array().expect("element type");
+ let values = [
+ self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.low(a)),
+ self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.high(a)),
+ ];
+ self.cx.context.new_array_constructor(None, typ, &values)
+ }
+ }
+
+ pub fn gcc_neg(&self, a: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ if self.is_native_int_type(a_type) {
+ self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
+ }
+ else {
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a], "__negti2", false);
+ self.context.new_call(None, func, &[a])
+ }
+ }
+
+ pub fn gcc_and(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b)
+ }
+
+ pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ let a_native = self.is_native_int_type(a_type);
+ let b_native = self.is_native_int_type(b_type);
+ if a_native && b_native {
+ // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by a signed number.
+ // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
+ if a_type.is_signed(self) != b_type.is_signed(self) {
+ let b = self.context.new_cast(None, b, a_type);
+ a >> b
+ }
+ else {
+ a >> b
+ }
+ }
+ else if a_native && !b_native {
+ self.gcc_lshr(a, self.gcc_int_cast(b, a_type))
+ }
+ else {
+ // NOTE: we cannot use the lshr builtin because it calls hi() (to get the most
+ // significant half of the number), which itself uses lshr.
+
+ let native_int_type = a_type.dyncast_array().expect("get element type");
+
+ let func = self.current_func();
+ let then_block = func.new_block("then");
+ let else_block = func.new_block("else");
+ let after_block = func.new_block("after");
+ let b0_block = func.new_block("b0");
+ let actual_else_block = func.new_block("actual_else");
+
+ let result = func.new_local(None, a_type, "shiftResult");
+
+ let sixty_four = self.gcc_int(native_int_type, 64);
+ let sixty_three = self.gcc_int(native_int_type, 63);
+ let zero = self.gcc_zero(native_int_type);
+ let b = self.gcc_int_cast(b, native_int_type);
+ let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
+ self.llbb().end_with_conditional(None, condition, then_block, else_block);
+
+ // TODO(antoyo): take endianness into account.
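+ // The cases below assume a [low, high] little-endian layout:
+ // - b & 64 != 0 (then_block): only the high half contributes: the low part of the
+ //   result is high >> (b - 64) and the high part is the sign fill (high >> 63 for
+ //   signed types, zero otherwise);
+ // - b == 0 (b0_block): the value is returned unchanged;
+ // - 0 < b < 64 (actual_else_block): the low half receives bits from both halves
+ //   and the high half is simply shifted.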
+ let shift_value = self.gcc_sub(b, sixty_four);
+ let high = self.high(a);
+ let sign =
+ if a_type.is_signed(self) {
+ high >> sixty_three
+ }
+ else {
+ zero
+ };
+ let values = [
+ high >> shift_value,
+ sign,
+ ];
+ let array_value = self.context.new_array_constructor(None, a_type, &values);
+ then_block.add_assignment(None, result, array_value);
+ then_block.end_with_jump(None, after_block);
+
+ let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
+ else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
+
+ b0_block.add_assignment(None, result, a);
+ b0_block.end_with_jump(None, after_block);
+
+ let shift_value = self.gcc_sub(sixty_four, b);
+ // NOTE: cast low to its unsigned type in order to perform a logical right shift.
+ let unsigned_type = native_int_type.to_unsigned(&self.cx);
+ let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
+ let shifted_low = casted_low >> self.context.new_cast(None, b, unsigned_type);
+ let shifted_low = self.context.new_cast(None, shifted_low, native_int_type);
+ let values = [
+ (high << shift_value) | shifted_low,
+ high >> b,
+ ];
+ let array_value = self.context.new_array_constructor(None, a_type, &values);
+ actual_else_block.add_assignment(None, result, array_value);
+ actual_else_block.end_with_jump(None, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not expect, the current block in the
+ // state needs to be updated.
+ self.switch_to_block(after_block);
+
+ result.to_rvalue()
+ }
+ }
+
+ fn additive_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
+ if a_type != b_type {
+ if a_type.is_vector() {
+ // Vector types need to be bitcast.
+ // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+ b = self.context.new_bitcast(None, b, a.get_type());
+ }
+ else {
+ b = self.context.new_cast(None, b, a.get_type());
+ }
+ }
+ self.context.new_binary_op(None, operation, a_type, a, b)
+ }
+ else {
+ let signed = a_type.is_compatible_with(self.i128_type);
+ let func_name =
+ match (operation, signed) {
+ (BinaryOp::Plus, true) => "__rust_i128_add",
+ (BinaryOp::Plus, false) => "__rust_u128_add",
+ (BinaryOp::Minus, true) => "__rust_i128_sub",
+ (BinaryOp::Minus, false) => "__rust_u128_sub",
+ _ => unreachable!("unexpected additive operation {:?}", operation),
+ };
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let param_b = self.context.new_parameter(None, b_type, "b");
+ let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
+ self.context.new_call(None, func, &[a, b])
+ }
+ }
+
+ pub fn gcc_add(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.additive_operation(BinaryOp::Plus, a, b)
+ }
+
+ pub fn gcc_mul(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.multiplicative_operation(BinaryOp::Mult, "mul", true, a, b)
+ }
+
+ pub fn gcc_sub(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.additive_operation(BinaryOp::Minus, a, b)
+ }
+
+ fn multiplicative_operation(&self, operation: BinaryOp, operation_name: &str, signed: bool, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
+ self.context.new_binary_op(None, operation, a_type, a, b)
+ }
+ else {
+ let sign =
+ if signed {
+ ""
+ }
+ else {
+ "u"
+ };
+ let func_name = format!("__{}{}ti3", sign, operation_name);
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let param_b = self.context.new_parameter(None, b_type, "b");
+ let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
+ self.context.new_call(None, func, &[a, b])
+ }
+ }
+
+ pub fn gcc_sdiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): check if the types are signed?
+ // 128-bit, signed: __divti3
+ // TODO(antoyo): convert the arguments to signed?
+ self.multiplicative_operation(BinaryOp::Divide, "div", true, a, b)
+ }
+
+ pub fn gcc_udiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // 128-bit, unsigned: __udivti3
+ self.multiplicative_operation(BinaryOp::Divide, "div", false, a, b)
+ }
+
+ pub fn gcc_checked_binop(&self, oop: OverflowOp, typ: Ty<'_>, lhs: <Self as BackendTypes>::Value, rhs: <Self as BackendTypes>::Value) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) {
+ use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
+
+ let new_kind =
+ match typ.kind() {
+ Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
+ Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
+ t @ (Uint(_) | Int(_)) => t.clone(),
+ _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
+ };
+
+ // TODO(antoyo): remove duplication with intrinsic?
+ let name =
+ if self.is_native_int_type(lhs.get_type()) {
+ match oop {
+ OverflowOp::Add =>
+ match new_kind {
+ Int(I8) => "__builtin_add_overflow",
+ Int(I16) => "__builtin_add_overflow",
+ Int(I32) => "__builtin_sadd_overflow",
+ Int(I64) => "__builtin_saddll_overflow",
+ Int(I128) => "__builtin_add_overflow",
+
+ Uint(U8) => "__builtin_add_overflow",
+ Uint(U16) => "__builtin_add_overflow",
+ Uint(U32) => "__builtin_uadd_overflow",
+ Uint(U64) => "__builtin_uaddll_overflow",
+ Uint(U128) => "__builtin_add_overflow",
+
+ _ => unreachable!(),
+ },
+ OverflowOp::Sub =>
+ match new_kind {
+ Int(I8) => "__builtin_sub_overflow",
+ Int(I16) => "__builtin_sub_overflow",
+ Int(I32) => "__builtin_ssub_overflow",
+ Int(I64) => "__builtin_ssubll_overflow",
+ Int(I128) => "__builtin_sub_overflow",
+
+ Uint(U8) => "__builtin_sub_overflow",
+ Uint(U16) => "__builtin_sub_overflow",
+ Uint(U32) => "__builtin_usub_overflow",
+ Uint(U64) => "__builtin_usubll_overflow",
+ Uint(U128) => "__builtin_sub_overflow",
+
+ _ => unreachable!(),
+ },
+ OverflowOp::Mul =>
+ match new_kind {
+ Int(I8) => "__builtin_mul_overflow",
+ Int(I16) => "__builtin_mul_overflow",
+ Int(I32) => "__builtin_smul_overflow",
+ Int(I64) => "__builtin_smulll_overflow",
+ Int(I128) => "__builtin_mul_overflow",
+
+ Uint(U8) => "__builtin_mul_overflow",
+ Uint(U16) => "__builtin_mul_overflow",
+ Uint(U32) => "__builtin_umul_overflow",
+ Uint(U64) => "__builtin_umulll_overflow",
+ Uint(U128) => "__builtin_mul_overflow",
+
+ _ => unreachable!(),
+ },
+ }
+ }
+ else {
+ match new_kind {
+ Int(I128) | Uint(U128) => {
+ let func_name =
+ match oop {
+ OverflowOp::Add =>
+ match new_kind {
+ Int(I128) => "__rust_i128_addo",
+ Uint(U128) => "__rust_u128_addo",
+ _ => unreachable!(),
+ },
+ OverflowOp::Sub =>
+ match new_kind {
+ Int(I128) => "__rust_i128_subo",
+ Uint(U128) => "__rust_u128_subo",
+ _ => unreachable!(),
+ },
+ OverflowOp::Mul =>
+ match new_kind {
+ Int(I128) => "__rust_i128_mulo", // TODO(antoyo): use __muloti4d instead?
+ Uint(U128) => "__rust_u128_mulo",
+ _ => unreachable!(),
+ },
+ };
+ let a_type = lhs.get_type();
+ let b_type = rhs.get_type();
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let param_b = self.context.new_parameter(None, b_type, "b");
+ let result_field = self.context.new_field(None, a_type, "result");
+ let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
+ let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
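+ // NOTE: the __rust_*o helper functions return the result together with an
+ // overflow flag, modeled here as a two-field struct.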
+ let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
+ let result = self.context.new_call(None, func, &[lhs, rhs]);
+ let overflow = result.access_field(None, overflow_field);
+ let int_result = result.access_field(None, result_field);
+ return (int_result, overflow);
+ },
+ _ => {
+ match oop {
+ OverflowOp::Mul =>
+ match new_kind {
+ Int(I32) => "__mulosi4",
+ Int(I64) => "__mulodi4",
+ _ => unreachable!(),
+ },
+ _ => unimplemented!("overflow operation for {:?}", new_kind),
+ }
+ }
+ }
+ };
+
+ let intrinsic = self.context.get_builtin_function(&name);
+ let res = self.current_func()
+ // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
+ .new_local(None, rhs.get_type(), "binopResult")
+ .get_address(None);
+ let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
+ (res.dereference(None).to_rvalue(), overflow)
+ }
+
+ pub fn gcc_icmp(&self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = lhs.get_type();
+ let b_type = rhs.get_type();
+ if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) {
+ let signed = a_type.is_compatible_with(self.i128_type);
+ let sign =
+ if signed {
+ ""
+ }
+ else {
+ "u"
+ };
+ let func_name = format!("__{}cmpti2", sign);
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let param_b = self.context.new_parameter(None, b_type, "b");
+ let func = self.context.new_function(None, FunctionType::Extern, self.int_type, &[param_a, param_b], func_name, false);
+ let cmp = self.context.new_call(None, func, &[lhs, rhs]);
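+ // NOTE: the comparison functions return 0 if a < b, 1 if a == b and 2 if a > b,
+ // so each predicate below is expressed as a comparison against 0, 1 or 2.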
+ let (op, limit) =
+ match op {
+ IntPredicate::IntEQ => {
+ return self.context.new_comparison(None, ComparisonOp::Equals, cmp, self.context.new_rvalue_one(self.int_type));
+ },
+ IntPredicate::IntNE => {
+ return self.context.new_comparison(None, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type));
+ },
+ IntPredicate::IntUGT => (ComparisonOp::Equals, 2),
+ IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1),
+ IntPredicate::IntULT => (ComparisonOp::Equals, 0),
+ IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1),
+ IntPredicate::IntSGT => (ComparisonOp::Equals, 2),
+ IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1),
+ IntPredicate::IntSLT => (ComparisonOp::Equals, 0),
+ IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1),
+ };
+ self.context.new_comparison(None, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit))
+ }
+ else {
+ let left_type = lhs.get_type();
+ let right_type = rhs.get_type();
+ if left_type != right_type {
+ // NOTE: because libgccjit cannot compare function pointers.
+ if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() {
+ lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
+ rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
+ }
+ // NOTE: hack because casting a vector type to the same vector type would fail,
+ // so only cast when the types also differ in their debug representation.
+ else if format!("{:?}", left_type) != format!("{:?}", right_type) {
+ rhs = self.context.new_cast(None, rhs, left_type);
+ }
+ }
+ self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+ }
+ }
+
+ pub fn gcc_xor(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
+ a ^ b
+ }
+ else {
+ let values = [
+ self.low(a) ^ self.low(b),
+ self.high(a) ^ self.high(b),
+ ];
+ self.context.new_array_constructor(None, a_type, &values)
+ }
+ }
+
+ pub fn gcc_shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ let a_native = self.is_native_int_type(a_type);
+ let b_native = self.is_native_int_type(b_type);
+ if a_native && b_native {
+ // FIXME(antoyo): remove the casts when libgccjit can shift a number by a number of a different signedness.
+ if a_type.is_unsigned(self) && b_type.is_signed(self) {
+ let a = self.context.new_cast(None, a, b_type);
+ let result = a << b;
+ self.context.new_cast(None, result, a_type)
+ }
+ else if a_type.is_signed(self) && b_type.is_unsigned(self) {
+ let b = self.context.new_cast(None, b, a_type);
+ a << b
+ }
+ else {
+ a << b
+ }
+ }
+ else if a_native && !b_native {
+ self.gcc_shl(a, self.gcc_int_cast(b, a_type))
+ }
+ else {
+ // NOTE: we cannot use the ashl builtin because it calls widen_hi(), which itself uses ashl.
+ let native_int_type = a_type.dyncast_array().expect("get element type");
+
+ let func = self.current_func();
+ let then_block = func.new_block("then");
+ let else_block = func.new_block("else");
+ let after_block = func.new_block("after");
+ let b0_block = func.new_block("b0");
+ let actual_else_block = func.new_block("actual_else");
+
+ let result = func.new_local(None, a_type, "shiftResult");
+
+ let b = self.gcc_int_cast(b, native_int_type);
+ let sixty_four = self.gcc_int(native_int_type, 64);
+ let zero = self.gcc_zero(native_int_type);
+ let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
+ self.llbb().end_with_conditional(None, condition, then_block, else_block);
+
+ // TODO(antoyo): take endianness into account.
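+ // The cases below assume a [low, high] little-endian layout:
+ // - b & 64 != 0 (then_block): only the low half contributes: the low part of the
+ //   result is zero and the high part is low << (b - 64);
+ // - b == 0 (b0_block): the value is returned unchanged;
+ // - 0 < b < 64 (actual_else_block): the high half receives bits from both halves
+ //   and the low half is simply shifted.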
+ let values = [
+ zero,
+ self.low(a) << (b - sixty_four),
+ ];
+ let array_value = self.context.new_array_constructor(None, a_type, &values);
+ then_block.add_assignment(None, result, array_value);
+ then_block.end_with_jump(None, after_block);
+
+ let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
+ else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
+
+ b0_block.add_assignment(None, result, a);
+ b0_block.end_with_jump(None, after_block);
+
+ // NOTE: cast low to its unsigned type in order to perform a logical right shift.
+ let unsigned_type = native_int_type.to_unsigned(&self.cx);
+ let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
+ let shift_value = self.context.new_cast(None, sixty_four - b, unsigned_type);
+ let high_low = self.context.new_cast(None, casted_low >> shift_value, native_int_type);
+ let values = [
+ self.low(a) << b,
+ (self.high(a) << b) | high_low,
+ ];
+
+ let array_value = self.context.new_array_constructor(None, a_type, &values);
+ actual_else_block.add_assignment(None, result, array_value);
+ actual_else_block.end_with_jump(None, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not expect, the current block in the
+ // state needs to be updated.
+ self.switch_to_block(after_block);
+
+ result.to_rvalue()
+ }
+ }
+
+ pub fn gcc_bswap(&mut self, mut arg: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+ let arg_type = arg.get_type();
+ if !self.is_native_int_type(arg_type) {
+ let native_int_type = arg_type.dyncast_array().expect("get element type");
+ let lsb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 0)).to_rvalue();
+ let swapped_lsb = self.gcc_bswap(lsb, width / 2);
+ let swapped_lsb = self.context.new_cast(None, swapped_lsb, native_int_type);
+ let msb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 1)).to_rvalue();
+ let swapped_msb = self.gcc_bswap(msb, width / 2);
+ let swapped_msb = self.context.new_cast(None, swapped_msb, native_int_type);
+
+ // NOTE: we also need to swap the two elements here, in addition to swapping inside
+ // the elements themselves as done above.
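+ // That is, assuming a [low, high] layout: bswap128([low, high]) ==
+ // [bswap64(high), bswap64(low)].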
+ return self.context.new_array_constructor(None, arg_type, &[swapped_msb, swapped_lsb]);
+ }
+
+ // TODO(antoyo): check if it's faster to use string literals and a
+ // match instead of format!.
+ let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
+ // FIXME(antoyo): this cast should not be necessary. Remove
+ // when having proper sized integer types.
+ let param_type = bswap.get_param(0).to_rvalue().get_type();
+ if param_type != arg_type {
+ arg = self.bitcast(arg, param_type);
+ }
+ self.cx.context.new_call(None, bswap, &[arg])
+ }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
+ if self.is_native_int_type_or_bool(typ) {
+ self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
+ }
+ else {
+ // NOTE: set the sign in high.
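+ // `-(int.is_negative() as i64)` is 0 for non-negative values and -1 (all bits
+ // set) for negative ones, which sign-extends the value to 128 bits.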
+ self.from_low_high(typ, int, -(int.is_negative() as i64))
+ }
+ }
+
+ pub fn gcc_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
+ if self.is_native_int_type_or_bool(typ) {
+ self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
+ }
+ else {
+ self.from_low_high(typ, int as i64, 0)
+ }
+ }
+
+ pub fn gcc_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
+ let low = num as u64;
+ let high = (num >> 64) as u64;
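+ // e.g. 0x0123456789abcdef_fedcba9876543210_u128 splits into
+ // high = 0x0123456789abcdef and low = 0xfedcba9876543210.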
+ if num >> 64 != 0 {
+ // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
+ if self.is_native_int_type(typ) {
+ let low = self.context.new_rvalue_from_long(self.u64_type, low as i64);
+ let high = self.context.new_rvalue_from_long(typ, high as i64);
+
+ let sixty_four = self.context.new_rvalue_from_long(typ, 64);
+ let shift = high << sixty_four;
+ shift | self.context.new_cast(None, low, typ)
+ }
+ else {
+ self.from_low_high(typ, low as i64, high as i64)
+ }
+ }
+ else if typ.is_i128(self) {
+ let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
+ self.gcc_int_cast(num, typ)
+ }
+ else {
+ self.gcc_uint(typ, num as u64)
+ }
+ }
+
+ pub fn gcc_zero(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+ if self.is_native_int_type_or_bool(typ) {
+ self.context.new_rvalue_zero(typ)
+ }
+ else {
+ self.from_low_high(typ, 0, 0)
+ }
+ }
+
+ pub fn gcc_int_width(&self, typ: Type<'gcc>) -> u64 {
+ if self.is_native_int_type_or_bool(typ) {
+ typ.get_size() as u64 * 8
+ }
+ else {
+ // NOTE: the only unsupported types are u128 and i128.
+ 128
+ }
+ }
+
+ fn bitwise_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ let a_native = self.is_native_int_type_or_bool(a_type);
+ let b_native = self.is_native_int_type_or_bool(b_type);
+ if a_type.is_vector() && b_type.is_vector() {
+ self.context.new_binary_op(None, operation, a_type, a, b)
+ }
+ else if a_native && b_native {
+ if a_type != b_type {
+ b = self.context.new_cast(None, b, a_type);
+ }
+ self.context.new_binary_op(None, operation, a_type, a, b)
+ }
+ else {
+ assert!(!a_native && !b_native, "both types should either be native or non-native for a bitwise operation");
+ let native_int_type = a_type.dyncast_array().expect("get element type");
+ let values = [
+ self.context.new_binary_op(None, operation, native_int_type, self.low(a), self.low(b)),
+ self.context.new_binary_op(None, operation, native_int_type, self.high(a), self.high(b)),
+ ];
+ self.context.new_array_constructor(None, a_type, &values)
+ }
+ }
+
+ pub fn gcc_or(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.bitwise_operation(BinaryOp::BitwiseOr, a, b)
+ }
+
+ // TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead?
+ pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ let value_type = value.get_type();
+ if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type) {
+ self.context.new_cast(None, value, dest_typ)
+ }
+ else if self.is_native_int_type_or_bool(dest_typ) {
+ self.context.new_cast(None, self.low(value), dest_typ)
+ }
+ else if self.is_native_int_type_or_bool(value_type) {
+ let dest_element_type = dest_typ.dyncast_array().expect("get element type");
+
+ // NOTE: set the sign of the value.
+ let zero = self.context.new_rvalue_zero(value_type);
+ let is_negative = self.context.new_comparison(None, ComparisonOp::LessThan, value, zero);
+ let is_negative = self.gcc_int_cast(is_negative, dest_element_type);
+ let values = [
+ self.context.new_cast(None, value, dest_element_type),
+ self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative),
+ ];
+ self.context.new_array_constructor(None, dest_typ, &values)
+ }
+ else {
+ // Since u128 and i128 are the only types that can be unsupported, we know the type of
+ // value and the destination type have the same size, so a bitcast is fine.
+
+ // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+ self.context.new_bitcast(None, value, dest_typ)
+ }
+ }
+
+ fn int_to_float_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ let value_type = value.get_type();
+ if self.is_native_int_type_or_bool(value_type) {
+ return self.context.new_cast(None, value, dest_typ);
+ }
+
+ let name_suffix =
+ match self.type_kind(dest_typ) {
+ TypeKind::Float => "tisf",
+ TypeKind::Double => "tidf",
+ kind => panic!("cannot cast a non-native integer to type {:?}", kind),
+ };
+ let sign =
+ if signed {
+ ""
+ }
+ else {
+ "un"
+ };
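+ // This builds the name of a libgcc conversion routine, e.g. "__floattisf" for
+ // i128 -> f32 and "__floatuntidf" for u128 -> f64.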
+ let func_name = format!("__float{}{}", sign, name_suffix);
+ let param = self.context.new_parameter(None, value_type, "n");
+ let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
+ self.context.new_call(None, func, &[value])
+ }
+
+ pub fn gcc_int_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ self.int_to_float_cast(true, value, dest_typ)
+ }
+
+ pub fn gcc_uint_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ self.int_to_float_cast(false, value, dest_typ)
+ }
+
+ fn float_to_int_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ let value_type = value.get_type();
+ if self.is_native_int_type_or_bool(dest_typ) {
+ return self.context.new_cast(None, value, dest_typ);
+ }
+
+ let name_suffix =
+ match self.type_kind(value_type) {
+ TypeKind::Float => "sfti",
+ TypeKind::Double => "dfti",
+ kind => panic!("cannot cast a {:?} to non-native integer", kind),
+ };
+ let sign =
+ if signed {
+ ""
+ }
+ else {
+ "uns"
+ };
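+ // This builds the name of a libgcc conversion routine, e.g. "__fixsfti" for
+ // f32 -> i128 and "__fixunsdfti" for f64 -> u128.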
+ let func_name = format!("__fix{}{}", sign, name_suffix);
+ let param = self.context.new_parameter(None, value_type, "n");
+ let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
+ self.context.new_call(None, func, &[value])
+ }
+
+ pub fn gcc_float_to_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ self.float_to_int_cast(true, value, dest_typ)
+ }
+
+ pub fn gcc_float_to_uint_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ self.float_to_int_cast(false, value, dest_typ)
+ }
+
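+ // NOTE: a non-native 128-bit integer is represented as a two-element array of the
+ // native integer type, with index 0 holding the low half and index 1 the high half
+ // (endianness is not yet taken into account, see the TODOs above).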
+ fn high(&self, value: RValue<'gcc>) -> RValue<'gcc> {
+ self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 1))
+ .to_rvalue()
+ }
+
+ fn low(&self, value: RValue<'gcc>) -> RValue<'gcc> {
+ self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 0))
+ .to_rvalue()
+ }
+
+ fn from_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
+ let native_int_type = typ.dyncast_array().expect("get element type");
+ let values = [
+ self.context.new_rvalue_from_long(native_int_type, low),
+ self.context.new_rvalue_from_long(native_int_type, high),
+ ];
+ self.context.new_array_constructor(None, typ, &values)
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
new file mode 100644
index 000000000..fb6c38fa0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
@@ -0,0 +1,5722 @@
+// File generated by `rustc_codegen_gcc/tools/generate_intrinsics.py`
+// DO NOT EDIT IT!
+match name {
+ // AMDGPU
+ "llvm.AMDGPU.div.fixup.f32" => "__builtin_amdgpu_div_fixup",
+ "llvm.AMDGPU.div.fixup.f64" => "__builtin_amdgpu_div_fixup",
+ "llvm.AMDGPU.div.fixup.v2f64" => "__builtin_amdgpu_div_fixup",
+ "llvm.AMDGPU.div.fixup.v4f32" => "__builtin_amdgpu_div_fixup",
+ "llvm.AMDGPU.div.fmas.f32" => "__builtin_amdgpu_div_fmas",
+ "llvm.AMDGPU.div.fmas.f64" => "__builtin_amdgpu_div_fmas",
+ "llvm.AMDGPU.div.fmas.v2f64" => "__builtin_amdgpu_div_fmas",
+ "llvm.AMDGPU.div.fmas.v4f32" => "__builtin_amdgpu_div_fmas",
+ "llvm.AMDGPU.ldexp.f32" => "__builtin_amdgpu_ldexp",
+ "llvm.AMDGPU.ldexp.f64" => "__builtin_amdgpu_ldexp",
+ "llvm.AMDGPU.ldexp.v2f64" => "__builtin_amdgpu_ldexp",
+ "llvm.AMDGPU.ldexp.v4f32" => "__builtin_amdgpu_ldexp",
+ "llvm.AMDGPU.rcp.f32" => "__builtin_amdgpu_rcp",
+ "llvm.AMDGPU.rcp.f64" => "__builtin_amdgpu_rcp",
+ "llvm.AMDGPU.rcp.v2f64" => "__builtin_amdgpu_rcp",
+ "llvm.AMDGPU.rcp.v4f32" => "__builtin_amdgpu_rcp",
+ "llvm.AMDGPU.rsq.clamped.f32" => "__builtin_amdgpu_rsq_clamped",
+ "llvm.AMDGPU.rsq.clamped.f64" => "__builtin_amdgpu_rsq_clamped",
+ "llvm.AMDGPU.rsq.clamped.v2f64" => "__builtin_amdgpu_rsq_clamped",
+ "llvm.AMDGPU.rsq.clamped.v4f32" => "__builtin_amdgpu_rsq_clamped",
+ "llvm.AMDGPU.rsq.f32" => "__builtin_amdgpu_rsq",
+ "llvm.AMDGPU.rsq.f64" => "__builtin_amdgpu_rsq",
+ "llvm.AMDGPU.rsq.v2f64" => "__builtin_amdgpu_rsq",
+ "llvm.AMDGPU.rsq.v4f32" => "__builtin_amdgpu_rsq",
+ "llvm.AMDGPU.trig.preop.f32" => "__builtin_amdgpu_trig_preop",
+ "llvm.AMDGPU.trig.preop.f64" => "__builtin_amdgpu_trig_preop",
+ "llvm.AMDGPU.trig.preop.v2f64" => "__builtin_amdgpu_trig_preop",
+ "llvm.AMDGPU.trig.preop.v4f32" => "__builtin_amdgpu_trig_preop",
+ // aarch64
+ "llvm.aarch64.dmb" => "__builtin_arm_dmb",
+ "llvm.aarch64.dsb" => "__builtin_arm_dsb",
+ "llvm.aarch64.isb" => "__builtin_arm_isb",
+ "llvm.aarch64.sve.aesd" => "__builtin_sve_svaesd_u8",
+ "llvm.aarch64.sve.aese" => "__builtin_sve_svaese_u8",
+ "llvm.aarch64.sve.aesimc" => "__builtin_sve_svaesimc_u8",
+ "llvm.aarch64.sve.aesmc" => "__builtin_sve_svaesmc_u8",
+ "llvm.aarch64.sve.rax1" => "__builtin_sve_svrax1_u64",
+ "llvm.aarch64.sve.rdffr" => "__builtin_sve_svrdffr",
+ "llvm.aarch64.sve.rdffr.z" => "__builtin_sve_svrdffr_z",
+ "llvm.aarch64.sve.setffr" => "__builtin_sve_svsetffr",
+ "llvm.aarch64.sve.sm4e" => "__builtin_sve_svsm4e_u32",
+ "llvm.aarch64.sve.sm4ekey" => "__builtin_sve_svsm4ekey_u32",
+ "llvm.aarch64.sve.wrffr" => "__builtin_sve_svwrffr",
+ "llvm.aarch64.tcancel" => "__builtin_arm_tcancel",
+ "llvm.aarch64.tcommit" => "__builtin_arm_tcommit",
+ "llvm.aarch64.tstart" => "__builtin_arm_tstart",
+ "llvm.aarch64.ttest" => "__builtin_arm_ttest",
+ // amdgcn
+ "llvm.amdgcn.alignbyte" => "__builtin_amdgcn_alignbyte",
+ "llvm.amdgcn.buffer.wbinvl1" => "__builtin_amdgcn_buffer_wbinvl1",
+ "llvm.amdgcn.buffer.wbinvl1.sc" => "__builtin_amdgcn_buffer_wbinvl1_sc",
+ "llvm.amdgcn.buffer.wbinvl1.vol" => "__builtin_amdgcn_buffer_wbinvl1_vol",
+ "llvm.amdgcn.cubeid" => "__builtin_amdgcn_cubeid",
+ "llvm.amdgcn.cubema" => "__builtin_amdgcn_cubema",
+ "llvm.amdgcn.cubesc" => "__builtin_amdgcn_cubesc",
+ "llvm.amdgcn.cubetc" => "__builtin_amdgcn_cubetc",
+ "llvm.amdgcn.cvt.pk.i16" => "__builtin_amdgcn_cvt_pk_i16",
+ "llvm.amdgcn.cvt.pk.u16" => "__builtin_amdgcn_cvt_pk_u16",
+ "llvm.amdgcn.cvt.pk.u8.f32" => "__builtin_amdgcn_cvt_pk_u8_f32",
+ "llvm.amdgcn.cvt.pknorm.i16" => "__builtin_amdgcn_cvt_pknorm_i16",
+ "llvm.amdgcn.cvt.pknorm.u16" => "__builtin_amdgcn_cvt_pknorm_u16",
+ "llvm.amdgcn.cvt.pkrtz" => "__builtin_amdgcn_cvt_pkrtz",
+ "llvm.amdgcn.dispatch.id" => "__builtin_amdgcn_dispatch_id",
+ "llvm.amdgcn.ds.bpermute" => "__builtin_amdgcn_ds_bpermute",
+ "llvm.amdgcn.ds.fadd.v2bf16" => "__builtin_amdgcn_ds_atomic_fadd_v2bf16",
+ "llvm.amdgcn.ds.gws.barrier" => "__builtin_amdgcn_ds_gws_barrier",
+ "llvm.amdgcn.ds.gws.init" => "__builtin_amdgcn_ds_gws_init",
+ "llvm.amdgcn.ds.gws.sema.br" => "__builtin_amdgcn_ds_gws_sema_br",
+ "llvm.amdgcn.ds.gws.sema.p" => "__builtin_amdgcn_ds_gws_sema_p",
+ "llvm.amdgcn.ds.gws.sema.release.all" => "__builtin_amdgcn_ds_gws_sema_release_all",
+ "llvm.amdgcn.ds.gws.sema.v" => "__builtin_amdgcn_ds_gws_sema_v",
+ "llvm.amdgcn.ds.permute" => "__builtin_amdgcn_ds_permute",
+ "llvm.amdgcn.ds.swizzle" => "__builtin_amdgcn_ds_swizzle",
+ "llvm.amdgcn.endpgm" => "__builtin_amdgcn_endpgm",
+ "llvm.amdgcn.fdot2" => "__builtin_amdgcn_fdot2",
+ "llvm.amdgcn.fmed3" => "__builtin_amdgcn_fmed3",
+ "llvm.amdgcn.fmul.legacy" => "__builtin_amdgcn_fmul_legacy",
+ "llvm.amdgcn.groupstaticsize" => "__builtin_amdgcn_groupstaticsize",
+ "llvm.amdgcn.implicit.buffer.ptr" => "__builtin_amdgcn_implicit_buffer_ptr",
+ "llvm.amdgcn.implicitarg.ptr" => "__builtin_amdgcn_implicitarg_ptr",
+ "llvm.amdgcn.interp.mov" => "__builtin_amdgcn_interp_mov",
+ "llvm.amdgcn.interp.p1" => "__builtin_amdgcn_interp_p1",
+ "llvm.amdgcn.interp.p1.f16" => "__builtin_amdgcn_interp_p1_f16",
+ "llvm.amdgcn.interp.p2" => "__builtin_amdgcn_interp_p2",
+ "llvm.amdgcn.interp.p2.f16" => "__builtin_amdgcn_interp_p2_f16",
+ "llvm.amdgcn.is.private" => "__builtin_amdgcn_is_private",
+ "llvm.amdgcn.is.shared" => "__builtin_amdgcn_is_shared",
+ "llvm.amdgcn.kernarg.segment.ptr" => "__builtin_amdgcn_kernarg_segment_ptr",
+ "llvm.amdgcn.lerp" => "__builtin_amdgcn_lerp",
+ "llvm.amdgcn.mbcnt.hi" => "__builtin_amdgcn_mbcnt_hi",
+ "llvm.amdgcn.mbcnt.lo" => "__builtin_amdgcn_mbcnt_lo",
+ "llvm.amdgcn.mqsad.pk.u16.u8" => "__builtin_amdgcn_mqsad_pk_u16_u8",
+ "llvm.amdgcn.mqsad.u32.u8" => "__builtin_amdgcn_mqsad_u32_u8",
+ "llvm.amdgcn.msad.u8" => "__builtin_amdgcn_msad_u8",
+ "llvm.amdgcn.perm" => "__builtin_amdgcn_perm",
+ "llvm.amdgcn.permlane16" => "__builtin_amdgcn_permlane16",
+ "llvm.amdgcn.permlanex16" => "__builtin_amdgcn_permlanex16",
+ "llvm.amdgcn.qsad.pk.u16.u8" => "__builtin_amdgcn_qsad_pk_u16_u8",
+ "llvm.amdgcn.queue.ptr" => "__builtin_amdgcn_queue_ptr",
+ "llvm.amdgcn.rcp.legacy" => "__builtin_amdgcn_rcp_legacy",
+ "llvm.amdgcn.readfirstlane" => "__builtin_amdgcn_readfirstlane",
+ "llvm.amdgcn.readlane" => "__builtin_amdgcn_readlane",
+ "llvm.amdgcn.rsq.legacy" => "__builtin_amdgcn_rsq_legacy",
+ "llvm.amdgcn.s.barrier" => "__builtin_amdgcn_s_barrier",
+ "llvm.amdgcn.s.dcache.inv" => "__builtin_amdgcn_s_dcache_inv",
+ "llvm.amdgcn.s.dcache.inv.vol" => "__builtin_amdgcn_s_dcache_inv_vol",
+ "llvm.amdgcn.s.dcache.wb" => "__builtin_amdgcn_s_dcache_wb",
+ "llvm.amdgcn.s.dcache.wb.vol" => "__builtin_amdgcn_s_dcache_wb_vol",
+ "llvm.amdgcn.s.decperflevel" => "__builtin_amdgcn_s_decperflevel",
+ "llvm.amdgcn.s.get.waveid.in.workgroup" => "__builtin_amdgcn_s_get_waveid_in_workgroup",
+ "llvm.amdgcn.s.getpc" => "__builtin_amdgcn_s_getpc",
+ "llvm.amdgcn.s.getreg" => "__builtin_amdgcn_s_getreg",
+ "llvm.amdgcn.s.incperflevel" => "__builtin_amdgcn_s_incperflevel",
+ "llvm.amdgcn.s.memrealtime" => "__builtin_amdgcn_s_memrealtime",
+ "llvm.amdgcn.s.memtime" => "__builtin_amdgcn_s_memtime",
+ "llvm.amdgcn.s.sendmsg" => "__builtin_amdgcn_s_sendmsg",
+ "llvm.amdgcn.s.sendmsghalt" => "__builtin_amdgcn_s_sendmsghalt",
+ "llvm.amdgcn.s.setprio" => "__builtin_amdgcn_s_setprio",
+ "llvm.amdgcn.s.setreg" => "__builtin_amdgcn_s_setreg",
+ "llvm.amdgcn.s.sleep" => "__builtin_amdgcn_s_sleep",
+ "llvm.amdgcn.s.waitcnt" => "__builtin_amdgcn_s_waitcnt",
+ "llvm.amdgcn.sad.hi.u8" => "__builtin_amdgcn_sad_hi_u8",
+ "llvm.amdgcn.sad.u16" => "__builtin_amdgcn_sad_u16",
+ "llvm.amdgcn.sad.u8" => "__builtin_amdgcn_sad_u8",
+ "llvm.amdgcn.sched.barrier" => "__builtin_amdgcn_sched_barrier",
+ "llvm.amdgcn.sdot2" => "__builtin_amdgcn_sdot2",
+ "llvm.amdgcn.sdot4" => "__builtin_amdgcn_sdot4",
+ "llvm.amdgcn.sdot8" => "__builtin_amdgcn_sdot8",
+ "llvm.amdgcn.udot2" => "__builtin_amdgcn_udot2",
+ "llvm.amdgcn.udot4" => "__builtin_amdgcn_udot4",
+ "llvm.amdgcn.udot8" => "__builtin_amdgcn_udot8",
+ "llvm.amdgcn.wave.barrier" => "__builtin_amdgcn_wave_barrier",
+ "llvm.amdgcn.wavefrontsize" => "__builtin_amdgcn_wavefrontsize",
+ "llvm.amdgcn.writelane" => "__builtin_amdgcn_writelane",
+ // arm
+ "llvm.arm.cdp" => "__builtin_arm_cdp",
+ "llvm.arm.cdp2" => "__builtin_arm_cdp2",
+ "llvm.arm.cmse.tt" => "__builtin_arm_cmse_TT",
+ "llvm.arm.cmse.tta" => "__builtin_arm_cmse_TTA",
+ "llvm.arm.cmse.ttat" => "__builtin_arm_cmse_TTAT",
+ "llvm.arm.cmse.ttt" => "__builtin_arm_cmse_TTT",
+ "llvm.arm.dmb" => "__builtin_arm_dmb",
+ "llvm.arm.dsb" => "__builtin_arm_dsb",
+ "llvm.arm.get.fpscr" => "__builtin_arm_get_fpscr",
+ "llvm.arm.isb" => "__builtin_arm_isb",
+ "llvm.arm.ldc" => "__builtin_arm_ldc",
+ "llvm.arm.ldc2" => "__builtin_arm_ldc2",
+ "llvm.arm.ldc2l" => "__builtin_arm_ldc2l",
+ "llvm.arm.ldcl" => "__builtin_arm_ldcl",
+ "llvm.arm.mcr" => "__builtin_arm_mcr",
+ "llvm.arm.mcr2" => "__builtin_arm_mcr2",
+ "llvm.arm.mcrr" => "__builtin_arm_mcrr",
+ "llvm.arm.mcrr2" => "__builtin_arm_mcrr2",
+ "llvm.arm.mrc" => "__builtin_arm_mrc",
+ "llvm.arm.mrc2" => "__builtin_arm_mrc2",
+ "llvm.arm.qadd" => "__builtin_arm_qadd",
+ "llvm.arm.qadd16" => "__builtin_arm_qadd16",
+ "llvm.arm.qadd8" => "__builtin_arm_qadd8",
+ "llvm.arm.qasx" => "__builtin_arm_qasx",
+ "llvm.arm.qsax" => "__builtin_arm_qsax",
+ "llvm.arm.qsub" => "__builtin_arm_qsub",
+ "llvm.arm.qsub16" => "__builtin_arm_qsub16",
+ "llvm.arm.qsub8" => "__builtin_arm_qsub8",
+ "llvm.arm.sadd16" => "__builtin_arm_sadd16",
+ "llvm.arm.sadd8" => "__builtin_arm_sadd8",
+ "llvm.arm.sasx" => "__builtin_arm_sasx",
+ "llvm.arm.sel" => "__builtin_arm_sel",
+ "llvm.arm.set.fpscr" => "__builtin_arm_set_fpscr",
+ "llvm.arm.shadd16" => "__builtin_arm_shadd16",
+ "llvm.arm.shadd8" => "__builtin_arm_shadd8",
+ "llvm.arm.shasx" => "__builtin_arm_shasx",
+ "llvm.arm.shsax" => "__builtin_arm_shsax",
+ "llvm.arm.shsub16" => "__builtin_arm_shsub16",
+ "llvm.arm.shsub8" => "__builtin_arm_shsub8",
+ "llvm.arm.smlabb" => "__builtin_arm_smlabb",
+ "llvm.arm.smlabt" => "__builtin_arm_smlabt",
+ "llvm.arm.smlad" => "__builtin_arm_smlad",
+ "llvm.arm.smladx" => "__builtin_arm_smladx",
+ "llvm.arm.smlald" => "__builtin_arm_smlald",
+ "llvm.arm.smlaldx" => "__builtin_arm_smlaldx",
+ "llvm.arm.smlatb" => "__builtin_arm_smlatb",
+ "llvm.arm.smlatt" => "__builtin_arm_smlatt",
+ "llvm.arm.smlawb" => "__builtin_arm_smlawb",
+ "llvm.arm.smlawt" => "__builtin_arm_smlawt",
+ "llvm.arm.smlsd" => "__builtin_arm_smlsd",
+ "llvm.arm.smlsdx" => "__builtin_arm_smlsdx",
+ "llvm.arm.smlsld" => "__builtin_arm_smlsld",
+ "llvm.arm.smlsldx" => "__builtin_arm_smlsldx",
+ "llvm.arm.smuad" => "__builtin_arm_smuad",
+ "llvm.arm.smuadx" => "__builtin_arm_smuadx",
+ "llvm.arm.smulbb" => "__builtin_arm_smulbb",
+ "llvm.arm.smulbt" => "__builtin_arm_smulbt",
+ "llvm.arm.smultb" => "__builtin_arm_smultb",
+ "llvm.arm.smultt" => "__builtin_arm_smultt",
+ "llvm.arm.smulwb" => "__builtin_arm_smulwb",
+ "llvm.arm.smulwt" => "__builtin_arm_smulwt",
+ "llvm.arm.smusd" => "__builtin_arm_smusd",
+ "llvm.arm.smusdx" => "__builtin_arm_smusdx",
+ "llvm.arm.ssat" => "__builtin_arm_ssat",
+ "llvm.arm.ssat16" => "__builtin_arm_ssat16",
+ "llvm.arm.ssax" => "__builtin_arm_ssax",
+ "llvm.arm.ssub16" => "__builtin_arm_ssub16",
+ "llvm.arm.ssub8" => "__builtin_arm_ssub8",
+ "llvm.arm.stc" => "__builtin_arm_stc",
+ "llvm.arm.stc2" => "__builtin_arm_stc2",
+ "llvm.arm.stc2l" => "__builtin_arm_stc2l",
+ "llvm.arm.stcl" => "__builtin_arm_stcl",
+ "llvm.arm.sxtab16" => "__builtin_arm_sxtab16",
+ "llvm.arm.sxtb16" => "__builtin_arm_sxtb16",
+ "llvm.arm.thread.pointer" => "__builtin_thread_pointer",
+ "llvm.arm.uadd16" => "__builtin_arm_uadd16",
+ "llvm.arm.uadd8" => "__builtin_arm_uadd8",
+ "llvm.arm.uasx" => "__builtin_arm_uasx",
+ "llvm.arm.uhadd16" => "__builtin_arm_uhadd16",
+ "llvm.arm.uhadd8" => "__builtin_arm_uhadd8",
+ "llvm.arm.uhasx" => "__builtin_arm_uhasx",
+ "llvm.arm.uhsax" => "__builtin_arm_uhsax",
+ "llvm.arm.uhsub16" => "__builtin_arm_uhsub16",
+ "llvm.arm.uhsub8" => "__builtin_arm_uhsub8",
+ "llvm.arm.uqadd16" => "__builtin_arm_uqadd16",
+ "llvm.arm.uqadd8" => "__builtin_arm_uqadd8",
+ "llvm.arm.uqasx" => "__builtin_arm_uqasx",
+ "llvm.arm.uqsax" => "__builtin_arm_uqsax",
+ "llvm.arm.uqsub16" => "__builtin_arm_uqsub16",
+ "llvm.arm.uqsub8" => "__builtin_arm_uqsub8",
+ "llvm.arm.usad8" => "__builtin_arm_usad8",
+ "llvm.arm.usada8" => "__builtin_arm_usada8",
+ "llvm.arm.usat" => "__builtin_arm_usat",
+ "llvm.arm.usat16" => "__builtin_arm_usat16",
+ "llvm.arm.usax" => "__builtin_arm_usax",
+ "llvm.arm.usub16" => "__builtin_arm_usub16",
+ "llvm.arm.usub8" => "__builtin_arm_usub8",
+ "llvm.arm.uxtab16" => "__builtin_arm_uxtab16",
+ "llvm.arm.uxtb16" => "__builtin_arm_uxtb16",
+ // bpf
+ "llvm.bpf.btf.type.id" => "__builtin_bpf_btf_type_id",
+ "llvm.bpf.compare" => "__builtin_bpf_compare",
+ "llvm.bpf.load.byte" => "__builtin_bpf_load_byte",
+ "llvm.bpf.load.half" => "__builtin_bpf_load_half",
+ "llvm.bpf.load.word" => "__builtin_bpf_load_word",
+ "llvm.bpf.passthrough" => "__builtin_bpf_passthrough",
+ "llvm.bpf.preserve.enum.value" => "__builtin_bpf_preserve_enum_value",
+ "llvm.bpf.preserve.field.info" => "__builtin_bpf_preserve_field_info",
+ "llvm.bpf.preserve.type.info" => "__builtin_bpf_preserve_type_info",
+ "llvm.bpf.pseudo" => "__builtin_bpf_pseudo",
+ // cuda
+ "llvm.cuda.syncthreads" => "__syncthreads",
+ // hexagon
+ "llvm.hexagon.A2.abs" => "__builtin_HEXAGON_A2_abs",
+ "llvm.hexagon.A2.absp" => "__builtin_HEXAGON_A2_absp",
+ "llvm.hexagon.A2.abssat" => "__builtin_HEXAGON_A2_abssat",
+ "llvm.hexagon.A2.add" => "__builtin_HEXAGON_A2_add",
+ "llvm.hexagon.A2.addh.h16.hh" => "__builtin_HEXAGON_A2_addh_h16_hh",
+ "llvm.hexagon.A2.addh.h16.hl" => "__builtin_HEXAGON_A2_addh_h16_hl",
+ "llvm.hexagon.A2.addh.h16.lh" => "__builtin_HEXAGON_A2_addh_h16_lh",
+ "llvm.hexagon.A2.addh.h16.ll" => "__builtin_HEXAGON_A2_addh_h16_ll",
+ "llvm.hexagon.A2.addh.h16.sat.hh" => "__builtin_HEXAGON_A2_addh_h16_sat_hh",
+ "llvm.hexagon.A2.addh.h16.sat.hl" => "__builtin_HEXAGON_A2_addh_h16_sat_hl",
+ "llvm.hexagon.A2.addh.h16.sat.lh" => "__builtin_HEXAGON_A2_addh_h16_sat_lh",
+ "llvm.hexagon.A2.addh.h16.sat.ll" => "__builtin_HEXAGON_A2_addh_h16_sat_ll",
+ "llvm.hexagon.A2.addh.l16.hl" => "__builtin_HEXAGON_A2_addh_l16_hl",
+ "llvm.hexagon.A2.addh.l16.ll" => "__builtin_HEXAGON_A2_addh_l16_ll",
+ "llvm.hexagon.A2.addh.l16.sat.hl" => "__builtin_HEXAGON_A2_addh_l16_sat_hl",
+ "llvm.hexagon.A2.addh.l16.sat.ll" => "__builtin_HEXAGON_A2_addh_l16_sat_ll",
+ "llvm.hexagon.A2.addi" => "__builtin_HEXAGON_A2_addi",
+ "llvm.hexagon.A2.addp" => "__builtin_HEXAGON_A2_addp",
+ "llvm.hexagon.A2.addpsat" => "__builtin_HEXAGON_A2_addpsat",
+ "llvm.hexagon.A2.addsat" => "__builtin_HEXAGON_A2_addsat",
+ "llvm.hexagon.A2.addsp" => "__builtin_HEXAGON_A2_addsp",
+ "llvm.hexagon.A2.and" => "__builtin_HEXAGON_A2_and",
+ "llvm.hexagon.A2.andir" => "__builtin_HEXAGON_A2_andir",
+ "llvm.hexagon.A2.andp" => "__builtin_HEXAGON_A2_andp",
+ "llvm.hexagon.A2.aslh" => "__builtin_HEXAGON_A2_aslh",
+ "llvm.hexagon.A2.asrh" => "__builtin_HEXAGON_A2_asrh",
+ "llvm.hexagon.A2.combine.hh" => "__builtin_HEXAGON_A2_combine_hh",
+ "llvm.hexagon.A2.combine.hl" => "__builtin_HEXAGON_A2_combine_hl",
+ "llvm.hexagon.A2.combine.lh" => "__builtin_HEXAGON_A2_combine_lh",
+ "llvm.hexagon.A2.combine.ll" => "__builtin_HEXAGON_A2_combine_ll",
+ "llvm.hexagon.A2.combineii" => "__builtin_HEXAGON_A2_combineii",
+ "llvm.hexagon.A2.combinew" => "__builtin_HEXAGON_A2_combinew",
+ "llvm.hexagon.A2.max" => "__builtin_HEXAGON_A2_max",
+ "llvm.hexagon.A2.maxp" => "__builtin_HEXAGON_A2_maxp",
+ "llvm.hexagon.A2.maxu" => "__builtin_HEXAGON_A2_maxu",
+ "llvm.hexagon.A2.maxup" => "__builtin_HEXAGON_A2_maxup",
+ "llvm.hexagon.A2.min" => "__builtin_HEXAGON_A2_min",
+ "llvm.hexagon.A2.minp" => "__builtin_HEXAGON_A2_minp",
+ "llvm.hexagon.A2.minu" => "__builtin_HEXAGON_A2_minu",
+ "llvm.hexagon.A2.minup" => "__builtin_HEXAGON_A2_minup",
+ "llvm.hexagon.A2.neg" => "__builtin_HEXAGON_A2_neg",
+ "llvm.hexagon.A2.negp" => "__builtin_HEXAGON_A2_negp",
+ "llvm.hexagon.A2.negsat" => "__builtin_HEXAGON_A2_negsat",
+ "llvm.hexagon.A2.not" => "__builtin_HEXAGON_A2_not",
+ "llvm.hexagon.A2.notp" => "__builtin_HEXAGON_A2_notp",
+ "llvm.hexagon.A2.or" => "__builtin_HEXAGON_A2_or",
+ "llvm.hexagon.A2.orir" => "__builtin_HEXAGON_A2_orir",
+ "llvm.hexagon.A2.orp" => "__builtin_HEXAGON_A2_orp",
+ "llvm.hexagon.A2.roundsat" => "__builtin_HEXAGON_A2_roundsat",
+ "llvm.hexagon.A2.sat" => "__builtin_HEXAGON_A2_sat",
+ "llvm.hexagon.A2.satb" => "__builtin_HEXAGON_A2_satb",
+ "llvm.hexagon.A2.sath" => "__builtin_HEXAGON_A2_sath",
+ "llvm.hexagon.A2.satub" => "__builtin_HEXAGON_A2_satub",
+ "llvm.hexagon.A2.satuh" => "__builtin_HEXAGON_A2_satuh",
+ "llvm.hexagon.A2.sub" => "__builtin_HEXAGON_A2_sub",
+ "llvm.hexagon.A2.subh.h16.hh" => "__builtin_HEXAGON_A2_subh_h16_hh",
+ "llvm.hexagon.A2.subh.h16.hl" => "__builtin_HEXAGON_A2_subh_h16_hl",
+ "llvm.hexagon.A2.subh.h16.lh" => "__builtin_HEXAGON_A2_subh_h16_lh",
+ "llvm.hexagon.A2.subh.h16.ll" => "__builtin_HEXAGON_A2_subh_h16_ll",
+ "llvm.hexagon.A2.subh.h16.sat.hh" => "__builtin_HEXAGON_A2_subh_h16_sat_hh",
+ "llvm.hexagon.A2.subh.h16.sat.hl" => "__builtin_HEXAGON_A2_subh_h16_sat_hl",
+ "llvm.hexagon.A2.subh.h16.sat.lh" => "__builtin_HEXAGON_A2_subh_h16_sat_lh",
+ "llvm.hexagon.A2.subh.h16.sat.ll" => "__builtin_HEXAGON_A2_subh_h16_sat_ll",
+ "llvm.hexagon.A2.subh.l16.hl" => "__builtin_HEXAGON_A2_subh_l16_hl",
+ "llvm.hexagon.A2.subh.l16.ll" => "__builtin_HEXAGON_A2_subh_l16_ll",
+ "llvm.hexagon.A2.subh.l16.sat.hl" => "__builtin_HEXAGON_A2_subh_l16_sat_hl",
+ "llvm.hexagon.A2.subh.l16.sat.ll" => "__builtin_HEXAGON_A2_subh_l16_sat_ll",
+ "llvm.hexagon.A2.subp" => "__builtin_HEXAGON_A2_subp",
+ "llvm.hexagon.A2.subri" => "__builtin_HEXAGON_A2_subri",
+ "llvm.hexagon.A2.subsat" => "__builtin_HEXAGON_A2_subsat",
+ "llvm.hexagon.A2.svaddh" => "__builtin_HEXAGON_A2_svaddh",
+ "llvm.hexagon.A2.svaddhs" => "__builtin_HEXAGON_A2_svaddhs",
+ "llvm.hexagon.A2.svadduhs" => "__builtin_HEXAGON_A2_svadduhs",
+ "llvm.hexagon.A2.svavgh" => "__builtin_HEXAGON_A2_svavgh",
+ "llvm.hexagon.A2.svavghs" => "__builtin_HEXAGON_A2_svavghs",
+ "llvm.hexagon.A2.svnavgh" => "__builtin_HEXAGON_A2_svnavgh",
+ "llvm.hexagon.A2.svsubh" => "__builtin_HEXAGON_A2_svsubh",
+ "llvm.hexagon.A2.svsubhs" => "__builtin_HEXAGON_A2_svsubhs",
+ "llvm.hexagon.A2.svsubuhs" => "__builtin_HEXAGON_A2_svsubuhs",
+ "llvm.hexagon.A2.swiz" => "__builtin_HEXAGON_A2_swiz",
+ "llvm.hexagon.A2.sxtb" => "__builtin_HEXAGON_A2_sxtb",
+ "llvm.hexagon.A2.sxth" => "__builtin_HEXAGON_A2_sxth",
+ "llvm.hexagon.A2.sxtw" => "__builtin_HEXAGON_A2_sxtw",
+ "llvm.hexagon.A2.tfr" => "__builtin_HEXAGON_A2_tfr",
+ "llvm.hexagon.A2.tfrih" => "__builtin_HEXAGON_A2_tfrih",
+ "llvm.hexagon.A2.tfril" => "__builtin_HEXAGON_A2_tfril",
+ "llvm.hexagon.A2.tfrp" => "__builtin_HEXAGON_A2_tfrp",
+ "llvm.hexagon.A2.tfrpi" => "__builtin_HEXAGON_A2_tfrpi",
+ "llvm.hexagon.A2.tfrsi" => "__builtin_HEXAGON_A2_tfrsi",
+ "llvm.hexagon.A2.vabsh" => "__builtin_HEXAGON_A2_vabsh",
+ "llvm.hexagon.A2.vabshsat" => "__builtin_HEXAGON_A2_vabshsat",
+ "llvm.hexagon.A2.vabsw" => "__builtin_HEXAGON_A2_vabsw",
+ "llvm.hexagon.A2.vabswsat" => "__builtin_HEXAGON_A2_vabswsat",
+ "llvm.hexagon.A2.vaddb.map" => "__builtin_HEXAGON_A2_vaddb_map",
+ "llvm.hexagon.A2.vaddh" => "__builtin_HEXAGON_A2_vaddh",
+ "llvm.hexagon.A2.vaddhs" => "__builtin_HEXAGON_A2_vaddhs",
+ "llvm.hexagon.A2.vaddub" => "__builtin_HEXAGON_A2_vaddub",
+ "llvm.hexagon.A2.vaddubs" => "__builtin_HEXAGON_A2_vaddubs",
+ "llvm.hexagon.A2.vadduhs" => "__builtin_HEXAGON_A2_vadduhs",
+ "llvm.hexagon.A2.vaddw" => "__builtin_HEXAGON_A2_vaddw",
+ "llvm.hexagon.A2.vaddws" => "__builtin_HEXAGON_A2_vaddws",
+ "llvm.hexagon.A2.vavgh" => "__builtin_HEXAGON_A2_vavgh",
+ "llvm.hexagon.A2.vavghcr" => "__builtin_HEXAGON_A2_vavghcr",
+ "llvm.hexagon.A2.vavghr" => "__builtin_HEXAGON_A2_vavghr",
+ "llvm.hexagon.A2.vavgub" => "__builtin_HEXAGON_A2_vavgub",
+ "llvm.hexagon.A2.vavgubr" => "__builtin_HEXAGON_A2_vavgubr",
+ "llvm.hexagon.A2.vavguh" => "__builtin_HEXAGON_A2_vavguh",
+ "llvm.hexagon.A2.vavguhr" => "__builtin_HEXAGON_A2_vavguhr",
+ "llvm.hexagon.A2.vavguw" => "__builtin_HEXAGON_A2_vavguw",
+ "llvm.hexagon.A2.vavguwr" => "__builtin_HEXAGON_A2_vavguwr",
+ "llvm.hexagon.A2.vavgw" => "__builtin_HEXAGON_A2_vavgw",
+ "llvm.hexagon.A2.vavgwcr" => "__builtin_HEXAGON_A2_vavgwcr",
+ "llvm.hexagon.A2.vavgwr" => "__builtin_HEXAGON_A2_vavgwr",
+ "llvm.hexagon.A2.vcmpbeq" => "__builtin_HEXAGON_A2_vcmpbeq",
+ "llvm.hexagon.A2.vcmpbgtu" => "__builtin_HEXAGON_A2_vcmpbgtu",
+ "llvm.hexagon.A2.vcmpheq" => "__builtin_HEXAGON_A2_vcmpheq",
+ "llvm.hexagon.A2.vcmphgt" => "__builtin_HEXAGON_A2_vcmphgt",
+ "llvm.hexagon.A2.vcmphgtu" => "__builtin_HEXAGON_A2_vcmphgtu",
+ "llvm.hexagon.A2.vcmpweq" => "__builtin_HEXAGON_A2_vcmpweq",
+ "llvm.hexagon.A2.vcmpwgt" => "__builtin_HEXAGON_A2_vcmpwgt",
+ "llvm.hexagon.A2.vcmpwgtu" => "__builtin_HEXAGON_A2_vcmpwgtu",
+ "llvm.hexagon.A2.vconj" => "__builtin_HEXAGON_A2_vconj",
+ "llvm.hexagon.A2.vmaxb" => "__builtin_HEXAGON_A2_vmaxb",
+ "llvm.hexagon.A2.vmaxh" => "__builtin_HEXAGON_A2_vmaxh",
+ "llvm.hexagon.A2.vmaxub" => "__builtin_HEXAGON_A2_vmaxub",
+ "llvm.hexagon.A2.vmaxuh" => "__builtin_HEXAGON_A2_vmaxuh",
+ "llvm.hexagon.A2.vmaxuw" => "__builtin_HEXAGON_A2_vmaxuw",
+ "llvm.hexagon.A2.vmaxw" => "__builtin_HEXAGON_A2_vmaxw",
+ "llvm.hexagon.A2.vminb" => "__builtin_HEXAGON_A2_vminb",
+ "llvm.hexagon.A2.vminh" => "__builtin_HEXAGON_A2_vminh",
+ "llvm.hexagon.A2.vminub" => "__builtin_HEXAGON_A2_vminub",
+ "llvm.hexagon.A2.vminuh" => "__builtin_HEXAGON_A2_vminuh",
+ "llvm.hexagon.A2.vminuw" => "__builtin_HEXAGON_A2_vminuw",
+ "llvm.hexagon.A2.vminw" => "__builtin_HEXAGON_A2_vminw",
+ "llvm.hexagon.A2.vnavgh" => "__builtin_HEXAGON_A2_vnavgh",
+ "llvm.hexagon.A2.vnavghcr" => "__builtin_HEXAGON_A2_vnavghcr",
+ "llvm.hexagon.A2.vnavghr" => "__builtin_HEXAGON_A2_vnavghr",
+ "llvm.hexagon.A2.vnavgw" => "__builtin_HEXAGON_A2_vnavgw",
+ "llvm.hexagon.A2.vnavgwcr" => "__builtin_HEXAGON_A2_vnavgwcr",
+ "llvm.hexagon.A2.vnavgwr" => "__builtin_HEXAGON_A2_vnavgwr",
+ "llvm.hexagon.A2.vraddub" => "__builtin_HEXAGON_A2_vraddub",
+ "llvm.hexagon.A2.vraddub.acc" => "__builtin_HEXAGON_A2_vraddub_acc",
+ "llvm.hexagon.A2.vrsadub" => "__builtin_HEXAGON_A2_vrsadub",
+ "llvm.hexagon.A2.vrsadub.acc" => "__builtin_HEXAGON_A2_vrsadub_acc",
+ "llvm.hexagon.A2.vsubb.map" => "__builtin_HEXAGON_A2_vsubb_map",
+ "llvm.hexagon.A2.vsubh" => "__builtin_HEXAGON_A2_vsubh",
+ "llvm.hexagon.A2.vsubhs" => "__builtin_HEXAGON_A2_vsubhs",
+ "llvm.hexagon.A2.vsubub" => "__builtin_HEXAGON_A2_vsubub",
+ "llvm.hexagon.A2.vsububs" => "__builtin_HEXAGON_A2_vsububs",
+ "llvm.hexagon.A2.vsubuhs" => "__builtin_HEXAGON_A2_vsubuhs",
+ "llvm.hexagon.A2.vsubw" => "__builtin_HEXAGON_A2_vsubw",
+ "llvm.hexagon.A2.vsubws" => "__builtin_HEXAGON_A2_vsubws",
+ "llvm.hexagon.A2.xor" => "__builtin_HEXAGON_A2_xor",
+ "llvm.hexagon.A2.xorp" => "__builtin_HEXAGON_A2_xorp",
+ "llvm.hexagon.A2.zxtb" => "__builtin_HEXAGON_A2_zxtb",
+ "llvm.hexagon.A2.zxth" => "__builtin_HEXAGON_A2_zxth",
+ "llvm.hexagon.A4.andn" => "__builtin_HEXAGON_A4_andn",
+ "llvm.hexagon.A4.andnp" => "__builtin_HEXAGON_A4_andnp",
+ "llvm.hexagon.A4.bitsplit" => "__builtin_HEXAGON_A4_bitsplit",
+ "llvm.hexagon.A4.bitspliti" => "__builtin_HEXAGON_A4_bitspliti",
+ "llvm.hexagon.A4.boundscheck" => "__builtin_HEXAGON_A4_boundscheck",
+ "llvm.hexagon.A4.cmpbeq" => "__builtin_HEXAGON_A4_cmpbeq",
+ "llvm.hexagon.A4.cmpbeqi" => "__builtin_HEXAGON_A4_cmpbeqi",
+ "llvm.hexagon.A4.cmpbgt" => "__builtin_HEXAGON_A4_cmpbgt",
+ "llvm.hexagon.A4.cmpbgti" => "__builtin_HEXAGON_A4_cmpbgti",
+ "llvm.hexagon.A4.cmpbgtu" => "__builtin_HEXAGON_A4_cmpbgtu",
+ "llvm.hexagon.A4.cmpbgtui" => "__builtin_HEXAGON_A4_cmpbgtui",
+ "llvm.hexagon.A4.cmpheq" => "__builtin_HEXAGON_A4_cmpheq",
+ "llvm.hexagon.A4.cmpheqi" => "__builtin_HEXAGON_A4_cmpheqi",
+ "llvm.hexagon.A4.cmphgt" => "__builtin_HEXAGON_A4_cmphgt",
+ "llvm.hexagon.A4.cmphgti" => "__builtin_HEXAGON_A4_cmphgti",
+ "llvm.hexagon.A4.cmphgtu" => "__builtin_HEXAGON_A4_cmphgtu",
+ "llvm.hexagon.A4.cmphgtui" => "__builtin_HEXAGON_A4_cmphgtui",
+ "llvm.hexagon.A4.combineir" => "__builtin_HEXAGON_A4_combineir",
+ "llvm.hexagon.A4.combineri" => "__builtin_HEXAGON_A4_combineri",
+ "llvm.hexagon.A4.cround.ri" => "__builtin_HEXAGON_A4_cround_ri",
+ "llvm.hexagon.A4.cround.rr" => "__builtin_HEXAGON_A4_cround_rr",
+ "llvm.hexagon.A4.modwrapu" => "__builtin_HEXAGON_A4_modwrapu",
+ "llvm.hexagon.A4.orn" => "__builtin_HEXAGON_A4_orn",
+ "llvm.hexagon.A4.ornp" => "__builtin_HEXAGON_A4_ornp",
+ "llvm.hexagon.A4.rcmpeq" => "__builtin_HEXAGON_A4_rcmpeq",
+ "llvm.hexagon.A4.rcmpeqi" => "__builtin_HEXAGON_A4_rcmpeqi",
+ "llvm.hexagon.A4.rcmpneq" => "__builtin_HEXAGON_A4_rcmpneq",
+ "llvm.hexagon.A4.rcmpneqi" => "__builtin_HEXAGON_A4_rcmpneqi",
+ "llvm.hexagon.A4.round.ri" => "__builtin_HEXAGON_A4_round_ri",
+ "llvm.hexagon.A4.round.ri.sat" => "__builtin_HEXAGON_A4_round_ri_sat",
+ "llvm.hexagon.A4.round.rr" => "__builtin_HEXAGON_A4_round_rr",
+ "llvm.hexagon.A4.round.rr.sat" => "__builtin_HEXAGON_A4_round_rr_sat",
+ "llvm.hexagon.A4.tlbmatch" => "__builtin_HEXAGON_A4_tlbmatch",
+ "llvm.hexagon.A4.vcmpbeq.any" => "__builtin_HEXAGON_A4_vcmpbeq_any",
+ "llvm.hexagon.A4.vcmpbeqi" => "__builtin_HEXAGON_A4_vcmpbeqi",
+ "llvm.hexagon.A4.vcmpbgt" => "__builtin_HEXAGON_A4_vcmpbgt",
+ "llvm.hexagon.A4.vcmpbgti" => "__builtin_HEXAGON_A4_vcmpbgti",
+ "llvm.hexagon.A4.vcmpbgtui" => "__builtin_HEXAGON_A4_vcmpbgtui",
+ "llvm.hexagon.A4.vcmpheqi" => "__builtin_HEXAGON_A4_vcmpheqi",
+ "llvm.hexagon.A4.vcmphgti" => "__builtin_HEXAGON_A4_vcmphgti",
+ "llvm.hexagon.A4.vcmphgtui" => "__builtin_HEXAGON_A4_vcmphgtui",
+ "llvm.hexagon.A4.vcmpweqi" => "__builtin_HEXAGON_A4_vcmpweqi",
+ "llvm.hexagon.A4.vcmpwgti" => "__builtin_HEXAGON_A4_vcmpwgti",
+ "llvm.hexagon.A4.vcmpwgtui" => "__builtin_HEXAGON_A4_vcmpwgtui",
+ "llvm.hexagon.A4.vrmaxh" => "__builtin_HEXAGON_A4_vrmaxh",
+ "llvm.hexagon.A4.vrmaxuh" => "__builtin_HEXAGON_A4_vrmaxuh",
+ "llvm.hexagon.A4.vrmaxuw" => "__builtin_HEXAGON_A4_vrmaxuw",
+ "llvm.hexagon.A4.vrmaxw" => "__builtin_HEXAGON_A4_vrmaxw",
+ "llvm.hexagon.A4.vrminh" => "__builtin_HEXAGON_A4_vrminh",
+ "llvm.hexagon.A4.vrminuh" => "__builtin_HEXAGON_A4_vrminuh",
+ "llvm.hexagon.A4.vrminuw" => "__builtin_HEXAGON_A4_vrminuw",
+ "llvm.hexagon.A4.vrminw" => "__builtin_HEXAGON_A4_vrminw",
+ "llvm.hexagon.A5.vaddhubs" => "__builtin_HEXAGON_A5_vaddhubs",
+ "llvm.hexagon.C2.all8" => "__builtin_HEXAGON_C2_all8",
+ "llvm.hexagon.C2.and" => "__builtin_HEXAGON_C2_and",
+ "llvm.hexagon.C2.andn" => "__builtin_HEXAGON_C2_andn",
+ "llvm.hexagon.C2.any8" => "__builtin_HEXAGON_C2_any8",
+ "llvm.hexagon.C2.bitsclr" => "__builtin_HEXAGON_C2_bitsclr",
+ "llvm.hexagon.C2.bitsclri" => "__builtin_HEXAGON_C2_bitsclri",
+ "llvm.hexagon.C2.bitsset" => "__builtin_HEXAGON_C2_bitsset",
+ "llvm.hexagon.C2.cmpeq" => "__builtin_HEXAGON_C2_cmpeq",
+ "llvm.hexagon.C2.cmpeqi" => "__builtin_HEXAGON_C2_cmpeqi",
+ "llvm.hexagon.C2.cmpeqp" => "__builtin_HEXAGON_C2_cmpeqp",
+ "llvm.hexagon.C2.cmpgei" => "__builtin_HEXAGON_C2_cmpgei",
+ "llvm.hexagon.C2.cmpgeui" => "__builtin_HEXAGON_C2_cmpgeui",
+ "llvm.hexagon.C2.cmpgt" => "__builtin_HEXAGON_C2_cmpgt",
+ "llvm.hexagon.C2.cmpgti" => "__builtin_HEXAGON_C2_cmpgti",
+ "llvm.hexagon.C2.cmpgtp" => "__builtin_HEXAGON_C2_cmpgtp",
+ "llvm.hexagon.C2.cmpgtu" => "__builtin_HEXAGON_C2_cmpgtu",
+ "llvm.hexagon.C2.cmpgtui" => "__builtin_HEXAGON_C2_cmpgtui",
+ "llvm.hexagon.C2.cmpgtup" => "__builtin_HEXAGON_C2_cmpgtup",
+ "llvm.hexagon.C2.cmplt" => "__builtin_HEXAGON_C2_cmplt",
+ "llvm.hexagon.C2.cmpltu" => "__builtin_HEXAGON_C2_cmpltu",
+ "llvm.hexagon.C2.mask" => "__builtin_HEXAGON_C2_mask",
+ "llvm.hexagon.C2.mux" => "__builtin_HEXAGON_C2_mux",
+ "llvm.hexagon.C2.muxii" => "__builtin_HEXAGON_C2_muxii",
+ "llvm.hexagon.C2.muxir" => "__builtin_HEXAGON_C2_muxir",
+ "llvm.hexagon.C2.muxri" => "__builtin_HEXAGON_C2_muxri",
+ "llvm.hexagon.C2.not" => "__builtin_HEXAGON_C2_not",
+ "llvm.hexagon.C2.or" => "__builtin_HEXAGON_C2_or",
+ "llvm.hexagon.C2.orn" => "__builtin_HEXAGON_C2_orn",
+ "llvm.hexagon.C2.pxfer.map" => "__builtin_HEXAGON_C2_pxfer_map",
+ "llvm.hexagon.C2.tfrpr" => "__builtin_HEXAGON_C2_tfrpr",
+ "llvm.hexagon.C2.tfrrp" => "__builtin_HEXAGON_C2_tfrrp",
+ "llvm.hexagon.C2.vitpack" => "__builtin_HEXAGON_C2_vitpack",
+ "llvm.hexagon.C2.vmux" => "__builtin_HEXAGON_C2_vmux",
+ "llvm.hexagon.C2.xor" => "__builtin_HEXAGON_C2_xor",
+ "llvm.hexagon.C4.and.and" => "__builtin_HEXAGON_C4_and_and",
+ "llvm.hexagon.C4.and.andn" => "__builtin_HEXAGON_C4_and_andn",
+ "llvm.hexagon.C4.and.or" => "__builtin_HEXAGON_C4_and_or",
+ "llvm.hexagon.C4.and.orn" => "__builtin_HEXAGON_C4_and_orn",
+ "llvm.hexagon.C4.cmplte" => "__builtin_HEXAGON_C4_cmplte",
+ "llvm.hexagon.C4.cmpltei" => "__builtin_HEXAGON_C4_cmpltei",
+ "llvm.hexagon.C4.cmplteu" => "__builtin_HEXAGON_C4_cmplteu",
+ "llvm.hexagon.C4.cmplteui" => "__builtin_HEXAGON_C4_cmplteui",
+ "llvm.hexagon.C4.cmpneq" => "__builtin_HEXAGON_C4_cmpneq",
+ "llvm.hexagon.C4.cmpneqi" => "__builtin_HEXAGON_C4_cmpneqi",
+ "llvm.hexagon.C4.fastcorner9" => "__builtin_HEXAGON_C4_fastcorner9",
+ "llvm.hexagon.C4.fastcorner9.not" => "__builtin_HEXAGON_C4_fastcorner9_not",
+ "llvm.hexagon.C4.nbitsclr" => "__builtin_HEXAGON_C4_nbitsclr",
+ "llvm.hexagon.C4.nbitsclri" => "__builtin_HEXAGON_C4_nbitsclri",
+ "llvm.hexagon.C4.nbitsset" => "__builtin_HEXAGON_C4_nbitsset",
+ "llvm.hexagon.C4.or.and" => "__builtin_HEXAGON_C4_or_and",
+ "llvm.hexagon.C4.or.andn" => "__builtin_HEXAGON_C4_or_andn",
+ "llvm.hexagon.C4.or.or" => "__builtin_HEXAGON_C4_or_or",
+ "llvm.hexagon.C4.or.orn" => "__builtin_HEXAGON_C4_or_orn",
+ "llvm.hexagon.F2.conv.d2df" => "__builtin_HEXAGON_F2_conv_d2df",
+ "llvm.hexagon.F2.conv.d2sf" => "__builtin_HEXAGON_F2_conv_d2sf",
+ "llvm.hexagon.F2.conv.df2d" => "__builtin_HEXAGON_F2_conv_df2d",
+ "llvm.hexagon.F2.conv.df2d.chop" => "__builtin_HEXAGON_F2_conv_df2d_chop",
+ "llvm.hexagon.F2.conv.df2sf" => "__builtin_HEXAGON_F2_conv_df2sf",
+ "llvm.hexagon.F2.conv.df2ud" => "__builtin_HEXAGON_F2_conv_df2ud",
+ "llvm.hexagon.F2.conv.df2ud.chop" => "__builtin_HEXAGON_F2_conv_df2ud_chop",
+ "llvm.hexagon.F2.conv.df2uw" => "__builtin_HEXAGON_F2_conv_df2uw",
+ "llvm.hexagon.F2.conv.df2uw.chop" => "__builtin_HEXAGON_F2_conv_df2uw_chop",
+ "llvm.hexagon.F2.conv.df2w" => "__builtin_HEXAGON_F2_conv_df2w",
+ "llvm.hexagon.F2.conv.df2w.chop" => "__builtin_HEXAGON_F2_conv_df2w_chop",
+ "llvm.hexagon.F2.conv.sf2d" => "__builtin_HEXAGON_F2_conv_sf2d",
+ "llvm.hexagon.F2.conv.sf2d.chop" => "__builtin_HEXAGON_F2_conv_sf2d_chop",
+ "llvm.hexagon.F2.conv.sf2df" => "__builtin_HEXAGON_F2_conv_sf2df",
+ "llvm.hexagon.F2.conv.sf2ud" => "__builtin_HEXAGON_F2_conv_sf2ud",
+ "llvm.hexagon.F2.conv.sf2ud.chop" => "__builtin_HEXAGON_F2_conv_sf2ud_chop",
+ "llvm.hexagon.F2.conv.sf2uw" => "__builtin_HEXAGON_F2_conv_sf2uw",
+ "llvm.hexagon.F2.conv.sf2uw.chop" => "__builtin_HEXAGON_F2_conv_sf2uw_chop",
+ "llvm.hexagon.F2.conv.sf2w" => "__builtin_HEXAGON_F2_conv_sf2w",
+ "llvm.hexagon.F2.conv.sf2w.chop" => "__builtin_HEXAGON_F2_conv_sf2w_chop",
+ "llvm.hexagon.F2.conv.ud2df" => "__builtin_HEXAGON_F2_conv_ud2df",
+ "llvm.hexagon.F2.conv.ud2sf" => "__builtin_HEXAGON_F2_conv_ud2sf",
+ "llvm.hexagon.F2.conv.uw2df" => "__builtin_HEXAGON_F2_conv_uw2df",
+ "llvm.hexagon.F2.conv.uw2sf" => "__builtin_HEXAGON_F2_conv_uw2sf",
+ "llvm.hexagon.F2.conv.w2df" => "__builtin_HEXAGON_F2_conv_w2df",
+ "llvm.hexagon.F2.conv.w2sf" => "__builtin_HEXAGON_F2_conv_w2sf",
+ "llvm.hexagon.F2.dfadd" => "__builtin_HEXAGON_F2_dfadd",
+ "llvm.hexagon.F2.dfclass" => "__builtin_HEXAGON_F2_dfclass",
+ "llvm.hexagon.F2.dfcmpeq" => "__builtin_HEXAGON_F2_dfcmpeq",
+ "llvm.hexagon.F2.dfcmpge" => "__builtin_HEXAGON_F2_dfcmpge",
+ "llvm.hexagon.F2.dfcmpgt" => "__builtin_HEXAGON_F2_dfcmpgt",
+ "llvm.hexagon.F2.dfcmpuo" => "__builtin_HEXAGON_F2_dfcmpuo",
+ "llvm.hexagon.F2.dffixupd" => "__builtin_HEXAGON_F2_dffixupd",
+ "llvm.hexagon.F2.dffixupn" => "__builtin_HEXAGON_F2_dffixupn",
+ "llvm.hexagon.F2.dffixupr" => "__builtin_HEXAGON_F2_dffixupr",
+ "llvm.hexagon.F2.dffma" => "__builtin_HEXAGON_F2_dffma",
+ "llvm.hexagon.F2.dffma.lib" => "__builtin_HEXAGON_F2_dffma_lib",
+ "llvm.hexagon.F2.dffma.sc" => "__builtin_HEXAGON_F2_dffma_sc",
+ "llvm.hexagon.F2.dffms" => "__builtin_HEXAGON_F2_dffms",
+ "llvm.hexagon.F2.dffms.lib" => "__builtin_HEXAGON_F2_dffms_lib",
+ "llvm.hexagon.F2.dfimm.n" => "__builtin_HEXAGON_F2_dfimm_n",
+ "llvm.hexagon.F2.dfimm.p" => "__builtin_HEXAGON_F2_dfimm_p",
+ "llvm.hexagon.F2.dfmax" => "__builtin_HEXAGON_F2_dfmax",
+ "llvm.hexagon.F2.dfmin" => "__builtin_HEXAGON_F2_dfmin",
+ "llvm.hexagon.F2.dfmpy" => "__builtin_HEXAGON_F2_dfmpy",
+ "llvm.hexagon.F2.dfsub" => "__builtin_HEXAGON_F2_dfsub",
+ "llvm.hexagon.F2.sfadd" => "__builtin_HEXAGON_F2_sfadd",
+ "llvm.hexagon.F2.sfclass" => "__builtin_HEXAGON_F2_sfclass",
+ "llvm.hexagon.F2.sfcmpeq" => "__builtin_HEXAGON_F2_sfcmpeq",
+ "llvm.hexagon.F2.sfcmpge" => "__builtin_HEXAGON_F2_sfcmpge",
+ "llvm.hexagon.F2.sfcmpgt" => "__builtin_HEXAGON_F2_sfcmpgt",
+ "llvm.hexagon.F2.sfcmpuo" => "__builtin_HEXAGON_F2_sfcmpuo",
+ "llvm.hexagon.F2.sffixupd" => "__builtin_HEXAGON_F2_sffixupd",
+ "llvm.hexagon.F2.sffixupn" => "__builtin_HEXAGON_F2_sffixupn",
+ "llvm.hexagon.F2.sffixupr" => "__builtin_HEXAGON_F2_sffixupr",
+ "llvm.hexagon.F2.sffma" => "__builtin_HEXAGON_F2_sffma",
+ "llvm.hexagon.F2.sffma.lib" => "__builtin_HEXAGON_F2_sffma_lib",
+ "llvm.hexagon.F2.sffma.sc" => "__builtin_HEXAGON_F2_sffma_sc",
+ "llvm.hexagon.F2.sffms" => "__builtin_HEXAGON_F2_sffms",
+ "llvm.hexagon.F2.sffms.lib" => "__builtin_HEXAGON_F2_sffms_lib",
+ "llvm.hexagon.F2.sfimm.n" => "__builtin_HEXAGON_F2_sfimm_n",
+ "llvm.hexagon.F2.sfimm.p" => "__builtin_HEXAGON_F2_sfimm_p",
+ "llvm.hexagon.F2.sfmax" => "__builtin_HEXAGON_F2_sfmax",
+ "llvm.hexagon.F2.sfmin" => "__builtin_HEXAGON_F2_sfmin",
+ "llvm.hexagon.F2.sfmpy" => "__builtin_HEXAGON_F2_sfmpy",
+ "llvm.hexagon.F2.sfsub" => "__builtin_HEXAGON_F2_sfsub",
+ "llvm.hexagon.M2.acci" => "__builtin_HEXAGON_M2_acci",
+ "llvm.hexagon.M2.accii" => "__builtin_HEXAGON_M2_accii",
+ "llvm.hexagon.M2.cmaci.s0" => "__builtin_HEXAGON_M2_cmaci_s0",
+ "llvm.hexagon.M2.cmacr.s0" => "__builtin_HEXAGON_M2_cmacr_s0",
+ "llvm.hexagon.M2.cmacs.s0" => "__builtin_HEXAGON_M2_cmacs_s0",
+ "llvm.hexagon.M2.cmacs.s1" => "__builtin_HEXAGON_M2_cmacs_s1",
+ "llvm.hexagon.M2.cmacsc.s0" => "__builtin_HEXAGON_M2_cmacsc_s0",
+ "llvm.hexagon.M2.cmacsc.s1" => "__builtin_HEXAGON_M2_cmacsc_s1",
+ "llvm.hexagon.M2.cmpyi.s0" => "__builtin_HEXAGON_M2_cmpyi_s0",
+ "llvm.hexagon.M2.cmpyr.s0" => "__builtin_HEXAGON_M2_cmpyr_s0",
+ "llvm.hexagon.M2.cmpyrs.s0" => "__builtin_HEXAGON_M2_cmpyrs_s0",
+ "llvm.hexagon.M2.cmpyrs.s1" => "__builtin_HEXAGON_M2_cmpyrs_s1",
+ "llvm.hexagon.M2.cmpyrsc.s0" => "__builtin_HEXAGON_M2_cmpyrsc_s0",
+ "llvm.hexagon.M2.cmpyrsc.s1" => "__builtin_HEXAGON_M2_cmpyrsc_s1",
+ "llvm.hexagon.M2.cmpys.s0" => "__builtin_HEXAGON_M2_cmpys_s0",
+ "llvm.hexagon.M2.cmpys.s1" => "__builtin_HEXAGON_M2_cmpys_s1",
+ "llvm.hexagon.M2.cmpysc.s0" => "__builtin_HEXAGON_M2_cmpysc_s0",
+ "llvm.hexagon.M2.cmpysc.s1" => "__builtin_HEXAGON_M2_cmpysc_s1",
+ "llvm.hexagon.M2.cnacs.s0" => "__builtin_HEXAGON_M2_cnacs_s0",
+ "llvm.hexagon.M2.cnacs.s1" => "__builtin_HEXAGON_M2_cnacs_s1",
+ "llvm.hexagon.M2.cnacsc.s0" => "__builtin_HEXAGON_M2_cnacsc_s0",
+ "llvm.hexagon.M2.cnacsc.s1" => "__builtin_HEXAGON_M2_cnacsc_s1",
+ "llvm.hexagon.M2.dpmpyss.acc.s0" => "__builtin_HEXAGON_M2_dpmpyss_acc_s0",
+ "llvm.hexagon.M2.dpmpyss.nac.s0" => "__builtin_HEXAGON_M2_dpmpyss_nac_s0",
+ "llvm.hexagon.M2.dpmpyss.rnd.s0" => "__builtin_HEXAGON_M2_dpmpyss_rnd_s0",
+ "llvm.hexagon.M2.dpmpyss.s0" => "__builtin_HEXAGON_M2_dpmpyss_s0",
+ "llvm.hexagon.M2.dpmpyuu.acc.s0" => "__builtin_HEXAGON_M2_dpmpyuu_acc_s0",
+ "llvm.hexagon.M2.dpmpyuu.nac.s0" => "__builtin_HEXAGON_M2_dpmpyuu_nac_s0",
+ "llvm.hexagon.M2.dpmpyuu.s0" => "__builtin_HEXAGON_M2_dpmpyuu_s0",
+ "llvm.hexagon.M2.hmmpyh.rs1" => "__builtin_HEXAGON_M2_hmmpyh_rs1",
+ "llvm.hexagon.M2.hmmpyh.s1" => "__builtin_HEXAGON_M2_hmmpyh_s1",
+ "llvm.hexagon.M2.hmmpyl.rs1" => "__builtin_HEXAGON_M2_hmmpyl_rs1",
+ "llvm.hexagon.M2.hmmpyl.s1" => "__builtin_HEXAGON_M2_hmmpyl_s1",
+ "llvm.hexagon.M2.maci" => "__builtin_HEXAGON_M2_maci",
+ "llvm.hexagon.M2.macsin" => "__builtin_HEXAGON_M2_macsin",
+ "llvm.hexagon.M2.macsip" => "__builtin_HEXAGON_M2_macsip",
+ "llvm.hexagon.M2.mmachs.rs0" => "__builtin_HEXAGON_M2_mmachs_rs0",
+ "llvm.hexagon.M2.mmachs.rs1" => "__builtin_HEXAGON_M2_mmachs_rs1",
+ "llvm.hexagon.M2.mmachs.s0" => "__builtin_HEXAGON_M2_mmachs_s0",
+ "llvm.hexagon.M2.mmachs.s1" => "__builtin_HEXAGON_M2_mmachs_s1",
+ "llvm.hexagon.M2.mmacls.rs0" => "__builtin_HEXAGON_M2_mmacls_rs0",
+ "llvm.hexagon.M2.mmacls.rs1" => "__builtin_HEXAGON_M2_mmacls_rs1",
+ "llvm.hexagon.M2.mmacls.s0" => "__builtin_HEXAGON_M2_mmacls_s0",
+ "llvm.hexagon.M2.mmacls.s1" => "__builtin_HEXAGON_M2_mmacls_s1",
+ "llvm.hexagon.M2.mmacuhs.rs0" => "__builtin_HEXAGON_M2_mmacuhs_rs0",
+ "llvm.hexagon.M2.mmacuhs.rs1" => "__builtin_HEXAGON_M2_mmacuhs_rs1",
+ "llvm.hexagon.M2.mmacuhs.s0" => "__builtin_HEXAGON_M2_mmacuhs_s0",
+ "llvm.hexagon.M2.mmacuhs.s1" => "__builtin_HEXAGON_M2_mmacuhs_s1",
+ "llvm.hexagon.M2.mmaculs.rs0" => "__builtin_HEXAGON_M2_mmaculs_rs0",
+ "llvm.hexagon.M2.mmaculs.rs1" => "__builtin_HEXAGON_M2_mmaculs_rs1",
+ "llvm.hexagon.M2.mmaculs.s0" => "__builtin_HEXAGON_M2_mmaculs_s0",
+ "llvm.hexagon.M2.mmaculs.s1" => "__builtin_HEXAGON_M2_mmaculs_s1",
+ "llvm.hexagon.M2.mmpyh.rs0" => "__builtin_HEXAGON_M2_mmpyh_rs0",
+ "llvm.hexagon.M2.mmpyh.rs1" => "__builtin_HEXAGON_M2_mmpyh_rs1",
+ "llvm.hexagon.M2.mmpyh.s0" => "__builtin_HEXAGON_M2_mmpyh_s0",
+ "llvm.hexagon.M2.mmpyh.s1" => "__builtin_HEXAGON_M2_mmpyh_s1",
+ "llvm.hexagon.M2.mmpyl.rs0" => "__builtin_HEXAGON_M2_mmpyl_rs0",
+ "llvm.hexagon.M2.mmpyl.rs1" => "__builtin_HEXAGON_M2_mmpyl_rs1",
+ "llvm.hexagon.M2.mmpyl.s0" => "__builtin_HEXAGON_M2_mmpyl_s0",
+ "llvm.hexagon.M2.mmpyl.s1" => "__builtin_HEXAGON_M2_mmpyl_s1",
+ "llvm.hexagon.M2.mmpyuh.rs0" => "__builtin_HEXAGON_M2_mmpyuh_rs0",
+ "llvm.hexagon.M2.mmpyuh.rs1" => "__builtin_HEXAGON_M2_mmpyuh_rs1",
+ "llvm.hexagon.M2.mmpyuh.s0" => "__builtin_HEXAGON_M2_mmpyuh_s0",
+ "llvm.hexagon.M2.mmpyuh.s1" => "__builtin_HEXAGON_M2_mmpyuh_s1",
+ "llvm.hexagon.M2.mmpyul.rs0" => "__builtin_HEXAGON_M2_mmpyul_rs0",
+ "llvm.hexagon.M2.mmpyul.rs1" => "__builtin_HEXAGON_M2_mmpyul_rs1",
+ "llvm.hexagon.M2.mmpyul.s0" => "__builtin_HEXAGON_M2_mmpyul_s0",
+ "llvm.hexagon.M2.mmpyul.s1" => "__builtin_HEXAGON_M2_mmpyul_s1",
+ "llvm.hexagon.M2.mpy.acc.hh.s0" => "__builtin_HEXAGON_M2_mpy_acc_hh_s0",
+ "llvm.hexagon.M2.mpy.acc.hh.s1" => "__builtin_HEXAGON_M2_mpy_acc_hh_s1",
+ "llvm.hexagon.M2.mpy.acc.hl.s0" => "__builtin_HEXAGON_M2_mpy_acc_hl_s0",
+ "llvm.hexagon.M2.mpy.acc.hl.s1" => "__builtin_HEXAGON_M2_mpy_acc_hl_s1",
+ "llvm.hexagon.M2.mpy.acc.lh.s0" => "__builtin_HEXAGON_M2_mpy_acc_lh_s0",
+ "llvm.hexagon.M2.mpy.acc.lh.s1" => "__builtin_HEXAGON_M2_mpy_acc_lh_s1",
+ "llvm.hexagon.M2.mpy.acc.ll.s0" => "__builtin_HEXAGON_M2_mpy_acc_ll_s0",
+ "llvm.hexagon.M2.mpy.acc.ll.s1" => "__builtin_HEXAGON_M2_mpy_acc_ll_s1",
+ "llvm.hexagon.M2.mpy.acc.sat.hh.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0",
+ "llvm.hexagon.M2.mpy.acc.sat.hh.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1",
+ "llvm.hexagon.M2.mpy.acc.sat.hl.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0",
+ "llvm.hexagon.M2.mpy.acc.sat.hl.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1",
+ "llvm.hexagon.M2.mpy.acc.sat.lh.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0",
+ "llvm.hexagon.M2.mpy.acc.sat.lh.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1",
+ "llvm.hexagon.M2.mpy.acc.sat.ll.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0",
+ "llvm.hexagon.M2.mpy.acc.sat.ll.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1",
+ "llvm.hexagon.M2.mpy.hh.s0" => "__builtin_HEXAGON_M2_mpy_hh_s0",
+ "llvm.hexagon.M2.mpy.hh.s1" => "__builtin_HEXAGON_M2_mpy_hh_s1",
+ "llvm.hexagon.M2.mpy.hl.s0" => "__builtin_HEXAGON_M2_mpy_hl_s0",
+ "llvm.hexagon.M2.mpy.hl.s1" => "__builtin_HEXAGON_M2_mpy_hl_s1",
+ "llvm.hexagon.M2.mpy.lh.s0" => "__builtin_HEXAGON_M2_mpy_lh_s0",
+ "llvm.hexagon.M2.mpy.lh.s1" => "__builtin_HEXAGON_M2_mpy_lh_s1",
+ "llvm.hexagon.M2.mpy.ll.s0" => "__builtin_HEXAGON_M2_mpy_ll_s0",
+ "llvm.hexagon.M2.mpy.ll.s1" => "__builtin_HEXAGON_M2_mpy_ll_s1",
+ "llvm.hexagon.M2.mpy.nac.hh.s0" => "__builtin_HEXAGON_M2_mpy_nac_hh_s0",
+ "llvm.hexagon.M2.mpy.nac.hh.s1" => "__builtin_HEXAGON_M2_mpy_nac_hh_s1",
+ "llvm.hexagon.M2.mpy.nac.hl.s0" => "__builtin_HEXAGON_M2_mpy_nac_hl_s0",
+ "llvm.hexagon.M2.mpy.nac.hl.s1" => "__builtin_HEXAGON_M2_mpy_nac_hl_s1",
+ "llvm.hexagon.M2.mpy.nac.lh.s0" => "__builtin_HEXAGON_M2_mpy_nac_lh_s0",
+ "llvm.hexagon.M2.mpy.nac.lh.s1" => "__builtin_HEXAGON_M2_mpy_nac_lh_s1",
+ "llvm.hexagon.M2.mpy.nac.ll.s0" => "__builtin_HEXAGON_M2_mpy_nac_ll_s0",
+ "llvm.hexagon.M2.mpy.nac.ll.s1" => "__builtin_HEXAGON_M2_mpy_nac_ll_s1",
+ "llvm.hexagon.M2.mpy.nac.sat.hh.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0",
+ "llvm.hexagon.M2.mpy.nac.sat.hh.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1",
+ "llvm.hexagon.M2.mpy.nac.sat.hl.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0",
+ "llvm.hexagon.M2.mpy.nac.sat.hl.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1",
+ "llvm.hexagon.M2.mpy.nac.sat.lh.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0",
+ "llvm.hexagon.M2.mpy.nac.sat.lh.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1",
+ "llvm.hexagon.M2.mpy.nac.sat.ll.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0",
+ "llvm.hexagon.M2.mpy.nac.sat.ll.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1",
+ "llvm.hexagon.M2.mpy.rnd.hh.s0" => "__builtin_HEXAGON_M2_mpy_rnd_hh_s0",
+ "llvm.hexagon.M2.mpy.rnd.hh.s1" => "__builtin_HEXAGON_M2_mpy_rnd_hh_s1",
+ "llvm.hexagon.M2.mpy.rnd.hl.s0" => "__builtin_HEXAGON_M2_mpy_rnd_hl_s0",
+ "llvm.hexagon.M2.mpy.rnd.hl.s1" => "__builtin_HEXAGON_M2_mpy_rnd_hl_s1",
+ "llvm.hexagon.M2.mpy.rnd.lh.s0" => "__builtin_HEXAGON_M2_mpy_rnd_lh_s0",
+ "llvm.hexagon.M2.mpy.rnd.lh.s1" => "__builtin_HEXAGON_M2_mpy_rnd_lh_s1",
+ "llvm.hexagon.M2.mpy.rnd.ll.s0" => "__builtin_HEXAGON_M2_mpy_rnd_ll_s0",
+ "llvm.hexagon.M2.mpy.rnd.ll.s1" => "__builtin_HEXAGON_M2_mpy_rnd_ll_s1",
+ "llvm.hexagon.M2.mpy.sat.hh.s0" => "__builtin_HEXAGON_M2_mpy_sat_hh_s0",
+ "llvm.hexagon.M2.mpy.sat.hh.s1" => "__builtin_HEXAGON_M2_mpy_sat_hh_s1",
+ "llvm.hexagon.M2.mpy.sat.hl.s0" => "__builtin_HEXAGON_M2_mpy_sat_hl_s0",
+ "llvm.hexagon.M2.mpy.sat.hl.s1" => "__builtin_HEXAGON_M2_mpy_sat_hl_s1",
+ "llvm.hexagon.M2.mpy.sat.lh.s0" => "__builtin_HEXAGON_M2_mpy_sat_lh_s0",
+ "llvm.hexagon.M2.mpy.sat.lh.s1" => "__builtin_HEXAGON_M2_mpy_sat_lh_s1",
+ "llvm.hexagon.M2.mpy.sat.ll.s0" => "__builtin_HEXAGON_M2_mpy_sat_ll_s0",
+ "llvm.hexagon.M2.mpy.sat.ll.s1" => "__builtin_HEXAGON_M2_mpy_sat_ll_s1",
+ "llvm.hexagon.M2.mpy.sat.rnd.hh.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0",
+ "llvm.hexagon.M2.mpy.sat.rnd.hh.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1",
+ "llvm.hexagon.M2.mpy.sat.rnd.hl.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0",
+ "llvm.hexagon.M2.mpy.sat.rnd.hl.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1",
+ "llvm.hexagon.M2.mpy.sat.rnd.lh.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0",
+ "llvm.hexagon.M2.mpy.sat.rnd.lh.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1",
+ "llvm.hexagon.M2.mpy.sat.rnd.ll.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0",
+ "llvm.hexagon.M2.mpy.sat.rnd.ll.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1",
+ "llvm.hexagon.M2.mpy.up" => "__builtin_HEXAGON_M2_mpy_up",
+ "llvm.hexagon.M2.mpy.up.s1" => "__builtin_HEXAGON_M2_mpy_up_s1",
+ "llvm.hexagon.M2.mpy.up.s1.sat" => "__builtin_HEXAGON_M2_mpy_up_s1_sat",
+ "llvm.hexagon.M2.mpyd.acc.hh.s0" => "__builtin_HEXAGON_M2_mpyd_acc_hh_s0",
+ "llvm.hexagon.M2.mpyd.acc.hh.s1" => "__builtin_HEXAGON_M2_mpyd_acc_hh_s1",
+ "llvm.hexagon.M2.mpyd.acc.hl.s0" => "__builtin_HEXAGON_M2_mpyd_acc_hl_s0",
+ "llvm.hexagon.M2.mpyd.acc.hl.s1" => "__builtin_HEXAGON_M2_mpyd_acc_hl_s1",
+ "llvm.hexagon.M2.mpyd.acc.lh.s0" => "__builtin_HEXAGON_M2_mpyd_acc_lh_s0",
+ "llvm.hexagon.M2.mpyd.acc.lh.s1" => "__builtin_HEXAGON_M2_mpyd_acc_lh_s1",
+ "llvm.hexagon.M2.mpyd.acc.ll.s0" => "__builtin_HEXAGON_M2_mpyd_acc_ll_s0",
+ "llvm.hexagon.M2.mpyd.acc.ll.s1" => "__builtin_HEXAGON_M2_mpyd_acc_ll_s1",
+ "llvm.hexagon.M2.mpyd.hh.s0" => "__builtin_HEXAGON_M2_mpyd_hh_s0",
+ "llvm.hexagon.M2.mpyd.hh.s1" => "__builtin_HEXAGON_M2_mpyd_hh_s1",
+ "llvm.hexagon.M2.mpyd.hl.s0" => "__builtin_HEXAGON_M2_mpyd_hl_s0",
+ "llvm.hexagon.M2.mpyd.hl.s1" => "__builtin_HEXAGON_M2_mpyd_hl_s1",
+ "llvm.hexagon.M2.mpyd.lh.s0" => "__builtin_HEXAGON_M2_mpyd_lh_s0",
+ "llvm.hexagon.M2.mpyd.lh.s1" => "__builtin_HEXAGON_M2_mpyd_lh_s1",
+ "llvm.hexagon.M2.mpyd.ll.s0" => "__builtin_HEXAGON_M2_mpyd_ll_s0",
+ "llvm.hexagon.M2.mpyd.ll.s1" => "__builtin_HEXAGON_M2_mpyd_ll_s1",
+ "llvm.hexagon.M2.mpyd.nac.hh.s0" => "__builtin_HEXAGON_M2_mpyd_nac_hh_s0",
+ "llvm.hexagon.M2.mpyd.nac.hh.s1" => "__builtin_HEXAGON_M2_mpyd_nac_hh_s1",
+ "llvm.hexagon.M2.mpyd.nac.hl.s0" => "__builtin_HEXAGON_M2_mpyd_nac_hl_s0",
+ "llvm.hexagon.M2.mpyd.nac.hl.s1" => "__builtin_HEXAGON_M2_mpyd_nac_hl_s1",
+ "llvm.hexagon.M2.mpyd.nac.lh.s0" => "__builtin_HEXAGON_M2_mpyd_nac_lh_s0",
+ "llvm.hexagon.M2.mpyd.nac.lh.s1" => "__builtin_HEXAGON_M2_mpyd_nac_lh_s1",
+ "llvm.hexagon.M2.mpyd.nac.ll.s0" => "__builtin_HEXAGON_M2_mpyd_nac_ll_s0",
+ "llvm.hexagon.M2.mpyd.nac.ll.s1" => "__builtin_HEXAGON_M2_mpyd_nac_ll_s1",
+ "llvm.hexagon.M2.mpyd.rnd.hh.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_hh_s0",
+ "llvm.hexagon.M2.mpyd.rnd.hh.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_hh_s1",
+ "llvm.hexagon.M2.mpyd.rnd.hl.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_hl_s0",
+ "llvm.hexagon.M2.mpyd.rnd.hl.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_hl_s1",
+ "llvm.hexagon.M2.mpyd.rnd.lh.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_lh_s0",
+ "llvm.hexagon.M2.mpyd.rnd.lh.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_lh_s1",
+ "llvm.hexagon.M2.mpyd.rnd.ll.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_ll_s0",
+ "llvm.hexagon.M2.mpyd.rnd.ll.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_ll_s1",
+ "llvm.hexagon.M2.mpyi" => "__builtin_HEXAGON_M2_mpyi",
+ "llvm.hexagon.M2.mpysmi" => "__builtin_HEXAGON_M2_mpysmi",
+ "llvm.hexagon.M2.mpysu.up" => "__builtin_HEXAGON_M2_mpysu_up",
+ "llvm.hexagon.M2.mpyu.acc.hh.s0" => "__builtin_HEXAGON_M2_mpyu_acc_hh_s0",
+ "llvm.hexagon.M2.mpyu.acc.hh.s1" => "__builtin_HEXAGON_M2_mpyu_acc_hh_s1",
+ "llvm.hexagon.M2.mpyu.acc.hl.s0" => "__builtin_HEXAGON_M2_mpyu_acc_hl_s0",
+ "llvm.hexagon.M2.mpyu.acc.hl.s1" => "__builtin_HEXAGON_M2_mpyu_acc_hl_s1",
+ "llvm.hexagon.M2.mpyu.acc.lh.s0" => "__builtin_HEXAGON_M2_mpyu_acc_lh_s0",
+ "llvm.hexagon.M2.mpyu.acc.lh.s1" => "__builtin_HEXAGON_M2_mpyu_acc_lh_s1",
+ "llvm.hexagon.M2.mpyu.acc.ll.s0" => "__builtin_HEXAGON_M2_mpyu_acc_ll_s0",
+ "llvm.hexagon.M2.mpyu.acc.ll.s1" => "__builtin_HEXAGON_M2_mpyu_acc_ll_s1",
+ "llvm.hexagon.M2.mpyu.hh.s0" => "__builtin_HEXAGON_M2_mpyu_hh_s0",
+ "llvm.hexagon.M2.mpyu.hh.s1" => "__builtin_HEXAGON_M2_mpyu_hh_s1",
+ "llvm.hexagon.M2.mpyu.hl.s0" => "__builtin_HEXAGON_M2_mpyu_hl_s0",
+ "llvm.hexagon.M2.mpyu.hl.s1" => "__builtin_HEXAGON_M2_mpyu_hl_s1",
+ "llvm.hexagon.M2.mpyu.lh.s0" => "__builtin_HEXAGON_M2_mpyu_lh_s0",
+ "llvm.hexagon.M2.mpyu.lh.s1" => "__builtin_HEXAGON_M2_mpyu_lh_s1",
+ "llvm.hexagon.M2.mpyu.ll.s0" => "__builtin_HEXAGON_M2_mpyu_ll_s0",
+ "llvm.hexagon.M2.mpyu.ll.s1" => "__builtin_HEXAGON_M2_mpyu_ll_s1",
+ "llvm.hexagon.M2.mpyu.nac.hh.s0" => "__builtin_HEXAGON_M2_mpyu_nac_hh_s0",
+ "llvm.hexagon.M2.mpyu.nac.hh.s1" => "__builtin_HEXAGON_M2_mpyu_nac_hh_s1",
+ "llvm.hexagon.M2.mpyu.nac.hl.s0" => "__builtin_HEXAGON_M2_mpyu_nac_hl_s0",
+ "llvm.hexagon.M2.mpyu.nac.hl.s1" => "__builtin_HEXAGON_M2_mpyu_nac_hl_s1",
+ "llvm.hexagon.M2.mpyu.nac.lh.s0" => "__builtin_HEXAGON_M2_mpyu_nac_lh_s0",
+ "llvm.hexagon.M2.mpyu.nac.lh.s1" => "__builtin_HEXAGON_M2_mpyu_nac_lh_s1",
+ "llvm.hexagon.M2.mpyu.nac.ll.s0" => "__builtin_HEXAGON_M2_mpyu_nac_ll_s0",
+ "llvm.hexagon.M2.mpyu.nac.ll.s1" => "__builtin_HEXAGON_M2_mpyu_nac_ll_s1",
+ "llvm.hexagon.M2.mpyu.up" => "__builtin_HEXAGON_M2_mpyu_up",
+ "llvm.hexagon.M2.mpyud.acc.hh.s0" => "__builtin_HEXAGON_M2_mpyud_acc_hh_s0",
+ "llvm.hexagon.M2.mpyud.acc.hh.s1" => "__builtin_HEXAGON_M2_mpyud_acc_hh_s1",
+ "llvm.hexagon.M2.mpyud.acc.hl.s0" => "__builtin_HEXAGON_M2_mpyud_acc_hl_s0",
+ "llvm.hexagon.M2.mpyud.acc.hl.s1" => "__builtin_HEXAGON_M2_mpyud_acc_hl_s1",
+ "llvm.hexagon.M2.mpyud.acc.lh.s0" => "__builtin_HEXAGON_M2_mpyud_acc_lh_s0",
+ "llvm.hexagon.M2.mpyud.acc.lh.s1" => "__builtin_HEXAGON_M2_mpyud_acc_lh_s1",
+ "llvm.hexagon.M2.mpyud.acc.ll.s0" => "__builtin_HEXAGON_M2_mpyud_acc_ll_s0",
+ "llvm.hexagon.M2.mpyud.acc.ll.s1" => "__builtin_HEXAGON_M2_mpyud_acc_ll_s1",
+ "llvm.hexagon.M2.mpyud.hh.s0" => "__builtin_HEXAGON_M2_mpyud_hh_s0",
+ "llvm.hexagon.M2.mpyud.hh.s1" => "__builtin_HEXAGON_M2_mpyud_hh_s1",
+ "llvm.hexagon.M2.mpyud.hl.s0" => "__builtin_HEXAGON_M2_mpyud_hl_s0",
+ "llvm.hexagon.M2.mpyud.hl.s1" => "__builtin_HEXAGON_M2_mpyud_hl_s1",
+ "llvm.hexagon.M2.mpyud.lh.s0" => "__builtin_HEXAGON_M2_mpyud_lh_s0",
+ "llvm.hexagon.M2.mpyud.lh.s1" => "__builtin_HEXAGON_M2_mpyud_lh_s1",
+ "llvm.hexagon.M2.mpyud.ll.s0" => "__builtin_HEXAGON_M2_mpyud_ll_s0",
+ "llvm.hexagon.M2.mpyud.ll.s1" => "__builtin_HEXAGON_M2_mpyud_ll_s1",
+ "llvm.hexagon.M2.mpyud.nac.hh.s0" => "__builtin_HEXAGON_M2_mpyud_nac_hh_s0",
+ "llvm.hexagon.M2.mpyud.nac.hh.s1" => "__builtin_HEXAGON_M2_mpyud_nac_hh_s1",
+ "llvm.hexagon.M2.mpyud.nac.hl.s0" => "__builtin_HEXAGON_M2_mpyud_nac_hl_s0",
+ "llvm.hexagon.M2.mpyud.nac.hl.s1" => "__builtin_HEXAGON_M2_mpyud_nac_hl_s1",
+ "llvm.hexagon.M2.mpyud.nac.lh.s0" => "__builtin_HEXAGON_M2_mpyud_nac_lh_s0",
+ "llvm.hexagon.M2.mpyud.nac.lh.s1" => "__builtin_HEXAGON_M2_mpyud_nac_lh_s1",
+ "llvm.hexagon.M2.mpyud.nac.ll.s0" => "__builtin_HEXAGON_M2_mpyud_nac_ll_s0",
+ "llvm.hexagon.M2.mpyud.nac.ll.s1" => "__builtin_HEXAGON_M2_mpyud_nac_ll_s1",
+ "llvm.hexagon.M2.mpyui" => "__builtin_HEXAGON_M2_mpyui",
+ "llvm.hexagon.M2.nacci" => "__builtin_HEXAGON_M2_nacci",
+ "llvm.hexagon.M2.naccii" => "__builtin_HEXAGON_M2_naccii",
+ "llvm.hexagon.M2.subacc" => "__builtin_HEXAGON_M2_subacc",
+ "llvm.hexagon.M2.vabsdiffh" => "__builtin_HEXAGON_M2_vabsdiffh",
+ "llvm.hexagon.M2.vabsdiffw" => "__builtin_HEXAGON_M2_vabsdiffw",
+ "llvm.hexagon.M2.vcmac.s0.sat.i" => "__builtin_HEXAGON_M2_vcmac_s0_sat_i",
+ "llvm.hexagon.M2.vcmac.s0.sat.r" => "__builtin_HEXAGON_M2_vcmac_s0_sat_r",
+ "llvm.hexagon.M2.vcmpy.s0.sat.i" => "__builtin_HEXAGON_M2_vcmpy_s0_sat_i",
+ "llvm.hexagon.M2.vcmpy.s0.sat.r" => "__builtin_HEXAGON_M2_vcmpy_s0_sat_r",
+ "llvm.hexagon.M2.vcmpy.s1.sat.i" => "__builtin_HEXAGON_M2_vcmpy_s1_sat_i",
+ "llvm.hexagon.M2.vcmpy.s1.sat.r" => "__builtin_HEXAGON_M2_vcmpy_s1_sat_r",
+ "llvm.hexagon.M2.vdmacs.s0" => "__builtin_HEXAGON_M2_vdmacs_s0",
+ "llvm.hexagon.M2.vdmacs.s1" => "__builtin_HEXAGON_M2_vdmacs_s1",
+ "llvm.hexagon.M2.vdmpyrs.s0" => "__builtin_HEXAGON_M2_vdmpyrs_s0",
+ "llvm.hexagon.M2.vdmpyrs.s1" => "__builtin_HEXAGON_M2_vdmpyrs_s1",
+ "llvm.hexagon.M2.vdmpys.s0" => "__builtin_HEXAGON_M2_vdmpys_s0",
+ "llvm.hexagon.M2.vdmpys.s1" => "__builtin_HEXAGON_M2_vdmpys_s1",
+ "llvm.hexagon.M2.vmac2" => "__builtin_HEXAGON_M2_vmac2",
+ "llvm.hexagon.M2.vmac2es" => "__builtin_HEXAGON_M2_vmac2es",
+ "llvm.hexagon.M2.vmac2es.s0" => "__builtin_HEXAGON_M2_vmac2es_s0",
+ "llvm.hexagon.M2.vmac2es.s1" => "__builtin_HEXAGON_M2_vmac2es_s1",
+ "llvm.hexagon.M2.vmac2s.s0" => "__builtin_HEXAGON_M2_vmac2s_s0",
+ "llvm.hexagon.M2.vmac2s.s1" => "__builtin_HEXAGON_M2_vmac2s_s1",
+ "llvm.hexagon.M2.vmac2su.s0" => "__builtin_HEXAGON_M2_vmac2su_s0",
+ "llvm.hexagon.M2.vmac2su.s1" => "__builtin_HEXAGON_M2_vmac2su_s1",
+ "llvm.hexagon.M2.vmpy2es.s0" => "__builtin_HEXAGON_M2_vmpy2es_s0",
+ "llvm.hexagon.M2.vmpy2es.s1" => "__builtin_HEXAGON_M2_vmpy2es_s1",
+ "llvm.hexagon.M2.vmpy2s.s0" => "__builtin_HEXAGON_M2_vmpy2s_s0",
+ "llvm.hexagon.M2.vmpy2s.s0pack" => "__builtin_HEXAGON_M2_vmpy2s_s0pack",
+ "llvm.hexagon.M2.vmpy2s.s1" => "__builtin_HEXAGON_M2_vmpy2s_s1",
+ "llvm.hexagon.M2.vmpy2s.s1pack" => "__builtin_HEXAGON_M2_vmpy2s_s1pack",
+ "llvm.hexagon.M2.vmpy2su.s0" => "__builtin_HEXAGON_M2_vmpy2su_s0",
+ "llvm.hexagon.M2.vmpy2su.s1" => "__builtin_HEXAGON_M2_vmpy2su_s1",
+ "llvm.hexagon.M2.vraddh" => "__builtin_HEXAGON_M2_vraddh",
+ "llvm.hexagon.M2.vradduh" => "__builtin_HEXAGON_M2_vradduh",
+ "llvm.hexagon.M2.vrcmaci.s0" => "__builtin_HEXAGON_M2_vrcmaci_s0",
+ "llvm.hexagon.M2.vrcmaci.s0c" => "__builtin_HEXAGON_M2_vrcmaci_s0c",
+ "llvm.hexagon.M2.vrcmacr.s0" => "__builtin_HEXAGON_M2_vrcmacr_s0",
+ "llvm.hexagon.M2.vrcmacr.s0c" => "__builtin_HEXAGON_M2_vrcmacr_s0c",
+ "llvm.hexagon.M2.vrcmpyi.s0" => "__builtin_HEXAGON_M2_vrcmpyi_s0",
+ "llvm.hexagon.M2.vrcmpyi.s0c" => "__builtin_HEXAGON_M2_vrcmpyi_s0c",
+ "llvm.hexagon.M2.vrcmpyr.s0" => "__builtin_HEXAGON_M2_vrcmpyr_s0",
+ "llvm.hexagon.M2.vrcmpyr.s0c" => "__builtin_HEXAGON_M2_vrcmpyr_s0c",
+ "llvm.hexagon.M2.vrcmpys.acc.s1" => "__builtin_HEXAGON_M2_vrcmpys_acc_s1",
+ "llvm.hexagon.M2.vrcmpys.s1" => "__builtin_HEXAGON_M2_vrcmpys_s1",
+ "llvm.hexagon.M2.vrcmpys.s1rp" => "__builtin_HEXAGON_M2_vrcmpys_s1rp",
+ "llvm.hexagon.M2.vrmac.s0" => "__builtin_HEXAGON_M2_vrmac_s0",
+ "llvm.hexagon.M2.vrmpy.s0" => "__builtin_HEXAGON_M2_vrmpy_s0",
+ "llvm.hexagon.M2.xor.xacc" => "__builtin_HEXAGON_M2_xor_xacc",
+ "llvm.hexagon.M4.and.and" => "__builtin_HEXAGON_M4_and_and",
+ "llvm.hexagon.M4.and.andn" => "__builtin_HEXAGON_M4_and_andn",
+ "llvm.hexagon.M4.and.or" => "__builtin_HEXAGON_M4_and_or",
+ "llvm.hexagon.M4.and.xor" => "__builtin_HEXAGON_M4_and_xor",
+ "llvm.hexagon.M4.cmpyi.wh" => "__builtin_HEXAGON_M4_cmpyi_wh",
+ "llvm.hexagon.M4.cmpyi.whc" => "__builtin_HEXAGON_M4_cmpyi_whc",
+ "llvm.hexagon.M4.cmpyr.wh" => "__builtin_HEXAGON_M4_cmpyr_wh",
+ "llvm.hexagon.M4.cmpyr.whc" => "__builtin_HEXAGON_M4_cmpyr_whc",
+ "llvm.hexagon.M4.mac.up.s1.sat" => "__builtin_HEXAGON_M4_mac_up_s1_sat",
+ "llvm.hexagon.M4.mpyri.addi" => "__builtin_HEXAGON_M4_mpyri_addi",
+ "llvm.hexagon.M4.mpyri.addr" => "__builtin_HEXAGON_M4_mpyri_addr",
+ "llvm.hexagon.M4.mpyri.addr.u2" => "__builtin_HEXAGON_M4_mpyri_addr_u2",
+ "llvm.hexagon.M4.mpyrr.addi" => "__builtin_HEXAGON_M4_mpyrr_addi",
+ "llvm.hexagon.M4.mpyrr.addr" => "__builtin_HEXAGON_M4_mpyrr_addr",
+ "llvm.hexagon.M4.nac.up.s1.sat" => "__builtin_HEXAGON_M4_nac_up_s1_sat",
+ "llvm.hexagon.M4.or.and" => "__builtin_HEXAGON_M4_or_and",
+ "llvm.hexagon.M4.or.andn" => "__builtin_HEXAGON_M4_or_andn",
+ "llvm.hexagon.M4.or.or" => "__builtin_HEXAGON_M4_or_or",
+ "llvm.hexagon.M4.or.xor" => "__builtin_HEXAGON_M4_or_xor",
+ "llvm.hexagon.M4.pmpyw" => "__builtin_HEXAGON_M4_pmpyw",
+ "llvm.hexagon.M4.pmpyw.acc" => "__builtin_HEXAGON_M4_pmpyw_acc",
+ "llvm.hexagon.M4.vpmpyh" => "__builtin_HEXAGON_M4_vpmpyh",
+ "llvm.hexagon.M4.vpmpyh.acc" => "__builtin_HEXAGON_M4_vpmpyh_acc",
+ "llvm.hexagon.M4.vrmpyeh.acc.s0" => "__builtin_HEXAGON_M4_vrmpyeh_acc_s0",
+ "llvm.hexagon.M4.vrmpyeh.acc.s1" => "__builtin_HEXAGON_M4_vrmpyeh_acc_s1",
+ "llvm.hexagon.M4.vrmpyeh.s0" => "__builtin_HEXAGON_M4_vrmpyeh_s0",
+ "llvm.hexagon.M4.vrmpyeh.s1" => "__builtin_HEXAGON_M4_vrmpyeh_s1",
+ "llvm.hexagon.M4.vrmpyoh.acc.s0" => "__builtin_HEXAGON_M4_vrmpyoh_acc_s0",
+ "llvm.hexagon.M4.vrmpyoh.acc.s1" => "__builtin_HEXAGON_M4_vrmpyoh_acc_s1",
+ "llvm.hexagon.M4.vrmpyoh.s0" => "__builtin_HEXAGON_M4_vrmpyoh_s0",
+ "llvm.hexagon.M4.vrmpyoh.s1" => "__builtin_HEXAGON_M4_vrmpyoh_s1",
+ "llvm.hexagon.M4.xor.and" => "__builtin_HEXAGON_M4_xor_and",
+ "llvm.hexagon.M4.xor.andn" => "__builtin_HEXAGON_M4_xor_andn",
+ "llvm.hexagon.M4.xor.or" => "__builtin_HEXAGON_M4_xor_or",
+ "llvm.hexagon.M4.xor.xacc" => "__builtin_HEXAGON_M4_xor_xacc",
+ "llvm.hexagon.M5.vdmacbsu" => "__builtin_HEXAGON_M5_vdmacbsu",
+ "llvm.hexagon.M5.vdmpybsu" => "__builtin_HEXAGON_M5_vdmpybsu",
+ "llvm.hexagon.M5.vmacbsu" => "__builtin_HEXAGON_M5_vmacbsu",
+ "llvm.hexagon.M5.vmacbuu" => "__builtin_HEXAGON_M5_vmacbuu",
+ "llvm.hexagon.M5.vmpybsu" => "__builtin_HEXAGON_M5_vmpybsu",
+ "llvm.hexagon.M5.vmpybuu" => "__builtin_HEXAGON_M5_vmpybuu",
+ "llvm.hexagon.M5.vrmacbsu" => "__builtin_HEXAGON_M5_vrmacbsu",
+ "llvm.hexagon.M5.vrmacbuu" => "__builtin_HEXAGON_M5_vrmacbuu",
+ "llvm.hexagon.M5.vrmpybsu" => "__builtin_HEXAGON_M5_vrmpybsu",
+ "llvm.hexagon.M5.vrmpybuu" => "__builtin_HEXAGON_M5_vrmpybuu",
+ "llvm.hexagon.M6.vabsdiffb" => "__builtin_HEXAGON_M6_vabsdiffb",
+ "llvm.hexagon.M6.vabsdiffub" => "__builtin_HEXAGON_M6_vabsdiffub",
+ "llvm.hexagon.S2.addasl.rrri" => "__builtin_HEXAGON_S2_addasl_rrri",
+ "llvm.hexagon.S2.asl.i.p" => "__builtin_HEXAGON_S2_asl_i_p",
+ "llvm.hexagon.S2.asl.i.p.acc" => "__builtin_HEXAGON_S2_asl_i_p_acc",
+ "llvm.hexagon.S2.asl.i.p.and" => "__builtin_HEXAGON_S2_asl_i_p_and",
+ "llvm.hexagon.S2.asl.i.p.nac" => "__builtin_HEXAGON_S2_asl_i_p_nac",
+ "llvm.hexagon.S2.asl.i.p.or" => "__builtin_HEXAGON_S2_asl_i_p_or",
+ "llvm.hexagon.S2.asl.i.p.xacc" => "__builtin_HEXAGON_S2_asl_i_p_xacc",
+ "llvm.hexagon.S2.asl.i.r" => "__builtin_HEXAGON_S2_asl_i_r",
+ "llvm.hexagon.S2.asl.i.r.acc" => "__builtin_HEXAGON_S2_asl_i_r_acc",
+ "llvm.hexagon.S2.asl.i.r.and" => "__builtin_HEXAGON_S2_asl_i_r_and",
+ "llvm.hexagon.S2.asl.i.r.nac" => "__builtin_HEXAGON_S2_asl_i_r_nac",
+ "llvm.hexagon.S2.asl.i.r.or" => "__builtin_HEXAGON_S2_asl_i_r_or",
+ "llvm.hexagon.S2.asl.i.r.sat" => "__builtin_HEXAGON_S2_asl_i_r_sat",
+ "llvm.hexagon.S2.asl.i.r.xacc" => "__builtin_HEXAGON_S2_asl_i_r_xacc",
+ "llvm.hexagon.S2.asl.i.vh" => "__builtin_HEXAGON_S2_asl_i_vh",
+ "llvm.hexagon.S2.asl.i.vw" => "__builtin_HEXAGON_S2_asl_i_vw",
+ "llvm.hexagon.S2.asl.r.p" => "__builtin_HEXAGON_S2_asl_r_p",
+ "llvm.hexagon.S2.asl.r.p.acc" => "__builtin_HEXAGON_S2_asl_r_p_acc",
+ "llvm.hexagon.S2.asl.r.p.and" => "__builtin_HEXAGON_S2_asl_r_p_and",
+ "llvm.hexagon.S2.asl.r.p.nac" => "__builtin_HEXAGON_S2_asl_r_p_nac",
+ "llvm.hexagon.S2.asl.r.p.or" => "__builtin_HEXAGON_S2_asl_r_p_or",
+ "llvm.hexagon.S2.asl.r.p.xor" => "__builtin_HEXAGON_S2_asl_r_p_xor",
+ "llvm.hexagon.S2.asl.r.r" => "__builtin_HEXAGON_S2_asl_r_r",
+ "llvm.hexagon.S2.asl.r.r.acc" => "__builtin_HEXAGON_S2_asl_r_r_acc",
+ "llvm.hexagon.S2.asl.r.r.and" => "__builtin_HEXAGON_S2_asl_r_r_and",
+ "llvm.hexagon.S2.asl.r.r.nac" => "__builtin_HEXAGON_S2_asl_r_r_nac",
+ "llvm.hexagon.S2.asl.r.r.or" => "__builtin_HEXAGON_S2_asl_r_r_or",
+ "llvm.hexagon.S2.asl.r.r.sat" => "__builtin_HEXAGON_S2_asl_r_r_sat",
+ "llvm.hexagon.S2.asl.r.vh" => "__builtin_HEXAGON_S2_asl_r_vh",
+ "llvm.hexagon.S2.asl.r.vw" => "__builtin_HEXAGON_S2_asl_r_vw",
+ "llvm.hexagon.S2.asr.i.p" => "__builtin_HEXAGON_S2_asr_i_p",
+ "llvm.hexagon.S2.asr.i.p.acc" => "__builtin_HEXAGON_S2_asr_i_p_acc",
+ "llvm.hexagon.S2.asr.i.p.and" => "__builtin_HEXAGON_S2_asr_i_p_and",
+ "llvm.hexagon.S2.asr.i.p.nac" => "__builtin_HEXAGON_S2_asr_i_p_nac",
+ "llvm.hexagon.S2.asr.i.p.or" => "__builtin_HEXAGON_S2_asr_i_p_or",
+ "llvm.hexagon.S2.asr.i.p.rnd" => "__builtin_HEXAGON_S2_asr_i_p_rnd",
+ "llvm.hexagon.S2.asr.i.p.rnd.goodsyntax" => "__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax",
+ "llvm.hexagon.S2.asr.i.r" => "__builtin_HEXAGON_S2_asr_i_r",
+ "llvm.hexagon.S2.asr.i.r.acc" => "__builtin_HEXAGON_S2_asr_i_r_acc",
+ "llvm.hexagon.S2.asr.i.r.and" => "__builtin_HEXAGON_S2_asr_i_r_and",
+ "llvm.hexagon.S2.asr.i.r.nac" => "__builtin_HEXAGON_S2_asr_i_r_nac",
+ "llvm.hexagon.S2.asr.i.r.or" => "__builtin_HEXAGON_S2_asr_i_r_or",
+ "llvm.hexagon.S2.asr.i.r.rnd" => "__builtin_HEXAGON_S2_asr_i_r_rnd",
+ "llvm.hexagon.S2.asr.i.r.rnd.goodsyntax" => "__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax",
+ "llvm.hexagon.S2.asr.i.svw.trun" => "__builtin_HEXAGON_S2_asr_i_svw_trun",
+ "llvm.hexagon.S2.asr.i.vh" => "__builtin_HEXAGON_S2_asr_i_vh",
+ "llvm.hexagon.S2.asr.i.vw" => "__builtin_HEXAGON_S2_asr_i_vw",
+ "llvm.hexagon.S2.asr.r.p" => "__builtin_HEXAGON_S2_asr_r_p",
+ "llvm.hexagon.S2.asr.r.p.acc" => "__builtin_HEXAGON_S2_asr_r_p_acc",
+ "llvm.hexagon.S2.asr.r.p.and" => "__builtin_HEXAGON_S2_asr_r_p_and",
+ "llvm.hexagon.S2.asr.r.p.nac" => "__builtin_HEXAGON_S2_asr_r_p_nac",
+ "llvm.hexagon.S2.asr.r.p.or" => "__builtin_HEXAGON_S2_asr_r_p_or",
+ "llvm.hexagon.S2.asr.r.p.xor" => "__builtin_HEXAGON_S2_asr_r_p_xor",
+ "llvm.hexagon.S2.asr.r.r" => "__builtin_HEXAGON_S2_asr_r_r",
+ "llvm.hexagon.S2.asr.r.r.acc" => "__builtin_HEXAGON_S2_asr_r_r_acc",
+ "llvm.hexagon.S2.asr.r.r.and" => "__builtin_HEXAGON_S2_asr_r_r_and",
+ "llvm.hexagon.S2.asr.r.r.nac" => "__builtin_HEXAGON_S2_asr_r_r_nac",
+ "llvm.hexagon.S2.asr.r.r.or" => "__builtin_HEXAGON_S2_asr_r_r_or",
+ "llvm.hexagon.S2.asr.r.r.sat" => "__builtin_HEXAGON_S2_asr_r_r_sat",
+ "llvm.hexagon.S2.asr.r.svw.trun" => "__builtin_HEXAGON_S2_asr_r_svw_trun",
+ "llvm.hexagon.S2.asr.r.vh" => "__builtin_HEXAGON_S2_asr_r_vh",
+ "llvm.hexagon.S2.asr.r.vw" => "__builtin_HEXAGON_S2_asr_r_vw",
+ "llvm.hexagon.S2.brev" => "__builtin_HEXAGON_S2_brev",
+ "llvm.hexagon.S2.brevp" => "__builtin_HEXAGON_S2_brevp",
+ "llvm.hexagon.S2.cabacencbin" => "__builtin_HEXAGON_S2_cabacencbin",
+ "llvm.hexagon.S2.cl0" => "__builtin_HEXAGON_S2_cl0",
+ "llvm.hexagon.S2.cl0p" => "__builtin_HEXAGON_S2_cl0p",
+ "llvm.hexagon.S2.cl1" => "__builtin_HEXAGON_S2_cl1",
+ "llvm.hexagon.S2.cl1p" => "__builtin_HEXAGON_S2_cl1p",
+ "llvm.hexagon.S2.clb" => "__builtin_HEXAGON_S2_clb",
+ "llvm.hexagon.S2.clbnorm" => "__builtin_HEXAGON_S2_clbnorm",
+ "llvm.hexagon.S2.clbp" => "__builtin_HEXAGON_S2_clbp",
+ "llvm.hexagon.S2.clrbit.i" => "__builtin_HEXAGON_S2_clrbit_i",
+ "llvm.hexagon.S2.clrbit.r" => "__builtin_HEXAGON_S2_clrbit_r",
+ "llvm.hexagon.S2.ct0" => "__builtin_HEXAGON_S2_ct0",
+ "llvm.hexagon.S2.ct0p" => "__builtin_HEXAGON_S2_ct0p",
+ "llvm.hexagon.S2.ct1" => "__builtin_HEXAGON_S2_ct1",
+ "llvm.hexagon.S2.ct1p" => "__builtin_HEXAGON_S2_ct1p",
+ "llvm.hexagon.S2.deinterleave" => "__builtin_HEXAGON_S2_deinterleave",
+ "llvm.hexagon.S2.extractu" => "__builtin_HEXAGON_S2_extractu",
+ "llvm.hexagon.S2.extractu.rp" => "__builtin_HEXAGON_S2_extractu_rp",
+ "llvm.hexagon.S2.extractup" => "__builtin_HEXAGON_S2_extractup",
+ "llvm.hexagon.S2.extractup.rp" => "__builtin_HEXAGON_S2_extractup_rp",
+ "llvm.hexagon.S2.insert" => "__builtin_HEXAGON_S2_insert",
+ "llvm.hexagon.S2.insert.rp" => "__builtin_HEXAGON_S2_insert_rp",
+ "llvm.hexagon.S2.insertp" => "__builtin_HEXAGON_S2_insertp",
+ "llvm.hexagon.S2.insertp.rp" => "__builtin_HEXAGON_S2_insertp_rp",
+ "llvm.hexagon.S2.interleave" => "__builtin_HEXAGON_S2_interleave",
+ "llvm.hexagon.S2.lfsp" => "__builtin_HEXAGON_S2_lfsp",
+ "llvm.hexagon.S2.lsl.r.p" => "__builtin_HEXAGON_S2_lsl_r_p",
+ "llvm.hexagon.S2.lsl.r.p.acc" => "__builtin_HEXAGON_S2_lsl_r_p_acc",
+ "llvm.hexagon.S2.lsl.r.p.and" => "__builtin_HEXAGON_S2_lsl_r_p_and",
+ "llvm.hexagon.S2.lsl.r.p.nac" => "__builtin_HEXAGON_S2_lsl_r_p_nac",
+ "llvm.hexagon.S2.lsl.r.p.or" => "__builtin_HEXAGON_S2_lsl_r_p_or",
+ "llvm.hexagon.S2.lsl.r.p.xor" => "__builtin_HEXAGON_S2_lsl_r_p_xor",
+ "llvm.hexagon.S2.lsl.r.r" => "__builtin_HEXAGON_S2_lsl_r_r",
+ "llvm.hexagon.S2.lsl.r.r.acc" => "__builtin_HEXAGON_S2_lsl_r_r_acc",
+ "llvm.hexagon.S2.lsl.r.r.and" => "__builtin_HEXAGON_S2_lsl_r_r_and",
+ "llvm.hexagon.S2.lsl.r.r.nac" => "__builtin_HEXAGON_S2_lsl_r_r_nac",
+ "llvm.hexagon.S2.lsl.r.r.or" => "__builtin_HEXAGON_S2_lsl_r_r_or",
+ "llvm.hexagon.S2.lsl.r.vh" => "__builtin_HEXAGON_S2_lsl_r_vh",
+ "llvm.hexagon.S2.lsl.r.vw" => "__builtin_HEXAGON_S2_lsl_r_vw",
+ "llvm.hexagon.S2.lsr.i.p" => "__builtin_HEXAGON_S2_lsr_i_p",
+ "llvm.hexagon.S2.lsr.i.p.acc" => "__builtin_HEXAGON_S2_lsr_i_p_acc",
+ "llvm.hexagon.S2.lsr.i.p.and" => "__builtin_HEXAGON_S2_lsr_i_p_and",
+ "llvm.hexagon.S2.lsr.i.p.nac" => "__builtin_HEXAGON_S2_lsr_i_p_nac",
+ "llvm.hexagon.S2.lsr.i.p.or" => "__builtin_HEXAGON_S2_lsr_i_p_or",
+ "llvm.hexagon.S2.lsr.i.p.xacc" => "__builtin_HEXAGON_S2_lsr_i_p_xacc",
+ "llvm.hexagon.S2.lsr.i.r" => "__builtin_HEXAGON_S2_lsr_i_r",
+ "llvm.hexagon.S2.lsr.i.r.acc" => "__builtin_HEXAGON_S2_lsr_i_r_acc",
+ "llvm.hexagon.S2.lsr.i.r.and" => "__builtin_HEXAGON_S2_lsr_i_r_and",
+ "llvm.hexagon.S2.lsr.i.r.nac" => "__builtin_HEXAGON_S2_lsr_i_r_nac",
+ "llvm.hexagon.S2.lsr.i.r.or" => "__builtin_HEXAGON_S2_lsr_i_r_or",
+ "llvm.hexagon.S2.lsr.i.r.xacc" => "__builtin_HEXAGON_S2_lsr_i_r_xacc",
+ "llvm.hexagon.S2.lsr.i.vh" => "__builtin_HEXAGON_S2_lsr_i_vh",
+ "llvm.hexagon.S2.lsr.i.vw" => "__builtin_HEXAGON_S2_lsr_i_vw",
+ "llvm.hexagon.S2.lsr.r.p" => "__builtin_HEXAGON_S2_lsr_r_p",
+ "llvm.hexagon.S2.lsr.r.p.acc" => "__builtin_HEXAGON_S2_lsr_r_p_acc",
+ "llvm.hexagon.S2.lsr.r.p.and" => "__builtin_HEXAGON_S2_lsr_r_p_and",
+ "llvm.hexagon.S2.lsr.r.p.nac" => "__builtin_HEXAGON_S2_lsr_r_p_nac",
+ "llvm.hexagon.S2.lsr.r.p.or" => "__builtin_HEXAGON_S2_lsr_r_p_or",
+ "llvm.hexagon.S2.lsr.r.p.xor" => "__builtin_HEXAGON_S2_lsr_r_p_xor",
+ "llvm.hexagon.S2.lsr.r.r" => "__builtin_HEXAGON_S2_lsr_r_r",
+ "llvm.hexagon.S2.lsr.r.r.acc" => "__builtin_HEXAGON_S2_lsr_r_r_acc",
+ "llvm.hexagon.S2.lsr.r.r.and" => "__builtin_HEXAGON_S2_lsr_r_r_and",
+ "llvm.hexagon.S2.lsr.r.r.nac" => "__builtin_HEXAGON_S2_lsr_r_r_nac",
+ "llvm.hexagon.S2.lsr.r.r.or" => "__builtin_HEXAGON_S2_lsr_r_r_or",
+ "llvm.hexagon.S2.lsr.r.vh" => "__builtin_HEXAGON_S2_lsr_r_vh",
+ "llvm.hexagon.S2.lsr.r.vw" => "__builtin_HEXAGON_S2_lsr_r_vw",
+ "llvm.hexagon.S2.packhl" => "__builtin_HEXAGON_S2_packhl",
+ "llvm.hexagon.S2.parityp" => "__builtin_HEXAGON_S2_parityp",
+ "llvm.hexagon.S2.setbit.i" => "__builtin_HEXAGON_S2_setbit_i",
+ "llvm.hexagon.S2.setbit.r" => "__builtin_HEXAGON_S2_setbit_r",
+ "llvm.hexagon.S2.shuffeb" => "__builtin_HEXAGON_S2_shuffeb",
+ "llvm.hexagon.S2.shuffeh" => "__builtin_HEXAGON_S2_shuffeh",
+ "llvm.hexagon.S2.shuffob" => "__builtin_HEXAGON_S2_shuffob",
+ "llvm.hexagon.S2.shuffoh" => "__builtin_HEXAGON_S2_shuffoh",
+ "llvm.hexagon.S2.svsathb" => "__builtin_HEXAGON_S2_svsathb",
+ "llvm.hexagon.S2.svsathub" => "__builtin_HEXAGON_S2_svsathub",
+ "llvm.hexagon.S2.tableidxb.goodsyntax" => "__builtin_HEXAGON_S2_tableidxb_goodsyntax",
+ "llvm.hexagon.S2.tableidxd.goodsyntax" => "__builtin_HEXAGON_S2_tableidxd_goodsyntax",
+ "llvm.hexagon.S2.tableidxh.goodsyntax" => "__builtin_HEXAGON_S2_tableidxh_goodsyntax",
+ "llvm.hexagon.S2.tableidxw.goodsyntax" => "__builtin_HEXAGON_S2_tableidxw_goodsyntax",
+ "llvm.hexagon.S2.togglebit.i" => "__builtin_HEXAGON_S2_togglebit_i",
+ "llvm.hexagon.S2.togglebit.r" => "__builtin_HEXAGON_S2_togglebit_r",
+ "llvm.hexagon.S2.tstbit.i" => "__builtin_HEXAGON_S2_tstbit_i",
+ "llvm.hexagon.S2.tstbit.r" => "__builtin_HEXAGON_S2_tstbit_r",
+ "llvm.hexagon.S2.valignib" => "__builtin_HEXAGON_S2_valignib",
+ "llvm.hexagon.S2.valignrb" => "__builtin_HEXAGON_S2_valignrb",
+ "llvm.hexagon.S2.vcnegh" => "__builtin_HEXAGON_S2_vcnegh",
+ "llvm.hexagon.S2.vcrotate" => "__builtin_HEXAGON_S2_vcrotate",
+ "llvm.hexagon.S2.vrcnegh" => "__builtin_HEXAGON_S2_vrcnegh",
+ "llvm.hexagon.S2.vrndpackwh" => "__builtin_HEXAGON_S2_vrndpackwh",
+ "llvm.hexagon.S2.vrndpackwhs" => "__builtin_HEXAGON_S2_vrndpackwhs",
+ "llvm.hexagon.S2.vsathb" => "__builtin_HEXAGON_S2_vsathb",
+ "llvm.hexagon.S2.vsathb.nopack" => "__builtin_HEXAGON_S2_vsathb_nopack",
+ "llvm.hexagon.S2.vsathub" => "__builtin_HEXAGON_S2_vsathub",
+ "llvm.hexagon.S2.vsathub.nopack" => "__builtin_HEXAGON_S2_vsathub_nopack",
+ "llvm.hexagon.S2.vsatwh" => "__builtin_HEXAGON_S2_vsatwh",
+ "llvm.hexagon.S2.vsatwh.nopack" => "__builtin_HEXAGON_S2_vsatwh_nopack",
+ "llvm.hexagon.S2.vsatwuh" => "__builtin_HEXAGON_S2_vsatwuh",
+ "llvm.hexagon.S2.vsatwuh.nopack" => "__builtin_HEXAGON_S2_vsatwuh_nopack",
+ "llvm.hexagon.S2.vsplatrb" => "__builtin_HEXAGON_S2_vsplatrb",
+ "llvm.hexagon.S2.vsplatrh" => "__builtin_HEXAGON_S2_vsplatrh",
+ "llvm.hexagon.S2.vspliceib" => "__builtin_HEXAGON_S2_vspliceib",
+ "llvm.hexagon.S2.vsplicerb" => "__builtin_HEXAGON_S2_vsplicerb",
+ "llvm.hexagon.S2.vsxtbh" => "__builtin_HEXAGON_S2_vsxtbh",
+ "llvm.hexagon.S2.vsxthw" => "__builtin_HEXAGON_S2_vsxthw",
+ "llvm.hexagon.S2.vtrunehb" => "__builtin_HEXAGON_S2_vtrunehb",
+ "llvm.hexagon.S2.vtrunewh" => "__builtin_HEXAGON_S2_vtrunewh",
+ "llvm.hexagon.S2.vtrunohb" => "__builtin_HEXAGON_S2_vtrunohb",
+ "llvm.hexagon.S2.vtrunowh" => "__builtin_HEXAGON_S2_vtrunowh",
+ "llvm.hexagon.S2.vzxtbh" => "__builtin_HEXAGON_S2_vzxtbh",
+ "llvm.hexagon.S2.vzxthw" => "__builtin_HEXAGON_S2_vzxthw",
+ "llvm.hexagon.S4.addaddi" => "__builtin_HEXAGON_S4_addaddi",
+ "llvm.hexagon.S4.addi.asl.ri" => "__builtin_HEXAGON_S4_addi_asl_ri",
+ "llvm.hexagon.S4.addi.lsr.ri" => "__builtin_HEXAGON_S4_addi_lsr_ri",
+ "llvm.hexagon.S4.andi.asl.ri" => "__builtin_HEXAGON_S4_andi_asl_ri",
+ "llvm.hexagon.S4.andi.lsr.ri" => "__builtin_HEXAGON_S4_andi_lsr_ri",
+ "llvm.hexagon.S4.clbaddi" => "__builtin_HEXAGON_S4_clbaddi",
+ "llvm.hexagon.S4.clbpaddi" => "__builtin_HEXAGON_S4_clbpaddi",
+ "llvm.hexagon.S4.clbpnorm" => "__builtin_HEXAGON_S4_clbpnorm",
+ "llvm.hexagon.S4.extract" => "__builtin_HEXAGON_S4_extract",
+ "llvm.hexagon.S4.extract.rp" => "__builtin_HEXAGON_S4_extract_rp",
+ "llvm.hexagon.S4.extractp" => "__builtin_HEXAGON_S4_extractp",
+ "llvm.hexagon.S4.extractp.rp" => "__builtin_HEXAGON_S4_extractp_rp",
+ "llvm.hexagon.S4.lsli" => "__builtin_HEXAGON_S4_lsli",
+ "llvm.hexagon.S4.ntstbit.i" => "__builtin_HEXAGON_S4_ntstbit_i",
+ "llvm.hexagon.S4.ntstbit.r" => "__builtin_HEXAGON_S4_ntstbit_r",
+ "llvm.hexagon.S4.or.andi" => "__builtin_HEXAGON_S4_or_andi",
+ "llvm.hexagon.S4.or.andix" => "__builtin_HEXAGON_S4_or_andix",
+ "llvm.hexagon.S4.or.ori" => "__builtin_HEXAGON_S4_or_ori",
+ "llvm.hexagon.S4.ori.asl.ri" => "__builtin_HEXAGON_S4_ori_asl_ri",
+ "llvm.hexagon.S4.ori.lsr.ri" => "__builtin_HEXAGON_S4_ori_lsr_ri",
+ "llvm.hexagon.S4.parity" => "__builtin_HEXAGON_S4_parity",
+ "llvm.hexagon.S4.subaddi" => "__builtin_HEXAGON_S4_subaddi",
+ "llvm.hexagon.S4.subi.asl.ri" => "__builtin_HEXAGON_S4_subi_asl_ri",
+ "llvm.hexagon.S4.subi.lsr.ri" => "__builtin_HEXAGON_S4_subi_lsr_ri",
+ "llvm.hexagon.S4.vrcrotate" => "__builtin_HEXAGON_S4_vrcrotate",
+ "llvm.hexagon.S4.vrcrotate.acc" => "__builtin_HEXAGON_S4_vrcrotate_acc",
+ "llvm.hexagon.S4.vxaddsubh" => "__builtin_HEXAGON_S4_vxaddsubh",
+ "llvm.hexagon.S4.vxaddsubhr" => "__builtin_HEXAGON_S4_vxaddsubhr",
+ "llvm.hexagon.S4.vxaddsubw" => "__builtin_HEXAGON_S4_vxaddsubw",
+ "llvm.hexagon.S4.vxsubaddh" => "__builtin_HEXAGON_S4_vxsubaddh",
+ "llvm.hexagon.S4.vxsubaddhr" => "__builtin_HEXAGON_S4_vxsubaddhr",
+ "llvm.hexagon.S4.vxsubaddw" => "__builtin_HEXAGON_S4_vxsubaddw",
+ "llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax" => "__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax",
+ "llvm.hexagon.S5.asrhub.sat" => "__builtin_HEXAGON_S5_asrhub_sat",
+ "llvm.hexagon.S5.popcountp" => "__builtin_HEXAGON_S5_popcountp",
+ "llvm.hexagon.S5.vasrhrnd.goodsyntax" => "__builtin_HEXAGON_S5_vasrhrnd_goodsyntax",
+ "llvm.hexagon.S6.rol.i.p" => "__builtin_HEXAGON_S6_rol_i_p",
+ "llvm.hexagon.S6.rol.i.p.acc" => "__builtin_HEXAGON_S6_rol_i_p_acc",
+ "llvm.hexagon.S6.rol.i.p.and" => "__builtin_HEXAGON_S6_rol_i_p_and",
+ "llvm.hexagon.S6.rol.i.p.nac" => "__builtin_HEXAGON_S6_rol_i_p_nac",
+ "llvm.hexagon.S6.rol.i.p.or" => "__builtin_HEXAGON_S6_rol_i_p_or",
+ "llvm.hexagon.S6.rol.i.p.xacc" => "__builtin_HEXAGON_S6_rol_i_p_xacc",
+ "llvm.hexagon.S6.rol.i.r" => "__builtin_HEXAGON_S6_rol_i_r",
+ "llvm.hexagon.S6.rol.i.r.acc" => "__builtin_HEXAGON_S6_rol_i_r_acc",
+ "llvm.hexagon.S6.rol.i.r.and" => "__builtin_HEXAGON_S6_rol_i_r_and",
+ "llvm.hexagon.S6.rol.i.r.nac" => "__builtin_HEXAGON_S6_rol_i_r_nac",
+ "llvm.hexagon.S6.rol.i.r.or" => "__builtin_HEXAGON_S6_rol_i_r_or",
+ "llvm.hexagon.S6.rol.i.r.xacc" => "__builtin_HEXAGON_S6_rol_i_r_xacc",
+ "llvm.hexagon.S6.vsplatrbp" => "__builtin_HEXAGON_S6_vsplatrbp",
+ "llvm.hexagon.S6.vtrunehb.ppp" => "__builtin_HEXAGON_S6_vtrunehb_ppp",
+ "llvm.hexagon.S6.vtrunohb.ppp" => "__builtin_HEXAGON_S6_vtrunohb_ppp",
+ "llvm.hexagon.SI.to.SXTHI.asrh" => "__builtin_SI_to_SXTHI_asrh",
+ "llvm.hexagon.V6.extractw" => "__builtin_HEXAGON_V6_extractw",
+ "llvm.hexagon.V6.extractw.128B" => "__builtin_HEXAGON_V6_extractw_128B",
+ "llvm.hexagon.V6.hi" => "__builtin_HEXAGON_V6_hi",
+ "llvm.hexagon.V6.hi.128B" => "__builtin_HEXAGON_V6_hi_128B",
+ "llvm.hexagon.V6.lo" => "__builtin_HEXAGON_V6_lo",
+ "llvm.hexagon.V6.lo.128B" => "__builtin_HEXAGON_V6_lo_128B",
+ "llvm.hexagon.V6.lvsplatw" => "__builtin_HEXAGON_V6_lvsplatw",
+ "llvm.hexagon.V6.lvsplatw.128B" => "__builtin_HEXAGON_V6_lvsplatw_128B",
+ "llvm.hexagon.V6.vabsdiffh" => "__builtin_HEXAGON_V6_vabsdiffh",
+ "llvm.hexagon.V6.vabsdiffh.128B" => "__builtin_HEXAGON_V6_vabsdiffh_128B",
+ "llvm.hexagon.V6.vabsdiffub" => "__builtin_HEXAGON_V6_vabsdiffub",
+ "llvm.hexagon.V6.vabsdiffub.128B" => "__builtin_HEXAGON_V6_vabsdiffub_128B",
+ "llvm.hexagon.V6.vabsdiffuh" => "__builtin_HEXAGON_V6_vabsdiffuh",
+ "llvm.hexagon.V6.vabsdiffuh.128B" => "__builtin_HEXAGON_V6_vabsdiffuh_128B",
+ "llvm.hexagon.V6.vabsdiffw" => "__builtin_HEXAGON_V6_vabsdiffw",
+ "llvm.hexagon.V6.vabsdiffw.128B" => "__builtin_HEXAGON_V6_vabsdiffw_128B",
+ "llvm.hexagon.V6.vabsh" => "__builtin_HEXAGON_V6_vabsh",
+ "llvm.hexagon.V6.vabsh.128B" => "__builtin_HEXAGON_V6_vabsh_128B",
+ "llvm.hexagon.V6.vabsh.sat" => "__builtin_HEXAGON_V6_vabsh_sat",
+ "llvm.hexagon.V6.vabsh.sat.128B" => "__builtin_HEXAGON_V6_vabsh_sat_128B",
+ "llvm.hexagon.V6.vabsw" => "__builtin_HEXAGON_V6_vabsw",
+ "llvm.hexagon.V6.vabsw.128B" => "__builtin_HEXAGON_V6_vabsw_128B",
+ "llvm.hexagon.V6.vabsw.sat" => "__builtin_HEXAGON_V6_vabsw_sat",
+ "llvm.hexagon.V6.vabsw.sat.128B" => "__builtin_HEXAGON_V6_vabsw_sat_128B",
+ "llvm.hexagon.V6.vaddb" => "__builtin_HEXAGON_V6_vaddb",
+ "llvm.hexagon.V6.vaddb.128B" => "__builtin_HEXAGON_V6_vaddb_128B",
+ "llvm.hexagon.V6.vaddb.dv" => "__builtin_HEXAGON_V6_vaddb_dv",
+ "llvm.hexagon.V6.vaddb.dv.128B" => "__builtin_HEXAGON_V6_vaddb_dv_128B",
+ "llvm.hexagon.V6.vaddh" => "__builtin_HEXAGON_V6_vaddh",
+ "llvm.hexagon.V6.vaddh.128B" => "__builtin_HEXAGON_V6_vaddh_128B",
+ "llvm.hexagon.V6.vaddh.dv" => "__builtin_HEXAGON_V6_vaddh_dv",
+ "llvm.hexagon.V6.vaddh.dv.128B" => "__builtin_HEXAGON_V6_vaddh_dv_128B",
+ "llvm.hexagon.V6.vaddhsat" => "__builtin_HEXAGON_V6_vaddhsat",
+ "llvm.hexagon.V6.vaddhsat.128B" => "__builtin_HEXAGON_V6_vaddhsat_128B",
+ "llvm.hexagon.V6.vaddhsat.dv" => "__builtin_HEXAGON_V6_vaddhsat_dv",
+ "llvm.hexagon.V6.vaddhsat.dv.128B" => "__builtin_HEXAGON_V6_vaddhsat_dv_128B",
+ "llvm.hexagon.V6.vaddhw" => "__builtin_HEXAGON_V6_vaddhw",
+ "llvm.hexagon.V6.vaddhw.128B" => "__builtin_HEXAGON_V6_vaddhw_128B",
+ "llvm.hexagon.V6.vaddubh" => "__builtin_HEXAGON_V6_vaddubh",
+ "llvm.hexagon.V6.vaddubh.128B" => "__builtin_HEXAGON_V6_vaddubh_128B",
+ "llvm.hexagon.V6.vaddubsat" => "__builtin_HEXAGON_V6_vaddubsat",
+ "llvm.hexagon.V6.vaddubsat.128B" => "__builtin_HEXAGON_V6_vaddubsat_128B",
+ "llvm.hexagon.V6.vaddubsat.dv" => "__builtin_HEXAGON_V6_vaddubsat_dv",
+ "llvm.hexagon.V6.vaddubsat.dv.128B" => "__builtin_HEXAGON_V6_vaddubsat_dv_128B",
+ "llvm.hexagon.V6.vadduhsat" => "__builtin_HEXAGON_V6_vadduhsat",
+ "llvm.hexagon.V6.vadduhsat.128B" => "__builtin_HEXAGON_V6_vadduhsat_128B",
+ "llvm.hexagon.V6.vadduhsat.dv" => "__builtin_HEXAGON_V6_vadduhsat_dv",
+ "llvm.hexagon.V6.vadduhsat.dv.128B" => "__builtin_HEXAGON_V6_vadduhsat_dv_128B",
+ "llvm.hexagon.V6.vadduhw" => "__builtin_HEXAGON_V6_vadduhw",
+ "llvm.hexagon.V6.vadduhw.128B" => "__builtin_HEXAGON_V6_vadduhw_128B",
+ "llvm.hexagon.V6.vaddw" => "__builtin_HEXAGON_V6_vaddw",
+ "llvm.hexagon.V6.vaddw.128B" => "__builtin_HEXAGON_V6_vaddw_128B",
+ "llvm.hexagon.V6.vaddw.dv" => "__builtin_HEXAGON_V6_vaddw_dv",
+ "llvm.hexagon.V6.vaddw.dv.128B" => "__builtin_HEXAGON_V6_vaddw_dv_128B",
+ "llvm.hexagon.V6.vaddwsat" => "__builtin_HEXAGON_V6_vaddwsat",
+ "llvm.hexagon.V6.vaddwsat.128B" => "__builtin_HEXAGON_V6_vaddwsat_128B",
+ "llvm.hexagon.V6.vaddwsat.dv" => "__builtin_HEXAGON_V6_vaddwsat_dv",
+ "llvm.hexagon.V6.vaddwsat.dv.128B" => "__builtin_HEXAGON_V6_vaddwsat_dv_128B",
+ "llvm.hexagon.V6.valignb" => "__builtin_HEXAGON_V6_valignb",
+ "llvm.hexagon.V6.valignb.128B" => "__builtin_HEXAGON_V6_valignb_128B",
+ "llvm.hexagon.V6.valignbi" => "__builtin_HEXAGON_V6_valignbi",
+ "llvm.hexagon.V6.valignbi.128B" => "__builtin_HEXAGON_V6_valignbi_128B",
+ "llvm.hexagon.V6.vand" => "__builtin_HEXAGON_V6_vand",
+ "llvm.hexagon.V6.vand.128B" => "__builtin_HEXAGON_V6_vand_128B",
+ "llvm.hexagon.V6.vaslh" => "__builtin_HEXAGON_V6_vaslh",
+ "llvm.hexagon.V6.vaslh.128B" => "__builtin_HEXAGON_V6_vaslh_128B",
+ "llvm.hexagon.V6.vaslhv" => "__builtin_HEXAGON_V6_vaslhv",
+ "llvm.hexagon.V6.vaslhv.128B" => "__builtin_HEXAGON_V6_vaslhv_128B",
+ "llvm.hexagon.V6.vaslw" => "__builtin_HEXAGON_V6_vaslw",
+ "llvm.hexagon.V6.vaslw.128B" => "__builtin_HEXAGON_V6_vaslw_128B",
+ "llvm.hexagon.V6.vaslw.acc" => "__builtin_HEXAGON_V6_vaslw_acc",
+ "llvm.hexagon.V6.vaslw.acc.128B" => "__builtin_HEXAGON_V6_vaslw_acc_128B",
+ "llvm.hexagon.V6.vaslwv" => "__builtin_HEXAGON_V6_vaslwv",
+ "llvm.hexagon.V6.vaslwv.128B" => "__builtin_HEXAGON_V6_vaslwv_128B",
+ "llvm.hexagon.V6.vasrh" => "__builtin_HEXAGON_V6_vasrh",
+ "llvm.hexagon.V6.vasrh.128B" => "__builtin_HEXAGON_V6_vasrh_128B",
+ "llvm.hexagon.V6.vasrhbrndsat" => "__builtin_HEXAGON_V6_vasrhbrndsat",
+ "llvm.hexagon.V6.vasrhbrndsat.128B" => "__builtin_HEXAGON_V6_vasrhbrndsat_128B",
+ "llvm.hexagon.V6.vasrhubrndsat" => "__builtin_HEXAGON_V6_vasrhubrndsat",
+ "llvm.hexagon.V6.vasrhubrndsat.128B" => "__builtin_HEXAGON_V6_vasrhubrndsat_128B",
+ "llvm.hexagon.V6.vasrhubsat" => "__builtin_HEXAGON_V6_vasrhubsat",
+ "llvm.hexagon.V6.vasrhubsat.128B" => "__builtin_HEXAGON_V6_vasrhubsat_128B",
+ "llvm.hexagon.V6.vasrhv" => "__builtin_HEXAGON_V6_vasrhv",
+ "llvm.hexagon.V6.vasrhv.128B" => "__builtin_HEXAGON_V6_vasrhv_128B",
+ "llvm.hexagon.V6.vasrw" => "__builtin_HEXAGON_V6_vasrw",
+ "llvm.hexagon.V6.vasrw.128B" => "__builtin_HEXAGON_V6_vasrw_128B",
+ "llvm.hexagon.V6.vasrw.acc" => "__builtin_HEXAGON_V6_vasrw_acc",
+ "llvm.hexagon.V6.vasrw.acc.128B" => "__builtin_HEXAGON_V6_vasrw_acc_128B",
+ "llvm.hexagon.V6.vasrwh" => "__builtin_HEXAGON_V6_vasrwh",
+ "llvm.hexagon.V6.vasrwh.128B" => "__builtin_HEXAGON_V6_vasrwh_128B",
+ "llvm.hexagon.V6.vasrwhrndsat" => "__builtin_HEXAGON_V6_vasrwhrndsat",
+ "llvm.hexagon.V6.vasrwhrndsat.128B" => "__builtin_HEXAGON_V6_vasrwhrndsat_128B",
+ "llvm.hexagon.V6.vasrwhsat" => "__builtin_HEXAGON_V6_vasrwhsat",
+ "llvm.hexagon.V6.vasrwhsat.128B" => "__builtin_HEXAGON_V6_vasrwhsat_128B",
+ "llvm.hexagon.V6.vasrwuhsat" => "__builtin_HEXAGON_V6_vasrwuhsat",
+ "llvm.hexagon.V6.vasrwuhsat.128B" => "__builtin_HEXAGON_V6_vasrwuhsat_128B",
+ "llvm.hexagon.V6.vasrwv" => "__builtin_HEXAGON_V6_vasrwv",
+ "llvm.hexagon.V6.vasrwv.128B" => "__builtin_HEXAGON_V6_vasrwv_128B",
+ "llvm.hexagon.V6.vassign" => "__builtin_HEXAGON_V6_vassign",
+ "llvm.hexagon.V6.vassign.128B" => "__builtin_HEXAGON_V6_vassign_128B",
+ "llvm.hexagon.V6.vassignp" => "__builtin_HEXAGON_V6_vassignp",
+ "llvm.hexagon.V6.vassignp.128B" => "__builtin_HEXAGON_V6_vassignp_128B",
+ "llvm.hexagon.V6.vavgh" => "__builtin_HEXAGON_V6_vavgh",
+ "llvm.hexagon.V6.vavgh.128B" => "__builtin_HEXAGON_V6_vavgh_128B",
+ "llvm.hexagon.V6.vavghrnd" => "__builtin_HEXAGON_V6_vavghrnd",
+ "llvm.hexagon.V6.vavghrnd.128B" => "__builtin_HEXAGON_V6_vavghrnd_128B",
+ "llvm.hexagon.V6.vavgub" => "__builtin_HEXAGON_V6_vavgub",
+ "llvm.hexagon.V6.vavgub.128B" => "__builtin_HEXAGON_V6_vavgub_128B",
+ "llvm.hexagon.V6.vavgubrnd" => "__builtin_HEXAGON_V6_vavgubrnd",
+ "llvm.hexagon.V6.vavgubrnd.128B" => "__builtin_HEXAGON_V6_vavgubrnd_128B",
+ "llvm.hexagon.V6.vavguh" => "__builtin_HEXAGON_V6_vavguh",
+ "llvm.hexagon.V6.vavguh.128B" => "__builtin_HEXAGON_V6_vavguh_128B",
+ "llvm.hexagon.V6.vavguhrnd" => "__builtin_HEXAGON_V6_vavguhrnd",
+ "llvm.hexagon.V6.vavguhrnd.128B" => "__builtin_HEXAGON_V6_vavguhrnd_128B",
+ "llvm.hexagon.V6.vavgw" => "__builtin_HEXAGON_V6_vavgw",
+ "llvm.hexagon.V6.vavgw.128B" => "__builtin_HEXAGON_V6_vavgw_128B",
+ "llvm.hexagon.V6.vavgwrnd" => "__builtin_HEXAGON_V6_vavgwrnd",
+ "llvm.hexagon.V6.vavgwrnd.128B" => "__builtin_HEXAGON_V6_vavgwrnd_128B",
+ "llvm.hexagon.V6.vcl0h" => "__builtin_HEXAGON_V6_vcl0h",
+ "llvm.hexagon.V6.vcl0h.128B" => "__builtin_HEXAGON_V6_vcl0h_128B",
+ "llvm.hexagon.V6.vcl0w" => "__builtin_HEXAGON_V6_vcl0w",
+ "llvm.hexagon.V6.vcl0w.128B" => "__builtin_HEXAGON_V6_vcl0w_128B",
+ "llvm.hexagon.V6.vcombine" => "__builtin_HEXAGON_V6_vcombine",
+ "llvm.hexagon.V6.vcombine.128B" => "__builtin_HEXAGON_V6_vcombine_128B",
+ "llvm.hexagon.V6.vd0" => "__builtin_HEXAGON_V6_vd0",
+ "llvm.hexagon.V6.vd0.128B" => "__builtin_HEXAGON_V6_vd0_128B",
+ "llvm.hexagon.V6.vdealb" => "__builtin_HEXAGON_V6_vdealb",
+ "llvm.hexagon.V6.vdealb.128B" => "__builtin_HEXAGON_V6_vdealb_128B",
+ "llvm.hexagon.V6.vdealb4w" => "__builtin_HEXAGON_V6_vdealb4w",
+ "llvm.hexagon.V6.vdealb4w.128B" => "__builtin_HEXAGON_V6_vdealb4w_128B",
+ "llvm.hexagon.V6.vdealh" => "__builtin_HEXAGON_V6_vdealh",
+ "llvm.hexagon.V6.vdealh.128B" => "__builtin_HEXAGON_V6_vdealh_128B",
+ "llvm.hexagon.V6.vdealvdd" => "__builtin_HEXAGON_V6_vdealvdd",
+ "llvm.hexagon.V6.vdealvdd.128B" => "__builtin_HEXAGON_V6_vdealvdd_128B",
+ "llvm.hexagon.V6.vdelta" => "__builtin_HEXAGON_V6_vdelta",
+ "llvm.hexagon.V6.vdelta.128B" => "__builtin_HEXAGON_V6_vdelta_128B",
+ "llvm.hexagon.V6.vdmpybus" => "__builtin_HEXAGON_V6_vdmpybus",
+ "llvm.hexagon.V6.vdmpybus.128B" => "__builtin_HEXAGON_V6_vdmpybus_128B",
+ "llvm.hexagon.V6.vdmpybus.acc" => "__builtin_HEXAGON_V6_vdmpybus_acc",
+ "llvm.hexagon.V6.vdmpybus.acc.128B" => "__builtin_HEXAGON_V6_vdmpybus_acc_128B",
+ "llvm.hexagon.V6.vdmpybus.dv" => "__builtin_HEXAGON_V6_vdmpybus_dv",
+ "llvm.hexagon.V6.vdmpybus.dv.128B" => "__builtin_HEXAGON_V6_vdmpybus_dv_128B",
+ "llvm.hexagon.V6.vdmpybus.dv.acc" => "__builtin_HEXAGON_V6_vdmpybus_dv_acc",
+ "llvm.hexagon.V6.vdmpybus.dv.acc.128B" => "__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B",
+ "llvm.hexagon.V6.vdmpyhb" => "__builtin_HEXAGON_V6_vdmpyhb",
+ "llvm.hexagon.V6.vdmpyhb.128B" => "__builtin_HEXAGON_V6_vdmpyhb_128B",
+ "llvm.hexagon.V6.vdmpyhb.acc" => "__builtin_HEXAGON_V6_vdmpyhb_acc",
+ "llvm.hexagon.V6.vdmpyhb.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhb_acc_128B",
+ "llvm.hexagon.V6.vdmpyhb.dv" => "__builtin_HEXAGON_V6_vdmpyhb_dv",
+ "llvm.hexagon.V6.vdmpyhb.dv.128B" => "__builtin_HEXAGON_V6_vdmpyhb_dv_128B",
+ "llvm.hexagon.V6.vdmpyhb.dv.acc" => "__builtin_HEXAGON_V6_vdmpyhb_dv_acc",
+ "llvm.hexagon.V6.vdmpyhb.dv.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B",
+ "llvm.hexagon.V6.vdmpyhisat" => "__builtin_HEXAGON_V6_vdmpyhisat",
+ "llvm.hexagon.V6.vdmpyhisat.128B" => "__builtin_HEXAGON_V6_vdmpyhisat_128B",
+ "llvm.hexagon.V6.vdmpyhisat.acc" => "__builtin_HEXAGON_V6_vdmpyhisat_acc",
+ "llvm.hexagon.V6.vdmpyhisat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhisat_acc_128B",
+ "llvm.hexagon.V6.vdmpyhsat" => "__builtin_HEXAGON_V6_vdmpyhsat",
+ "llvm.hexagon.V6.vdmpyhsat.128B" => "__builtin_HEXAGON_V6_vdmpyhsat_128B",
+ "llvm.hexagon.V6.vdmpyhsat.acc" => "__builtin_HEXAGON_V6_vdmpyhsat_acc",
+ "llvm.hexagon.V6.vdmpyhsat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhsat_acc_128B",
+ "llvm.hexagon.V6.vdmpyhsuisat" => "__builtin_HEXAGON_V6_vdmpyhsuisat",
+ "llvm.hexagon.V6.vdmpyhsuisat.128B" => "__builtin_HEXAGON_V6_vdmpyhsuisat_128B",
+ "llvm.hexagon.V6.vdmpyhsuisat.acc" => "__builtin_HEXAGON_V6_vdmpyhsuisat_acc",
+ "llvm.hexagon.V6.vdmpyhsuisat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B",
+ "llvm.hexagon.V6.vdmpyhsusat" => "__builtin_HEXAGON_V6_vdmpyhsusat",
+ "llvm.hexagon.V6.vdmpyhsusat.128B" => "__builtin_HEXAGON_V6_vdmpyhsusat_128B",
+ "llvm.hexagon.V6.vdmpyhsusat.acc" => "__builtin_HEXAGON_V6_vdmpyhsusat_acc",
+ "llvm.hexagon.V6.vdmpyhsusat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B",
+ "llvm.hexagon.V6.vdmpyhvsat" => "__builtin_HEXAGON_V6_vdmpyhvsat",
+ "llvm.hexagon.V6.vdmpyhvsat.128B" => "__builtin_HEXAGON_V6_vdmpyhvsat_128B",
+ "llvm.hexagon.V6.vdmpyhvsat.acc" => "__builtin_HEXAGON_V6_vdmpyhvsat_acc",
+ "llvm.hexagon.V6.vdmpyhvsat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B",
+ "llvm.hexagon.V6.vdsaduh" => "__builtin_HEXAGON_V6_vdsaduh",
+ "llvm.hexagon.V6.vdsaduh.128B" => "__builtin_HEXAGON_V6_vdsaduh_128B",
+ "llvm.hexagon.V6.vdsaduh.acc" => "__builtin_HEXAGON_V6_vdsaduh_acc",
+ "llvm.hexagon.V6.vdsaduh.acc.128B" => "__builtin_HEXAGON_V6_vdsaduh_acc_128B",
+ "llvm.hexagon.V6.vinsertwr" => "__builtin_HEXAGON_V6_vinsertwr",
+ "llvm.hexagon.V6.vinsertwr.128B" => "__builtin_HEXAGON_V6_vinsertwr_128B",
+ "llvm.hexagon.V6.vlalignb" => "__builtin_HEXAGON_V6_vlalignb",
+ "llvm.hexagon.V6.vlalignb.128B" => "__builtin_HEXAGON_V6_vlalignb_128B",
+ "llvm.hexagon.V6.vlalignbi" => "__builtin_HEXAGON_V6_vlalignbi",
+ "llvm.hexagon.V6.vlalignbi.128B" => "__builtin_HEXAGON_V6_vlalignbi_128B",
+ "llvm.hexagon.V6.vlsrh" => "__builtin_HEXAGON_V6_vlsrh",
+ "llvm.hexagon.V6.vlsrh.128B" => "__builtin_HEXAGON_V6_vlsrh_128B",
+ "llvm.hexagon.V6.vlsrhv" => "__builtin_HEXAGON_V6_vlsrhv",
+ "llvm.hexagon.V6.vlsrhv.128B" => "__builtin_HEXAGON_V6_vlsrhv_128B",
+ "llvm.hexagon.V6.vlsrw" => "__builtin_HEXAGON_V6_vlsrw",
+ "llvm.hexagon.V6.vlsrw.128B" => "__builtin_HEXAGON_V6_vlsrw_128B",
+ "llvm.hexagon.V6.vlsrwv" => "__builtin_HEXAGON_V6_vlsrwv",
+ "llvm.hexagon.V6.vlsrwv.128B" => "__builtin_HEXAGON_V6_vlsrwv_128B",
+ "llvm.hexagon.V6.vlutb" => "__builtin_HEXAGON_V6_vlutb",
+ "llvm.hexagon.V6.vlutb.128B" => "__builtin_HEXAGON_V6_vlutb_128B",
+ "llvm.hexagon.V6.vlutb.acc" => "__builtin_HEXAGON_V6_vlutb_acc",
+ "llvm.hexagon.V6.vlutb.acc.128B" => "__builtin_HEXAGON_V6_vlutb_acc_128B",
+ "llvm.hexagon.V6.vlutb.dv" => "__builtin_HEXAGON_V6_vlutb_dv",
+ "llvm.hexagon.V6.vlutb.dv.128B" => "__builtin_HEXAGON_V6_vlutb_dv_128B",
+ "llvm.hexagon.V6.vlutb.dv.acc" => "__builtin_HEXAGON_V6_vlutb_dv_acc",
+ "llvm.hexagon.V6.vlutb.dv.acc.128B" => "__builtin_HEXAGON_V6_vlutb_dv_acc_128B",
+ "llvm.hexagon.V6.vlutvvb" => "__builtin_HEXAGON_V6_vlutvvb",
+ "llvm.hexagon.V6.vlutvvb.128B" => "__builtin_HEXAGON_V6_vlutvvb_128B",
+ "llvm.hexagon.V6.vlutvvb.oracc" => "__builtin_HEXAGON_V6_vlutvvb_oracc",
+ "llvm.hexagon.V6.vlutvvb.oracc.128B" => "__builtin_HEXAGON_V6_vlutvvb_oracc_128B",
+ "llvm.hexagon.V6.vlutvwh" => "__builtin_HEXAGON_V6_vlutvwh",
+ "llvm.hexagon.V6.vlutvwh.128B" => "__builtin_HEXAGON_V6_vlutvwh_128B",
+ "llvm.hexagon.V6.vlutvwh.oracc" => "__builtin_HEXAGON_V6_vlutvwh_oracc",
+ "llvm.hexagon.V6.vlutvwh.oracc.128B" => "__builtin_HEXAGON_V6_vlutvwh_oracc_128B",
+ "llvm.hexagon.V6.vmaxh" => "__builtin_HEXAGON_V6_vmaxh",
+ "llvm.hexagon.V6.vmaxh.128B" => "__builtin_HEXAGON_V6_vmaxh_128B",
+ "llvm.hexagon.V6.vmaxub" => "__builtin_HEXAGON_V6_vmaxub",
+ "llvm.hexagon.V6.vmaxub.128B" => "__builtin_HEXAGON_V6_vmaxub_128B",
+ "llvm.hexagon.V6.vmaxuh" => "__builtin_HEXAGON_V6_vmaxuh",
+ "llvm.hexagon.V6.vmaxuh.128B" => "__builtin_HEXAGON_V6_vmaxuh_128B",
+ "llvm.hexagon.V6.vmaxw" => "__builtin_HEXAGON_V6_vmaxw",
+ "llvm.hexagon.V6.vmaxw.128B" => "__builtin_HEXAGON_V6_vmaxw_128B",
+ "llvm.hexagon.V6.vminh" => "__builtin_HEXAGON_V6_vminh",
+ "llvm.hexagon.V6.vminh.128B" => "__builtin_HEXAGON_V6_vminh_128B",
+ "llvm.hexagon.V6.vminub" => "__builtin_HEXAGON_V6_vminub",
+ "llvm.hexagon.V6.vminub.128B" => "__builtin_HEXAGON_V6_vminub_128B",
+ "llvm.hexagon.V6.vminuh" => "__builtin_HEXAGON_V6_vminuh",
+ "llvm.hexagon.V6.vminuh.128B" => "__builtin_HEXAGON_V6_vminuh_128B",
+ "llvm.hexagon.V6.vminw" => "__builtin_HEXAGON_V6_vminw",
+ "llvm.hexagon.V6.vminw.128B" => "__builtin_HEXAGON_V6_vminw_128B",
+ "llvm.hexagon.V6.vmpabus" => "__builtin_HEXAGON_V6_vmpabus",
+ "llvm.hexagon.V6.vmpabus.128B" => "__builtin_HEXAGON_V6_vmpabus_128B",
+ "llvm.hexagon.V6.vmpabus.acc" => "__builtin_HEXAGON_V6_vmpabus_acc",
+ "llvm.hexagon.V6.vmpabus.acc.128B" => "__builtin_HEXAGON_V6_vmpabus_acc_128B",
+ "llvm.hexagon.V6.vmpabusv" => "__builtin_HEXAGON_V6_vmpabusv",
+ "llvm.hexagon.V6.vmpabusv.128B" => "__builtin_HEXAGON_V6_vmpabusv_128B",
+ "llvm.hexagon.V6.vmpabuuv" => "__builtin_HEXAGON_V6_vmpabuuv",
+ "llvm.hexagon.V6.vmpabuuv.128B" => "__builtin_HEXAGON_V6_vmpabuuv_128B",
+ "llvm.hexagon.V6.vmpahb" => "__builtin_HEXAGON_V6_vmpahb",
+ "llvm.hexagon.V6.vmpahb.128B" => "__builtin_HEXAGON_V6_vmpahb_128B",
+ "llvm.hexagon.V6.vmpahb.acc" => "__builtin_HEXAGON_V6_vmpahb_acc",
+ "llvm.hexagon.V6.vmpahb.acc.128B" => "__builtin_HEXAGON_V6_vmpahb_acc_128B",
+ "llvm.hexagon.V6.vmpybus" => "__builtin_HEXAGON_V6_vmpybus",
+ "llvm.hexagon.V6.vmpybus.128B" => "__builtin_HEXAGON_V6_vmpybus_128B",
+ "llvm.hexagon.V6.vmpybus.acc" => "__builtin_HEXAGON_V6_vmpybus_acc",
+ "llvm.hexagon.V6.vmpybus.acc.128B" => "__builtin_HEXAGON_V6_vmpybus_acc_128B",
+ "llvm.hexagon.V6.vmpybusv" => "__builtin_HEXAGON_V6_vmpybusv",
+ "llvm.hexagon.V6.vmpybusv.128B" => "__builtin_HEXAGON_V6_vmpybusv_128B",
+ "llvm.hexagon.V6.vmpybusv.acc" => "__builtin_HEXAGON_V6_vmpybusv_acc",
+ "llvm.hexagon.V6.vmpybusv.acc.128B" => "__builtin_HEXAGON_V6_vmpybusv_acc_128B",
+ "llvm.hexagon.V6.vmpybv" => "__builtin_HEXAGON_V6_vmpybv",
+ "llvm.hexagon.V6.vmpybv.128B" => "__builtin_HEXAGON_V6_vmpybv_128B",
+ "llvm.hexagon.V6.vmpybv.acc" => "__builtin_HEXAGON_V6_vmpybv_acc",
+ "llvm.hexagon.V6.vmpybv.acc.128B" => "__builtin_HEXAGON_V6_vmpybv_acc_128B",
+ "llvm.hexagon.V6.vmpyewuh" => "__builtin_HEXAGON_V6_vmpyewuh",
+ "llvm.hexagon.V6.vmpyewuh.128B" => "__builtin_HEXAGON_V6_vmpyewuh_128B",
+ "llvm.hexagon.V6.vmpyh" => "__builtin_HEXAGON_V6_vmpyh",
+ "llvm.hexagon.V6.vmpyh.128B" => "__builtin_HEXAGON_V6_vmpyh_128B",
+ "llvm.hexagon.V6.vmpyhsat.acc" => "__builtin_HEXAGON_V6_vmpyhsat_acc",
+ "llvm.hexagon.V6.vmpyhsat.acc.128B" => "__builtin_HEXAGON_V6_vmpyhsat_acc_128B",
+ "llvm.hexagon.V6.vmpyhsrs" => "__builtin_HEXAGON_V6_vmpyhsrs",
+ "llvm.hexagon.V6.vmpyhsrs.128B" => "__builtin_HEXAGON_V6_vmpyhsrs_128B",
+ "llvm.hexagon.V6.vmpyhss" => "__builtin_HEXAGON_V6_vmpyhss",
+ "llvm.hexagon.V6.vmpyhss.128B" => "__builtin_HEXAGON_V6_vmpyhss_128B",
+ "llvm.hexagon.V6.vmpyhus" => "__builtin_HEXAGON_V6_vmpyhus",
+ "llvm.hexagon.V6.vmpyhus.128B" => "__builtin_HEXAGON_V6_vmpyhus_128B",
+ "llvm.hexagon.V6.vmpyhus.acc" => "__builtin_HEXAGON_V6_vmpyhus_acc",
+ "llvm.hexagon.V6.vmpyhus.acc.128B" => "__builtin_HEXAGON_V6_vmpyhus_acc_128B",
+ "llvm.hexagon.V6.vmpyhv" => "__builtin_HEXAGON_V6_vmpyhv",
+ "llvm.hexagon.V6.vmpyhv.128B" => "__builtin_HEXAGON_V6_vmpyhv_128B",
+ "llvm.hexagon.V6.vmpyhv.acc" => "__builtin_HEXAGON_V6_vmpyhv_acc",
+ "llvm.hexagon.V6.vmpyhv.acc.128B" => "__builtin_HEXAGON_V6_vmpyhv_acc_128B",
+ "llvm.hexagon.V6.vmpyhvsrs" => "__builtin_HEXAGON_V6_vmpyhvsrs",
+ "llvm.hexagon.V6.vmpyhvsrs.128B" => "__builtin_HEXAGON_V6_vmpyhvsrs_128B",
+ "llvm.hexagon.V6.vmpyieoh" => "__builtin_HEXAGON_V6_vmpyieoh",
+ "llvm.hexagon.V6.vmpyieoh.128B" => "__builtin_HEXAGON_V6_vmpyieoh_128B",
+ "llvm.hexagon.V6.vmpyiewh.acc" => "__builtin_HEXAGON_V6_vmpyiewh_acc",
+ "llvm.hexagon.V6.vmpyiewh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiewh_acc_128B",
+ "llvm.hexagon.V6.vmpyiewuh" => "__builtin_HEXAGON_V6_vmpyiewuh",
+ "llvm.hexagon.V6.vmpyiewuh.128B" => "__builtin_HEXAGON_V6_vmpyiewuh_128B",
+ "llvm.hexagon.V6.vmpyiewuh.acc" => "__builtin_HEXAGON_V6_vmpyiewuh_acc",
+ "llvm.hexagon.V6.vmpyiewuh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiewuh_acc_128B",
+ "llvm.hexagon.V6.vmpyih" => "__builtin_HEXAGON_V6_vmpyih",
+ "llvm.hexagon.V6.vmpyih.128B" => "__builtin_HEXAGON_V6_vmpyih_128B",
+ "llvm.hexagon.V6.vmpyih.acc" => "__builtin_HEXAGON_V6_vmpyih_acc",
+ "llvm.hexagon.V6.vmpyih.acc.128B" => "__builtin_HEXAGON_V6_vmpyih_acc_128B",
+ "llvm.hexagon.V6.vmpyihb" => "__builtin_HEXAGON_V6_vmpyihb",
+ "llvm.hexagon.V6.vmpyihb.128B" => "__builtin_HEXAGON_V6_vmpyihb_128B",
+ "llvm.hexagon.V6.vmpyihb.acc" => "__builtin_HEXAGON_V6_vmpyihb_acc",
+ "llvm.hexagon.V6.vmpyihb.acc.128B" => "__builtin_HEXAGON_V6_vmpyihb_acc_128B",
+ "llvm.hexagon.V6.vmpyiowh" => "__builtin_HEXAGON_V6_vmpyiowh",
+ "llvm.hexagon.V6.vmpyiowh.128B" => "__builtin_HEXAGON_V6_vmpyiowh_128B",
+ "llvm.hexagon.V6.vmpyiwb" => "__builtin_HEXAGON_V6_vmpyiwb",
+ "llvm.hexagon.V6.vmpyiwb.128B" => "__builtin_HEXAGON_V6_vmpyiwb_128B",
+ "llvm.hexagon.V6.vmpyiwb.acc" => "__builtin_HEXAGON_V6_vmpyiwb_acc",
+ "llvm.hexagon.V6.vmpyiwb.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwb_acc_128B",
+ "llvm.hexagon.V6.vmpyiwh" => "__builtin_HEXAGON_V6_vmpyiwh",
+ "llvm.hexagon.V6.vmpyiwh.128B" => "__builtin_HEXAGON_V6_vmpyiwh_128B",
+ "llvm.hexagon.V6.vmpyiwh.acc" => "__builtin_HEXAGON_V6_vmpyiwh_acc",
+ "llvm.hexagon.V6.vmpyiwh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwh_acc_128B",
+ "llvm.hexagon.V6.vmpyowh" => "__builtin_HEXAGON_V6_vmpyowh",
+ "llvm.hexagon.V6.vmpyowh.128B" => "__builtin_HEXAGON_V6_vmpyowh_128B",
+ "llvm.hexagon.V6.vmpyowh.rnd" => "__builtin_HEXAGON_V6_vmpyowh_rnd",
+ "llvm.hexagon.V6.vmpyowh.rnd.128B" => "__builtin_HEXAGON_V6_vmpyowh_rnd_128B",
+ "llvm.hexagon.V6.vmpyowh.rnd.sacc" => "__builtin_HEXAGON_V6_vmpyowh_rnd_sacc",
+ "llvm.hexagon.V6.vmpyowh.rnd.sacc.128B" => "__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B",
+ "llvm.hexagon.V6.vmpyowh.sacc" => "__builtin_HEXAGON_V6_vmpyowh_sacc",
+ "llvm.hexagon.V6.vmpyowh.sacc.128B" => "__builtin_HEXAGON_V6_vmpyowh_sacc_128B",
+ "llvm.hexagon.V6.vmpyub" => "__builtin_HEXAGON_V6_vmpyub",
+ "llvm.hexagon.V6.vmpyub.128B" => "__builtin_HEXAGON_V6_vmpyub_128B",
+ "llvm.hexagon.V6.vmpyub.acc" => "__builtin_HEXAGON_V6_vmpyub_acc",
+ "llvm.hexagon.V6.vmpyub.acc.128B" => "__builtin_HEXAGON_V6_vmpyub_acc_128B",
+ "llvm.hexagon.V6.vmpyubv" => "__builtin_HEXAGON_V6_vmpyubv",
+ "llvm.hexagon.V6.vmpyubv.128B" => "__builtin_HEXAGON_V6_vmpyubv_128B",
+ "llvm.hexagon.V6.vmpyubv.acc" => "__builtin_HEXAGON_V6_vmpyubv_acc",
+ "llvm.hexagon.V6.vmpyubv.acc.128B" => "__builtin_HEXAGON_V6_vmpyubv_acc_128B",
+ "llvm.hexagon.V6.vmpyuh" => "__builtin_HEXAGON_V6_vmpyuh",
+ "llvm.hexagon.V6.vmpyuh.128B" => "__builtin_HEXAGON_V6_vmpyuh_128B",
+ "llvm.hexagon.V6.vmpyuh.acc" => "__builtin_HEXAGON_V6_vmpyuh_acc",
+ "llvm.hexagon.V6.vmpyuh.acc.128B" => "__builtin_HEXAGON_V6_vmpyuh_acc_128B",
+ "llvm.hexagon.V6.vmpyuhv" => "__builtin_HEXAGON_V6_vmpyuhv",
+ "llvm.hexagon.V6.vmpyuhv.128B" => "__builtin_HEXAGON_V6_vmpyuhv_128B",
+ "llvm.hexagon.V6.vmpyuhv.acc" => "__builtin_HEXAGON_V6_vmpyuhv_acc",
+ "llvm.hexagon.V6.vmpyuhv.acc.128B" => "__builtin_HEXAGON_V6_vmpyuhv_acc_128B",
+ "llvm.hexagon.V6.vnavgh" => "__builtin_HEXAGON_V6_vnavgh",
+ "llvm.hexagon.V6.vnavgh.128B" => "__builtin_HEXAGON_V6_vnavgh_128B",
+ "llvm.hexagon.V6.vnavgub" => "__builtin_HEXAGON_V6_vnavgub",
+ "llvm.hexagon.V6.vnavgub.128B" => "__builtin_HEXAGON_V6_vnavgub_128B",
+ "llvm.hexagon.V6.vnavgw" => "__builtin_HEXAGON_V6_vnavgw",
+ "llvm.hexagon.V6.vnavgw.128B" => "__builtin_HEXAGON_V6_vnavgw_128B",
+ "llvm.hexagon.V6.vnormamth" => "__builtin_HEXAGON_V6_vnormamth",
+ "llvm.hexagon.V6.vnormamth.128B" => "__builtin_HEXAGON_V6_vnormamth_128B",
+ "llvm.hexagon.V6.vnormamtw" => "__builtin_HEXAGON_V6_vnormamtw",
+ "llvm.hexagon.V6.vnormamtw.128B" => "__builtin_HEXAGON_V6_vnormamtw_128B",
+ "llvm.hexagon.V6.vnot" => "__builtin_HEXAGON_V6_vnot",
+ "llvm.hexagon.V6.vnot.128B" => "__builtin_HEXAGON_V6_vnot_128B",
+ "llvm.hexagon.V6.vor" => "__builtin_HEXAGON_V6_vor",
+ "llvm.hexagon.V6.vor.128B" => "__builtin_HEXAGON_V6_vor_128B",
+ "llvm.hexagon.V6.vpackeb" => "__builtin_HEXAGON_V6_vpackeb",
+ "llvm.hexagon.V6.vpackeb.128B" => "__builtin_HEXAGON_V6_vpackeb_128B",
+ "llvm.hexagon.V6.vpackeh" => "__builtin_HEXAGON_V6_vpackeh",
+ "llvm.hexagon.V6.vpackeh.128B" => "__builtin_HEXAGON_V6_vpackeh_128B",
+ "llvm.hexagon.V6.vpackhb.sat" => "__builtin_HEXAGON_V6_vpackhb_sat",
+ "llvm.hexagon.V6.vpackhb.sat.128B" => "__builtin_HEXAGON_V6_vpackhb_sat_128B",
+ "llvm.hexagon.V6.vpackhub.sat" => "__builtin_HEXAGON_V6_vpackhub_sat",
+ "llvm.hexagon.V6.vpackhub.sat.128B" => "__builtin_HEXAGON_V6_vpackhub_sat_128B",
+ "llvm.hexagon.V6.vpackob" => "__builtin_HEXAGON_V6_vpackob",
+ "llvm.hexagon.V6.vpackob.128B" => "__builtin_HEXAGON_V6_vpackob_128B",
+ "llvm.hexagon.V6.vpackoh" => "__builtin_HEXAGON_V6_vpackoh",
+ "llvm.hexagon.V6.vpackoh.128B" => "__builtin_HEXAGON_V6_vpackoh_128B",
+ "llvm.hexagon.V6.vpackwh.sat" => "__builtin_HEXAGON_V6_vpackwh_sat",
+ "llvm.hexagon.V6.vpackwh.sat.128B" => "__builtin_HEXAGON_V6_vpackwh_sat_128B",
+ "llvm.hexagon.V6.vpackwuh.sat" => "__builtin_HEXAGON_V6_vpackwuh_sat",
+ "llvm.hexagon.V6.vpackwuh.sat.128B" => "__builtin_HEXAGON_V6_vpackwuh_sat_128B",
+ "llvm.hexagon.V6.vpopcounth" => "__builtin_HEXAGON_V6_vpopcounth",
+ "llvm.hexagon.V6.vpopcounth.128B" => "__builtin_HEXAGON_V6_vpopcounth_128B",
+ "llvm.hexagon.V6.vrdelta" => "__builtin_HEXAGON_V6_vrdelta",
+ "llvm.hexagon.V6.vrdelta.128B" => "__builtin_HEXAGON_V6_vrdelta_128B",
+ "llvm.hexagon.V6.vrmpybus" => "__builtin_HEXAGON_V6_vrmpybus",
+ "llvm.hexagon.V6.vrmpybus.128B" => "__builtin_HEXAGON_V6_vrmpybus_128B",
+ "llvm.hexagon.V6.vrmpybus.acc" => "__builtin_HEXAGON_V6_vrmpybus_acc",
+ "llvm.hexagon.V6.vrmpybus.acc.128B" => "__builtin_HEXAGON_V6_vrmpybus_acc_128B",
+ "llvm.hexagon.V6.vrmpybusi" => "__builtin_HEXAGON_V6_vrmpybusi",
+ "llvm.hexagon.V6.vrmpybusi.128B" => "__builtin_HEXAGON_V6_vrmpybusi_128B",
+ "llvm.hexagon.V6.vrmpybusi.acc" => "__builtin_HEXAGON_V6_vrmpybusi_acc",
+ "llvm.hexagon.V6.vrmpybusi.acc.128B" => "__builtin_HEXAGON_V6_vrmpybusi_acc_128B",
+ "llvm.hexagon.V6.vrmpybusv" => "__builtin_HEXAGON_V6_vrmpybusv",
+ "llvm.hexagon.V6.vrmpybusv.128B" => "__builtin_HEXAGON_V6_vrmpybusv_128B",
+ "llvm.hexagon.V6.vrmpybusv.acc" => "__builtin_HEXAGON_V6_vrmpybusv_acc",
+ "llvm.hexagon.V6.vrmpybusv.acc.128B" => "__builtin_HEXAGON_V6_vrmpybusv_acc_128B",
+ "llvm.hexagon.V6.vrmpybv" => "__builtin_HEXAGON_V6_vrmpybv",
+ "llvm.hexagon.V6.vrmpybv.128B" => "__builtin_HEXAGON_V6_vrmpybv_128B",
+ "llvm.hexagon.V6.vrmpybv.acc" => "__builtin_HEXAGON_V6_vrmpybv_acc",
+ "llvm.hexagon.V6.vrmpybv.acc.128B" => "__builtin_HEXAGON_V6_vrmpybv_acc_128B",
+ "llvm.hexagon.V6.vrmpyub" => "__builtin_HEXAGON_V6_vrmpyub",
+ "llvm.hexagon.V6.vrmpyub.128B" => "__builtin_HEXAGON_V6_vrmpyub_128B",
+ "llvm.hexagon.V6.vrmpyub.acc" => "__builtin_HEXAGON_V6_vrmpyub_acc",
+ "llvm.hexagon.V6.vrmpyub.acc.128B" => "__builtin_HEXAGON_V6_vrmpyub_acc_128B",
+ "llvm.hexagon.V6.vrmpyubi" => "__builtin_HEXAGON_V6_vrmpyubi",
+ "llvm.hexagon.V6.vrmpyubi.128B" => "__builtin_HEXAGON_V6_vrmpyubi_128B",
+ "llvm.hexagon.V6.vrmpyubi.acc" => "__builtin_HEXAGON_V6_vrmpyubi_acc",
+ "llvm.hexagon.V6.vrmpyubi.acc.128B" => "__builtin_HEXAGON_V6_vrmpyubi_acc_128B",
+ "llvm.hexagon.V6.vrmpyubv" => "__builtin_HEXAGON_V6_vrmpyubv",
+ "llvm.hexagon.V6.vrmpyubv.128B" => "__builtin_HEXAGON_V6_vrmpyubv_128B",
+ "llvm.hexagon.V6.vrmpyubv.acc" => "__builtin_HEXAGON_V6_vrmpyubv_acc",
+ "llvm.hexagon.V6.vrmpyubv.acc.128B" => "__builtin_HEXAGON_V6_vrmpyubv_acc_128B",
+ "llvm.hexagon.V6.vror" => "__builtin_HEXAGON_V6_vror",
+ "llvm.hexagon.V6.vror.128B" => "__builtin_HEXAGON_V6_vror_128B",
+ "llvm.hexagon.V6.vroundhb" => "__builtin_HEXAGON_V6_vroundhb",
+ "llvm.hexagon.V6.vroundhb.128B" => "__builtin_HEXAGON_V6_vroundhb_128B",
+ "llvm.hexagon.V6.vroundhub" => "__builtin_HEXAGON_V6_vroundhub",
+ "llvm.hexagon.V6.vroundhub.128B" => "__builtin_HEXAGON_V6_vroundhub_128B",
+ "llvm.hexagon.V6.vroundwh" => "__builtin_HEXAGON_V6_vroundwh",
+ "llvm.hexagon.V6.vroundwh.128B" => "__builtin_HEXAGON_V6_vroundwh_128B",
+ "llvm.hexagon.V6.vroundwuh" => "__builtin_HEXAGON_V6_vroundwuh",
+ "llvm.hexagon.V6.vroundwuh.128B" => "__builtin_HEXAGON_V6_vroundwuh_128B",
+ "llvm.hexagon.V6.vrsadubi" => "__builtin_HEXAGON_V6_vrsadubi",
+ "llvm.hexagon.V6.vrsadubi.128B" => "__builtin_HEXAGON_V6_vrsadubi_128B",
+ "llvm.hexagon.V6.vrsadubi.acc" => "__builtin_HEXAGON_V6_vrsadubi_acc",
+ "llvm.hexagon.V6.vrsadubi.acc.128B" => "__builtin_HEXAGON_V6_vrsadubi_acc_128B",
+ "llvm.hexagon.V6.vsathub" => "__builtin_HEXAGON_V6_vsathub",
+ "llvm.hexagon.V6.vsathub.128B" => "__builtin_HEXAGON_V6_vsathub_128B",
+ "llvm.hexagon.V6.vsatwh" => "__builtin_HEXAGON_V6_vsatwh",
+ "llvm.hexagon.V6.vsatwh.128B" => "__builtin_HEXAGON_V6_vsatwh_128B",
+ "llvm.hexagon.V6.vsb" => "__builtin_HEXAGON_V6_vsb",
+ "llvm.hexagon.V6.vsb.128B" => "__builtin_HEXAGON_V6_vsb_128B",
+ "llvm.hexagon.V6.vsh" => "__builtin_HEXAGON_V6_vsh",
+ "llvm.hexagon.V6.vsh.128B" => "__builtin_HEXAGON_V6_vsh_128B",
+ "llvm.hexagon.V6.vshufeh" => "__builtin_HEXAGON_V6_vshufeh",
+ "llvm.hexagon.V6.vshufeh.128B" => "__builtin_HEXAGON_V6_vshufeh_128B",
+ "llvm.hexagon.V6.vshuffb" => "__builtin_HEXAGON_V6_vshuffb",
+ "llvm.hexagon.V6.vshuffb.128B" => "__builtin_HEXAGON_V6_vshuffb_128B",
+ "llvm.hexagon.V6.vshuffeb" => "__builtin_HEXAGON_V6_vshuffeb",
+ "llvm.hexagon.V6.vshuffeb.128B" => "__builtin_HEXAGON_V6_vshuffeb_128B",
+ "llvm.hexagon.V6.vshuffh" => "__builtin_HEXAGON_V6_vshuffh",
+ "llvm.hexagon.V6.vshuffh.128B" => "__builtin_HEXAGON_V6_vshuffh_128B",
+ "llvm.hexagon.V6.vshuffob" => "__builtin_HEXAGON_V6_vshuffob",
+ "llvm.hexagon.V6.vshuffob.128B" => "__builtin_HEXAGON_V6_vshuffob_128B",
+ "llvm.hexagon.V6.vshuffvdd" => "__builtin_HEXAGON_V6_vshuffvdd",
+ "llvm.hexagon.V6.vshuffvdd.128B" => "__builtin_HEXAGON_V6_vshuffvdd_128B",
+ "llvm.hexagon.V6.vshufoeb" => "__builtin_HEXAGON_V6_vshufoeb",
+ "llvm.hexagon.V6.vshufoeb.128B" => "__builtin_HEXAGON_V6_vshufoeb_128B",
+ "llvm.hexagon.V6.vshufoeh" => "__builtin_HEXAGON_V6_vshufoeh",
+ "llvm.hexagon.V6.vshufoeh.128B" => "__builtin_HEXAGON_V6_vshufoeh_128B",
+ "llvm.hexagon.V6.vshufoh" => "__builtin_HEXAGON_V6_vshufoh",
+ "llvm.hexagon.V6.vshufoh.128B" => "__builtin_HEXAGON_V6_vshufoh_128B",
+ "llvm.hexagon.V6.vsubb" => "__builtin_HEXAGON_V6_vsubb",
+ "llvm.hexagon.V6.vsubb.128B" => "__builtin_HEXAGON_V6_vsubb_128B",
+ "llvm.hexagon.V6.vsubb.dv" => "__builtin_HEXAGON_V6_vsubb_dv",
+ "llvm.hexagon.V6.vsubb.dv.128B" => "__builtin_HEXAGON_V6_vsubb_dv_128B",
+ "llvm.hexagon.V6.vsubh" => "__builtin_HEXAGON_V6_vsubh",
+ "llvm.hexagon.V6.vsubh.128B" => "__builtin_HEXAGON_V6_vsubh_128B",
+ "llvm.hexagon.V6.vsubh.dv" => "__builtin_HEXAGON_V6_vsubh_dv",
+ "llvm.hexagon.V6.vsubh.dv.128B" => "__builtin_HEXAGON_V6_vsubh_dv_128B",
+ "llvm.hexagon.V6.vsubhsat" => "__builtin_HEXAGON_V6_vsubhsat",
+ "llvm.hexagon.V6.vsubhsat.128B" => "__builtin_HEXAGON_V6_vsubhsat_128B",
+ "llvm.hexagon.V6.vsubhsat.dv" => "__builtin_HEXAGON_V6_vsubhsat_dv",
+ "llvm.hexagon.V6.vsubhsat.dv.128B" => "__builtin_HEXAGON_V6_vsubhsat_dv_128B",
+ "llvm.hexagon.V6.vsubhw" => "__builtin_HEXAGON_V6_vsubhw",
+ "llvm.hexagon.V6.vsubhw.128B" => "__builtin_HEXAGON_V6_vsubhw_128B",
+ "llvm.hexagon.V6.vsububh" => "__builtin_HEXAGON_V6_vsububh",
+ "llvm.hexagon.V6.vsububh.128B" => "__builtin_HEXAGON_V6_vsububh_128B",
+ "llvm.hexagon.V6.vsububsat" => "__builtin_HEXAGON_V6_vsububsat",
+ "llvm.hexagon.V6.vsububsat.128B" => "__builtin_HEXAGON_V6_vsububsat_128B",
+ "llvm.hexagon.V6.vsububsat.dv" => "__builtin_HEXAGON_V6_vsububsat_dv",
+ "llvm.hexagon.V6.vsububsat.dv.128B" => "__builtin_HEXAGON_V6_vsububsat_dv_128B",
+ "llvm.hexagon.V6.vsubuhsat" => "__builtin_HEXAGON_V6_vsubuhsat",
+ "llvm.hexagon.V6.vsubuhsat.128B" => "__builtin_HEXAGON_V6_vsubuhsat_128B",
+ "llvm.hexagon.V6.vsubuhsat.dv" => "__builtin_HEXAGON_V6_vsubuhsat_dv",
+ "llvm.hexagon.V6.vsubuhsat.dv.128B" => "__builtin_HEXAGON_V6_vsubuhsat_dv_128B",
+ "llvm.hexagon.V6.vsubuhw" => "__builtin_HEXAGON_V6_vsubuhw",
+ "llvm.hexagon.V6.vsubuhw.128B" => "__builtin_HEXAGON_V6_vsubuhw_128B",
+ "llvm.hexagon.V6.vsubw" => "__builtin_HEXAGON_V6_vsubw",
+ "llvm.hexagon.V6.vsubw.128B" => "__builtin_HEXAGON_V6_vsubw_128B",
+ "llvm.hexagon.V6.vsubw.dv" => "__builtin_HEXAGON_V6_vsubw_dv",
+ "llvm.hexagon.V6.vsubw.dv.128B" => "__builtin_HEXAGON_V6_vsubw_dv_128B",
+ "llvm.hexagon.V6.vsubwsat" => "__builtin_HEXAGON_V6_vsubwsat",
+ "llvm.hexagon.V6.vsubwsat.128B" => "__builtin_HEXAGON_V6_vsubwsat_128B",
+ "llvm.hexagon.V6.vsubwsat.dv" => "__builtin_HEXAGON_V6_vsubwsat_dv",
+ "llvm.hexagon.V6.vsubwsat.dv.128B" => "__builtin_HEXAGON_V6_vsubwsat_dv_128B",
+ "llvm.hexagon.V6.vtmpyb" => "__builtin_HEXAGON_V6_vtmpyb",
+ "llvm.hexagon.V6.vtmpyb.128B" => "__builtin_HEXAGON_V6_vtmpyb_128B",
+ "llvm.hexagon.V6.vtmpyb.acc" => "__builtin_HEXAGON_V6_vtmpyb_acc",
+ "llvm.hexagon.V6.vtmpyb.acc.128B" => "__builtin_HEXAGON_V6_vtmpyb_acc_128B",
+ "llvm.hexagon.V6.vtmpybus" => "__builtin_HEXAGON_V6_vtmpybus",
+ "llvm.hexagon.V6.vtmpybus.128B" => "__builtin_HEXAGON_V6_vtmpybus_128B",
+ "llvm.hexagon.V6.vtmpybus.acc" => "__builtin_HEXAGON_V6_vtmpybus_acc",
+ "llvm.hexagon.V6.vtmpybus.acc.128B" => "__builtin_HEXAGON_V6_vtmpybus_acc_128B",
+ "llvm.hexagon.V6.vtmpyhb" => "__builtin_HEXAGON_V6_vtmpyhb",
+ "llvm.hexagon.V6.vtmpyhb.128B" => "__builtin_HEXAGON_V6_vtmpyhb_128B",
+ "llvm.hexagon.V6.vtmpyhb.acc" => "__builtin_HEXAGON_V6_vtmpyhb_acc",
+ "llvm.hexagon.V6.vtmpyhb.acc.128B" => "__builtin_HEXAGON_V6_vtmpyhb_acc_128B",
+ "llvm.hexagon.V6.vunpackb" => "__builtin_HEXAGON_V6_vunpackb",
+ "llvm.hexagon.V6.vunpackb.128B" => "__builtin_HEXAGON_V6_vunpackb_128B",
+ "llvm.hexagon.V6.vunpackh" => "__builtin_HEXAGON_V6_vunpackh",
+ "llvm.hexagon.V6.vunpackh.128B" => "__builtin_HEXAGON_V6_vunpackh_128B",
+ "llvm.hexagon.V6.vunpackob" => "__builtin_HEXAGON_V6_vunpackob",
+ "llvm.hexagon.V6.vunpackob.128B" => "__builtin_HEXAGON_V6_vunpackob_128B",
+ "llvm.hexagon.V6.vunpackoh" => "__builtin_HEXAGON_V6_vunpackoh",
+ "llvm.hexagon.V6.vunpackoh.128B" => "__builtin_HEXAGON_V6_vunpackoh_128B",
+ "llvm.hexagon.V6.vunpackub" => "__builtin_HEXAGON_V6_vunpackub",
+ "llvm.hexagon.V6.vunpackub.128B" => "__builtin_HEXAGON_V6_vunpackub_128B",
+ "llvm.hexagon.V6.vunpackuh" => "__builtin_HEXAGON_V6_vunpackuh",
+ "llvm.hexagon.V6.vunpackuh.128B" => "__builtin_HEXAGON_V6_vunpackuh_128B",
+ "llvm.hexagon.V6.vxor" => "__builtin_HEXAGON_V6_vxor",
+ "llvm.hexagon.V6.vxor.128B" => "__builtin_HEXAGON_V6_vxor_128B",
+ "llvm.hexagon.V6.vzb" => "__builtin_HEXAGON_V6_vzb",
+ "llvm.hexagon.V6.vzb.128B" => "__builtin_HEXAGON_V6_vzb_128B",
+ "llvm.hexagon.V6.vzh" => "__builtin_HEXAGON_V6_vzh",
+ "llvm.hexagon.V6.vzh.128B" => "__builtin_HEXAGON_V6_vzh_128B",
+ "llvm.hexagon.brev.ldb" => "__builtin_brev_ldb",
+ "llvm.hexagon.brev.ldd" => "__builtin_brev_ldd",
+ "llvm.hexagon.brev.ldh" => "__builtin_brev_ldh",
+ "llvm.hexagon.brev.ldub" => "__builtin_brev_ldub",
+ "llvm.hexagon.brev.lduh" => "__builtin_brev_lduh",
+ "llvm.hexagon.brev.ldw" => "__builtin_brev_ldw",
+ "llvm.hexagon.brev.stb" => "__builtin_brev_stb",
+ "llvm.hexagon.brev.std" => "__builtin_brev_std",
+ "llvm.hexagon.brev.sth" => "__builtin_brev_sth",
+ "llvm.hexagon.brev.sthhi" => "__builtin_brev_sthhi",
+ "llvm.hexagon.brev.stw" => "__builtin_brev_stw",
+ "llvm.hexagon.circ.ldb" => "__builtin_circ_ldb",
+ "llvm.hexagon.circ.ldd" => "__builtin_circ_ldd",
+ "llvm.hexagon.circ.ldh" => "__builtin_circ_ldh",
+ "llvm.hexagon.circ.ldub" => "__builtin_circ_ldub",
+ "llvm.hexagon.circ.lduh" => "__builtin_circ_lduh",
+ "llvm.hexagon.circ.ldw" => "__builtin_circ_ldw",
+ "llvm.hexagon.circ.stb" => "__builtin_circ_stb",
+ "llvm.hexagon.circ.std" => "__builtin_circ_std",
+ "llvm.hexagon.circ.sth" => "__builtin_circ_sth",
+ "llvm.hexagon.circ.sthhi" => "__builtin_circ_sthhi",
+ "llvm.hexagon.circ.stw" => "__builtin_circ_stw",
+ "llvm.hexagon.mm256i.vaddw" => "__builtin__mm256i_vaddw",
+ "llvm.hexagon.prefetch" => "__builtin_HEXAGON_prefetch",
+ // mips
+ "llvm.mips.absq.s.ph" => "__builtin_mips_absq_s_ph",
+ "llvm.mips.absq.s.qb" => "__builtin_mips_absq_s_qb",
+ "llvm.mips.absq.s.w" => "__builtin_mips_absq_s_w",
+ "llvm.mips.add.a.b" => "__builtin_msa_add_a_b",
+ "llvm.mips.add.a.d" => "__builtin_msa_add_a_d",
+ "llvm.mips.add.a.h" => "__builtin_msa_add_a_h",
+ "llvm.mips.add.a.w" => "__builtin_msa_add_a_w",
+ "llvm.mips.addq.ph" => "__builtin_mips_addq_ph",
+ "llvm.mips.addq.s.ph" => "__builtin_mips_addq_s_ph",
+ "llvm.mips.addq.s.w" => "__builtin_mips_addq_s_w",
+ "llvm.mips.addqh.ph" => "__builtin_mips_addqh_ph",
+ "llvm.mips.addqh.r.ph" => "__builtin_mips_addqh_r_ph",
+ "llvm.mips.addqh.r.w" => "__builtin_mips_addqh_r_w",
+ "llvm.mips.addqh.w" => "__builtin_mips_addqh_w",
+ "llvm.mips.adds.a.b" => "__builtin_msa_adds_a_b",
+ "llvm.mips.adds.a.d" => "__builtin_msa_adds_a_d",
+ "llvm.mips.adds.a.h" => "__builtin_msa_adds_a_h",
+ "llvm.mips.adds.a.w" => "__builtin_msa_adds_a_w",
+ "llvm.mips.adds.s.b" => "__builtin_msa_adds_s_b",
+ "llvm.mips.adds.s.d" => "__builtin_msa_adds_s_d",
+ "llvm.mips.adds.s.h" => "__builtin_msa_adds_s_h",
+ "llvm.mips.adds.s.w" => "__builtin_msa_adds_s_w",
+ "llvm.mips.adds.u.b" => "__builtin_msa_adds_u_b",
+ "llvm.mips.adds.u.d" => "__builtin_msa_adds_u_d",
+ "llvm.mips.adds.u.h" => "__builtin_msa_adds_u_h",
+ "llvm.mips.adds.u.w" => "__builtin_msa_adds_u_w",
+ "llvm.mips.addsc" => "__builtin_mips_addsc",
+ "llvm.mips.addu.ph" => "__builtin_mips_addu_ph",
+ "llvm.mips.addu.qb" => "__builtin_mips_addu_qb",
+ "llvm.mips.addu.s.ph" => "__builtin_mips_addu_s_ph",
+ "llvm.mips.addu.s.qb" => "__builtin_mips_addu_s_qb",
+ "llvm.mips.adduh.qb" => "__builtin_mips_adduh_qb",
+ "llvm.mips.adduh.r.qb" => "__builtin_mips_adduh_r_qb",
+ "llvm.mips.addv.b" => "__builtin_msa_addv_b",
+ "llvm.mips.addv.d" => "__builtin_msa_addv_d",
+ "llvm.mips.addv.h" => "__builtin_msa_addv_h",
+ "llvm.mips.addv.w" => "__builtin_msa_addv_w",
+ "llvm.mips.addvi.b" => "__builtin_msa_addvi_b",
+ "llvm.mips.addvi.d" => "__builtin_msa_addvi_d",
+ "llvm.mips.addvi.h" => "__builtin_msa_addvi_h",
+ "llvm.mips.addvi.w" => "__builtin_msa_addvi_w",
+ "llvm.mips.addwc" => "__builtin_mips_addwc",
+ "llvm.mips.and.v" => "__builtin_msa_and_v",
+ "llvm.mips.andi.b" => "__builtin_msa_andi_b",
+ "llvm.mips.append" => "__builtin_mips_append",
+ "llvm.mips.asub.s.b" => "__builtin_msa_asub_s_b",
+ "llvm.mips.asub.s.d" => "__builtin_msa_asub_s_d",
+ "llvm.mips.asub.s.h" => "__builtin_msa_asub_s_h",
+ "llvm.mips.asub.s.w" => "__builtin_msa_asub_s_w",
+ "llvm.mips.asub.u.b" => "__builtin_msa_asub_u_b",
+ "llvm.mips.asub.u.d" => "__builtin_msa_asub_u_d",
+ "llvm.mips.asub.u.h" => "__builtin_msa_asub_u_h",
+ "llvm.mips.asub.u.w" => "__builtin_msa_asub_u_w",
+ "llvm.mips.ave.s.b" => "__builtin_msa_ave_s_b",
+ "llvm.mips.ave.s.d" => "__builtin_msa_ave_s_d",
+ "llvm.mips.ave.s.h" => "__builtin_msa_ave_s_h",
+ "llvm.mips.ave.s.w" => "__builtin_msa_ave_s_w",
+ "llvm.mips.ave.u.b" => "__builtin_msa_ave_u_b",
+ "llvm.mips.ave.u.d" => "__builtin_msa_ave_u_d",
+ "llvm.mips.ave.u.h" => "__builtin_msa_ave_u_h",
+ "llvm.mips.ave.u.w" => "__builtin_msa_ave_u_w",
+ "llvm.mips.aver.s.b" => "__builtin_msa_aver_s_b",
+ "llvm.mips.aver.s.d" => "__builtin_msa_aver_s_d",
+ "llvm.mips.aver.s.h" => "__builtin_msa_aver_s_h",
+ "llvm.mips.aver.s.w" => "__builtin_msa_aver_s_w",
+ "llvm.mips.aver.u.b" => "__builtin_msa_aver_u_b",
+ "llvm.mips.aver.u.d" => "__builtin_msa_aver_u_d",
+ "llvm.mips.aver.u.h" => "__builtin_msa_aver_u_h",
+ "llvm.mips.aver.u.w" => "__builtin_msa_aver_u_w",
+ "llvm.mips.balign" => "__builtin_mips_balign",
+ "llvm.mips.bclr.b" => "__builtin_msa_bclr_b",
+ "llvm.mips.bclr.d" => "__builtin_msa_bclr_d",
+ "llvm.mips.bclr.h" => "__builtin_msa_bclr_h",
+ "llvm.mips.bclr.w" => "__builtin_msa_bclr_w",
+ "llvm.mips.bclri.b" => "__builtin_msa_bclri_b",
+ "llvm.mips.bclri.d" => "__builtin_msa_bclri_d",
+ "llvm.mips.bclri.h" => "__builtin_msa_bclri_h",
+ "llvm.mips.bclri.w" => "__builtin_msa_bclri_w",
+ "llvm.mips.binsl.b" => "__builtin_msa_binsl_b",
+ "llvm.mips.binsl.d" => "__builtin_msa_binsl_d",
+ "llvm.mips.binsl.h" => "__builtin_msa_binsl_h",
+ "llvm.mips.binsl.w" => "__builtin_msa_binsl_w",
+ "llvm.mips.binsli.b" => "__builtin_msa_binsli_b",
+ "llvm.mips.binsli.d" => "__builtin_msa_binsli_d",
+ "llvm.mips.binsli.h" => "__builtin_msa_binsli_h",
+ "llvm.mips.binsli.w" => "__builtin_msa_binsli_w",
+ "llvm.mips.binsr.b" => "__builtin_msa_binsr_b",
+ "llvm.mips.binsr.d" => "__builtin_msa_binsr_d",
+ "llvm.mips.binsr.h" => "__builtin_msa_binsr_h",
+ "llvm.mips.binsr.w" => "__builtin_msa_binsr_w",
+ "llvm.mips.binsri.b" => "__builtin_msa_binsri_b",
+ "llvm.mips.binsri.d" => "__builtin_msa_binsri_d",
+ "llvm.mips.binsri.h" => "__builtin_msa_binsri_h",
+ "llvm.mips.binsri.w" => "__builtin_msa_binsri_w",
+ "llvm.mips.bitrev" => "__builtin_mips_bitrev",
+ "llvm.mips.bmnz.v" => "__builtin_msa_bmnz_v",
+ "llvm.mips.bmnzi.b" => "__builtin_msa_bmnzi_b",
+ "llvm.mips.bmz.v" => "__builtin_msa_bmz_v",
+ "llvm.mips.bmzi.b" => "__builtin_msa_bmzi_b",
+ "llvm.mips.bneg.b" => "__builtin_msa_bneg_b",
+ "llvm.mips.bneg.d" => "__builtin_msa_bneg_d",
+ "llvm.mips.bneg.h" => "__builtin_msa_bneg_h",
+ "llvm.mips.bneg.w" => "__builtin_msa_bneg_w",
+ "llvm.mips.bnegi.b" => "__builtin_msa_bnegi_b",
+ "llvm.mips.bnegi.d" => "__builtin_msa_bnegi_d",
+ "llvm.mips.bnegi.h" => "__builtin_msa_bnegi_h",
+ "llvm.mips.bnegi.w" => "__builtin_msa_bnegi_w",
+ "llvm.mips.bnz.b" => "__builtin_msa_bnz_b",
+ "llvm.mips.bnz.d" => "__builtin_msa_bnz_d",
+ "llvm.mips.bnz.h" => "__builtin_msa_bnz_h",
+ "llvm.mips.bnz.v" => "__builtin_msa_bnz_v",
+ "llvm.mips.bnz.w" => "__builtin_msa_bnz_w",
+ "llvm.mips.bposge32" => "__builtin_mips_bposge32",
+ "llvm.mips.bsel.v" => "__builtin_msa_bsel_v",
+ "llvm.mips.bseli.b" => "__builtin_msa_bseli_b",
+ "llvm.mips.bset.b" => "__builtin_msa_bset_b",
+ "llvm.mips.bset.d" => "__builtin_msa_bset_d",
+ "llvm.mips.bset.h" => "__builtin_msa_bset_h",
+ "llvm.mips.bset.w" => "__builtin_msa_bset_w",
+ "llvm.mips.bseti.b" => "__builtin_msa_bseti_b",
+ "llvm.mips.bseti.d" => "__builtin_msa_bseti_d",
+ "llvm.mips.bseti.h" => "__builtin_msa_bseti_h",
+ "llvm.mips.bseti.w" => "__builtin_msa_bseti_w",
+ "llvm.mips.bz.b" => "__builtin_msa_bz_b",
+ "llvm.mips.bz.d" => "__builtin_msa_bz_d",
+ "llvm.mips.bz.h" => "__builtin_msa_bz_h",
+ "llvm.mips.bz.v" => "__builtin_msa_bz_v",
+ "llvm.mips.bz.w" => "__builtin_msa_bz_w",
+ "llvm.mips.ceq.b" => "__builtin_msa_ceq_b",
+ "llvm.mips.ceq.d" => "__builtin_msa_ceq_d",
+ "llvm.mips.ceq.h" => "__builtin_msa_ceq_h",
+ "llvm.mips.ceq.w" => "__builtin_msa_ceq_w",
+ "llvm.mips.ceqi.b" => "__builtin_msa_ceqi_b",
+ "llvm.mips.ceqi.d" => "__builtin_msa_ceqi_d",
+ "llvm.mips.ceqi.h" => "__builtin_msa_ceqi_h",
+ "llvm.mips.ceqi.w" => "__builtin_msa_ceqi_w",
+ "llvm.mips.cfcmsa" => "__builtin_msa_cfcmsa",
+ "llvm.mips.cle.s.b" => "__builtin_msa_cle_s_b",
+ "llvm.mips.cle.s.d" => "__builtin_msa_cle_s_d",
+ "llvm.mips.cle.s.h" => "__builtin_msa_cle_s_h",
+ "llvm.mips.cle.s.w" => "__builtin_msa_cle_s_w",
+ "llvm.mips.cle.u.b" => "__builtin_msa_cle_u_b",
+ "llvm.mips.cle.u.d" => "__builtin_msa_cle_u_d",
+ "llvm.mips.cle.u.h" => "__builtin_msa_cle_u_h",
+ "llvm.mips.cle.u.w" => "__builtin_msa_cle_u_w",
+ "llvm.mips.clei.s.b" => "__builtin_msa_clei_s_b",
+ "llvm.mips.clei.s.d" => "__builtin_msa_clei_s_d",
+ "llvm.mips.clei.s.h" => "__builtin_msa_clei_s_h",
+ "llvm.mips.clei.s.w" => "__builtin_msa_clei_s_w",
+ "llvm.mips.clei.u.b" => "__builtin_msa_clei_u_b",
+ "llvm.mips.clei.u.d" => "__builtin_msa_clei_u_d",
+ "llvm.mips.clei.u.h" => "__builtin_msa_clei_u_h",
+ "llvm.mips.clei.u.w" => "__builtin_msa_clei_u_w",
+ "llvm.mips.clt.s.b" => "__builtin_msa_clt_s_b",
+ "llvm.mips.clt.s.d" => "__builtin_msa_clt_s_d",
+ "llvm.mips.clt.s.h" => "__builtin_msa_clt_s_h",
+ "llvm.mips.clt.s.w" => "__builtin_msa_clt_s_w",
+ "llvm.mips.clt.u.b" => "__builtin_msa_clt_u_b",
+ "llvm.mips.clt.u.d" => "__builtin_msa_clt_u_d",
+ "llvm.mips.clt.u.h" => "__builtin_msa_clt_u_h",
+ "llvm.mips.clt.u.w" => "__builtin_msa_clt_u_w",
+ "llvm.mips.clti.s.b" => "__builtin_msa_clti_s_b",
+ "llvm.mips.clti.s.d" => "__builtin_msa_clti_s_d",
+ "llvm.mips.clti.s.h" => "__builtin_msa_clti_s_h",
+ "llvm.mips.clti.s.w" => "__builtin_msa_clti_s_w",
+ "llvm.mips.clti.u.b" => "__builtin_msa_clti_u_b",
+ "llvm.mips.clti.u.d" => "__builtin_msa_clti_u_d",
+ "llvm.mips.clti.u.h" => "__builtin_msa_clti_u_h",
+ "llvm.mips.clti.u.w" => "__builtin_msa_clti_u_w",
+ "llvm.mips.cmp.eq.ph" => "__builtin_mips_cmp_eq_ph",
+ "llvm.mips.cmp.le.ph" => "__builtin_mips_cmp_le_ph",
+ "llvm.mips.cmp.lt.ph" => "__builtin_mips_cmp_lt_ph",
+ "llvm.mips.cmpgdu.eq.qb" => "__builtin_mips_cmpgdu_eq_qb",
+ "llvm.mips.cmpgdu.le.qb" => "__builtin_mips_cmpgdu_le_qb",
+ "llvm.mips.cmpgdu.lt.qb" => "__builtin_mips_cmpgdu_lt_qb",
+ "llvm.mips.cmpgu.eq.qb" => "__builtin_mips_cmpgu_eq_qb",
+ "llvm.mips.cmpgu.le.qb" => "__builtin_mips_cmpgu_le_qb",
+ "llvm.mips.cmpgu.lt.qb" => "__builtin_mips_cmpgu_lt_qb",
+ "llvm.mips.cmpu.eq.qb" => "__builtin_mips_cmpu_eq_qb",
+ "llvm.mips.cmpu.le.qb" => "__builtin_mips_cmpu_le_qb",
+ "llvm.mips.cmpu.lt.qb" => "__builtin_mips_cmpu_lt_qb",
+ "llvm.mips.copy.s.b" => "__builtin_msa_copy_s_b",
+ "llvm.mips.copy.s.d" => "__builtin_msa_copy_s_d",
+ "llvm.mips.copy.s.h" => "__builtin_msa_copy_s_h",
+ "llvm.mips.copy.s.w" => "__builtin_msa_copy_s_w",
+ "llvm.mips.copy.u.b" => "__builtin_msa_copy_u_b",
+ "llvm.mips.copy.u.d" => "__builtin_msa_copy_u_d",
+ "llvm.mips.copy.u.h" => "__builtin_msa_copy_u_h",
+ "llvm.mips.copy.u.w" => "__builtin_msa_copy_u_w",
+ "llvm.mips.ctcmsa" => "__builtin_msa_ctcmsa",
+ "llvm.mips.div.s.b" => "__builtin_msa_div_s_b",
+ "llvm.mips.div.s.d" => "__builtin_msa_div_s_d",
+ "llvm.mips.div.s.h" => "__builtin_msa_div_s_h",
+ "llvm.mips.div.s.w" => "__builtin_msa_div_s_w",
+ "llvm.mips.div.u.b" => "__builtin_msa_div_u_b",
+ "llvm.mips.div.u.d" => "__builtin_msa_div_u_d",
+ "llvm.mips.div.u.h" => "__builtin_msa_div_u_h",
+ "llvm.mips.div.u.w" => "__builtin_msa_div_u_w",
+ "llvm.mips.dlsa" => "__builtin_mips_dlsa",
+ "llvm.mips.dotp.s.d" => "__builtin_msa_dotp_s_d",
+ "llvm.mips.dotp.s.h" => "__builtin_msa_dotp_s_h",
+ "llvm.mips.dotp.s.w" => "__builtin_msa_dotp_s_w",
+ "llvm.mips.dotp.u.d" => "__builtin_msa_dotp_u_d",
+ "llvm.mips.dotp.u.h" => "__builtin_msa_dotp_u_h",
+ "llvm.mips.dotp.u.w" => "__builtin_msa_dotp_u_w",
+ "llvm.mips.dpa.w.ph" => "__builtin_mips_dpa_w_ph",
+ "llvm.mips.dpadd.s.d" => "__builtin_msa_dpadd_s_d",
+ "llvm.mips.dpadd.s.h" => "__builtin_msa_dpadd_s_h",
+ "llvm.mips.dpadd.s.w" => "__builtin_msa_dpadd_s_w",
+ "llvm.mips.dpadd.u.d" => "__builtin_msa_dpadd_u_d",
+ "llvm.mips.dpadd.u.h" => "__builtin_msa_dpadd_u_h",
+ "llvm.mips.dpadd.u.w" => "__builtin_msa_dpadd_u_w",
+ "llvm.mips.dpaq.s.w.ph" => "__builtin_mips_dpaq_s_w_ph",
+ "llvm.mips.dpaq.sa.l.w" => "__builtin_mips_dpaq_sa_l_w",
+ "llvm.mips.dpaqx.s.w.ph" => "__builtin_mips_dpaqx_s_w_ph",
+ "llvm.mips.dpaqx.sa.w.ph" => "__builtin_mips_dpaqx_sa_w_ph",
+ "llvm.mips.dpau.h.qbl" => "__builtin_mips_dpau_h_qbl",
+ "llvm.mips.dpau.h.qbr" => "__builtin_mips_dpau_h_qbr",
+ "llvm.mips.dpax.w.ph" => "__builtin_mips_dpax_w_ph",
+ "llvm.mips.dps.w.ph" => "__builtin_mips_dps_w_ph",
+ "llvm.mips.dpsq.s.w.ph" => "__builtin_mips_dpsq_s_w_ph",
+ "llvm.mips.dpsq.sa.l.w" => "__builtin_mips_dpsq_sa_l_w",
+ "llvm.mips.dpsqx.s.w.ph" => "__builtin_mips_dpsqx_s_w_ph",
+ "llvm.mips.dpsqx.sa.w.ph" => "__builtin_mips_dpsqx_sa_w_ph",
+ "llvm.mips.dpsu.h.qbl" => "__builtin_mips_dpsu_h_qbl",
+ "llvm.mips.dpsu.h.qbr" => "__builtin_mips_dpsu_h_qbr",
+ "llvm.mips.dpsub.s.d" => "__builtin_msa_dpsub_s_d",
+ "llvm.mips.dpsub.s.h" => "__builtin_msa_dpsub_s_h",
+ "llvm.mips.dpsub.s.w" => "__builtin_msa_dpsub_s_w",
+ "llvm.mips.dpsub.u.d" => "__builtin_msa_dpsub_u_d",
+ "llvm.mips.dpsub.u.h" => "__builtin_msa_dpsub_u_h",
+ "llvm.mips.dpsub.u.w" => "__builtin_msa_dpsub_u_w",
+ "llvm.mips.dpsx.w.ph" => "__builtin_mips_dpsx_w_ph",
+ "llvm.mips.extp" => "__builtin_mips_extp",
+ "llvm.mips.extpdp" => "__builtin_mips_extpdp",
+ "llvm.mips.extr.r.w" => "__builtin_mips_extr_r_w",
+ "llvm.mips.extr.rs.w" => "__builtin_mips_extr_rs_w",
+ "llvm.mips.extr.s.h" => "__builtin_mips_extr_s_h",
+ "llvm.mips.extr.w" => "__builtin_mips_extr_w",
+ "llvm.mips.fadd.d" => "__builtin_msa_fadd_d",
+ "llvm.mips.fadd.w" => "__builtin_msa_fadd_w",
+ "llvm.mips.fcaf.d" => "__builtin_msa_fcaf_d",
+ "llvm.mips.fcaf.w" => "__builtin_msa_fcaf_w",
+ "llvm.mips.fceq.d" => "__builtin_msa_fceq_d",
+ "llvm.mips.fceq.w" => "__builtin_msa_fceq_w",
+ "llvm.mips.fclass.d" => "__builtin_msa_fclass_d",
+ "llvm.mips.fclass.w" => "__builtin_msa_fclass_w",
+ "llvm.mips.fcle.d" => "__builtin_msa_fcle_d",
+ "llvm.mips.fcle.w" => "__builtin_msa_fcle_w",
+ "llvm.mips.fclt.d" => "__builtin_msa_fclt_d",
+ "llvm.mips.fclt.w" => "__builtin_msa_fclt_w",
+ "llvm.mips.fcne.d" => "__builtin_msa_fcne_d",
+ "llvm.mips.fcne.w" => "__builtin_msa_fcne_w",
+ "llvm.mips.fcor.d" => "__builtin_msa_fcor_d",
+ "llvm.mips.fcor.w" => "__builtin_msa_fcor_w",
+ "llvm.mips.fcueq.d" => "__builtin_msa_fcueq_d",
+ "llvm.mips.fcueq.w" => "__builtin_msa_fcueq_w",
+ "llvm.mips.fcule.d" => "__builtin_msa_fcule_d",
+ "llvm.mips.fcule.w" => "__builtin_msa_fcule_w",
+ "llvm.mips.fcult.d" => "__builtin_msa_fcult_d",
+ "llvm.mips.fcult.w" => "__builtin_msa_fcult_w",
+ "llvm.mips.fcun.d" => "__builtin_msa_fcun_d",
+ "llvm.mips.fcun.w" => "__builtin_msa_fcun_w",
+ "llvm.mips.fcune.d" => "__builtin_msa_fcune_d",
+ "llvm.mips.fcune.w" => "__builtin_msa_fcune_w",
+ "llvm.mips.fdiv.d" => "__builtin_msa_fdiv_d",
+ "llvm.mips.fdiv.w" => "__builtin_msa_fdiv_w",
+ "llvm.mips.fexdo.h" => "__builtin_msa_fexdo_h",
+ "llvm.mips.fexdo.w" => "__builtin_msa_fexdo_w",
+ "llvm.mips.fexp2.d" => "__builtin_msa_fexp2_d",
+ "llvm.mips.fexp2.w" => "__builtin_msa_fexp2_w",
+ "llvm.mips.fexupl.d" => "__builtin_msa_fexupl_d",
+ "llvm.mips.fexupl.w" => "__builtin_msa_fexupl_w",
+ "llvm.mips.fexupr.d" => "__builtin_msa_fexupr_d",
+ "llvm.mips.fexupr.w" => "__builtin_msa_fexupr_w",
+ "llvm.mips.ffint.s.d" => "__builtin_msa_ffint_s_d",
+ "llvm.mips.ffint.s.w" => "__builtin_msa_ffint_s_w",
+ "llvm.mips.ffint.u.d" => "__builtin_msa_ffint_u_d",
+ "llvm.mips.ffint.u.w" => "__builtin_msa_ffint_u_w",
+ "llvm.mips.ffql.d" => "__builtin_msa_ffql_d",
+ "llvm.mips.ffql.w" => "__builtin_msa_ffql_w",
+ "llvm.mips.ffqr.d" => "__builtin_msa_ffqr_d",
+ "llvm.mips.ffqr.w" => "__builtin_msa_ffqr_w",
+ "llvm.mips.fill.b" => "__builtin_msa_fill_b",
+ "llvm.mips.fill.d" => "__builtin_msa_fill_d",
+ "llvm.mips.fill.h" => "__builtin_msa_fill_h",
+ "llvm.mips.fill.w" => "__builtin_msa_fill_w",
+ "llvm.mips.flog2.d" => "__builtin_msa_flog2_d",
+ "llvm.mips.flog2.w" => "__builtin_msa_flog2_w",
+ "llvm.mips.fmadd.d" => "__builtin_msa_fmadd_d",
+ "llvm.mips.fmadd.w" => "__builtin_msa_fmadd_w",
+ "llvm.mips.fmax.a.d" => "__builtin_msa_fmax_a_d",
+ "llvm.mips.fmax.a.w" => "__builtin_msa_fmax_a_w",
+ "llvm.mips.fmax.d" => "__builtin_msa_fmax_d",
+ "llvm.mips.fmax.w" => "__builtin_msa_fmax_w",
+ "llvm.mips.fmin.a.d" => "__builtin_msa_fmin_a_d",
+ "llvm.mips.fmin.a.w" => "__builtin_msa_fmin_a_w",
+ "llvm.mips.fmin.d" => "__builtin_msa_fmin_d",
+ "llvm.mips.fmin.w" => "__builtin_msa_fmin_w",
+ "llvm.mips.fmsub.d" => "__builtin_msa_fmsub_d",
+ "llvm.mips.fmsub.w" => "__builtin_msa_fmsub_w",
+ "llvm.mips.fmul.d" => "__builtin_msa_fmul_d",
+ "llvm.mips.fmul.w" => "__builtin_msa_fmul_w",
+ "llvm.mips.frcp.d" => "__builtin_msa_frcp_d",
+ "llvm.mips.frcp.w" => "__builtin_msa_frcp_w",
+ "llvm.mips.frint.d" => "__builtin_msa_frint_d",
+ "llvm.mips.frint.w" => "__builtin_msa_frint_w",
+ "llvm.mips.frsqrt.d" => "__builtin_msa_frsqrt_d",
+ "llvm.mips.frsqrt.w" => "__builtin_msa_frsqrt_w",
+ "llvm.mips.fsaf.d" => "__builtin_msa_fsaf_d",
+ "llvm.mips.fsaf.w" => "__builtin_msa_fsaf_w",
+ "llvm.mips.fseq.d" => "__builtin_msa_fseq_d",
+ "llvm.mips.fseq.w" => "__builtin_msa_fseq_w",
+ "llvm.mips.fsle.d" => "__builtin_msa_fsle_d",
+ "llvm.mips.fsle.w" => "__builtin_msa_fsle_w",
+ "llvm.mips.fslt.d" => "__builtin_msa_fslt_d",
+ "llvm.mips.fslt.w" => "__builtin_msa_fslt_w",
+ "llvm.mips.fsne.d" => "__builtin_msa_fsne_d",
+ "llvm.mips.fsne.w" => "__builtin_msa_fsne_w",
+ "llvm.mips.fsor.d" => "__builtin_msa_fsor_d",
+ "llvm.mips.fsor.w" => "__builtin_msa_fsor_w",
+ "llvm.mips.fsqrt.d" => "__builtin_msa_fsqrt_d",
+ "llvm.mips.fsqrt.w" => "__builtin_msa_fsqrt_w",
+ "llvm.mips.fsub.d" => "__builtin_msa_fsub_d",
+ "llvm.mips.fsub.w" => "__builtin_msa_fsub_w",
+ "llvm.mips.fsueq.d" => "__builtin_msa_fsueq_d",
+ "llvm.mips.fsueq.w" => "__builtin_msa_fsueq_w",
+ "llvm.mips.fsule.d" => "__builtin_msa_fsule_d",
+ "llvm.mips.fsule.w" => "__builtin_msa_fsule_w",
+ "llvm.mips.fsult.d" => "__builtin_msa_fsult_d",
+ "llvm.mips.fsult.w" => "__builtin_msa_fsult_w",
+ "llvm.mips.fsun.d" => "__builtin_msa_fsun_d",
+ "llvm.mips.fsun.w" => "__builtin_msa_fsun_w",
+ "llvm.mips.fsune.d" => "__builtin_msa_fsune_d",
+ "llvm.mips.fsune.w" => "__builtin_msa_fsune_w",
+ "llvm.mips.ftint.s.d" => "__builtin_msa_ftint_s_d",
+ "llvm.mips.ftint.s.w" => "__builtin_msa_ftint_s_w",
+ "llvm.mips.ftint.u.d" => "__builtin_msa_ftint_u_d",
+ "llvm.mips.ftint.u.w" => "__builtin_msa_ftint_u_w",
+ "llvm.mips.ftq.h" => "__builtin_msa_ftq_h",
+ "llvm.mips.ftq.w" => "__builtin_msa_ftq_w",
+ "llvm.mips.ftrunc.s.d" => "__builtin_msa_ftrunc_s_d",
+ "llvm.mips.ftrunc.s.w" => "__builtin_msa_ftrunc_s_w",
+ "llvm.mips.ftrunc.u.d" => "__builtin_msa_ftrunc_u_d",
+ "llvm.mips.ftrunc.u.w" => "__builtin_msa_ftrunc_u_w",
+ "llvm.mips.hadd.s.d" => "__builtin_msa_hadd_s_d",
+ "llvm.mips.hadd.s.h" => "__builtin_msa_hadd_s_h",
+ "llvm.mips.hadd.s.w" => "__builtin_msa_hadd_s_w",
+ "llvm.mips.hadd.u.d" => "__builtin_msa_hadd_u_d",
+ "llvm.mips.hadd.u.h" => "__builtin_msa_hadd_u_h",
+ "llvm.mips.hadd.u.w" => "__builtin_msa_hadd_u_w",
+ "llvm.mips.hsub.s.d" => "__builtin_msa_hsub_s_d",
+ "llvm.mips.hsub.s.h" => "__builtin_msa_hsub_s_h",
+ "llvm.mips.hsub.s.w" => "__builtin_msa_hsub_s_w",
+ "llvm.mips.hsub.u.d" => "__builtin_msa_hsub_u_d",
+ "llvm.mips.hsub.u.h" => "__builtin_msa_hsub_u_h",
+ "llvm.mips.hsub.u.w" => "__builtin_msa_hsub_u_w",
+ "llvm.mips.ilvev.b" => "__builtin_msa_ilvev_b",
+ "llvm.mips.ilvev.d" => "__builtin_msa_ilvev_d",
+ "llvm.mips.ilvev.h" => "__builtin_msa_ilvev_h",
+ "llvm.mips.ilvev.w" => "__builtin_msa_ilvev_w",
+ "llvm.mips.ilvl.b" => "__builtin_msa_ilvl_b",
+ "llvm.mips.ilvl.d" => "__builtin_msa_ilvl_d",
+ "llvm.mips.ilvl.h" => "__builtin_msa_ilvl_h",
+ "llvm.mips.ilvl.w" => "__builtin_msa_ilvl_w",
+ "llvm.mips.ilvod.b" => "__builtin_msa_ilvod_b",
+ "llvm.mips.ilvod.d" => "__builtin_msa_ilvod_d",
+ "llvm.mips.ilvod.h" => "__builtin_msa_ilvod_h",
+ "llvm.mips.ilvod.w" => "__builtin_msa_ilvod_w",
+ "llvm.mips.ilvr.b" => "__builtin_msa_ilvr_b",
+ "llvm.mips.ilvr.d" => "__builtin_msa_ilvr_d",
+ "llvm.mips.ilvr.h" => "__builtin_msa_ilvr_h",
+ "llvm.mips.ilvr.w" => "__builtin_msa_ilvr_w",
+ "llvm.mips.insert.b" => "__builtin_msa_insert_b",
+ "llvm.mips.insert.d" => "__builtin_msa_insert_d",
+ "llvm.mips.insert.h" => "__builtin_msa_insert_h",
+ "llvm.mips.insert.w" => "__builtin_msa_insert_w",
+ "llvm.mips.insv" => "__builtin_mips_insv",
+ "llvm.mips.insve.b" => "__builtin_msa_insve_b",
+ "llvm.mips.insve.d" => "__builtin_msa_insve_d",
+ "llvm.mips.insve.h" => "__builtin_msa_insve_h",
+ "llvm.mips.insve.w" => "__builtin_msa_insve_w",
+ "llvm.mips.lbux" => "__builtin_mips_lbux",
+ "llvm.mips.ld.b" => "__builtin_msa_ld_b",
+ "llvm.mips.ld.d" => "__builtin_msa_ld_d",
+ "llvm.mips.ld.h" => "__builtin_msa_ld_h",
+ "llvm.mips.ld.w" => "__builtin_msa_ld_w",
+ "llvm.mips.ldi.b" => "__builtin_msa_ldi_b",
+ "llvm.mips.ldi.d" => "__builtin_msa_ldi_d",
+ "llvm.mips.ldi.h" => "__builtin_msa_ldi_h",
+ "llvm.mips.ldi.w" => "__builtin_msa_ldi_w",
+ "llvm.mips.ldr.d" => "__builtin_msa_ldr_d",
+ "llvm.mips.ldr.w" => "__builtin_msa_ldr_w",
+ "llvm.mips.lhx" => "__builtin_mips_lhx",
+ "llvm.mips.lsa" => "__builtin_mips_lsa",
+ "llvm.mips.lwx" => "__builtin_mips_lwx",
+ "llvm.mips.madd" => "__builtin_mips_madd",
+ "llvm.mips.madd.q.h" => "__builtin_msa_madd_q_h",
+ "llvm.mips.madd.q.w" => "__builtin_msa_madd_q_w",
+ "llvm.mips.maddr.q.h" => "__builtin_msa_maddr_q_h",
+ "llvm.mips.maddr.q.w" => "__builtin_msa_maddr_q_w",
+ "llvm.mips.maddu" => "__builtin_mips_maddu",
+ "llvm.mips.maddv.b" => "__builtin_msa_maddv_b",
+ "llvm.mips.maddv.d" => "__builtin_msa_maddv_d",
+ "llvm.mips.maddv.h" => "__builtin_msa_maddv_h",
+ "llvm.mips.maddv.w" => "__builtin_msa_maddv_w",
+ "llvm.mips.maq.s.w.phl" => "__builtin_mips_maq_s_w_phl",
+ "llvm.mips.maq.s.w.phr" => "__builtin_mips_maq_s_w_phr",
+ "llvm.mips.maq.sa.w.phl" => "__builtin_mips_maq_sa_w_phl",
+ "llvm.mips.maq.sa.w.phr" => "__builtin_mips_maq_sa_w_phr",
+ "llvm.mips.max.a.b" => "__builtin_msa_max_a_b",
+ "llvm.mips.max.a.d" => "__builtin_msa_max_a_d",
+ "llvm.mips.max.a.h" => "__builtin_msa_max_a_h",
+ "llvm.mips.max.a.w" => "__builtin_msa_max_a_w",
+ "llvm.mips.max.s.b" => "__builtin_msa_max_s_b",
+ "llvm.mips.max.s.d" => "__builtin_msa_max_s_d",
+ "llvm.mips.max.s.h" => "__builtin_msa_max_s_h",
+ "llvm.mips.max.s.w" => "__builtin_msa_max_s_w",
+ "llvm.mips.max.u.b" => "__builtin_msa_max_u_b",
+ "llvm.mips.max.u.d" => "__builtin_msa_max_u_d",
+ "llvm.mips.max.u.h" => "__builtin_msa_max_u_h",
+ "llvm.mips.max.u.w" => "__builtin_msa_max_u_w",
+ "llvm.mips.maxi.s.b" => "__builtin_msa_maxi_s_b",
+ "llvm.mips.maxi.s.d" => "__builtin_msa_maxi_s_d",
+ "llvm.mips.maxi.s.h" => "__builtin_msa_maxi_s_h",
+ "llvm.mips.maxi.s.w" => "__builtin_msa_maxi_s_w",
+ "llvm.mips.maxi.u.b" => "__builtin_msa_maxi_u_b",
+ "llvm.mips.maxi.u.d" => "__builtin_msa_maxi_u_d",
+ "llvm.mips.maxi.u.h" => "__builtin_msa_maxi_u_h",
+ "llvm.mips.maxi.u.w" => "__builtin_msa_maxi_u_w",
+ "llvm.mips.min.a.b" => "__builtin_msa_min_a_b",
+ "llvm.mips.min.a.d" => "__builtin_msa_min_a_d",
+ "llvm.mips.min.a.h" => "__builtin_msa_min_a_h",
+ "llvm.mips.min.a.w" => "__builtin_msa_min_a_w",
+ "llvm.mips.min.s.b" => "__builtin_msa_min_s_b",
+ "llvm.mips.min.s.d" => "__builtin_msa_min_s_d",
+ "llvm.mips.min.s.h" => "__builtin_msa_min_s_h",
+ "llvm.mips.min.s.w" => "__builtin_msa_min_s_w",
+ "llvm.mips.min.u.b" => "__builtin_msa_min_u_b",
+ "llvm.mips.min.u.d" => "__builtin_msa_min_u_d",
+ "llvm.mips.min.u.h" => "__builtin_msa_min_u_h",
+ "llvm.mips.min.u.w" => "__builtin_msa_min_u_w",
+ "llvm.mips.mini.s.b" => "__builtin_msa_mini_s_b",
+ "llvm.mips.mini.s.d" => "__builtin_msa_mini_s_d",
+ "llvm.mips.mini.s.h" => "__builtin_msa_mini_s_h",
+ "llvm.mips.mini.s.w" => "__builtin_msa_mini_s_w",
+ "llvm.mips.mini.u.b" => "__builtin_msa_mini_u_b",
+ "llvm.mips.mini.u.d" => "__builtin_msa_mini_u_d",
+ "llvm.mips.mini.u.h" => "__builtin_msa_mini_u_h",
+ "llvm.mips.mini.u.w" => "__builtin_msa_mini_u_w",
+ "llvm.mips.mod.s.b" => "__builtin_msa_mod_s_b",
+ "llvm.mips.mod.s.d" => "__builtin_msa_mod_s_d",
+ "llvm.mips.mod.s.h" => "__builtin_msa_mod_s_h",
+ "llvm.mips.mod.s.w" => "__builtin_msa_mod_s_w",
+ "llvm.mips.mod.u.b" => "__builtin_msa_mod_u_b",
+ "llvm.mips.mod.u.d" => "__builtin_msa_mod_u_d",
+ "llvm.mips.mod.u.h" => "__builtin_msa_mod_u_h",
+ "llvm.mips.mod.u.w" => "__builtin_msa_mod_u_w",
+ "llvm.mips.modsub" => "__builtin_mips_modsub",
+ "llvm.mips.move.v" => "__builtin_msa_move_v",
+ "llvm.mips.msub" => "__builtin_mips_msub",
+ "llvm.mips.msub.q.h" => "__builtin_msa_msub_q_h",
+ "llvm.mips.msub.q.w" => "__builtin_msa_msub_q_w",
+ "llvm.mips.msubr.q.h" => "__builtin_msa_msubr_q_h",
+ "llvm.mips.msubr.q.w" => "__builtin_msa_msubr_q_w",
+ "llvm.mips.msubu" => "__builtin_mips_msubu",
+ "llvm.mips.msubv.b" => "__builtin_msa_msubv_b",
+ "llvm.mips.msubv.d" => "__builtin_msa_msubv_d",
+ "llvm.mips.msubv.h" => "__builtin_msa_msubv_h",
+ "llvm.mips.msubv.w" => "__builtin_msa_msubv_w",
+ "llvm.mips.mthlip" => "__builtin_mips_mthlip",
+ "llvm.mips.mul.ph" => "__builtin_mips_mul_ph",
+ "llvm.mips.mul.q.h" => "__builtin_msa_mul_q_h",
+ "llvm.mips.mul.q.w" => "__builtin_msa_mul_q_w",
+ "llvm.mips.mul.s.ph" => "__builtin_mips_mul_s_ph",
+ "llvm.mips.muleq.s.w.phl" => "__builtin_mips_muleq_s_w_phl",
+ "llvm.mips.muleq.s.w.phr" => "__builtin_mips_muleq_s_w_phr",
+ "llvm.mips.muleu.s.ph.qbl" => "__builtin_mips_muleu_s_ph_qbl",
+ "llvm.mips.muleu.s.ph.qbr" => "__builtin_mips_muleu_s_ph_qbr",
+ "llvm.mips.mulq.rs.ph" => "__builtin_mips_mulq_rs_ph",
+ "llvm.mips.mulq.rs.w" => "__builtin_mips_mulq_rs_w",
+ "llvm.mips.mulq.s.ph" => "__builtin_mips_mulq_s_ph",
+ "llvm.mips.mulq.s.w" => "__builtin_mips_mulq_s_w",
+ "llvm.mips.mulr.q.h" => "__builtin_msa_mulr_q_h",
+ "llvm.mips.mulr.q.w" => "__builtin_msa_mulr_q_w",
+ "llvm.mips.mulsa.w.ph" => "__builtin_mips_mulsa_w_ph",
+ "llvm.mips.mulsaq.s.w.ph" => "__builtin_mips_mulsaq_s_w_ph",
+ "llvm.mips.mult" => "__builtin_mips_mult",
+ "llvm.mips.multu" => "__builtin_mips_multu",
+ "llvm.mips.mulv.b" => "__builtin_msa_mulv_b",
+ "llvm.mips.mulv.d" => "__builtin_msa_mulv_d",
+ "llvm.mips.mulv.h" => "__builtin_msa_mulv_h",
+ "llvm.mips.mulv.w" => "__builtin_msa_mulv_w",
+ "llvm.mips.nloc.b" => "__builtin_msa_nloc_b",
+ "llvm.mips.nloc.d" => "__builtin_msa_nloc_d",
+ "llvm.mips.nloc.h" => "__builtin_msa_nloc_h",
+ "llvm.mips.nloc.w" => "__builtin_msa_nloc_w",
+ "llvm.mips.nlzc.b" => "__builtin_msa_nlzc_b",
+ "llvm.mips.nlzc.d" => "__builtin_msa_nlzc_d",
+ "llvm.mips.nlzc.h" => "__builtin_msa_nlzc_h",
+ "llvm.mips.nlzc.w" => "__builtin_msa_nlzc_w",
+ "llvm.mips.nor.v" => "__builtin_msa_nor_v",
+ "llvm.mips.nori.b" => "__builtin_msa_nori_b",
+ "llvm.mips.or.v" => "__builtin_msa_or_v",
+ "llvm.mips.ori.b" => "__builtin_msa_ori_b",
+ "llvm.mips.packrl.ph" => "__builtin_mips_packrl_ph",
+ "llvm.mips.pckev.b" => "__builtin_msa_pckev_b",
+ "llvm.mips.pckev.d" => "__builtin_msa_pckev_d",
+ "llvm.mips.pckev.h" => "__builtin_msa_pckev_h",
+ "llvm.mips.pckev.w" => "__builtin_msa_pckev_w",
+ "llvm.mips.pckod.b" => "__builtin_msa_pckod_b",
+ "llvm.mips.pckod.d" => "__builtin_msa_pckod_d",
+ "llvm.mips.pckod.h" => "__builtin_msa_pckod_h",
+ "llvm.mips.pckod.w" => "__builtin_msa_pckod_w",
+ "llvm.mips.pcnt.b" => "__builtin_msa_pcnt_b",
+ "llvm.mips.pcnt.d" => "__builtin_msa_pcnt_d",
+ "llvm.mips.pcnt.h" => "__builtin_msa_pcnt_h",
+ "llvm.mips.pcnt.w" => "__builtin_msa_pcnt_w",
+ "llvm.mips.pick.ph" => "__builtin_mips_pick_ph",
+ "llvm.mips.pick.qb" => "__builtin_mips_pick_qb",
+ "llvm.mips.preceq.w.phl" => "__builtin_mips_preceq_w_phl",
+ "llvm.mips.preceq.w.phr" => "__builtin_mips_preceq_w_phr",
+ "llvm.mips.precequ.ph.qbl" => "__builtin_mips_precequ_ph_qbl",
+ "llvm.mips.precequ.ph.qbla" => "__builtin_mips_precequ_ph_qbla",
+ "llvm.mips.precequ.ph.qbr" => "__builtin_mips_precequ_ph_qbr",
+ "llvm.mips.precequ.ph.qbra" => "__builtin_mips_precequ_ph_qbra",
+ "llvm.mips.preceu.ph.qbl" => "__builtin_mips_preceu_ph_qbl",
+ "llvm.mips.preceu.ph.qbla" => "__builtin_mips_preceu_ph_qbla",
+ "llvm.mips.preceu.ph.qbr" => "__builtin_mips_preceu_ph_qbr",
+ "llvm.mips.preceu.ph.qbra" => "__builtin_mips_preceu_ph_qbra",
+ "llvm.mips.precr.qb.ph" => "__builtin_mips_precr_qb_ph",
+ "llvm.mips.precr.sra.ph.w" => "__builtin_mips_precr_sra_ph_w",
+ "llvm.mips.precr.sra.r.ph.w" => "__builtin_mips_precr_sra_r_ph_w",
+ "llvm.mips.precrq.ph.w" => "__builtin_mips_precrq_ph_w",
+ "llvm.mips.precrq.qb.ph" => "__builtin_mips_precrq_qb_ph",
+ "llvm.mips.precrq.rs.ph.w" => "__builtin_mips_precrq_rs_ph_w",
+ "llvm.mips.precrqu.s.qb.ph" => "__builtin_mips_precrqu_s_qb_ph",
+ "llvm.mips.prepend" => "__builtin_mips_prepend",
+ "llvm.mips.raddu.w.qb" => "__builtin_mips_raddu_w_qb",
+ "llvm.mips.rddsp" => "__builtin_mips_rddsp",
+ "llvm.mips.repl.ph" => "__builtin_mips_repl_ph",
+ "llvm.mips.repl.qb" => "__builtin_mips_repl_qb",
+ "llvm.mips.sat.s.b" => "__builtin_msa_sat_s_b",
+ "llvm.mips.sat.s.d" => "__builtin_msa_sat_s_d",
+ "llvm.mips.sat.s.h" => "__builtin_msa_sat_s_h",
+ "llvm.mips.sat.s.w" => "__builtin_msa_sat_s_w",
+ "llvm.mips.sat.u.b" => "__builtin_msa_sat_u_b",
+ "llvm.mips.sat.u.d" => "__builtin_msa_sat_u_d",
+ "llvm.mips.sat.u.h" => "__builtin_msa_sat_u_h",
+ "llvm.mips.sat.u.w" => "__builtin_msa_sat_u_w",
+ "llvm.mips.shf.b" => "__builtin_msa_shf_b",
+ "llvm.mips.shf.h" => "__builtin_msa_shf_h",
+ "llvm.mips.shf.w" => "__builtin_msa_shf_w",
+ "llvm.mips.shilo" => "__builtin_mips_shilo",
+ "llvm.mips.shll.ph" => "__builtin_mips_shll_ph",
+ "llvm.mips.shll.qb" => "__builtin_mips_shll_qb",
+ "llvm.mips.shll.s.ph" => "__builtin_mips_shll_s_ph",
+ "llvm.mips.shll.s.w" => "__builtin_mips_shll_s_w",
+ "llvm.mips.shra.ph" => "__builtin_mips_shra_ph",
+ "llvm.mips.shra.qb" => "__builtin_mips_shra_qb",
+ "llvm.mips.shra.r.ph" => "__builtin_mips_shra_r_ph",
+ "llvm.mips.shra.r.qb" => "__builtin_mips_shra_r_qb",
+ "llvm.mips.shra.r.w" => "__builtin_mips_shra_r_w",
+ "llvm.mips.shrl.ph" => "__builtin_mips_shrl_ph",
+ "llvm.mips.shrl.qb" => "__builtin_mips_shrl_qb",
+ "llvm.mips.sld.b" => "__builtin_msa_sld_b",
+ "llvm.mips.sld.d" => "__builtin_msa_sld_d",
+ "llvm.mips.sld.h" => "__builtin_msa_sld_h",
+ "llvm.mips.sld.w" => "__builtin_msa_sld_w",
+ "llvm.mips.sldi.b" => "__builtin_msa_sldi_b",
+ "llvm.mips.sldi.d" => "__builtin_msa_sldi_d",
+ "llvm.mips.sldi.h" => "__builtin_msa_sldi_h",
+ "llvm.mips.sldi.w" => "__builtin_msa_sldi_w",
+ "llvm.mips.sll.b" => "__builtin_msa_sll_b",
+ "llvm.mips.sll.d" => "__builtin_msa_sll_d",
+ "llvm.mips.sll.h" => "__builtin_msa_sll_h",
+ "llvm.mips.sll.w" => "__builtin_msa_sll_w",
+ "llvm.mips.slli.b" => "__builtin_msa_slli_b",
+ "llvm.mips.slli.d" => "__builtin_msa_slli_d",
+ "llvm.mips.slli.h" => "__builtin_msa_slli_h",
+ "llvm.mips.slli.w" => "__builtin_msa_slli_w",
+ "llvm.mips.splat.b" => "__builtin_msa_splat_b",
+ "llvm.mips.splat.d" => "__builtin_msa_splat_d",
+ "llvm.mips.splat.h" => "__builtin_msa_splat_h",
+ "llvm.mips.splat.w" => "__builtin_msa_splat_w",
+ "llvm.mips.splati.b" => "__builtin_msa_splati_b",
+ "llvm.mips.splati.d" => "__builtin_msa_splati_d",
+ "llvm.mips.splati.h" => "__builtin_msa_splati_h",
+ "llvm.mips.splati.w" => "__builtin_msa_splati_w",
+ "llvm.mips.sra.b" => "__builtin_msa_sra_b",
+ "llvm.mips.sra.d" => "__builtin_msa_sra_d",
+ "llvm.mips.sra.h" => "__builtin_msa_sra_h",
+ "llvm.mips.sra.w" => "__builtin_msa_sra_w",
+ "llvm.mips.srai.b" => "__builtin_msa_srai_b",
+ "llvm.mips.srai.d" => "__builtin_msa_srai_d",
+ "llvm.mips.srai.h" => "__builtin_msa_srai_h",
+ "llvm.mips.srai.w" => "__builtin_msa_srai_w",
+ "llvm.mips.srar.b" => "__builtin_msa_srar_b",
+ "llvm.mips.srar.d" => "__builtin_msa_srar_d",
+ "llvm.mips.srar.h" => "__builtin_msa_srar_h",
+ "llvm.mips.srar.w" => "__builtin_msa_srar_w",
+ "llvm.mips.srari.b" => "__builtin_msa_srari_b",
+ "llvm.mips.srari.d" => "__builtin_msa_srari_d",
+ "llvm.mips.srari.h" => "__builtin_msa_srari_h",
+ "llvm.mips.srari.w" => "__builtin_msa_srari_w",
+ "llvm.mips.srl.b" => "__builtin_msa_srl_b",
+ "llvm.mips.srl.d" => "__builtin_msa_srl_d",
+ "llvm.mips.srl.h" => "__builtin_msa_srl_h",
+ "llvm.mips.srl.w" => "__builtin_msa_srl_w",
+ "llvm.mips.srli.b" => "__builtin_msa_srli_b",
+ "llvm.mips.srli.d" => "__builtin_msa_srli_d",
+ "llvm.mips.srli.h" => "__builtin_msa_srli_h",
+ "llvm.mips.srli.w" => "__builtin_msa_srli_w",
+ "llvm.mips.srlr.b" => "__builtin_msa_srlr_b",
+ "llvm.mips.srlr.d" => "__builtin_msa_srlr_d",
+ "llvm.mips.srlr.h" => "__builtin_msa_srlr_h",
+ "llvm.mips.srlr.w" => "__builtin_msa_srlr_w",
+ "llvm.mips.srlri.b" => "__builtin_msa_srlri_b",
+ "llvm.mips.srlri.d" => "__builtin_msa_srlri_d",
+ "llvm.mips.srlri.h" => "__builtin_msa_srlri_h",
+ "llvm.mips.srlri.w" => "__builtin_msa_srlri_w",
+ "llvm.mips.st.b" => "__builtin_msa_st_b",
+ "llvm.mips.st.d" => "__builtin_msa_st_d",
+ "llvm.mips.st.h" => "__builtin_msa_st_h",
+ "llvm.mips.st.w" => "__builtin_msa_st_w",
+ "llvm.mips.str.d" => "__builtin_msa_str_d",
+ "llvm.mips.str.w" => "__builtin_msa_str_w",
+ "llvm.mips.subq.ph" => "__builtin_mips_subq_ph",
+ "llvm.mips.subq.s.ph" => "__builtin_mips_subq_s_ph",
+ "llvm.mips.subq.s.w" => "__builtin_mips_subq_s_w",
+ "llvm.mips.subqh.ph" => "__builtin_mips_subqh_ph",
+ "llvm.mips.subqh.r.ph" => "__builtin_mips_subqh_r_ph",
+ "llvm.mips.subqh.r.w" => "__builtin_mips_subqh_r_w",
+ "llvm.mips.subqh.w" => "__builtin_mips_subqh_w",
+ "llvm.mips.subs.s.b" => "__builtin_msa_subs_s_b",
+ "llvm.mips.subs.s.d" => "__builtin_msa_subs_s_d",
+ "llvm.mips.subs.s.h" => "__builtin_msa_subs_s_h",
+ "llvm.mips.subs.s.w" => "__builtin_msa_subs_s_w",
+ "llvm.mips.subs.u.b" => "__builtin_msa_subs_u_b",
+ "llvm.mips.subs.u.d" => "__builtin_msa_subs_u_d",
+ "llvm.mips.subs.u.h" => "__builtin_msa_subs_u_h",
+ "llvm.mips.subs.u.w" => "__builtin_msa_subs_u_w",
+ "llvm.mips.subsus.u.b" => "__builtin_msa_subsus_u_b",
+ "llvm.mips.subsus.u.d" => "__builtin_msa_subsus_u_d",
+ "llvm.mips.subsus.u.h" => "__builtin_msa_subsus_u_h",
+ "llvm.mips.subsus.u.w" => "__builtin_msa_subsus_u_w",
+ "llvm.mips.subsuu.s.b" => "__builtin_msa_subsuu_s_b",
+ "llvm.mips.subsuu.s.d" => "__builtin_msa_subsuu_s_d",
+ "llvm.mips.subsuu.s.h" => "__builtin_msa_subsuu_s_h",
+ "llvm.mips.subsuu.s.w" => "__builtin_msa_subsuu_s_w",
+ "llvm.mips.subu.ph" => "__builtin_mips_subu_ph",
+ "llvm.mips.subu.qb" => "__builtin_mips_subu_qb",
+ "llvm.mips.subu.s.ph" => "__builtin_mips_subu_s_ph",
+ "llvm.mips.subu.s.qb" => "__builtin_mips_subu_s_qb",
+ "llvm.mips.subuh.qb" => "__builtin_mips_subuh_qb",
+ "llvm.mips.subuh.r.qb" => "__builtin_mips_subuh_r_qb",
+ "llvm.mips.subv.b" => "__builtin_msa_subv_b",
+ "llvm.mips.subv.d" => "__builtin_msa_subv_d",
+ "llvm.mips.subv.h" => "__builtin_msa_subv_h",
+ "llvm.mips.subv.w" => "__builtin_msa_subv_w",
+ "llvm.mips.subvi.b" => "__builtin_msa_subvi_b",
+ "llvm.mips.subvi.d" => "__builtin_msa_subvi_d",
+ "llvm.mips.subvi.h" => "__builtin_msa_subvi_h",
+ "llvm.mips.subvi.w" => "__builtin_msa_subvi_w",
+ "llvm.mips.vshf.b" => "__builtin_msa_vshf_b",
+ "llvm.mips.vshf.d" => "__builtin_msa_vshf_d",
+ "llvm.mips.vshf.h" => "__builtin_msa_vshf_h",
+ "llvm.mips.vshf.w" => "__builtin_msa_vshf_w",
+ "llvm.mips.wrdsp" => "__builtin_mips_wrdsp",
+ "llvm.mips.xor.v" => "__builtin_msa_xor_v",
+ "llvm.mips.xori.b" => "__builtin_msa_xori_b",
+ // nvvm
+ "llvm.nvvm.abs.i" => "__nvvm_abs_i",
+ "llvm.nvvm.abs.ll" => "__nvvm_abs_ll",
+ "llvm.nvvm.add.rm.d" => "__nvvm_add_rm_d",
+ "llvm.nvvm.add.rm.f" => "__nvvm_add_rm_f",
+ "llvm.nvvm.add.rm.ftz.f" => "__nvvm_add_rm_ftz_f",
+ "llvm.nvvm.add.rn.d" => "__nvvm_add_rn_d",
+ "llvm.nvvm.add.rn.f" => "__nvvm_add_rn_f",
+ "llvm.nvvm.add.rn.ftz.f" => "__nvvm_add_rn_ftz_f",
+ "llvm.nvvm.add.rp.d" => "__nvvm_add_rp_d",
+ "llvm.nvvm.add.rp.f" => "__nvvm_add_rp_f",
+ "llvm.nvvm.add.rp.ftz.f" => "__nvvm_add_rp_ftz_f",
+ "llvm.nvvm.add.rz.d" => "__nvvm_add_rz_d",
+ "llvm.nvvm.add.rz.f" => "__nvvm_add_rz_f",
+ "llvm.nvvm.add.rz.ftz.f" => "__nvvm_add_rz_ftz_f",
+ "llvm.nvvm.bar.sync" => "__nvvm_bar_sync",
+ "llvm.nvvm.barrier0" => "__nvvm_bar0",
+ // [DUPLICATE]: "llvm.nvvm.barrier0" => "__syncthreads",
+ "llvm.nvvm.barrier0.and" => "__nvvm_bar0_and",
+ "llvm.nvvm.barrier0.or" => "__nvvm_bar0_or",
+ "llvm.nvvm.barrier0.popc" => "__nvvm_bar0_popc",
+ "llvm.nvvm.bitcast.d2ll" => "__nvvm_bitcast_d2ll",
+ "llvm.nvvm.bitcast.f2i" => "__nvvm_bitcast_f2i",
+ "llvm.nvvm.bitcast.i2f" => "__nvvm_bitcast_i2f",
+ "llvm.nvvm.bitcast.ll2d" => "__nvvm_bitcast_ll2d",
+ "llvm.nvvm.brev32" => "__nvvm_brev32",
+ "llvm.nvvm.brev64" => "__nvvm_brev64",
+ "llvm.nvvm.ceil.d" => "__nvvm_ceil_d",
+ "llvm.nvvm.ceil.f" => "__nvvm_ceil_f",
+ "llvm.nvvm.ceil.ftz.f" => "__nvvm_ceil_ftz_f",
+ "llvm.nvvm.clz.i" => "__nvvm_clz_i",
+ "llvm.nvvm.clz.ll" => "__nvvm_clz_ll",
+ "llvm.nvvm.cos.approx.f" => "__nvvm_cos_approx_f",
+ "llvm.nvvm.cos.approx.ftz.f" => "__nvvm_cos_approx_ftz_f",
+ "llvm.nvvm.d2f.rm" => "__nvvm_d2f_rm",
+ "llvm.nvvm.d2f.rm.ftz" => "__nvvm_d2f_rm_ftz",
+ "llvm.nvvm.d2f.rn" => "__nvvm_d2f_rn",
+ "llvm.nvvm.d2f.rn.ftz" => "__nvvm_d2f_rn_ftz",
+ "llvm.nvvm.d2f.rp" => "__nvvm_d2f_rp",
+ "llvm.nvvm.d2f.rp.ftz" => "__nvvm_d2f_rp_ftz",
+ "llvm.nvvm.d2f.rz" => "__nvvm_d2f_rz",
+ "llvm.nvvm.d2f.rz.ftz" => "__nvvm_d2f_rz_ftz",
+ "llvm.nvvm.d2i.hi" => "__nvvm_d2i_hi",
+ "llvm.nvvm.d2i.lo" => "__nvvm_d2i_lo",
+ "llvm.nvvm.d2i.rm" => "__nvvm_d2i_rm",
+ "llvm.nvvm.d2i.rn" => "__nvvm_d2i_rn",
+ "llvm.nvvm.d2i.rp" => "__nvvm_d2i_rp",
+ "llvm.nvvm.d2i.rz" => "__nvvm_d2i_rz",
+ "llvm.nvvm.d2ll.rm" => "__nvvm_d2ll_rm",
+ "llvm.nvvm.d2ll.rn" => "__nvvm_d2ll_rn",
+ "llvm.nvvm.d2ll.rp" => "__nvvm_d2ll_rp",
+ "llvm.nvvm.d2ll.rz" => "__nvvm_d2ll_rz",
+ "llvm.nvvm.d2ui.rm" => "__nvvm_d2ui_rm",
+ "llvm.nvvm.d2ui.rn" => "__nvvm_d2ui_rn",
+ "llvm.nvvm.d2ui.rp" => "__nvvm_d2ui_rp",
+ "llvm.nvvm.d2ui.rz" => "__nvvm_d2ui_rz",
+ "llvm.nvvm.d2ull.rm" => "__nvvm_d2ull_rm",
+ "llvm.nvvm.d2ull.rn" => "__nvvm_d2ull_rn",
+ "llvm.nvvm.d2ull.rp" => "__nvvm_d2ull_rp",
+ "llvm.nvvm.d2ull.rz" => "__nvvm_d2ull_rz",
+ "llvm.nvvm.div.approx.f" => "__nvvm_div_approx_f",
+ "llvm.nvvm.div.approx.ftz.f" => "__nvvm_div_approx_ftz_f",
+ "llvm.nvvm.div.rm.d" => "__nvvm_div_rm_d",
+ "llvm.nvvm.div.rm.f" => "__nvvm_div_rm_f",
+ "llvm.nvvm.div.rm.ftz.f" => "__nvvm_div_rm_ftz_f",
+ "llvm.nvvm.div.rn.d" => "__nvvm_div_rn_d",
+ "llvm.nvvm.div.rn.f" => "__nvvm_div_rn_f",
+ "llvm.nvvm.div.rn.ftz.f" => "__nvvm_div_rn_ftz_f",
+ "llvm.nvvm.div.rp.d" => "__nvvm_div_rp_d",
+ "llvm.nvvm.div.rp.f" => "__nvvm_div_rp_f",
+ "llvm.nvvm.div.rp.ftz.f" => "__nvvm_div_rp_ftz_f",
+ "llvm.nvvm.div.rz.d" => "__nvvm_div_rz_d",
+ "llvm.nvvm.div.rz.f" => "__nvvm_div_rz_f",
+ "llvm.nvvm.div.rz.ftz.f" => "__nvvm_div_rz_ftz_f",
+ "llvm.nvvm.ex2.approx.d" => "__nvvm_ex2_approx_d",
+ "llvm.nvvm.ex2.approx.f" => "__nvvm_ex2_approx_f",
+ "llvm.nvvm.ex2.approx.ftz.f" => "__nvvm_ex2_approx_ftz_f",
+ "llvm.nvvm.f2h.rn" => "__nvvm_f2h_rn",
+ "llvm.nvvm.f2h.rn.ftz" => "__nvvm_f2h_rn_ftz",
+ "llvm.nvvm.f2i.rm" => "__nvvm_f2i_rm",
+ "llvm.nvvm.f2i.rm.ftz" => "__nvvm_f2i_rm_ftz",
+ "llvm.nvvm.f2i.rn" => "__nvvm_f2i_rn",
+ "llvm.nvvm.f2i.rn.ftz" => "__nvvm_f2i_rn_ftz",
+ "llvm.nvvm.f2i.rp" => "__nvvm_f2i_rp",
+ "llvm.nvvm.f2i.rp.ftz" => "__nvvm_f2i_rp_ftz",
+ "llvm.nvvm.f2i.rz" => "__nvvm_f2i_rz",
+ "llvm.nvvm.f2i.rz.ftz" => "__nvvm_f2i_rz_ftz",
+ "llvm.nvvm.f2ll.rm" => "__nvvm_f2ll_rm",
+ "llvm.nvvm.f2ll.rm.ftz" => "__nvvm_f2ll_rm_ftz",
+ "llvm.nvvm.f2ll.rn" => "__nvvm_f2ll_rn",
+ "llvm.nvvm.f2ll.rn.ftz" => "__nvvm_f2ll_rn_ftz",
+ "llvm.nvvm.f2ll.rp" => "__nvvm_f2ll_rp",
+ "llvm.nvvm.f2ll.rp.ftz" => "__nvvm_f2ll_rp_ftz",
+ "llvm.nvvm.f2ll.rz" => "__nvvm_f2ll_rz",
+ "llvm.nvvm.f2ll.rz.ftz" => "__nvvm_f2ll_rz_ftz",
+ "llvm.nvvm.f2ui.rm" => "__nvvm_f2ui_rm",
+ "llvm.nvvm.f2ui.rm.ftz" => "__nvvm_f2ui_rm_ftz",
+ "llvm.nvvm.f2ui.rn" => "__nvvm_f2ui_rn",
+ "llvm.nvvm.f2ui.rn.ftz" => "__nvvm_f2ui_rn_ftz",
+ "llvm.nvvm.f2ui.rp" => "__nvvm_f2ui_rp",
+ "llvm.nvvm.f2ui.rp.ftz" => "__nvvm_f2ui_rp_ftz",
+ "llvm.nvvm.f2ui.rz" => "__nvvm_f2ui_rz",
+ "llvm.nvvm.f2ui.rz.ftz" => "__nvvm_f2ui_rz_ftz",
+ "llvm.nvvm.f2ull.rm" => "__nvvm_f2ull_rm",
+ "llvm.nvvm.f2ull.rm.ftz" => "__nvvm_f2ull_rm_ftz",
+ "llvm.nvvm.f2ull.rn" => "__nvvm_f2ull_rn",
+ "llvm.nvvm.f2ull.rn.ftz" => "__nvvm_f2ull_rn_ftz",
+ "llvm.nvvm.f2ull.rp" => "__nvvm_f2ull_rp",
+ "llvm.nvvm.f2ull.rp.ftz" => "__nvvm_f2ull_rp_ftz",
+ "llvm.nvvm.f2ull.rz" => "__nvvm_f2ull_rz",
+ "llvm.nvvm.f2ull.rz.ftz" => "__nvvm_f2ull_rz_ftz",
+ "llvm.nvvm.fabs.d" => "__nvvm_fabs_d",
+ "llvm.nvvm.fabs.f" => "__nvvm_fabs_f",
+ "llvm.nvvm.fabs.ftz.f" => "__nvvm_fabs_ftz_f",
+ "llvm.nvvm.floor.d" => "__nvvm_floor_d",
+ "llvm.nvvm.floor.f" => "__nvvm_floor_f",
+ "llvm.nvvm.floor.ftz.f" => "__nvvm_floor_ftz_f",
+ "llvm.nvvm.fma.rm.d" => "__nvvm_fma_rm_d",
+ "llvm.nvvm.fma.rm.f" => "__nvvm_fma_rm_f",
+ "llvm.nvvm.fma.rm.ftz.f" => "__nvvm_fma_rm_ftz_f",
+ "llvm.nvvm.fma.rn.d" => "__nvvm_fma_rn_d",
+ "llvm.nvvm.fma.rn.f" => "__nvvm_fma_rn_f",
+ "llvm.nvvm.fma.rn.ftz.f" => "__nvvm_fma_rn_ftz_f",
+ "llvm.nvvm.fma.rp.d" => "__nvvm_fma_rp_d",
+ "llvm.nvvm.fma.rp.f" => "__nvvm_fma_rp_f",
+ "llvm.nvvm.fma.rp.ftz.f" => "__nvvm_fma_rp_ftz_f",
+ "llvm.nvvm.fma.rz.d" => "__nvvm_fma_rz_d",
+ "llvm.nvvm.fma.rz.f" => "__nvvm_fma_rz_f",
+ "llvm.nvvm.fma.rz.ftz.f" => "__nvvm_fma_rz_ftz_f",
+ "llvm.nvvm.fmax.d" => "__nvvm_fmax_d",
+ "llvm.nvvm.fmax.f" => "__nvvm_fmax_f",
+ "llvm.nvvm.fmax.ftz.f" => "__nvvm_fmax_ftz_f",
+ "llvm.nvvm.fmin.d" => "__nvvm_fmin_d",
+ "llvm.nvvm.fmin.f" => "__nvvm_fmin_f",
+ "llvm.nvvm.fmin.ftz.f" => "__nvvm_fmin_ftz_f",
+ "llvm.nvvm.h2f" => "__nvvm_h2f",
+ "llvm.nvvm.i2d.rm" => "__nvvm_i2d_rm",
+ "llvm.nvvm.i2d.rn" => "__nvvm_i2d_rn",
+ "llvm.nvvm.i2d.rp" => "__nvvm_i2d_rp",
+ "llvm.nvvm.i2d.rz" => "__nvvm_i2d_rz",
+ "llvm.nvvm.i2f.rm" => "__nvvm_i2f_rm",
+ "llvm.nvvm.i2f.rn" => "__nvvm_i2f_rn",
+ "llvm.nvvm.i2f.rp" => "__nvvm_i2f_rp",
+ "llvm.nvvm.i2f.rz" => "__nvvm_i2f_rz",
+ "llvm.nvvm.isspacep.const" => "__nvvm_isspacep_const",
+ "llvm.nvvm.isspacep.global" => "__nvvm_isspacep_global",
+ "llvm.nvvm.isspacep.local" => "__nvvm_isspacep_local",
+ "llvm.nvvm.isspacep.shared" => "__nvvm_isspacep_shared",
+ "llvm.nvvm.istypep.sampler" => "__nvvm_istypep_sampler",
+ "llvm.nvvm.istypep.surface" => "__nvvm_istypep_surface",
+ "llvm.nvvm.istypep.texture" => "__nvvm_istypep_texture",
+ "llvm.nvvm.lg2.approx.d" => "__nvvm_lg2_approx_d",
+ "llvm.nvvm.lg2.approx.f" => "__nvvm_lg2_approx_f",
+ "llvm.nvvm.lg2.approx.ftz.f" => "__nvvm_lg2_approx_ftz_f",
+ "llvm.nvvm.ll2d.rm" => "__nvvm_ll2d_rm",
+ "llvm.nvvm.ll2d.rn" => "__nvvm_ll2d_rn",
+ "llvm.nvvm.ll2d.rp" => "__nvvm_ll2d_rp",
+ "llvm.nvvm.ll2d.rz" => "__nvvm_ll2d_rz",
+ "llvm.nvvm.ll2f.rm" => "__nvvm_ll2f_rm",
+ "llvm.nvvm.ll2f.rn" => "__nvvm_ll2f_rn",
+ "llvm.nvvm.ll2f.rp" => "__nvvm_ll2f_rp",
+ "llvm.nvvm.ll2f.rz" => "__nvvm_ll2f_rz",
+ "llvm.nvvm.lohi.i2d" => "__nvvm_lohi_i2d",
+ "llvm.nvvm.max.i" => "__nvvm_max_i",
+ "llvm.nvvm.max.ll" => "__nvvm_max_ll",
+ "llvm.nvvm.max.ui" => "__nvvm_max_ui",
+ "llvm.nvvm.max.ull" => "__nvvm_max_ull",
+ "llvm.nvvm.membar.cta" => "__nvvm_membar_cta",
+ "llvm.nvvm.membar.gl" => "__nvvm_membar_gl",
+ "llvm.nvvm.membar.sys" => "__nvvm_membar_sys",
+ "llvm.nvvm.min.i" => "__nvvm_min_i",
+ "llvm.nvvm.min.ll" => "__nvvm_min_ll",
+ "llvm.nvvm.min.ui" => "__nvvm_min_ui",
+ "llvm.nvvm.min.ull" => "__nvvm_min_ull",
+ "llvm.nvvm.mul.rm.d" => "__nvvm_mul_rm_d",
+ "llvm.nvvm.mul.rm.f" => "__nvvm_mul_rm_f",
+ "llvm.nvvm.mul.rm.ftz.f" => "__nvvm_mul_rm_ftz_f",
+ "llvm.nvvm.mul.rn.d" => "__nvvm_mul_rn_d",
+ "llvm.nvvm.mul.rn.f" => "__nvvm_mul_rn_f",
+ "llvm.nvvm.mul.rn.ftz.f" => "__nvvm_mul_rn_ftz_f",
+ "llvm.nvvm.mul.rp.d" => "__nvvm_mul_rp_d",
+ "llvm.nvvm.mul.rp.f" => "__nvvm_mul_rp_f",
+ "llvm.nvvm.mul.rp.ftz.f" => "__nvvm_mul_rp_ftz_f",
+ "llvm.nvvm.mul.rz.d" => "__nvvm_mul_rz_d",
+ "llvm.nvvm.mul.rz.f" => "__nvvm_mul_rz_f",
+ "llvm.nvvm.mul.rz.ftz.f" => "__nvvm_mul_rz_ftz_f",
+ "llvm.nvvm.mul24.i" => "__nvvm_mul24_i",
+ "llvm.nvvm.mul24.ui" => "__nvvm_mul24_ui",
+ "llvm.nvvm.mulhi.i" => "__nvvm_mulhi_i",
+ "llvm.nvvm.mulhi.ll" => "__nvvm_mulhi_ll",
+ "llvm.nvvm.mulhi.ui" => "__nvvm_mulhi_ui",
+ "llvm.nvvm.mulhi.ull" => "__nvvm_mulhi_ull",
+ "llvm.nvvm.popc.i" => "__nvvm_popc_i",
+ "llvm.nvvm.popc.ll" => "__nvvm_popc_ll",
+ "llvm.nvvm.prmt" => "__nvvm_prmt",
+ "llvm.nvvm.rcp.approx.ftz.d" => "__nvvm_rcp_approx_ftz_d",
+ "llvm.nvvm.rcp.rm.d" => "__nvvm_rcp_rm_d",
+ "llvm.nvvm.rcp.rm.f" => "__nvvm_rcp_rm_f",
+ "llvm.nvvm.rcp.rm.ftz.f" => "__nvvm_rcp_rm_ftz_f",
+ "llvm.nvvm.rcp.rn.d" => "__nvvm_rcp_rn_d",
+ "llvm.nvvm.rcp.rn.f" => "__nvvm_rcp_rn_f",
+ "llvm.nvvm.rcp.rn.ftz.f" => "__nvvm_rcp_rn_ftz_f",
+ "llvm.nvvm.rcp.rp.d" => "__nvvm_rcp_rp_d",
+ "llvm.nvvm.rcp.rp.f" => "__nvvm_rcp_rp_f",
+ "llvm.nvvm.rcp.rp.ftz.f" => "__nvvm_rcp_rp_ftz_f",
+ "llvm.nvvm.rcp.rz.d" => "__nvvm_rcp_rz_d",
+ "llvm.nvvm.rcp.rz.f" => "__nvvm_rcp_rz_f",
+ "llvm.nvvm.rcp.rz.ftz.f" => "__nvvm_rcp_rz_ftz_f",
+ "llvm.nvvm.read.ptx.sreg.clock" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.clock64" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.ctaid.x" => "__nvvm_read_ptx_sreg_ctaid_x",
+ "llvm.nvvm.read.ptx.sreg.ctaid.y" => "__nvvm_read_ptx_sreg_ctaid_y",
+ "llvm.nvvm.read.ptx.sreg.ctaid.z" => "__nvvm_read_ptx_sreg_ctaid_z",
+ "llvm.nvvm.read.ptx.sreg.envreg0" => "__nvvm_read_ptx_sreg_envreg0",
+ "llvm.nvvm.read.ptx.sreg.envreg1" => "__nvvm_read_ptx_sreg_envreg1",
+ "llvm.nvvm.read.ptx.sreg.envreg10" => "__nvvm_read_ptx_sreg_envreg10",
+ "llvm.nvvm.read.ptx.sreg.envreg11" => "__nvvm_read_ptx_sreg_envreg11",
+ "llvm.nvvm.read.ptx.sreg.envreg12" => "__nvvm_read_ptx_sreg_envreg12",
+ "llvm.nvvm.read.ptx.sreg.envreg13" => "__nvvm_read_ptx_sreg_envreg13",
+ "llvm.nvvm.read.ptx.sreg.envreg14" => "__nvvm_read_ptx_sreg_envreg14",
+ "llvm.nvvm.read.ptx.sreg.envreg15" => "__nvvm_read_ptx_sreg_envreg15",
+ "llvm.nvvm.read.ptx.sreg.envreg16" => "__nvvm_read_ptx_sreg_envreg16",
+ "llvm.nvvm.read.ptx.sreg.envreg17" => "__nvvm_read_ptx_sreg_envreg17",
+ "llvm.nvvm.read.ptx.sreg.envreg18" => "__nvvm_read_ptx_sreg_envreg18",
+ "llvm.nvvm.read.ptx.sreg.envreg19" => "__nvvm_read_ptx_sreg_envreg19",
+ "llvm.nvvm.read.ptx.sreg.envreg2" => "__nvvm_read_ptx_sreg_envreg2",
+ "llvm.nvvm.read.ptx.sreg.envreg20" => "__nvvm_read_ptx_sreg_envreg20",
+ "llvm.nvvm.read.ptx.sreg.envreg21" => "__nvvm_read_ptx_sreg_envreg21",
+ "llvm.nvvm.read.ptx.sreg.envreg22" => "__nvvm_read_ptx_sreg_envreg22",
+ "llvm.nvvm.read.ptx.sreg.envreg23" => "__nvvm_read_ptx_sreg_envreg23",
+ "llvm.nvvm.read.ptx.sreg.envreg24" => "__nvvm_read_ptx_sreg_envreg24",
+ "llvm.nvvm.read.ptx.sreg.envreg25" => "__nvvm_read_ptx_sreg_envreg25",
+ "llvm.nvvm.read.ptx.sreg.envreg26" => "__nvvm_read_ptx_sreg_envreg26",
+ "llvm.nvvm.read.ptx.sreg.envreg27" => "__nvvm_read_ptx_sreg_envreg27",
+ "llvm.nvvm.read.ptx.sreg.envreg28" => "__nvvm_read_ptx_sreg_envreg28",
+ "llvm.nvvm.read.ptx.sreg.envreg29" => "__nvvm_read_ptx_sreg_envreg29",
+ "llvm.nvvm.read.ptx.sreg.envreg3" => "__nvvm_read_ptx_sreg_envreg3",
+ "llvm.nvvm.read.ptx.sreg.envreg30" => "__nvvm_read_ptx_sreg_envreg30",
+ "llvm.nvvm.read.ptx.sreg.envreg31" => "__nvvm_read_ptx_sreg_envreg31",
+ "llvm.nvvm.read.ptx.sreg.envreg4" => "__nvvm_read_ptx_sreg_envreg4",
+ "llvm.nvvm.read.ptx.sreg.envreg5" => "__nvvm_read_ptx_sreg_envreg5",
+ "llvm.nvvm.read.ptx.sreg.envreg6" => "__nvvm_read_ptx_sreg_envreg6",
+ "llvm.nvvm.read.ptx.sreg.envreg7" => "__nvvm_read_ptx_sreg_envreg7",
+ "llvm.nvvm.read.ptx.sreg.envreg8" => "__nvvm_read_ptx_sreg_envreg8",
+ "llvm.nvvm.read.ptx.sreg.envreg9" => "__nvvm_read_ptx_sreg_envreg9",
+ "llvm.nvvm.read.ptx.sreg.gridid" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.laneid" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.lanemask.eq" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.lanemask.ge" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.lanemask.gt" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.lanemask.le" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.lanemask.lt" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.nctaid.x" => "__nvvm_read_ptx_sreg_nctaid_x",
+ "llvm.nvvm.read.ptx.sreg.nctaid.y" => "__nvvm_read_ptx_sreg_nctaid_y",
+ "llvm.nvvm.read.ptx.sreg.nctaid.z" => "__nvvm_read_ptx_sreg_nctaid_z",
+ "llvm.nvvm.read.ptx.sreg.nsmid" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.ntid.x" => "__nvvm_read_ptx_sreg_ntid_x",
+ "llvm.nvvm.read.ptx.sreg.ntid.y" => "__nvvm_read_ptx_sreg_ntid_y",
+ "llvm.nvvm.read.ptx.sreg.ntid.z" => "__nvvm_read_ptx_sreg_ntid_z",
+ "llvm.nvvm.read.ptx.sreg.nwarpid" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.pm0" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.pm1" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.pm2" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.pm3" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.smid" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.tid.x" => "__nvvm_read_ptx_sreg_tid_x",
+ "llvm.nvvm.read.ptx.sreg.tid.y" => "__nvvm_read_ptx_sreg_tid_y",
+ "llvm.nvvm.read.ptx.sreg.tid.z" => "__nvvm_read_ptx_sreg_tid_z",
+ "llvm.nvvm.read.ptx.sreg.warpid" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.warpsize" => "__nvvm_read_ptx_sreg_warpsize",
+ // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.warpsize" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.rotate.b32" => "__nvvm_rotate_b32",
+ "llvm.nvvm.rotate.b64" => "__nvvm_rotate_b64",
+ "llvm.nvvm.rotate.right.b64" => "__nvvm_rotate_right_b64",
+ "llvm.nvvm.round.d" => "__nvvm_round_d",
+ "llvm.nvvm.round.f" => "__nvvm_round_f",
+ "llvm.nvvm.round.ftz.f" => "__nvvm_round_ftz_f",
+ "llvm.nvvm.rsqrt.approx.d" => "__nvvm_rsqrt_approx_d",
+ "llvm.nvvm.rsqrt.approx.f" => "__nvvm_rsqrt_approx_f",
+ "llvm.nvvm.rsqrt.approx.ftz.f" => "__nvvm_rsqrt_approx_ftz_f",
+ "llvm.nvvm.sad.i" => "__nvvm_sad_i",
+ "llvm.nvvm.sad.ui" => "__nvvm_sad_ui",
+ "llvm.nvvm.saturate.d" => "__nvvm_saturate_d",
+ "llvm.nvvm.saturate.f" => "__nvvm_saturate_f",
+ "llvm.nvvm.saturate.ftz.f" => "__nvvm_saturate_ftz_f",
+ "llvm.nvvm.shfl.bfly.f32" => "__nvvm_shfl_bfly_f32",
+ "llvm.nvvm.shfl.bfly.i32" => "__nvvm_shfl_bfly_i32",
+ "llvm.nvvm.shfl.down.f32" => "__nvvm_shfl_down_f32",
+ "llvm.nvvm.shfl.down.i32" => "__nvvm_shfl_down_i32",
+ "llvm.nvvm.shfl.idx.f32" => "__nvvm_shfl_idx_f32",
+ "llvm.nvvm.shfl.idx.i32" => "__nvvm_shfl_idx_i32",
+ "llvm.nvvm.shfl.up.f32" => "__nvvm_shfl_up_f32",
+ "llvm.nvvm.shfl.up.i32" => "__nvvm_shfl_up_i32",
+ "llvm.nvvm.sin.approx.f" => "__nvvm_sin_approx_f",
+ "llvm.nvvm.sin.approx.ftz.f" => "__nvvm_sin_approx_ftz_f",
+ "llvm.nvvm.sqrt.approx.f" => "__nvvm_sqrt_approx_f",
+ "llvm.nvvm.sqrt.approx.ftz.f" => "__nvvm_sqrt_approx_ftz_f",
+ "llvm.nvvm.sqrt.f" => "__nvvm_sqrt_f",
+ "llvm.nvvm.sqrt.rm.d" => "__nvvm_sqrt_rm_d",
+ "llvm.nvvm.sqrt.rm.f" => "__nvvm_sqrt_rm_f",
+ "llvm.nvvm.sqrt.rm.ftz.f" => "__nvvm_sqrt_rm_ftz_f",
+ "llvm.nvvm.sqrt.rn.d" => "__nvvm_sqrt_rn_d",
+ "llvm.nvvm.sqrt.rn.f" => "__nvvm_sqrt_rn_f",
+ "llvm.nvvm.sqrt.rn.ftz.f" => "__nvvm_sqrt_rn_ftz_f",
+ "llvm.nvvm.sqrt.rp.d" => "__nvvm_sqrt_rp_d",
+ "llvm.nvvm.sqrt.rp.f" => "__nvvm_sqrt_rp_f",
+ "llvm.nvvm.sqrt.rp.ftz.f" => "__nvvm_sqrt_rp_ftz_f",
+ "llvm.nvvm.sqrt.rz.d" => "__nvvm_sqrt_rz_d",
+ "llvm.nvvm.sqrt.rz.f" => "__nvvm_sqrt_rz_f",
+ "llvm.nvvm.sqrt.rz.ftz.f" => "__nvvm_sqrt_rz_ftz_f",
+ "llvm.nvvm.suq.array.size" => "__nvvm_suq_array_size",
+ "llvm.nvvm.suq.channel.data.type" => "__nvvm_suq_channel_data_type",
+ "llvm.nvvm.suq.channel.order" => "__nvvm_suq_channel_order",
+ "llvm.nvvm.suq.depth" => "__nvvm_suq_depth",
+ "llvm.nvvm.suq.height" => "__nvvm_suq_height",
+ "llvm.nvvm.suq.width" => "__nvvm_suq_width",
+ "llvm.nvvm.sust.b.1d.array.i16.clamp" => "__nvvm_sust_b_1d_array_i16_clamp",
+ "llvm.nvvm.sust.b.1d.array.i16.trap" => "__nvvm_sust_b_1d_array_i16_trap",
+ "llvm.nvvm.sust.b.1d.array.i16.zero" => "__nvvm_sust_b_1d_array_i16_zero",
+ "llvm.nvvm.sust.b.1d.array.i32.clamp" => "__nvvm_sust_b_1d_array_i32_clamp",
+ "llvm.nvvm.sust.b.1d.array.i32.trap" => "__nvvm_sust_b_1d_array_i32_trap",
+ "llvm.nvvm.sust.b.1d.array.i32.zero" => "__nvvm_sust_b_1d_array_i32_zero",
+ "llvm.nvvm.sust.b.1d.array.i64.clamp" => "__nvvm_sust_b_1d_array_i64_clamp",
+ "llvm.nvvm.sust.b.1d.array.i64.trap" => "__nvvm_sust_b_1d_array_i64_trap",
+ "llvm.nvvm.sust.b.1d.array.i64.zero" => "__nvvm_sust_b_1d_array_i64_zero",
+ "llvm.nvvm.sust.b.1d.array.i8.clamp" => "__nvvm_sust_b_1d_array_i8_clamp",
+ "llvm.nvvm.sust.b.1d.array.i8.trap" => "__nvvm_sust_b_1d_array_i8_trap",
+ "llvm.nvvm.sust.b.1d.array.i8.zero" => "__nvvm_sust_b_1d_array_i8_zero",
+ "llvm.nvvm.sust.b.1d.array.v2i16.clamp" => "__nvvm_sust_b_1d_array_v2i16_clamp",
+ "llvm.nvvm.sust.b.1d.array.v2i16.trap" => "__nvvm_sust_b_1d_array_v2i16_trap",
+ "llvm.nvvm.sust.b.1d.array.v2i16.zero" => "__nvvm_sust_b_1d_array_v2i16_zero",
+ "llvm.nvvm.sust.b.1d.array.v2i32.clamp" => "__nvvm_sust_b_1d_array_v2i32_clamp",
+ "llvm.nvvm.sust.b.1d.array.v2i32.trap" => "__nvvm_sust_b_1d_array_v2i32_trap",
+ "llvm.nvvm.sust.b.1d.array.v2i32.zero" => "__nvvm_sust_b_1d_array_v2i32_zero",
+ "llvm.nvvm.sust.b.1d.array.v2i64.clamp" => "__nvvm_sust_b_1d_array_v2i64_clamp",
+ "llvm.nvvm.sust.b.1d.array.v2i64.trap" => "__nvvm_sust_b_1d_array_v2i64_trap",
+ "llvm.nvvm.sust.b.1d.array.v2i64.zero" => "__nvvm_sust_b_1d_array_v2i64_zero",
+ "llvm.nvvm.sust.b.1d.array.v2i8.clamp" => "__nvvm_sust_b_1d_array_v2i8_clamp",
+ "llvm.nvvm.sust.b.1d.array.v2i8.trap" => "__nvvm_sust_b_1d_array_v2i8_trap",
+ "llvm.nvvm.sust.b.1d.array.v2i8.zero" => "__nvvm_sust_b_1d_array_v2i8_zero",
+ "llvm.nvvm.sust.b.1d.array.v4i16.clamp" => "__nvvm_sust_b_1d_array_v4i16_clamp",
+ "llvm.nvvm.sust.b.1d.array.v4i16.trap" => "__nvvm_sust_b_1d_array_v4i16_trap",
+ "llvm.nvvm.sust.b.1d.array.v4i16.zero" => "__nvvm_sust_b_1d_array_v4i16_zero",
+ "llvm.nvvm.sust.b.1d.array.v4i32.clamp" => "__nvvm_sust_b_1d_array_v4i32_clamp",
+ "llvm.nvvm.sust.b.1d.array.v4i32.trap" => "__nvvm_sust_b_1d_array_v4i32_trap",
+ "llvm.nvvm.sust.b.1d.array.v4i32.zero" => "__nvvm_sust_b_1d_array_v4i32_zero",
+ "llvm.nvvm.sust.b.1d.array.v4i8.clamp" => "__nvvm_sust_b_1d_array_v4i8_clamp",
+ "llvm.nvvm.sust.b.1d.array.v4i8.trap" => "__nvvm_sust_b_1d_array_v4i8_trap",
+ "llvm.nvvm.sust.b.1d.array.v4i8.zero" => "__nvvm_sust_b_1d_array_v4i8_zero",
+ "llvm.nvvm.sust.b.1d.i16.clamp" => "__nvvm_sust_b_1d_i16_clamp",
+ "llvm.nvvm.sust.b.1d.i16.trap" => "__nvvm_sust_b_1d_i16_trap",
+ "llvm.nvvm.sust.b.1d.i16.zero" => "__nvvm_sust_b_1d_i16_zero",
+ "llvm.nvvm.sust.b.1d.i32.clamp" => "__nvvm_sust_b_1d_i32_clamp",
+ "llvm.nvvm.sust.b.1d.i32.trap" => "__nvvm_sust_b_1d_i32_trap",
+ "llvm.nvvm.sust.b.1d.i32.zero" => "__nvvm_sust_b_1d_i32_zero",
+ "llvm.nvvm.sust.b.1d.i64.clamp" => "__nvvm_sust_b_1d_i64_clamp",
+ "llvm.nvvm.sust.b.1d.i64.trap" => "__nvvm_sust_b_1d_i64_trap",
+ "llvm.nvvm.sust.b.1d.i64.zero" => "__nvvm_sust_b_1d_i64_zero",
+ "llvm.nvvm.sust.b.1d.i8.clamp" => "__nvvm_sust_b_1d_i8_clamp",
+ "llvm.nvvm.sust.b.1d.i8.trap" => "__nvvm_sust_b_1d_i8_trap",
+ "llvm.nvvm.sust.b.1d.i8.zero" => "__nvvm_sust_b_1d_i8_zero",
+ "llvm.nvvm.sust.b.1d.v2i16.clamp" => "__nvvm_sust_b_1d_v2i16_clamp",
+ "llvm.nvvm.sust.b.1d.v2i16.trap" => "__nvvm_sust_b_1d_v2i16_trap",
+ "llvm.nvvm.sust.b.1d.v2i16.zero" => "__nvvm_sust_b_1d_v2i16_zero",
+ "llvm.nvvm.sust.b.1d.v2i32.clamp" => "__nvvm_sust_b_1d_v2i32_clamp",
+ "llvm.nvvm.sust.b.1d.v2i32.trap" => "__nvvm_sust_b_1d_v2i32_trap",
+ "llvm.nvvm.sust.b.1d.v2i32.zero" => "__nvvm_sust_b_1d_v2i32_zero",
+ "llvm.nvvm.sust.b.1d.v2i64.clamp" => "__nvvm_sust_b_1d_v2i64_clamp",
+ "llvm.nvvm.sust.b.1d.v2i64.trap" => "__nvvm_sust_b_1d_v2i64_trap",
+ "llvm.nvvm.sust.b.1d.v2i64.zero" => "__nvvm_sust_b_1d_v2i64_zero",
+ "llvm.nvvm.sust.b.1d.v2i8.clamp" => "__nvvm_sust_b_1d_v2i8_clamp",
+ "llvm.nvvm.sust.b.1d.v2i8.trap" => "__nvvm_sust_b_1d_v2i8_trap",
+ "llvm.nvvm.sust.b.1d.v2i8.zero" => "__nvvm_sust_b_1d_v2i8_zero",
+ "llvm.nvvm.sust.b.1d.v4i16.clamp" => "__nvvm_sust_b_1d_v4i16_clamp",
+ "llvm.nvvm.sust.b.1d.v4i16.trap" => "__nvvm_sust_b_1d_v4i16_trap",
+ "llvm.nvvm.sust.b.1d.v4i16.zero" => "__nvvm_sust_b_1d_v4i16_zero",
+ "llvm.nvvm.sust.b.1d.v4i32.clamp" => "__nvvm_sust_b_1d_v4i32_clamp",
+ "llvm.nvvm.sust.b.1d.v4i32.trap" => "__nvvm_sust_b_1d_v4i32_trap",
+ "llvm.nvvm.sust.b.1d.v4i32.zero" => "__nvvm_sust_b_1d_v4i32_zero",
+ "llvm.nvvm.sust.b.1d.v4i8.clamp" => "__nvvm_sust_b_1d_v4i8_clamp",
+ "llvm.nvvm.sust.b.1d.v4i8.trap" => "__nvvm_sust_b_1d_v4i8_trap",
+ "llvm.nvvm.sust.b.1d.v4i8.zero" => "__nvvm_sust_b_1d_v4i8_zero",
+ "llvm.nvvm.sust.b.2d.array.i16.clamp" => "__nvvm_sust_b_2d_array_i16_clamp",
+ "llvm.nvvm.sust.b.2d.array.i16.trap" => "__nvvm_sust_b_2d_array_i16_trap",
+ "llvm.nvvm.sust.b.2d.array.i16.zero" => "__nvvm_sust_b_2d_array_i16_zero",
+ "llvm.nvvm.sust.b.2d.array.i32.clamp" => "__nvvm_sust_b_2d_array_i32_clamp",
+ "llvm.nvvm.sust.b.2d.array.i32.trap" => "__nvvm_sust_b_2d_array_i32_trap",
+ "llvm.nvvm.sust.b.2d.array.i32.zero" => "__nvvm_sust_b_2d_array_i32_zero",
+ "llvm.nvvm.sust.b.2d.array.i64.clamp" => "__nvvm_sust_b_2d_array_i64_clamp",
+ "llvm.nvvm.sust.b.2d.array.i64.trap" => "__nvvm_sust_b_2d_array_i64_trap",
+ "llvm.nvvm.sust.b.2d.array.i64.zero" => "__nvvm_sust_b_2d_array_i64_zero",
+ "llvm.nvvm.sust.b.2d.array.i8.clamp" => "__nvvm_sust_b_2d_array_i8_clamp",
+ "llvm.nvvm.sust.b.2d.array.i8.trap" => "__nvvm_sust_b_2d_array_i8_trap",
+ "llvm.nvvm.sust.b.2d.array.i8.zero" => "__nvvm_sust_b_2d_array_i8_zero",
+ "llvm.nvvm.sust.b.2d.array.v2i16.clamp" => "__nvvm_sust_b_2d_array_v2i16_clamp",
+ "llvm.nvvm.sust.b.2d.array.v2i16.trap" => "__nvvm_sust_b_2d_array_v2i16_trap",
+ "llvm.nvvm.sust.b.2d.array.v2i16.zero" => "__nvvm_sust_b_2d_array_v2i16_zero",
+ "llvm.nvvm.sust.b.2d.array.v2i32.clamp" => "__nvvm_sust_b_2d_array_v2i32_clamp",
+ "llvm.nvvm.sust.b.2d.array.v2i32.trap" => "__nvvm_sust_b_2d_array_v2i32_trap",
+ "llvm.nvvm.sust.b.2d.array.v2i32.zero" => "__nvvm_sust_b_2d_array_v2i32_zero",
+ "llvm.nvvm.sust.b.2d.array.v2i64.clamp" => "__nvvm_sust_b_2d_array_v2i64_clamp",
+ "llvm.nvvm.sust.b.2d.array.v2i64.trap" => "__nvvm_sust_b_2d_array_v2i64_trap",
+ "llvm.nvvm.sust.b.2d.array.v2i64.zero" => "__nvvm_sust_b_2d_array_v2i64_zero",
+ "llvm.nvvm.sust.b.2d.array.v2i8.clamp" => "__nvvm_sust_b_2d_array_v2i8_clamp",
+ "llvm.nvvm.sust.b.2d.array.v2i8.trap" => "__nvvm_sust_b_2d_array_v2i8_trap",
+ "llvm.nvvm.sust.b.2d.array.v2i8.zero" => "__nvvm_sust_b_2d_array_v2i8_zero",
+ "llvm.nvvm.sust.b.2d.array.v4i16.clamp" => "__nvvm_sust_b_2d_array_v4i16_clamp",
+ "llvm.nvvm.sust.b.2d.array.v4i16.trap" => "__nvvm_sust_b_2d_array_v4i16_trap",
+ "llvm.nvvm.sust.b.2d.array.v4i16.zero" => "__nvvm_sust_b_2d_array_v4i16_zero",
+ "llvm.nvvm.sust.b.2d.array.v4i32.clamp" => "__nvvm_sust_b_2d_array_v4i32_clamp",
+ "llvm.nvvm.sust.b.2d.array.v4i32.trap" => "__nvvm_sust_b_2d_array_v4i32_trap",
+ "llvm.nvvm.sust.b.2d.array.v4i32.zero" => "__nvvm_sust_b_2d_array_v4i32_zero",
+ "llvm.nvvm.sust.b.2d.array.v4i8.clamp" => "__nvvm_sust_b_2d_array_v4i8_clamp",
+ "llvm.nvvm.sust.b.2d.array.v4i8.trap" => "__nvvm_sust_b_2d_array_v4i8_trap",
+ "llvm.nvvm.sust.b.2d.array.v4i8.zero" => "__nvvm_sust_b_2d_array_v4i8_zero",
+ "llvm.nvvm.sust.b.2d.i16.clamp" => "__nvvm_sust_b_2d_i16_clamp",
+ "llvm.nvvm.sust.b.2d.i16.trap" => "__nvvm_sust_b_2d_i16_trap",
+ "llvm.nvvm.sust.b.2d.i16.zero" => "__nvvm_sust_b_2d_i16_zero",
+ "llvm.nvvm.sust.b.2d.i32.clamp" => "__nvvm_sust_b_2d_i32_clamp",
+ "llvm.nvvm.sust.b.2d.i32.trap" => "__nvvm_sust_b_2d_i32_trap",
+ "llvm.nvvm.sust.b.2d.i32.zero" => "__nvvm_sust_b_2d_i32_zero",
+ "llvm.nvvm.sust.b.2d.i64.clamp" => "__nvvm_sust_b_2d_i64_clamp",
+ "llvm.nvvm.sust.b.2d.i64.trap" => "__nvvm_sust_b_2d_i64_trap",
+ "llvm.nvvm.sust.b.2d.i64.zero" => "__nvvm_sust_b_2d_i64_zero",
+ "llvm.nvvm.sust.b.2d.i8.clamp" => "__nvvm_sust_b_2d_i8_clamp",
+ "llvm.nvvm.sust.b.2d.i8.trap" => "__nvvm_sust_b_2d_i8_trap",
+ "llvm.nvvm.sust.b.2d.i8.zero" => "__nvvm_sust_b_2d_i8_zero",
+ "llvm.nvvm.sust.b.2d.v2i16.clamp" => "__nvvm_sust_b_2d_v2i16_clamp",
+ "llvm.nvvm.sust.b.2d.v2i16.trap" => "__nvvm_sust_b_2d_v2i16_trap",
+ "llvm.nvvm.sust.b.2d.v2i16.zero" => "__nvvm_sust_b_2d_v2i16_zero",
+ "llvm.nvvm.sust.b.2d.v2i32.clamp" => "__nvvm_sust_b_2d_v2i32_clamp",
+ "llvm.nvvm.sust.b.2d.v2i32.trap" => "__nvvm_sust_b_2d_v2i32_trap",
+ "llvm.nvvm.sust.b.2d.v2i32.zero" => "__nvvm_sust_b_2d_v2i32_zero",
+ "llvm.nvvm.sust.b.2d.v2i64.clamp" => "__nvvm_sust_b_2d_v2i64_clamp",
+ "llvm.nvvm.sust.b.2d.v2i64.trap" => "__nvvm_sust_b_2d_v2i64_trap",
+ "llvm.nvvm.sust.b.2d.v2i64.zero" => "__nvvm_sust_b_2d_v2i64_zero",
+ "llvm.nvvm.sust.b.2d.v2i8.clamp" => "__nvvm_sust_b_2d_v2i8_clamp",
+ "llvm.nvvm.sust.b.2d.v2i8.trap" => "__nvvm_sust_b_2d_v2i8_trap",
+ "llvm.nvvm.sust.b.2d.v2i8.zero" => "__nvvm_sust_b_2d_v2i8_zero",
+ "llvm.nvvm.sust.b.2d.v4i16.clamp" => "__nvvm_sust_b_2d_v4i16_clamp",
+ "llvm.nvvm.sust.b.2d.v4i16.trap" => "__nvvm_sust_b_2d_v4i16_trap",
+ "llvm.nvvm.sust.b.2d.v4i16.zero" => "__nvvm_sust_b_2d_v4i16_zero",
+ "llvm.nvvm.sust.b.2d.v4i32.clamp" => "__nvvm_sust_b_2d_v4i32_clamp",
+ "llvm.nvvm.sust.b.2d.v4i32.trap" => "__nvvm_sust_b_2d_v4i32_trap",
+ "llvm.nvvm.sust.b.2d.v4i32.zero" => "__nvvm_sust_b_2d_v4i32_zero",
+ "llvm.nvvm.sust.b.2d.v4i8.clamp" => "__nvvm_sust_b_2d_v4i8_clamp",
+ "llvm.nvvm.sust.b.2d.v4i8.trap" => "__nvvm_sust_b_2d_v4i8_trap",
+ "llvm.nvvm.sust.b.2d.v4i8.zero" => "__nvvm_sust_b_2d_v4i8_zero",
+ "llvm.nvvm.sust.b.3d.i16.clamp" => "__nvvm_sust_b_3d_i16_clamp",
+ "llvm.nvvm.sust.b.3d.i16.trap" => "__nvvm_sust_b_3d_i16_trap",
+ "llvm.nvvm.sust.b.3d.i16.zero" => "__nvvm_sust_b_3d_i16_zero",
+ "llvm.nvvm.sust.b.3d.i32.clamp" => "__nvvm_sust_b_3d_i32_clamp",
+ "llvm.nvvm.sust.b.3d.i32.trap" => "__nvvm_sust_b_3d_i32_trap",
+ "llvm.nvvm.sust.b.3d.i32.zero" => "__nvvm_sust_b_3d_i32_zero",
+ "llvm.nvvm.sust.b.3d.i64.clamp" => "__nvvm_sust_b_3d_i64_clamp",
+ "llvm.nvvm.sust.b.3d.i64.trap" => "__nvvm_sust_b_3d_i64_trap",
+ "llvm.nvvm.sust.b.3d.i64.zero" => "__nvvm_sust_b_3d_i64_zero",
+ "llvm.nvvm.sust.b.3d.i8.clamp" => "__nvvm_sust_b_3d_i8_clamp",
+ "llvm.nvvm.sust.b.3d.i8.trap" => "__nvvm_sust_b_3d_i8_trap",
+ "llvm.nvvm.sust.b.3d.i8.zero" => "__nvvm_sust_b_3d_i8_zero",
+ "llvm.nvvm.sust.b.3d.v2i16.clamp" => "__nvvm_sust_b_3d_v2i16_clamp",
+ "llvm.nvvm.sust.b.3d.v2i16.trap" => "__nvvm_sust_b_3d_v2i16_trap",
+ "llvm.nvvm.sust.b.3d.v2i16.zero" => "__nvvm_sust_b_3d_v2i16_zero",
+ "llvm.nvvm.sust.b.3d.v2i32.clamp" => "__nvvm_sust_b_3d_v2i32_clamp",
+ "llvm.nvvm.sust.b.3d.v2i32.trap" => "__nvvm_sust_b_3d_v2i32_trap",
+ "llvm.nvvm.sust.b.3d.v2i32.zero" => "__nvvm_sust_b_3d_v2i32_zero",
+ "llvm.nvvm.sust.b.3d.v2i64.clamp" => "__nvvm_sust_b_3d_v2i64_clamp",
+ "llvm.nvvm.sust.b.3d.v2i64.trap" => "__nvvm_sust_b_3d_v2i64_trap",
+ "llvm.nvvm.sust.b.3d.v2i64.zero" => "__nvvm_sust_b_3d_v2i64_zero",
+ "llvm.nvvm.sust.b.3d.v2i8.clamp" => "__nvvm_sust_b_3d_v2i8_clamp",
+ "llvm.nvvm.sust.b.3d.v2i8.trap" => "__nvvm_sust_b_3d_v2i8_trap",
+ "llvm.nvvm.sust.b.3d.v2i8.zero" => "__nvvm_sust_b_3d_v2i8_zero",
+ "llvm.nvvm.sust.b.3d.v4i16.clamp" => "__nvvm_sust_b_3d_v4i16_clamp",
+ "llvm.nvvm.sust.b.3d.v4i16.trap" => "__nvvm_sust_b_3d_v4i16_trap",
+ "llvm.nvvm.sust.b.3d.v4i16.zero" => "__nvvm_sust_b_3d_v4i16_zero",
+ "llvm.nvvm.sust.b.3d.v4i32.clamp" => "__nvvm_sust_b_3d_v4i32_clamp",
+ "llvm.nvvm.sust.b.3d.v4i32.trap" => "__nvvm_sust_b_3d_v4i32_trap",
+ "llvm.nvvm.sust.b.3d.v4i32.zero" => "__nvvm_sust_b_3d_v4i32_zero",
+ "llvm.nvvm.sust.b.3d.v4i8.clamp" => "__nvvm_sust_b_3d_v4i8_clamp",
+ "llvm.nvvm.sust.b.3d.v4i8.trap" => "__nvvm_sust_b_3d_v4i8_trap",
+ "llvm.nvvm.sust.b.3d.v4i8.zero" => "__nvvm_sust_b_3d_v4i8_zero",
+ "llvm.nvvm.sust.p.1d.array.i16.trap" => "__nvvm_sust_p_1d_array_i16_trap",
+ "llvm.nvvm.sust.p.1d.array.i32.trap" => "__nvvm_sust_p_1d_array_i32_trap",
+ "llvm.nvvm.sust.p.1d.array.i8.trap" => "__nvvm_sust_p_1d_array_i8_trap",
+ "llvm.nvvm.sust.p.1d.array.v2i16.trap" => "__nvvm_sust_p_1d_array_v2i16_trap",
+ "llvm.nvvm.sust.p.1d.array.v2i32.trap" => "__nvvm_sust_p_1d_array_v2i32_trap",
+ "llvm.nvvm.sust.p.1d.array.v2i8.trap" => "__nvvm_sust_p_1d_array_v2i8_trap",
+ "llvm.nvvm.sust.p.1d.array.v4i16.trap" => "__nvvm_sust_p_1d_array_v4i16_trap",
+ "llvm.nvvm.sust.p.1d.array.v4i32.trap" => "__nvvm_sust_p_1d_array_v4i32_trap",
+ "llvm.nvvm.sust.p.1d.array.v4i8.trap" => "__nvvm_sust_p_1d_array_v4i8_trap",
+ "llvm.nvvm.sust.p.1d.i16.trap" => "__nvvm_sust_p_1d_i16_trap",
+ "llvm.nvvm.sust.p.1d.i32.trap" => "__nvvm_sust_p_1d_i32_trap",
+ "llvm.nvvm.sust.p.1d.i8.trap" => "__nvvm_sust_p_1d_i8_trap",
+ "llvm.nvvm.sust.p.1d.v2i16.trap" => "__nvvm_sust_p_1d_v2i16_trap",
+ "llvm.nvvm.sust.p.1d.v2i32.trap" => "__nvvm_sust_p_1d_v2i32_trap",
+ "llvm.nvvm.sust.p.1d.v2i8.trap" => "__nvvm_sust_p_1d_v2i8_trap",
+ "llvm.nvvm.sust.p.1d.v4i16.trap" => "__nvvm_sust_p_1d_v4i16_trap",
+ "llvm.nvvm.sust.p.1d.v4i32.trap" => "__nvvm_sust_p_1d_v4i32_trap",
+ "llvm.nvvm.sust.p.1d.v4i8.trap" => "__nvvm_sust_p_1d_v4i8_trap",
+ "llvm.nvvm.sust.p.2d.array.i16.trap" => "__nvvm_sust_p_2d_array_i16_trap",
+ "llvm.nvvm.sust.p.2d.array.i32.trap" => "__nvvm_sust_p_2d_array_i32_trap",
+ "llvm.nvvm.sust.p.2d.array.i8.trap" => "__nvvm_sust_p_2d_array_i8_trap",
+ "llvm.nvvm.sust.p.2d.array.v2i16.trap" => "__nvvm_sust_p_2d_array_v2i16_trap",
+ "llvm.nvvm.sust.p.2d.array.v2i32.trap" => "__nvvm_sust_p_2d_array_v2i32_trap",
+ "llvm.nvvm.sust.p.2d.array.v2i8.trap" => "__nvvm_sust_p_2d_array_v2i8_trap",
+ "llvm.nvvm.sust.p.2d.array.v4i16.trap" => "__nvvm_sust_p_2d_array_v4i16_trap",
+ "llvm.nvvm.sust.p.2d.array.v4i32.trap" => "__nvvm_sust_p_2d_array_v4i32_trap",
+ "llvm.nvvm.sust.p.2d.array.v4i8.trap" => "__nvvm_sust_p_2d_array_v4i8_trap",
+ "llvm.nvvm.sust.p.2d.i16.trap" => "__nvvm_sust_p_2d_i16_trap",
+ "llvm.nvvm.sust.p.2d.i32.trap" => "__nvvm_sust_p_2d_i32_trap",
+ "llvm.nvvm.sust.p.2d.i8.trap" => "__nvvm_sust_p_2d_i8_trap",
+ "llvm.nvvm.sust.p.2d.v2i16.trap" => "__nvvm_sust_p_2d_v2i16_trap",
+ "llvm.nvvm.sust.p.2d.v2i32.trap" => "__nvvm_sust_p_2d_v2i32_trap",
+ "llvm.nvvm.sust.p.2d.v2i8.trap" => "__nvvm_sust_p_2d_v2i8_trap",
+ "llvm.nvvm.sust.p.2d.v4i16.trap" => "__nvvm_sust_p_2d_v4i16_trap",
+ "llvm.nvvm.sust.p.2d.v4i32.trap" => "__nvvm_sust_p_2d_v4i32_trap",
+ "llvm.nvvm.sust.p.2d.v4i8.trap" => "__nvvm_sust_p_2d_v4i8_trap",
+ "llvm.nvvm.sust.p.3d.i16.trap" => "__nvvm_sust_p_3d_i16_trap",
+ "llvm.nvvm.sust.p.3d.i32.trap" => "__nvvm_sust_p_3d_i32_trap",
+ "llvm.nvvm.sust.p.3d.i8.trap" => "__nvvm_sust_p_3d_i8_trap",
+ "llvm.nvvm.sust.p.3d.v2i16.trap" => "__nvvm_sust_p_3d_v2i16_trap",
+ "llvm.nvvm.sust.p.3d.v2i32.trap" => "__nvvm_sust_p_3d_v2i32_trap",
+ "llvm.nvvm.sust.p.3d.v2i8.trap" => "__nvvm_sust_p_3d_v2i8_trap",
+ "llvm.nvvm.sust.p.3d.v4i16.trap" => "__nvvm_sust_p_3d_v4i16_trap",
+ "llvm.nvvm.sust.p.3d.v4i32.trap" => "__nvvm_sust_p_3d_v4i32_trap",
+ "llvm.nvvm.sust.p.3d.v4i8.trap" => "__nvvm_sust_p_3d_v4i8_trap",
+ "llvm.nvvm.swap.lo.hi.b64" => "__nvvm_swap_lo_hi_b64",
+ "llvm.nvvm.trunc.d" => "__nvvm_trunc_d",
+ "llvm.nvvm.trunc.f" => "__nvvm_trunc_f",
+ "llvm.nvvm.trunc.ftz.f" => "__nvvm_trunc_ftz_f",
+ "llvm.nvvm.txq.array.size" => "__nvvm_txq_array_size",
+ "llvm.nvvm.txq.channel.data.type" => "__nvvm_txq_channel_data_type",
+ "llvm.nvvm.txq.channel.order" => "__nvvm_txq_channel_order",
+ "llvm.nvvm.txq.depth" => "__nvvm_txq_depth",
+ "llvm.nvvm.txq.height" => "__nvvm_txq_height",
+ "llvm.nvvm.txq.num.mipmap.levels" => "__nvvm_txq_num_mipmap_levels",
+ "llvm.nvvm.txq.num.samples" => "__nvvm_txq_num_samples",
+ "llvm.nvvm.txq.width" => "__nvvm_txq_width",
+ "llvm.nvvm.ui2d.rm" => "__nvvm_ui2d_rm",
+ "llvm.nvvm.ui2d.rn" => "__nvvm_ui2d_rn",
+ "llvm.nvvm.ui2d.rp" => "__nvvm_ui2d_rp",
+ "llvm.nvvm.ui2d.rz" => "__nvvm_ui2d_rz",
+ "llvm.nvvm.ui2f.rm" => "__nvvm_ui2f_rm",
+ "llvm.nvvm.ui2f.rn" => "__nvvm_ui2f_rn",
+ "llvm.nvvm.ui2f.rp" => "__nvvm_ui2f_rp",
+ "llvm.nvvm.ui2f.rz" => "__nvvm_ui2f_rz",
+ "llvm.nvvm.ull2d.rm" => "__nvvm_ull2d_rm",
+ "llvm.nvvm.ull2d.rn" => "__nvvm_ull2d_rn",
+ "llvm.nvvm.ull2d.rp" => "__nvvm_ull2d_rp",
+ "llvm.nvvm.ull2d.rz" => "__nvvm_ull2d_rz",
+ "llvm.nvvm.ull2f.rm" => "__nvvm_ull2f_rm",
+ "llvm.nvvm.ull2f.rn" => "__nvvm_ull2f_rn",
+ "llvm.nvvm.ull2f.rp" => "__nvvm_ull2f_rp",
+ "llvm.nvvm.ull2f.rz" => "__nvvm_ull2f_rz",
+ // ppc
+ "llvm.ppc.addex" => "__builtin_ppc_addex",
+ "llvm.ppc.addf128.round.to.odd" => "__builtin_addf128_round_to_odd",
+ "llvm.ppc.altivec.crypto.vcipher" => "__builtin_altivec_crypto_vcipher",
+ "llvm.ppc.altivec.crypto.vcipherlast" => "__builtin_altivec_crypto_vcipherlast",
+ "llvm.ppc.altivec.crypto.vncipher" => "__builtin_altivec_crypto_vncipher",
+ "llvm.ppc.altivec.crypto.vncipherlast" => "__builtin_altivec_crypto_vncipherlast",
+ "llvm.ppc.altivec.crypto.vpermxor" => "__builtin_altivec_crypto_vpermxor",
+ "llvm.ppc.altivec.crypto.vpermxor.be" => "__builtin_altivec_crypto_vpermxor_be",
+ "llvm.ppc.altivec.crypto.vpmsumb" => "__builtin_altivec_crypto_vpmsumb",
+ "llvm.ppc.altivec.crypto.vpmsumd" => "__builtin_altivec_crypto_vpmsumd",
+ "llvm.ppc.altivec.crypto.vpmsumh" => "__builtin_altivec_crypto_vpmsumh",
+ "llvm.ppc.altivec.crypto.vpmsumw" => "__builtin_altivec_crypto_vpmsumw",
+ "llvm.ppc.altivec.crypto.vsbox" => "__builtin_altivec_crypto_vsbox",
+ "llvm.ppc.altivec.crypto.vshasigmad" => "__builtin_altivec_crypto_vshasigmad",
+ "llvm.ppc.altivec.crypto.vshasigmaw" => "__builtin_altivec_crypto_vshasigmaw",
+ "llvm.ppc.altivec.dss" => "__builtin_altivec_dss",
+ "llvm.ppc.altivec.dssall" => "__builtin_altivec_dssall",
+ "llvm.ppc.altivec.dst" => "__builtin_altivec_dst",
+ "llvm.ppc.altivec.dstst" => "__builtin_altivec_dstst",
+ "llvm.ppc.altivec.dststt" => "__builtin_altivec_dststt",
+ "llvm.ppc.altivec.dstt" => "__builtin_altivec_dstt",
+ "llvm.ppc.altivec.mfvscr" => "__builtin_altivec_mfvscr",
+ "llvm.ppc.altivec.mtvscr" => "__builtin_altivec_mtvscr",
+ "llvm.ppc.altivec.mtvsrbm" => "__builtin_altivec_mtvsrbm",
+ "llvm.ppc.altivec.mtvsrdm" => "__builtin_altivec_mtvsrdm",
+ "llvm.ppc.altivec.mtvsrhm" => "__builtin_altivec_mtvsrhm",
+ "llvm.ppc.altivec.mtvsrqm" => "__builtin_altivec_mtvsrqm",
+ "llvm.ppc.altivec.mtvsrwm" => "__builtin_altivec_mtvsrwm",
+ "llvm.ppc.altivec.vaddcuw" => "__builtin_altivec_vaddcuw",
+ "llvm.ppc.altivec.vaddecuq" => "__builtin_altivec_vaddecuq",
+ "llvm.ppc.altivec.vaddeuqm" => "__builtin_altivec_vaddeuqm",
+ "llvm.ppc.altivec.vaddsbs" => "__builtin_altivec_vaddsbs",
+ "llvm.ppc.altivec.vaddshs" => "__builtin_altivec_vaddshs",
+ "llvm.ppc.altivec.vaddsws" => "__builtin_altivec_vaddsws",
+ "llvm.ppc.altivec.vaddubs" => "__builtin_altivec_vaddubs",
+ "llvm.ppc.altivec.vadduhs" => "__builtin_altivec_vadduhs",
+ "llvm.ppc.altivec.vadduws" => "__builtin_altivec_vadduws",
+ "llvm.ppc.altivec.vavgsb" => "__builtin_altivec_vavgsb",
+ "llvm.ppc.altivec.vavgsh" => "__builtin_altivec_vavgsh",
+ "llvm.ppc.altivec.vavgsw" => "__builtin_altivec_vavgsw",
+ "llvm.ppc.altivec.vavgub" => "__builtin_altivec_vavgub",
+ "llvm.ppc.altivec.vavguh" => "__builtin_altivec_vavguh",
+ "llvm.ppc.altivec.vavguw" => "__builtin_altivec_vavguw",
+ "llvm.ppc.altivec.vbpermd" => "__builtin_altivec_vbpermd",
+ "llvm.ppc.altivec.vbpermq" => "__builtin_altivec_vbpermq",
+ "llvm.ppc.altivec.vcfsx" => "__builtin_altivec_vcfsx",
+ "llvm.ppc.altivec.vcfuged" => "__builtin_altivec_vcfuged",
+ "llvm.ppc.altivec.vcfux" => "__builtin_altivec_vcfux",
+ "llvm.ppc.altivec.vclrlb" => "__builtin_altivec_vclrlb",
+ "llvm.ppc.altivec.vclrrb" => "__builtin_altivec_vclrrb",
+ "llvm.ppc.altivec.vclzdm" => "__builtin_altivec_vclzdm",
+ "llvm.ppc.altivec.vclzlsbb" => "__builtin_altivec_vclzlsbb",
+ "llvm.ppc.altivec.vcmpbfp" => "__builtin_altivec_vcmpbfp",
+ "llvm.ppc.altivec.vcmpbfp.p" => "__builtin_altivec_vcmpbfp_p",
+ "llvm.ppc.altivec.vcmpeqfp" => "__builtin_altivec_vcmpeqfp",
+ "llvm.ppc.altivec.vcmpeqfp.p" => "__builtin_altivec_vcmpeqfp_p",
+ "llvm.ppc.altivec.vcmpequb" => "__builtin_altivec_vcmpequb",
+ "llvm.ppc.altivec.vcmpequb.p" => "__builtin_altivec_vcmpequb_p",
+ "llvm.ppc.altivec.vcmpequd" => "__builtin_altivec_vcmpequd",
+ "llvm.ppc.altivec.vcmpequd.p" => "__builtin_altivec_vcmpequd_p",
+ "llvm.ppc.altivec.vcmpequh" => "__builtin_altivec_vcmpequh",
+ "llvm.ppc.altivec.vcmpequh.p" => "__builtin_altivec_vcmpequh_p",
+ "llvm.ppc.altivec.vcmpequq" => "__builtin_altivec_vcmpequq",
+ "llvm.ppc.altivec.vcmpequq.p" => "__builtin_altivec_vcmpequq_p",
+ "llvm.ppc.altivec.vcmpequw" => "__builtin_altivec_vcmpequw",
+ "llvm.ppc.altivec.vcmpequw.p" => "__builtin_altivec_vcmpequw_p",
+ "llvm.ppc.altivec.vcmpgefp" => "__builtin_altivec_vcmpgefp",
+ "llvm.ppc.altivec.vcmpgefp.p" => "__builtin_altivec_vcmpgefp_p",
+ "llvm.ppc.altivec.vcmpgtfp" => "__builtin_altivec_vcmpgtfp",
+ "llvm.ppc.altivec.vcmpgtfp.p" => "__builtin_altivec_vcmpgtfp_p",
+ "llvm.ppc.altivec.vcmpgtsb" => "__builtin_altivec_vcmpgtsb",
+ "llvm.ppc.altivec.vcmpgtsb.p" => "__builtin_altivec_vcmpgtsb_p",
+ "llvm.ppc.altivec.vcmpgtsd" => "__builtin_altivec_vcmpgtsd",
+ "llvm.ppc.altivec.vcmpgtsd.p" => "__builtin_altivec_vcmpgtsd_p",
+ "llvm.ppc.altivec.vcmpgtsh" => "__builtin_altivec_vcmpgtsh",
+ "llvm.ppc.altivec.vcmpgtsh.p" => "__builtin_altivec_vcmpgtsh_p",
+ "llvm.ppc.altivec.vcmpgtsq" => "__builtin_altivec_vcmpgtsq",
+ "llvm.ppc.altivec.vcmpgtsq.p" => "__builtin_altivec_vcmpgtsq_p",
+ "llvm.ppc.altivec.vcmpgtsw" => "__builtin_altivec_vcmpgtsw",
+ "llvm.ppc.altivec.vcmpgtsw.p" => "__builtin_altivec_vcmpgtsw_p",
+ "llvm.ppc.altivec.vcmpgtub" => "__builtin_altivec_vcmpgtub",
+ "llvm.ppc.altivec.vcmpgtub.p" => "__builtin_altivec_vcmpgtub_p",
+ "llvm.ppc.altivec.vcmpgtud" => "__builtin_altivec_vcmpgtud",
+ "llvm.ppc.altivec.vcmpgtud.p" => "__builtin_altivec_vcmpgtud_p",
+ "llvm.ppc.altivec.vcmpgtuh" => "__builtin_altivec_vcmpgtuh",
+ "llvm.ppc.altivec.vcmpgtuh.p" => "__builtin_altivec_vcmpgtuh_p",
+ "llvm.ppc.altivec.vcmpgtuq" => "__builtin_altivec_vcmpgtuq",
+ "llvm.ppc.altivec.vcmpgtuq.p" => "__builtin_altivec_vcmpgtuq_p",
+ "llvm.ppc.altivec.vcmpgtuw" => "__builtin_altivec_vcmpgtuw",
+ "llvm.ppc.altivec.vcmpgtuw.p" => "__builtin_altivec_vcmpgtuw_p",
+ "llvm.ppc.altivec.vcmpneb" => "__builtin_altivec_vcmpneb",
+ "llvm.ppc.altivec.vcmpneb.p" => "__builtin_altivec_vcmpneb_p",
+ "llvm.ppc.altivec.vcmpneh" => "__builtin_altivec_vcmpneh",
+ "llvm.ppc.altivec.vcmpneh.p" => "__builtin_altivec_vcmpneh_p",
+ "llvm.ppc.altivec.vcmpnew" => "__builtin_altivec_vcmpnew",
+ "llvm.ppc.altivec.vcmpnew.p" => "__builtin_altivec_vcmpnew_p",
+ "llvm.ppc.altivec.vcmpnezb" => "__builtin_altivec_vcmpnezb",
+ "llvm.ppc.altivec.vcmpnezb.p" => "__builtin_altivec_vcmpnezb_p",
+ "llvm.ppc.altivec.vcmpnezh" => "__builtin_altivec_vcmpnezh",
+ "llvm.ppc.altivec.vcmpnezh.p" => "__builtin_altivec_vcmpnezh_p",
+ "llvm.ppc.altivec.vcmpnezw" => "__builtin_altivec_vcmpnezw",
+ "llvm.ppc.altivec.vcmpnezw.p" => "__builtin_altivec_vcmpnezw_p",
+ "llvm.ppc.altivec.vcntmbb" => "__builtin_altivec_vcntmbb",
+ "llvm.ppc.altivec.vcntmbd" => "__builtin_altivec_vcntmbd",
+ "llvm.ppc.altivec.vcntmbh" => "__builtin_altivec_vcntmbh",
+ "llvm.ppc.altivec.vcntmbw" => "__builtin_altivec_vcntmbw",
+ "llvm.ppc.altivec.vctsxs" => "__builtin_altivec_vctsxs",
+ "llvm.ppc.altivec.vctuxs" => "__builtin_altivec_vctuxs",
+ "llvm.ppc.altivec.vctzdm" => "__builtin_altivec_vctzdm",
+ "llvm.ppc.altivec.vctzlsbb" => "__builtin_altivec_vctzlsbb",
+ "llvm.ppc.altivec.vexpandbm" => "__builtin_altivec_vexpandbm",
+ "llvm.ppc.altivec.vexpanddm" => "__builtin_altivec_vexpanddm",
+ "llvm.ppc.altivec.vexpandhm" => "__builtin_altivec_vexpandhm",
+ "llvm.ppc.altivec.vexpandqm" => "__builtin_altivec_vexpandqm",
+ "llvm.ppc.altivec.vexpandwm" => "__builtin_altivec_vexpandwm",
+ "llvm.ppc.altivec.vexptefp" => "__builtin_altivec_vexptefp",
+ "llvm.ppc.altivec.vextddvlx" => "__builtin_altivec_vextddvlx",
+ "llvm.ppc.altivec.vextddvrx" => "__builtin_altivec_vextddvrx",
+ "llvm.ppc.altivec.vextdubvlx" => "__builtin_altivec_vextdubvlx",
+ "llvm.ppc.altivec.vextdubvrx" => "__builtin_altivec_vextdubvrx",
+ "llvm.ppc.altivec.vextduhvlx" => "__builtin_altivec_vextduhvlx",
+ "llvm.ppc.altivec.vextduhvrx" => "__builtin_altivec_vextduhvrx",
+ "llvm.ppc.altivec.vextduwvlx" => "__builtin_altivec_vextduwvlx",
+ "llvm.ppc.altivec.vextduwvrx" => "__builtin_altivec_vextduwvrx",
+ "llvm.ppc.altivec.vextractbm" => "__builtin_altivec_vextractbm",
+ "llvm.ppc.altivec.vextractdm" => "__builtin_altivec_vextractdm",
+ "llvm.ppc.altivec.vextracthm" => "__builtin_altivec_vextracthm",
+ "llvm.ppc.altivec.vextractqm" => "__builtin_altivec_vextractqm",
+ "llvm.ppc.altivec.vextractwm" => "__builtin_altivec_vextractwm",
+ "llvm.ppc.altivec.vextsb2d" => "__builtin_altivec_vextsb2d",
+ "llvm.ppc.altivec.vextsb2w" => "__builtin_altivec_vextsb2w",
+ "llvm.ppc.altivec.vextsd2q" => "__builtin_altivec_vextsd2q",
+ "llvm.ppc.altivec.vextsh2d" => "__builtin_altivec_vextsh2d",
+ "llvm.ppc.altivec.vextsh2w" => "__builtin_altivec_vextsh2w",
+ "llvm.ppc.altivec.vextsw2d" => "__builtin_altivec_vextsw2d",
+ "llvm.ppc.altivec.vgbbd" => "__builtin_altivec_vgbbd",
+ "llvm.ppc.altivec.vgnb" => "__builtin_altivec_vgnb",
+ "llvm.ppc.altivec.vinsblx" => "__builtin_altivec_vinsblx",
+ "llvm.ppc.altivec.vinsbrx" => "__builtin_altivec_vinsbrx",
+ "llvm.ppc.altivec.vinsbvlx" => "__builtin_altivec_vinsbvlx",
+ "llvm.ppc.altivec.vinsbvrx" => "__builtin_altivec_vinsbvrx",
+ "llvm.ppc.altivec.vinsdlx" => "__builtin_altivec_vinsdlx",
+ "llvm.ppc.altivec.vinsdrx" => "__builtin_altivec_vinsdrx",
+ "llvm.ppc.altivec.vinshlx" => "__builtin_altivec_vinshlx",
+ "llvm.ppc.altivec.vinshrx" => "__builtin_altivec_vinshrx",
+ "llvm.ppc.altivec.vinshvlx" => "__builtin_altivec_vinshvlx",
+ "llvm.ppc.altivec.vinshvrx" => "__builtin_altivec_vinshvrx",
+ "llvm.ppc.altivec.vinswlx" => "__builtin_altivec_vinswlx",
+ "llvm.ppc.altivec.vinswrx" => "__builtin_altivec_vinswrx",
+ "llvm.ppc.altivec.vinswvlx" => "__builtin_altivec_vinswvlx",
+ "llvm.ppc.altivec.vinswvrx" => "__builtin_altivec_vinswvrx",
+ "llvm.ppc.altivec.vlogefp" => "__builtin_altivec_vlogefp",
+ "llvm.ppc.altivec.vmaddfp" => "__builtin_altivec_vmaddfp",
+ "llvm.ppc.altivec.vmaxfp" => "__builtin_altivec_vmaxfp",
+ "llvm.ppc.altivec.vmaxsb" => "__builtin_altivec_vmaxsb",
+ "llvm.ppc.altivec.vmaxsd" => "__builtin_altivec_vmaxsd",
+ "llvm.ppc.altivec.vmaxsh" => "__builtin_altivec_vmaxsh",
+ "llvm.ppc.altivec.vmaxsw" => "__builtin_altivec_vmaxsw",
+ "llvm.ppc.altivec.vmaxub" => "__builtin_altivec_vmaxub",
+ "llvm.ppc.altivec.vmaxud" => "__builtin_altivec_vmaxud",
+ "llvm.ppc.altivec.vmaxuh" => "__builtin_altivec_vmaxuh",
+ "llvm.ppc.altivec.vmaxuw" => "__builtin_altivec_vmaxuw",
+ "llvm.ppc.altivec.vmhaddshs" => "__builtin_altivec_vmhaddshs",
+ "llvm.ppc.altivec.vmhraddshs" => "__builtin_altivec_vmhraddshs",
+ "llvm.ppc.altivec.vminfp" => "__builtin_altivec_vminfp",
+ "llvm.ppc.altivec.vminsb" => "__builtin_altivec_vminsb",
+ "llvm.ppc.altivec.vminsd" => "__builtin_altivec_vminsd",
+ "llvm.ppc.altivec.vminsh" => "__builtin_altivec_vminsh",
+ "llvm.ppc.altivec.vminsw" => "__builtin_altivec_vminsw",
+ "llvm.ppc.altivec.vminub" => "__builtin_altivec_vminub",
+ "llvm.ppc.altivec.vminud" => "__builtin_altivec_vminud",
+ "llvm.ppc.altivec.vminuh" => "__builtin_altivec_vminuh",
+ "llvm.ppc.altivec.vminuw" => "__builtin_altivec_vminuw",
+ "llvm.ppc.altivec.vmladduhm" => "__builtin_altivec_vmladduhm",
+ "llvm.ppc.altivec.vmsumcud" => "__builtin_altivec_vmsumcud",
+ "llvm.ppc.altivec.vmsummbm" => "__builtin_altivec_vmsummbm",
+ "llvm.ppc.altivec.vmsumshm" => "__builtin_altivec_vmsumshm",
+ "llvm.ppc.altivec.vmsumshs" => "__builtin_altivec_vmsumshs",
+ "llvm.ppc.altivec.vmsumubm" => "__builtin_altivec_vmsumubm",
+ "llvm.ppc.altivec.vmsumudm" => "__builtin_altivec_vmsumudm",
+ "llvm.ppc.altivec.vmsumuhm" => "__builtin_altivec_vmsumuhm",
+ "llvm.ppc.altivec.vmsumuhs" => "__builtin_altivec_vmsumuhs",
+ "llvm.ppc.altivec.vmulesb" => "__builtin_altivec_vmulesb",
+ "llvm.ppc.altivec.vmulesh" => "__builtin_altivec_vmulesh",
+ "llvm.ppc.altivec.vmulesw" => "__builtin_altivec_vmulesw",
+ "llvm.ppc.altivec.vmuleub" => "__builtin_altivec_vmuleub",
+ "llvm.ppc.altivec.vmuleuh" => "__builtin_altivec_vmuleuh",
+ "llvm.ppc.altivec.vmuleuw" => "__builtin_altivec_vmuleuw",
+ "llvm.ppc.altivec.vmulosb" => "__builtin_altivec_vmulosb",
+ "llvm.ppc.altivec.vmulosh" => "__builtin_altivec_vmulosh",
+ "llvm.ppc.altivec.vmulosw" => "__builtin_altivec_vmulosw",
+ "llvm.ppc.altivec.vmuloub" => "__builtin_altivec_vmuloub",
+ "llvm.ppc.altivec.vmulouh" => "__builtin_altivec_vmulouh",
+ "llvm.ppc.altivec.vmulouw" => "__builtin_altivec_vmulouw",
+ "llvm.ppc.altivec.vnmsubfp" => "__builtin_altivec_vnmsubfp",
+ "llvm.ppc.altivec.vpdepd" => "__builtin_altivec_vpdepd",
+ "llvm.ppc.altivec.vperm" => "__builtin_altivec_vperm_4si",
+ "llvm.ppc.altivec.vpextd" => "__builtin_altivec_vpextd",
+ "llvm.ppc.altivec.vpkpx" => "__builtin_altivec_vpkpx",
+ "llvm.ppc.altivec.vpksdss" => "__builtin_altivec_vpksdss",
+ "llvm.ppc.altivec.vpksdus" => "__builtin_altivec_vpksdus",
+ "llvm.ppc.altivec.vpkshss" => "__builtin_altivec_vpkshss",
+ "llvm.ppc.altivec.vpkshus" => "__builtin_altivec_vpkshus",
+ "llvm.ppc.altivec.vpkswss" => "__builtin_altivec_vpkswss",
+ "llvm.ppc.altivec.vpkswus" => "__builtin_altivec_vpkswus",
+ "llvm.ppc.altivec.vpkudus" => "__builtin_altivec_vpkudus",
+ "llvm.ppc.altivec.vpkuhus" => "__builtin_altivec_vpkuhus",
+ "llvm.ppc.altivec.vpkuwus" => "__builtin_altivec_vpkuwus",
+ "llvm.ppc.altivec.vprtybd" => "__builtin_altivec_vprtybd",
+ "llvm.ppc.altivec.vprtybq" => "__builtin_altivec_vprtybq",
+ "llvm.ppc.altivec.vprtybw" => "__builtin_altivec_vprtybw",
+ "llvm.ppc.altivec.vrefp" => "__builtin_altivec_vrefp",
+ "llvm.ppc.altivec.vrfim" => "__builtin_altivec_vrfim",
+ "llvm.ppc.altivec.vrfin" => "__builtin_altivec_vrfin",
+ "llvm.ppc.altivec.vrfip" => "__builtin_altivec_vrfip",
+ "llvm.ppc.altivec.vrfiz" => "__builtin_altivec_vrfiz",
+ "llvm.ppc.altivec.vrlb" => "__builtin_altivec_vrlb",
+ "llvm.ppc.altivec.vrld" => "__builtin_altivec_vrld",
+ "llvm.ppc.altivec.vrlh" => "__builtin_altivec_vrlh",
+ "llvm.ppc.altivec.vrlw" => "__builtin_altivec_vrlw",
+ "llvm.ppc.altivec.vrsqrtefp" => "__builtin_altivec_vrsqrtefp",
+ "llvm.ppc.altivec.vsel" => "__builtin_altivec_vsel_4si",
+ "llvm.ppc.altivec.vsl" => "__builtin_altivec_vsl",
+ "llvm.ppc.altivec.vslb" => "__builtin_altivec_vslb",
+ "llvm.ppc.altivec.vsldbi" => "__builtin_altivec_vsldbi",
+ "llvm.ppc.altivec.vslh" => "__builtin_altivec_vslh",
+ "llvm.ppc.altivec.vslo" => "__builtin_altivec_vslo",
+ "llvm.ppc.altivec.vslw" => "__builtin_altivec_vslw",
+ "llvm.ppc.altivec.vsr" => "__builtin_altivec_vsr",
+ "llvm.ppc.altivec.vsrab" => "__builtin_altivec_vsrab",
+ "llvm.ppc.altivec.vsrah" => "__builtin_altivec_vsrah",
+ "llvm.ppc.altivec.vsraw" => "__builtin_altivec_vsraw",
+ "llvm.ppc.altivec.vsrb" => "__builtin_altivec_vsrb",
+ "llvm.ppc.altivec.vsrdbi" => "__builtin_altivec_vsrdbi",
+ "llvm.ppc.altivec.vsrh" => "__builtin_altivec_vsrh",
+ "llvm.ppc.altivec.vsro" => "__builtin_altivec_vsro",
+ "llvm.ppc.altivec.vsrw" => "__builtin_altivec_vsrw",
+ "llvm.ppc.altivec.vstribl" => "__builtin_altivec_vstribl",
+ "llvm.ppc.altivec.vstribl.p" => "__builtin_altivec_vstribl_p",
+ "llvm.ppc.altivec.vstribr" => "__builtin_altivec_vstribr",
+ "llvm.ppc.altivec.vstribr.p" => "__builtin_altivec_vstribr_p",
+ "llvm.ppc.altivec.vstrihl" => "__builtin_altivec_vstrihl",
+ "llvm.ppc.altivec.vstrihl.p" => "__builtin_altivec_vstrihl_p",
+ "llvm.ppc.altivec.vstrihr" => "__builtin_altivec_vstrihr",
+ "llvm.ppc.altivec.vstrihr.p" => "__builtin_altivec_vstrihr_p",
+ "llvm.ppc.altivec.vsubcuw" => "__builtin_altivec_vsubcuw",
+ "llvm.ppc.altivec.vsubecuq" => "__builtin_altivec_vsubecuq",
+ "llvm.ppc.altivec.vsubeuqm" => "__builtin_altivec_vsubeuqm",
+ "llvm.ppc.altivec.vsubsbs" => "__builtin_altivec_vsubsbs",
+ "llvm.ppc.altivec.vsubshs" => "__builtin_altivec_vsubshs",
+ "llvm.ppc.altivec.vsubsws" => "__builtin_altivec_vsubsws",
+ "llvm.ppc.altivec.vsububs" => "__builtin_altivec_vsububs",
+ "llvm.ppc.altivec.vsubuhs" => "__builtin_altivec_vsubuhs",
+ "llvm.ppc.altivec.vsubuws" => "__builtin_altivec_vsubuws",
+ "llvm.ppc.altivec.vsum2sws" => "__builtin_altivec_vsum2sws",
+ "llvm.ppc.altivec.vsum4sbs" => "__builtin_altivec_vsum4sbs",
+ "llvm.ppc.altivec.vsum4shs" => "__builtin_altivec_vsum4shs",
+ "llvm.ppc.altivec.vsum4ubs" => "__builtin_altivec_vsum4ubs",
+ "llvm.ppc.altivec.vsumsws" => "__builtin_altivec_vsumsws",
+ "llvm.ppc.altivec.vupkhpx" => "__builtin_altivec_vupkhpx",
+ "llvm.ppc.altivec.vupkhsb" => "__builtin_altivec_vupkhsb",
+ "llvm.ppc.altivec.vupkhsh" => "__builtin_altivec_vupkhsh",
+ "llvm.ppc.altivec.vupkhsw" => "__builtin_altivec_vupkhsw",
+ "llvm.ppc.altivec.vupklpx" => "__builtin_altivec_vupklpx",
+ "llvm.ppc.altivec.vupklsb" => "__builtin_altivec_vupklsb",
+ "llvm.ppc.altivec.vupklsh" => "__builtin_altivec_vupklsh",
+ "llvm.ppc.altivec.vupklsw" => "__builtin_altivec_vupklsw",
+ "llvm.ppc.bcdadd" => "__builtin_ppc_bcdadd",
+ "llvm.ppc.bcdadd.p" => "__builtin_ppc_bcdadd_p",
+ "llvm.ppc.bcdsub" => "__builtin_ppc_bcdsub",
+ "llvm.ppc.bcdsub.p" => "__builtin_ppc_bcdsub_p",
+ "llvm.ppc.bpermd" => "__builtin_bpermd",
+ "llvm.ppc.cfuged" => "__builtin_cfuged",
+ "llvm.ppc.cmpeqb" => "__builtin_ppc_cmpeqb",
+ "llvm.ppc.cmprb" => "__builtin_ppc_cmprb",
+ "llvm.ppc.cntlzdm" => "__builtin_cntlzdm",
+ "llvm.ppc.cnttzdm" => "__builtin_cnttzdm",
+ "llvm.ppc.compare.exp.eq" => "__builtin_ppc_compare_exp_eq",
+ "llvm.ppc.compare.exp.gt" => "__builtin_ppc_compare_exp_gt",
+ "llvm.ppc.compare.exp.lt" => "__builtin_ppc_compare_exp_lt",
+ "llvm.ppc.compare.exp.uo" => "__builtin_ppc_compare_exp_uo",
+ "llvm.ppc.darn" => "__builtin_darn",
+ "llvm.ppc.darn32" => "__builtin_darn_32",
+ "llvm.ppc.darnraw" => "__builtin_darn_raw",
+ "llvm.ppc.dcbf" => "__builtin_dcbf",
+ "llvm.ppc.dcbfl" => "__builtin_ppc_dcbfl",
+ "llvm.ppc.dcbflp" => "__builtin_ppc_dcbflp",
+ "llvm.ppc.dcbst" => "__builtin_ppc_dcbst",
+ "llvm.ppc.dcbt" => "__builtin_ppc_dcbt",
+ "llvm.ppc.dcbtst" => "__builtin_ppc_dcbtst",
+ "llvm.ppc.dcbtstt" => "__builtin_ppc_dcbtstt",
+ "llvm.ppc.dcbtt" => "__builtin_ppc_dcbtt",
+ "llvm.ppc.dcbz" => "__builtin_ppc_dcbz",
+ "llvm.ppc.divde" => "__builtin_divde",
+ "llvm.ppc.divdeu" => "__builtin_divdeu",
+ "llvm.ppc.divf128.round.to.odd" => "__builtin_divf128_round_to_odd",
+ "llvm.ppc.divwe" => "__builtin_divwe",
+ "llvm.ppc.divweu" => "__builtin_divweu",
+ "llvm.ppc.eieio" => "__builtin_ppc_eieio",
+ "llvm.ppc.extract.exp" => "__builtin_ppc_extract_exp",
+ "llvm.ppc.extract.sig" => "__builtin_ppc_extract_sig",
+ "llvm.ppc.fcfid" => "__builtin_ppc_fcfid",
+ "llvm.ppc.fcfud" => "__builtin_ppc_fcfud",
+ "llvm.ppc.fctid" => "__builtin_ppc_fctid",
+ "llvm.ppc.fctidz" => "__builtin_ppc_fctidz",
+ "llvm.ppc.fctiw" => "__builtin_ppc_fctiw",
+ "llvm.ppc.fctiwz" => "__builtin_ppc_fctiwz",
+ "llvm.ppc.fctudz" => "__builtin_ppc_fctudz",
+ "llvm.ppc.fctuwz" => "__builtin_ppc_fctuwz",
+ "llvm.ppc.fmaf128.round.to.odd" => "__builtin_fmaf128_round_to_odd",
+ "llvm.ppc.fmsub" => "__builtin_ppc_fmsub",
+ "llvm.ppc.fmsubs" => "__builtin_ppc_fmsubs",
+ "llvm.ppc.fnmadd" => "__builtin_ppc_fnmadd",
+ "llvm.ppc.fnmadds" => "__builtin_ppc_fnmadds",
+ "llvm.ppc.fre" => "__builtin_ppc_fre",
+ "llvm.ppc.fres" => "__builtin_ppc_fres",
+ "llvm.ppc.frsqrte" => "__builtin_ppc_frsqrte",
+ "llvm.ppc.frsqrtes" => "__builtin_ppc_frsqrtes",
+ "llvm.ppc.fsel" => "__builtin_ppc_fsel",
+ "llvm.ppc.fsels" => "__builtin_ppc_fsels",
+ "llvm.ppc.get.texasr" => "__builtin_get_texasr",
+ "llvm.ppc.get.texasru" => "__builtin_get_texasru",
+ "llvm.ppc.get.tfhar" => "__builtin_get_tfhar",
+ "llvm.ppc.get.tfiar" => "__builtin_get_tfiar",
+ "llvm.ppc.icbt" => "__builtin_ppc_icbt",
+ "llvm.ppc.insert.exp" => "__builtin_ppc_insert_exp",
+ "llvm.ppc.iospace.eieio" => "__builtin_ppc_iospace_eieio",
+ "llvm.ppc.iospace.lwsync" => "__builtin_ppc_iospace_lwsync",
+ "llvm.ppc.iospace.sync" => "__builtin_ppc_iospace_sync",
+ "llvm.ppc.isync" => "__builtin_ppc_isync",
+ "llvm.ppc.load4r" => "__builtin_ppc_load4r",
+ "llvm.ppc.load8r" => "__builtin_ppc_load8r",
+ "llvm.ppc.lwsync" => "__builtin_ppc_lwsync",
+ "llvm.ppc.maddhd" => "__builtin_ppc_maddhd",
+ "llvm.ppc.maddhdu" => "__builtin_ppc_maddhdu",
+ "llvm.ppc.maddld" => "__builtin_ppc_maddld",
+ "llvm.ppc.mfmsr" => "__builtin_ppc_mfmsr",
+ "llvm.ppc.mftbu" => "__builtin_ppc_mftbu",
+ "llvm.ppc.mtfsb0" => "__builtin_ppc_mtfsb0",
+ "llvm.ppc.mtfsb1" => "__builtin_ppc_mtfsb1",
+ "llvm.ppc.mtfsfi" => "__builtin_ppc_mtfsfi",
+ "llvm.ppc.mtmsr" => "__builtin_ppc_mtmsr",
+ "llvm.ppc.mulf128.round.to.odd" => "__builtin_mulf128_round_to_odd",
+ "llvm.ppc.mulhd" => "__builtin_ppc_mulhd",
+ "llvm.ppc.mulhdu" => "__builtin_ppc_mulhdu",
+ "llvm.ppc.mulhw" => "__builtin_ppc_mulhw",
+ "llvm.ppc.mulhwu" => "__builtin_ppc_mulhwu",
+ "llvm.ppc.pack.longdouble" => "__builtin_pack_longdouble",
+ "llvm.ppc.pdepd" => "__builtin_pdepd",
+ "llvm.ppc.pextd" => "__builtin_pextd",
+ "llvm.ppc.qpx.qvfabs" => "__builtin_qpx_qvfabs",
+ "llvm.ppc.qpx.qvfadd" => "__builtin_qpx_qvfadd",
+ "llvm.ppc.qpx.qvfadds" => "__builtin_qpx_qvfadds",
+ "llvm.ppc.qpx.qvfcfid" => "__builtin_qpx_qvfcfid",
+ "llvm.ppc.qpx.qvfcfids" => "__builtin_qpx_qvfcfids",
+ "llvm.ppc.qpx.qvfcfidu" => "__builtin_qpx_qvfcfidu",
+ "llvm.ppc.qpx.qvfcfidus" => "__builtin_qpx_qvfcfidus",
+ "llvm.ppc.qpx.qvfcmpeq" => "__builtin_qpx_qvfcmpeq",
+ "llvm.ppc.qpx.qvfcmpgt" => "__builtin_qpx_qvfcmpgt",
+ "llvm.ppc.qpx.qvfcmplt" => "__builtin_qpx_qvfcmplt",
+ "llvm.ppc.qpx.qvfcpsgn" => "__builtin_qpx_qvfcpsgn",
+ "llvm.ppc.qpx.qvfctid" => "__builtin_qpx_qvfctid",
+ "llvm.ppc.qpx.qvfctidu" => "__builtin_qpx_qvfctidu",
+ "llvm.ppc.qpx.qvfctiduz" => "__builtin_qpx_qvfctiduz",
+ "llvm.ppc.qpx.qvfctidz" => "__builtin_qpx_qvfctidz",
+ "llvm.ppc.qpx.qvfctiw" => "__builtin_qpx_qvfctiw",
+ "llvm.ppc.qpx.qvfctiwu" => "__builtin_qpx_qvfctiwu",
+ "llvm.ppc.qpx.qvfctiwuz" => "__builtin_qpx_qvfctiwuz",
+ "llvm.ppc.qpx.qvfctiwz" => "__builtin_qpx_qvfctiwz",
+ "llvm.ppc.qpx.qvflogical" => "__builtin_qpx_qvflogical",
+ "llvm.ppc.qpx.qvfmadd" => "__builtin_qpx_qvfmadd",
+ "llvm.ppc.qpx.qvfmadds" => "__builtin_qpx_qvfmadds",
+ "llvm.ppc.qpx.qvfmsub" => "__builtin_qpx_qvfmsub",
+ "llvm.ppc.qpx.qvfmsubs" => "__builtin_qpx_qvfmsubs",
+ "llvm.ppc.qpx.qvfmul" => "__builtin_qpx_qvfmul",
+ "llvm.ppc.qpx.qvfmuls" => "__builtin_qpx_qvfmuls",
+ "llvm.ppc.qpx.qvfnabs" => "__builtin_qpx_qvfnabs",
+ "llvm.ppc.qpx.qvfneg" => "__builtin_qpx_qvfneg",
+ "llvm.ppc.qpx.qvfnmadd" => "__builtin_qpx_qvfnmadd",
+ "llvm.ppc.qpx.qvfnmadds" => "__builtin_qpx_qvfnmadds",
+ "llvm.ppc.qpx.qvfnmsub" => "__builtin_qpx_qvfnmsub",
+ "llvm.ppc.qpx.qvfnmsubs" => "__builtin_qpx_qvfnmsubs",
+ "llvm.ppc.qpx.qvfperm" => "__builtin_qpx_qvfperm",
+ "llvm.ppc.qpx.qvfre" => "__builtin_qpx_qvfre",
+ "llvm.ppc.qpx.qvfres" => "__builtin_qpx_qvfres",
+ "llvm.ppc.qpx.qvfrim" => "__builtin_qpx_qvfrim",
+ "llvm.ppc.qpx.qvfrin" => "__builtin_qpx_qvfrin",
+ "llvm.ppc.qpx.qvfrip" => "__builtin_qpx_qvfrip",
+ "llvm.ppc.qpx.qvfriz" => "__builtin_qpx_qvfriz",
+ "llvm.ppc.qpx.qvfrsp" => "__builtin_qpx_qvfrsp",
+ "llvm.ppc.qpx.qvfrsqrte" => "__builtin_qpx_qvfrsqrte",
+ "llvm.ppc.qpx.qvfrsqrtes" => "__builtin_qpx_qvfrsqrtes",
+ "llvm.ppc.qpx.qvfsel" => "__builtin_qpx_qvfsel",
+ "llvm.ppc.qpx.qvfsub" => "__builtin_qpx_qvfsub",
+ "llvm.ppc.qpx.qvfsubs" => "__builtin_qpx_qvfsubs",
+ "llvm.ppc.qpx.qvftstnan" => "__builtin_qpx_qvftstnan",
+ "llvm.ppc.qpx.qvfxmadd" => "__builtin_qpx_qvfxmadd",
+ "llvm.ppc.qpx.qvfxmadds" => "__builtin_qpx_qvfxmadds",
+ "llvm.ppc.qpx.qvfxmul" => "__builtin_qpx_qvfxmul",
+ "llvm.ppc.qpx.qvfxmuls" => "__builtin_qpx_qvfxmuls",
+ "llvm.ppc.qpx.qvfxxcpnmadd" => "__builtin_qpx_qvfxxcpnmadd",
+ "llvm.ppc.qpx.qvfxxcpnmadds" => "__builtin_qpx_qvfxxcpnmadds",
+ "llvm.ppc.qpx.qvfxxmadd" => "__builtin_qpx_qvfxxmadd",
+ "llvm.ppc.qpx.qvfxxmadds" => "__builtin_qpx_qvfxxmadds",
+ "llvm.ppc.qpx.qvfxxnpmadd" => "__builtin_qpx_qvfxxnpmadd",
+ "llvm.ppc.qpx.qvfxxnpmadds" => "__builtin_qpx_qvfxxnpmadds",
+ "llvm.ppc.qpx.qvgpci" => "__builtin_qpx_qvgpci",
+ "llvm.ppc.qpx.qvlfcd" => "__builtin_qpx_qvlfcd",
+ "llvm.ppc.qpx.qvlfcda" => "__builtin_qpx_qvlfcda",
+ "llvm.ppc.qpx.qvlfcs" => "__builtin_qpx_qvlfcs",
+ "llvm.ppc.qpx.qvlfcsa" => "__builtin_qpx_qvlfcsa",
+ "llvm.ppc.qpx.qvlfd" => "__builtin_qpx_qvlfd",
+ "llvm.ppc.qpx.qvlfda" => "__builtin_qpx_qvlfda",
+ "llvm.ppc.qpx.qvlfiwa" => "__builtin_qpx_qvlfiwa",
+ "llvm.ppc.qpx.qvlfiwaa" => "__builtin_qpx_qvlfiwaa",
+ "llvm.ppc.qpx.qvlfiwz" => "__builtin_qpx_qvlfiwz",
+ "llvm.ppc.qpx.qvlfiwza" => "__builtin_qpx_qvlfiwza",
+ "llvm.ppc.qpx.qvlfs" => "__builtin_qpx_qvlfs",
+ "llvm.ppc.qpx.qvlfsa" => "__builtin_qpx_qvlfsa",
+ "llvm.ppc.qpx.qvlpcld" => "__builtin_qpx_qvlpcld",
+ "llvm.ppc.qpx.qvlpcls" => "__builtin_qpx_qvlpcls",
+ "llvm.ppc.qpx.qvlpcrd" => "__builtin_qpx_qvlpcrd",
+ "llvm.ppc.qpx.qvlpcrs" => "__builtin_qpx_qvlpcrs",
+ "llvm.ppc.qpx.qvstfcd" => "__builtin_qpx_qvstfcd",
+ "llvm.ppc.qpx.qvstfcda" => "__builtin_qpx_qvstfcda",
+ "llvm.ppc.qpx.qvstfcs" => "__builtin_qpx_qvstfcs",
+ "llvm.ppc.qpx.qvstfcsa" => "__builtin_qpx_qvstfcsa",
+ "llvm.ppc.qpx.qvstfd" => "__builtin_qpx_qvstfd",
+ "llvm.ppc.qpx.qvstfda" => "__builtin_qpx_qvstfda",
+ "llvm.ppc.qpx.qvstfiw" => "__builtin_qpx_qvstfiw",
+ "llvm.ppc.qpx.qvstfiwa" => "__builtin_qpx_qvstfiwa",
+ "llvm.ppc.qpx.qvstfs" => "__builtin_qpx_qvstfs",
+ "llvm.ppc.qpx.qvstfsa" => "__builtin_qpx_qvstfsa",
+ "llvm.ppc.readflm" => "__builtin_readflm",
+ "llvm.ppc.scalar.extract.expq" => "__builtin_vsx_scalar_extract_expq",
+ "llvm.ppc.scalar.insert.exp.qp" => "__builtin_vsx_scalar_insert_exp_qp",
+ "llvm.ppc.set.texasr" => "__builtin_set_texasr",
+ "llvm.ppc.set.texasru" => "__builtin_set_texasru",
+ "llvm.ppc.set.tfhar" => "__builtin_set_tfhar",
+ "llvm.ppc.set.tfiar" => "__builtin_set_tfiar",
+ "llvm.ppc.setb" => "__builtin_ppc_setb",
+ "llvm.ppc.setflm" => "__builtin_setflm",
+ "llvm.ppc.setrnd" => "__builtin_setrnd",
+ "llvm.ppc.sqrtf128.round.to.odd" => "__builtin_sqrtf128_round_to_odd",
+ "llvm.ppc.stbcx" => "__builtin_ppc_stbcx",
+ "llvm.ppc.stdcx" => "__builtin_ppc_stdcx",
+ "llvm.ppc.stfiw" => "__builtin_ppc_stfiw",
+ "llvm.ppc.store2r" => "__builtin_ppc_store2r",
+ "llvm.ppc.store4r" => "__builtin_ppc_store4r",
+ "llvm.ppc.store8r" => "__builtin_ppc_store8r",
+ "llvm.ppc.stwcx" => "__builtin_ppc_stwcx",
+ "llvm.ppc.subf128.round.to.odd" => "__builtin_subf128_round_to_odd",
+ "llvm.ppc.sync" => "__builtin_ppc_sync",
+ "llvm.ppc.tabort" => "__builtin_tabort",
+ "llvm.ppc.tabortdc" => "__builtin_tabortdc",
+ "llvm.ppc.tabortdci" => "__builtin_tabortdci",
+ "llvm.ppc.tabortwc" => "__builtin_tabortwc",
+ "llvm.ppc.tabortwci" => "__builtin_tabortwci",
+ "llvm.ppc.tbegin" => "__builtin_tbegin",
+ "llvm.ppc.tcheck" => "__builtin_tcheck",
+ "llvm.ppc.tdw" => "__builtin_ppc_tdw",
+ "llvm.ppc.tend" => "__builtin_tend",
+ "llvm.ppc.tendall" => "__builtin_tendall",
+ "llvm.ppc.trap" => "__builtin_ppc_trap",
+ "llvm.ppc.trapd" => "__builtin_ppc_trapd",
+ "llvm.ppc.trechkpt" => "__builtin_trechkpt",
+ "llvm.ppc.treclaim" => "__builtin_treclaim",
+ "llvm.ppc.tresume" => "__builtin_tresume",
+ "llvm.ppc.truncf128.round.to.odd" => "__builtin_truncf128_round_to_odd",
+ "llvm.ppc.tsr" => "__builtin_tsr",
+ "llvm.ppc.tsuspend" => "__builtin_tsuspend",
+ "llvm.ppc.ttest" => "__builtin_ttest",
+ "llvm.ppc.tw" => "__builtin_ppc_tw",
+ "llvm.ppc.unpack.longdouble" => "__builtin_unpack_longdouble",
+ "llvm.ppc.vsx.xsmaxdp" => "__builtin_vsx_xsmaxdp",
+ "llvm.ppc.vsx.xsmindp" => "__builtin_vsx_xsmindp",
+ "llvm.ppc.vsx.xvcmpeqdp" => "__builtin_vsx_xvcmpeqdp",
+ "llvm.ppc.vsx.xvcmpeqdp.p" => "__builtin_vsx_xvcmpeqdp_p",
+ "llvm.ppc.vsx.xvcmpeqsp" => "__builtin_vsx_xvcmpeqsp",
+ "llvm.ppc.vsx.xvcmpeqsp.p" => "__builtin_vsx_xvcmpeqsp_p",
+ "llvm.ppc.vsx.xvcmpgedp" => "__builtin_vsx_xvcmpgedp",
+ "llvm.ppc.vsx.xvcmpgedp.p" => "__builtin_vsx_xvcmpgedp_p",
+ "llvm.ppc.vsx.xvcmpgesp" => "__builtin_vsx_xvcmpgesp",
+ "llvm.ppc.vsx.xvcmpgesp.p" => "__builtin_vsx_xvcmpgesp_p",
+ "llvm.ppc.vsx.xvcmpgtdp" => "__builtin_vsx_xvcmpgtdp",
+ "llvm.ppc.vsx.xvcmpgtdp.p" => "__builtin_vsx_xvcmpgtdp_p",
+ "llvm.ppc.vsx.xvcmpgtsp" => "__builtin_vsx_xvcmpgtsp",
+ "llvm.ppc.vsx.xvcmpgtsp.p" => "__builtin_vsx_xvcmpgtsp_p",
+ "llvm.ppc.vsx.xvdivdp" => "__builtin_vsx_xvdivdp",
+ "llvm.ppc.vsx.xvdivsp" => "__builtin_vsx_xvdivsp",
+ "llvm.ppc.vsx.xvmaxdp" => "__builtin_vsx_xvmaxdp",
+ "llvm.ppc.vsx.xvmaxsp" => "__builtin_vsx_xvmaxsp",
+ "llvm.ppc.vsx.xvmindp" => "__builtin_vsx_xvmindp",
+ "llvm.ppc.vsx.xvminsp" => "__builtin_vsx_xvminsp",
+ "llvm.ppc.vsx.xvredp" => "__builtin_vsx_xvredp",
+ "llvm.ppc.vsx.xvresp" => "__builtin_vsx_xvresp",
+ "llvm.ppc.vsx.xvrsqrtedp" => "__builtin_vsx_xvrsqrtedp",
+ "llvm.ppc.vsx.xvrsqrtesp" => "__builtin_vsx_xvrsqrtesp",
+ "llvm.ppc.vsx.xxblendvb" => "__builtin_vsx_xxblendvb",
+ "llvm.ppc.vsx.xxblendvd" => "__builtin_vsx_xxblendvd",
+ "llvm.ppc.vsx.xxblendvh" => "__builtin_vsx_xxblendvh",
+ "llvm.ppc.vsx.xxblendvw" => "__builtin_vsx_xxblendvw",
+ "llvm.ppc.vsx.xxleqv" => "__builtin_vsx_xxleqv",
+ "llvm.ppc.vsx.xxpermx" => "__builtin_vsx_xxpermx",
+ // ptx
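+ // The PTX arms below mostly read NVPTX special registers (lane/warp/SM ids,
+ // lane masks, clocks and performance counters); the builtin name is derived
+ // mechanically by swapping dots for underscores and prefixing "__builtin_".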
+ "llvm.ptx.bar.sync" => "__builtin_ptx_bar_sync",
+ "llvm.ptx.read.clock" => "__builtin_ptx_read_clock",
+ "llvm.ptx.read.clock64" => "__builtin_ptx_read_clock64",
+ "llvm.ptx.read.gridid" => "__builtin_ptx_read_gridid",
+ "llvm.ptx.read.laneid" => "__builtin_ptx_read_laneid",
+ "llvm.ptx.read.lanemask.eq" => "__builtin_ptx_read_lanemask_eq",
+ "llvm.ptx.read.lanemask.ge" => "__builtin_ptx_read_lanemask_ge",
+ "llvm.ptx.read.lanemask.gt" => "__builtin_ptx_read_lanemask_gt",
+ "llvm.ptx.read.lanemask.le" => "__builtin_ptx_read_lanemask_le",
+ "llvm.ptx.read.lanemask.lt" => "__builtin_ptx_read_lanemask_lt",
+ "llvm.ptx.read.nsmid" => "__builtin_ptx_read_nsmid",
+ "llvm.ptx.read.nwarpid" => "__builtin_ptx_read_nwarpid",
+ "llvm.ptx.read.pm0" => "__builtin_ptx_read_pm0",
+ "llvm.ptx.read.pm1" => "__builtin_ptx_read_pm1",
+ "llvm.ptx.read.pm2" => "__builtin_ptx_read_pm2",
+ "llvm.ptx.read.pm3" => "__builtin_ptx_read_pm3",
+ "llvm.ptx.read.smid" => "__builtin_ptx_read_smid",
+ "llvm.ptx.read.warpid" => "__builtin_ptx_read_warpid",
+ // s390
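+ // SystemZ is likewise a mechanical "__builtin_s390_" rename, except for the
+ // transactional-execution intrinsics (etnd, ppa.txassist, tend), which map
+ // to __builtin_tx_nesting_depth, __builtin_tx_assist and __builtin_tend.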
+ "llvm.s390.efpc" => "__builtin_s390_efpc",
+ "llvm.s390.etnd" => "__builtin_tx_nesting_depth",
+ "llvm.s390.lcbb" => "__builtin_s390_lcbb",
+ "llvm.s390.ppa.txassist" => "__builtin_tx_assist",
+ "llvm.s390.sfpc" => "__builtin_s390_sfpc",
+ "llvm.s390.tend" => "__builtin_tend",
+ "llvm.s390.vcfn" => "__builtin_s390_vcfn",
+ "llvm.s390.vclfnhs" => "__builtin_s390_vclfnhs",
+ "llvm.s390.vclfnls" => "__builtin_s390_vclfnls",
+ "llvm.s390.vcnf" => "__builtin_s390_vcnf",
+ "llvm.s390.vcrnfs" => "__builtin_s390_vcrnfs",
+ "llvm.s390.vlbb" => "__builtin_s390_vlbb",
+ "llvm.s390.vll" => "__builtin_s390_vll",
+ "llvm.s390.vlrl" => "__builtin_s390_vlrl",
+ "llvm.s390.vmslg" => "__builtin_s390_vmslg",
+ "llvm.s390.vpdi" => "__builtin_s390_vpdi",
+ "llvm.s390.vperm" => "__builtin_s390_vperm",
+ "llvm.s390.vsld" => "__builtin_s390_vsld",
+ "llvm.s390.vsldb" => "__builtin_s390_vsldb",
+ "llvm.s390.vsrd" => "__builtin_s390_vsrd",
+ "llvm.s390.vstl" => "__builtin_s390_vstl",
+ "llvm.s390.vstrl" => "__builtin_s390_vstrl",
+ // ve
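+ // VE here is LLVM's target for the NEC SX-Aurora vector engine; the rename
+ // is again purely mechanical.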
+ "llvm.ve.vl.extract.vm512l" => "__builtin_ve_vl_extract_vm512l",
+ "llvm.ve.vl.extract.vm512u" => "__builtin_ve_vl_extract_vm512u",
+ "llvm.ve.vl.insert.vm512l" => "__builtin_ve_vl_insert_vm512l",
+ "llvm.ve.vl.insert.vm512u" => "__builtin_ve_vl_insert_vm512u",
+ "llvm.ve.vl.pack.f32a" => "__builtin_ve_vl_pack_f32a",
+ "llvm.ve.vl.pack.f32p" => "__builtin_ve_vl_pack_f32p",
+ // x86
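+ // For x86, llvm.x86.* maps onto __builtin_ia32_*; the vector width moves
+ // from a ".256"/".512" suffix in the LLVM name into the builtin name itself,
+ // e.g. "llvm.x86.avx.max.pd.256" => "__builtin_ia32_maxpd256" below.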
+ "llvm.x86.3dnow.pavgusb" => "__builtin_ia32_pavgusb",
+ "llvm.x86.3dnow.pf2id" => "__builtin_ia32_pf2id",
+ "llvm.x86.3dnow.pfacc" => "__builtin_ia32_pfacc",
+ "llvm.x86.3dnow.pfadd" => "__builtin_ia32_pfadd",
+ "llvm.x86.3dnow.pfcmpeq" => "__builtin_ia32_pfcmpeq",
+ "llvm.x86.3dnow.pfcmpge" => "__builtin_ia32_pfcmpge",
+ "llvm.x86.3dnow.pfcmpgt" => "__builtin_ia32_pfcmpgt",
+ "llvm.x86.3dnow.pfmax" => "__builtin_ia32_pfmax",
+ "llvm.x86.3dnow.pfmin" => "__builtin_ia32_pfmin",
+ "llvm.x86.3dnow.pfmul" => "__builtin_ia32_pfmul",
+ "llvm.x86.3dnow.pfrcp" => "__builtin_ia32_pfrcp",
+ "llvm.x86.3dnow.pfrcpit1" => "__builtin_ia32_pfrcpit1",
+ "llvm.x86.3dnow.pfrcpit2" => "__builtin_ia32_pfrcpit2",
+ "llvm.x86.3dnow.pfrsqit1" => "__builtin_ia32_pfrsqit1",
+ "llvm.x86.3dnow.pfrsqrt" => "__builtin_ia32_pfrsqrt",
+ "llvm.x86.3dnow.pfsub" => "__builtin_ia32_pfsub",
+ "llvm.x86.3dnow.pfsubr" => "__builtin_ia32_pfsubr",
+ "llvm.x86.3dnow.pi2fd" => "__builtin_ia32_pi2fd",
+ "llvm.x86.3dnow.pmulhrw" => "__builtin_ia32_pmulhrw",
+ "llvm.x86.3dnowa.pf2iw" => "__builtin_ia32_pf2iw",
+ "llvm.x86.3dnowa.pfnacc" => "__builtin_ia32_pfnacc",
+ "llvm.x86.3dnowa.pfpnacc" => "__builtin_ia32_pfpnacc",
+ "llvm.x86.3dnowa.pi2fw" => "__builtin_ia32_pi2fw",
+ "llvm.x86.addcarry.u32" => "__builtin_ia32_addcarry_u32",
+ "llvm.x86.addcarry.u64" => "__builtin_ia32_addcarry_u64",
+ "llvm.x86.addcarryx.u32" => "__builtin_ia32_addcarryx_u32",
+ "llvm.x86.addcarryx.u64" => "__builtin_ia32_addcarryx_u64",
+ "llvm.x86.aesni.aesdec" => "__builtin_ia32_aesdec128",
+ "llvm.x86.aesni.aesdec.256" => "__builtin_ia32_aesdec256",
+ "llvm.x86.aesni.aesdec.512" => "__builtin_ia32_aesdec512",
+ "llvm.x86.aesni.aesdeclast" => "__builtin_ia32_aesdeclast128",
+ "llvm.x86.aesni.aesdeclast.256" => "__builtin_ia32_aesdeclast256",
+ "llvm.x86.aesni.aesdeclast.512" => "__builtin_ia32_aesdeclast512",
+ "llvm.x86.aesni.aesenc" => "__builtin_ia32_aesenc128",
+ "llvm.x86.aesni.aesenc.256" => "__builtin_ia32_aesenc256",
+ "llvm.x86.aesni.aesenc.512" => "__builtin_ia32_aesenc512",
+ "llvm.x86.aesni.aesenclast" => "__builtin_ia32_aesenclast128",
+ "llvm.x86.aesni.aesenclast.256" => "__builtin_ia32_aesenclast256",
+ "llvm.x86.aesni.aesenclast.512" => "__builtin_ia32_aesenclast512",
+ "llvm.x86.aesni.aesimc" => "__builtin_ia32_aesimc128",
+ "llvm.x86.aesni.aeskeygenassist" => "__builtin_ia32_aeskeygenassist128",
+ "llvm.x86.avx.addsub.pd.256" => "__builtin_ia32_addsubpd256",
+ "llvm.x86.avx.addsub.ps.256" => "__builtin_ia32_addsubps256",
+ "llvm.x86.avx.blend.pd.256" => "__builtin_ia32_blendpd256",
+ "llvm.x86.avx.blend.ps.256" => "__builtin_ia32_blendps256",
+ "llvm.x86.avx.blendv.pd.256" => "__builtin_ia32_blendvpd256",
+ "llvm.x86.avx.blendv.ps.256" => "__builtin_ia32_blendvps256",
+ "llvm.x86.avx.cmp.pd.256" => "__builtin_ia32_cmppd256",
+ "llvm.x86.avx.cmp.ps.256" => "__builtin_ia32_cmpps256",
+ "llvm.x86.avx.cvt.pd2.ps.256" => "__builtin_ia32_cvtpd2ps256",
+ "llvm.x86.avx.cvt.pd2dq.256" => "__builtin_ia32_cvtpd2dq256",
+ "llvm.x86.avx.cvt.ps2.pd.256" => "__builtin_ia32_cvtps2pd256",
+ "llvm.x86.avx.cvt.ps2dq.256" => "__builtin_ia32_cvtps2dq256",
+ "llvm.x86.avx.cvtdq2.pd.256" => "__builtin_ia32_cvtdq2pd256",
+ "llvm.x86.avx.cvtdq2.ps.256" => "__builtin_ia32_cvtdq2ps256",
+ "llvm.x86.avx.cvtt.pd2dq.256" => "__builtin_ia32_cvttpd2dq256",
+ "llvm.x86.avx.cvtt.ps2dq.256" => "__builtin_ia32_cvttps2dq256",
+ "llvm.x86.avx.dp.ps.256" => "__builtin_ia32_dpps256",
+ "llvm.x86.avx.hadd.pd.256" => "__builtin_ia32_haddpd256",
+ "llvm.x86.avx.hadd.ps.256" => "__builtin_ia32_haddps256",
+ "llvm.x86.avx.hsub.pd.256" => "__builtin_ia32_hsubpd256",
+ "llvm.x86.avx.hsub.ps.256" => "__builtin_ia32_hsubps256",
+ "llvm.x86.avx.ldu.dq.256" => "__builtin_ia32_lddqu256",
+ "llvm.x86.avx.maskload.pd" => "__builtin_ia32_maskloadpd",
+ "llvm.x86.avx.maskload.pd.256" => "__builtin_ia32_maskloadpd256",
+ "llvm.x86.avx.maskload.ps" => "__builtin_ia32_maskloadps",
+ "llvm.x86.avx.maskload.ps.256" => "__builtin_ia32_maskloadps256",
+ "llvm.x86.avx.maskstore.pd" => "__builtin_ia32_maskstorepd",
+ "llvm.x86.avx.maskstore.pd.256" => "__builtin_ia32_maskstorepd256",
+ "llvm.x86.avx.maskstore.ps" => "__builtin_ia32_maskstoreps",
+ "llvm.x86.avx.maskstore.ps.256" => "__builtin_ia32_maskstoreps256",
+ "llvm.x86.avx.max.pd.256" => "__builtin_ia32_maxpd256",
+ "llvm.x86.avx.max.ps.256" => "__builtin_ia32_maxps256",
+ "llvm.x86.avx.min.pd.256" => "__builtin_ia32_minpd256",
+ "llvm.x86.avx.min.ps.256" => "__builtin_ia32_minps256",
+ "llvm.x86.avx.movmsk.pd.256" => "__builtin_ia32_movmskpd256",
+ "llvm.x86.avx.movmsk.ps.256" => "__builtin_ia32_movmskps256",
+ "llvm.x86.avx.ptestc.256" => "__builtin_ia32_ptestc256",
+ "llvm.x86.avx.ptestnzc.256" => "__builtin_ia32_ptestnzc256",
+ "llvm.x86.avx.ptestz.256" => "__builtin_ia32_ptestz256",
+ "llvm.x86.avx.rcp.ps.256" => "__builtin_ia32_rcpps256",
+ "llvm.x86.avx.round.pd.256" => "__builtin_ia32_roundpd256",
+ "llvm.x86.avx.round.ps.256" => "__builtin_ia32_roundps256",
+ "llvm.x86.avx.rsqrt.ps.256" => "__builtin_ia32_rsqrtps256",
+ "llvm.x86.avx.sqrt.pd.256" => "__builtin_ia32_sqrtpd256",
+ "llvm.x86.avx.sqrt.ps.256" => "__builtin_ia32_sqrtps256",
+ "llvm.x86.avx.storeu.dq.256" => "__builtin_ia32_storedqu256",
+ "llvm.x86.avx.storeu.pd.256" => "__builtin_ia32_storeupd256",
+ "llvm.x86.avx.storeu.ps.256" => "__builtin_ia32_storeups256",
+ "llvm.x86.avx.vbroadcastf128.pd.256" => "__builtin_ia32_vbroadcastf128_pd256",
+ "llvm.x86.avx.vbroadcastf128.ps.256" => "__builtin_ia32_vbroadcastf128_ps256",
+ "llvm.x86.avx.vextractf128.pd.256" => "__builtin_ia32_vextractf128_pd256",
+ "llvm.x86.avx.vextractf128.ps.256" => "__builtin_ia32_vextractf128_ps256",
+ "llvm.x86.avx.vextractf128.si.256" => "__builtin_ia32_vextractf128_si256",
+ "llvm.x86.avx.vinsertf128.pd.256" => "__builtin_ia32_vinsertf128_pd256",
+ "llvm.x86.avx.vinsertf128.ps.256" => "__builtin_ia32_vinsertf128_ps256",
+ "llvm.x86.avx.vinsertf128.si.256" => "__builtin_ia32_vinsertf128_si256",
+ "llvm.x86.avx.vperm2f128.pd.256" => "__builtin_ia32_vperm2f128_pd256",
+ "llvm.x86.avx.vperm2f128.ps.256" => "__builtin_ia32_vperm2f128_ps256",
+ "llvm.x86.avx.vperm2f128.si.256" => "__builtin_ia32_vperm2f128_si256",
+ "llvm.x86.avx.vpermilvar.pd" => "__builtin_ia32_vpermilvarpd",
+ "llvm.x86.avx.vpermilvar.pd.256" => "__builtin_ia32_vpermilvarpd256",
+ "llvm.x86.avx.vpermilvar.ps" => "__builtin_ia32_vpermilvarps",
+ "llvm.x86.avx.vpermilvar.ps.256" => "__builtin_ia32_vpermilvarps256",
+ "llvm.x86.avx.vtestc.pd" => "__builtin_ia32_vtestcpd",
+ "llvm.x86.avx.vtestc.pd.256" => "__builtin_ia32_vtestcpd256",
+ "llvm.x86.avx.vtestc.ps" => "__builtin_ia32_vtestcps",
+ "llvm.x86.avx.vtestc.ps.256" => "__builtin_ia32_vtestcps256",
+ "llvm.x86.avx.vtestnzc.pd" => "__builtin_ia32_vtestnzcpd",
+ "llvm.x86.avx.vtestnzc.pd.256" => "__builtin_ia32_vtestnzcpd256",
+ "llvm.x86.avx.vtestnzc.ps" => "__builtin_ia32_vtestnzcps",
+ "llvm.x86.avx.vtestnzc.ps.256" => "__builtin_ia32_vtestnzcps256",
+ "llvm.x86.avx.vtestz.pd" => "__builtin_ia32_vtestzpd",
+ "llvm.x86.avx.vtestz.pd.256" => "__builtin_ia32_vtestzpd256",
+ "llvm.x86.avx.vtestz.ps" => "__builtin_ia32_vtestzps",
+ "llvm.x86.avx.vtestz.ps.256" => "__builtin_ia32_vtestzps256",
+ "llvm.x86.avx.vzeroall" => "__builtin_ia32_vzeroall",
+ "llvm.x86.avx.vzeroupper" => "__builtin_ia32_vzeroupper",
+ "llvm.x86.avx2.gather.d.d" => "__builtin_ia32_gatherd_d",
+ "llvm.x86.avx2.gather.d.d.256" => "__builtin_ia32_gatherd_d256",
+ "llvm.x86.avx2.gather.d.pd" => "__builtin_ia32_gatherd_pd",
+ "llvm.x86.avx2.gather.d.pd.256" => "__builtin_ia32_gatherd_pd256",
+ "llvm.x86.avx2.gather.d.ps" => "__builtin_ia32_gatherd_ps",
+ "llvm.x86.avx2.gather.d.ps.256" => "__builtin_ia32_gatherd_ps256",
+ "llvm.x86.avx2.gather.d.q" => "__builtin_ia32_gatherd_q",
+ "llvm.x86.avx2.gather.d.q.256" => "__builtin_ia32_gatherd_q256",
+ "llvm.x86.avx2.gather.q.d" => "__builtin_ia32_gatherq_d",
+ "llvm.x86.avx2.gather.q.d.256" => "__builtin_ia32_gatherq_d256",
+ "llvm.x86.avx2.gather.q.pd" => "__builtin_ia32_gatherq_pd",
+ "llvm.x86.avx2.gather.q.pd.256" => "__builtin_ia32_gatherq_pd256",
+ "llvm.x86.avx2.gather.q.ps" => "__builtin_ia32_gatherq_ps",
+ "llvm.x86.avx2.gather.q.ps.256" => "__builtin_ia32_gatherq_ps256",
+ "llvm.x86.avx2.gather.q.q" => "__builtin_ia32_gatherq_q",
+ "llvm.x86.avx2.gather.q.q.256" => "__builtin_ia32_gatherq_q256",
+ "llvm.x86.avx2.maskload.d" => "__builtin_ia32_maskloadd",
+ "llvm.x86.avx2.maskload.d.256" => "__builtin_ia32_maskloadd256",
+ "llvm.x86.avx2.maskload.q" => "__builtin_ia32_maskloadq",
+ "llvm.x86.avx2.maskload.q.256" => "__builtin_ia32_maskloadq256",
+ "llvm.x86.avx2.maskstore.d" => "__builtin_ia32_maskstored",
+ "llvm.x86.avx2.maskstore.d.256" => "__builtin_ia32_maskstored256",
+ "llvm.x86.avx2.maskstore.q" => "__builtin_ia32_maskstoreq",
+ "llvm.x86.avx2.maskstore.q.256" => "__builtin_ia32_maskstoreq256",
+ "llvm.x86.avx2.movntdqa" => "__builtin_ia32_movntdqa256",
+ "llvm.x86.avx2.mpsadbw" => "__builtin_ia32_mpsadbw256",
+ "llvm.x86.avx2.pabs.b" => "__builtin_ia32_pabsb256",
+ "llvm.x86.avx2.pabs.d" => "__builtin_ia32_pabsd256",
+ "llvm.x86.avx2.pabs.w" => "__builtin_ia32_pabsw256",
+ "llvm.x86.avx2.packssdw" => "__builtin_ia32_packssdw256",
+ "llvm.x86.avx2.packsswb" => "__builtin_ia32_packsswb256",
+ "llvm.x86.avx2.packusdw" => "__builtin_ia32_packusdw256",
+ "llvm.x86.avx2.packuswb" => "__builtin_ia32_packuswb256",
+ "llvm.x86.avx2.padds.b" => "__builtin_ia32_paddsb256",
+ "llvm.x86.avx2.padds.w" => "__builtin_ia32_paddsw256",
+ "llvm.x86.avx2.paddus.b" => "__builtin_ia32_paddusb256",
+ "llvm.x86.avx2.paddus.w" => "__builtin_ia32_paddusw256",
+ "llvm.x86.avx2.pavg.b" => "__builtin_ia32_pavgb256",
+ "llvm.x86.avx2.pavg.w" => "__builtin_ia32_pavgw256",
+ "llvm.x86.avx2.pblendd.128" => "__builtin_ia32_pblendd128",
+ "llvm.x86.avx2.pblendd.256" => "__builtin_ia32_pblendd256",
+ "llvm.x86.avx2.pblendvb" => "__builtin_ia32_pblendvb256",
+ "llvm.x86.avx2.pblendw" => "__builtin_ia32_pblendw256",
+ "llvm.x86.avx2.pbroadcastb.128" => "__builtin_ia32_pbroadcastb128",
+ "llvm.x86.avx2.pbroadcastb.256" => "__builtin_ia32_pbroadcastb256",
+ "llvm.x86.avx2.pbroadcastd.128" => "__builtin_ia32_pbroadcastd128",
+ "llvm.x86.avx2.pbroadcastd.256" => "__builtin_ia32_pbroadcastd256",
+ "llvm.x86.avx2.pbroadcastq.128" => "__builtin_ia32_pbroadcastq128",
+ "llvm.x86.avx2.pbroadcastq.256" => "__builtin_ia32_pbroadcastq256",
+ "llvm.x86.avx2.pbroadcastw.128" => "__builtin_ia32_pbroadcastw128",
+ "llvm.x86.avx2.pbroadcastw.256" => "__builtin_ia32_pbroadcastw256",
+ "llvm.x86.avx2.permd" => "__builtin_ia32_permvarsi256",
+ "llvm.x86.avx2.permps" => "__builtin_ia32_permvarsf256",
+ "llvm.x86.avx2.phadd.d" => "__builtin_ia32_phaddd256",
+ "llvm.x86.avx2.phadd.sw" => "__builtin_ia32_phaddsw256",
+ "llvm.x86.avx2.phadd.w" => "__builtin_ia32_phaddw256",
+ "llvm.x86.avx2.phsub.d" => "__builtin_ia32_phsubd256",
+ "llvm.x86.avx2.phsub.sw" => "__builtin_ia32_phsubsw256",
+ "llvm.x86.avx2.phsub.w" => "__builtin_ia32_phsubw256",
+ "llvm.x86.avx2.pmadd.ub.sw" => "__builtin_ia32_pmaddubsw256",
+ "llvm.x86.avx2.pmadd.wd" => "__builtin_ia32_pmaddwd256",
+ "llvm.x86.avx2.pmaxs.b" => "__builtin_ia32_pmaxsb256",
+ "llvm.x86.avx2.pmaxs.d" => "__builtin_ia32_pmaxsd256",
+ "llvm.x86.avx2.pmaxs.w" => "__builtin_ia32_pmaxsw256",
+ "llvm.x86.avx2.pmaxu.b" => "__builtin_ia32_pmaxub256",
+ "llvm.x86.avx2.pmaxu.d" => "__builtin_ia32_pmaxud256",
+ "llvm.x86.avx2.pmaxu.w" => "__builtin_ia32_pmaxuw256",
+ "llvm.x86.avx2.pmins.b" => "__builtin_ia32_pminsb256",
+ "llvm.x86.avx2.pmins.d" => "__builtin_ia32_pminsd256",
+ "llvm.x86.avx2.pmins.w" => "__builtin_ia32_pminsw256",
+ "llvm.x86.avx2.pminu.b" => "__builtin_ia32_pminub256",
+ "llvm.x86.avx2.pminu.d" => "__builtin_ia32_pminud256",
+ "llvm.x86.avx2.pminu.w" => "__builtin_ia32_pminuw256",
+ "llvm.x86.avx2.pmovmskb" => "__builtin_ia32_pmovmskb256",
+ "llvm.x86.avx2.pmovsxbd" => "__builtin_ia32_pmovsxbd256",
+ "llvm.x86.avx2.pmovsxbq" => "__builtin_ia32_pmovsxbq256",
+ "llvm.x86.avx2.pmovsxbw" => "__builtin_ia32_pmovsxbw256",
+ "llvm.x86.avx2.pmovsxdq" => "__builtin_ia32_pmovsxdq256",
+ "llvm.x86.avx2.pmovsxwd" => "__builtin_ia32_pmovsxwd256",
+ "llvm.x86.avx2.pmovsxwq" => "__builtin_ia32_pmovsxwq256",
+ "llvm.x86.avx2.pmovzxbd" => "__builtin_ia32_pmovzxbd256",
+ "llvm.x86.avx2.pmovzxbq" => "__builtin_ia32_pmovzxbq256",
+ "llvm.x86.avx2.pmovzxbw" => "__builtin_ia32_pmovzxbw256",
+ "llvm.x86.avx2.pmovzxdq" => "__builtin_ia32_pmovzxdq256",
+ "llvm.x86.avx2.pmovzxwd" => "__builtin_ia32_pmovzxwd256",
+ "llvm.x86.avx2.pmovzxwq" => "__builtin_ia32_pmovzxwq256",
+ "llvm.x86.avx2.pmul.dq" => "__builtin_ia32_pmuldq256",
+ "llvm.x86.avx2.pmul.hr.sw" => "__builtin_ia32_pmulhrsw256",
+ "llvm.x86.avx2.pmulh.w" => "__builtin_ia32_pmulhw256",
+ "llvm.x86.avx2.pmulhu.w" => "__builtin_ia32_pmulhuw256",
+ "llvm.x86.avx2.pmulu.dq" => "__builtin_ia32_pmuludq256",
+ "llvm.x86.avx2.psad.bw" => "__builtin_ia32_psadbw256",
+ "llvm.x86.avx2.pshuf.b" => "__builtin_ia32_pshufb256",
+ "llvm.x86.avx2.psign.b" => "__builtin_ia32_psignb256",
+ "llvm.x86.avx2.psign.d" => "__builtin_ia32_psignd256",
+ "llvm.x86.avx2.psign.w" => "__builtin_ia32_psignw256",
+ "llvm.x86.avx2.psll.d" => "__builtin_ia32_pslld256",
+ "llvm.x86.avx2.psll.dq" => "__builtin_ia32_pslldqi256",
+ "llvm.x86.avx2.psll.dq.bs" => "__builtin_ia32_pslldqi256_byteshift",
+ "llvm.x86.avx2.psll.q" => "__builtin_ia32_psllq256",
+ "llvm.x86.avx2.psll.w" => "__builtin_ia32_psllw256",
+ "llvm.x86.avx2.pslli.d" => "__builtin_ia32_pslldi256",
+ "llvm.x86.avx2.pslli.q" => "__builtin_ia32_psllqi256",
+ "llvm.x86.avx2.pslli.w" => "__builtin_ia32_psllwi256",
+ "llvm.x86.avx2.psllv.d" => "__builtin_ia32_psllv4si",
+ "llvm.x86.avx2.psllv.d.256" => "__builtin_ia32_psllv8si",
+ "llvm.x86.avx2.psllv.q" => "__builtin_ia32_psllv2di",
+ "llvm.x86.avx2.psllv.q.256" => "__builtin_ia32_psllv4di",
+ "llvm.x86.avx2.psra.d" => "__builtin_ia32_psrad256",
+ "llvm.x86.avx2.psra.w" => "__builtin_ia32_psraw256",
+ "llvm.x86.avx2.psrai.d" => "__builtin_ia32_psradi256",
+ "llvm.x86.avx2.psrai.w" => "__builtin_ia32_psrawi256",
+ "llvm.x86.avx2.psrav.d" => "__builtin_ia32_psrav4si",
+ "llvm.x86.avx2.psrav.d.256" => "__builtin_ia32_psrav8si",
+ "llvm.x86.avx2.psrl.d" => "__builtin_ia32_psrld256",
+ "llvm.x86.avx2.psrl.dq" => "__builtin_ia32_psrldqi256",
+ "llvm.x86.avx2.psrl.dq.bs" => "__builtin_ia32_psrldqi256_byteshift",
+ "llvm.x86.avx2.psrl.q" => "__builtin_ia32_psrlq256",
+ "llvm.x86.avx2.psrl.w" => "__builtin_ia32_psrlw256",
+ "llvm.x86.avx2.psrli.d" => "__builtin_ia32_psrldi256",
+ "llvm.x86.avx2.psrli.q" => "__builtin_ia32_psrlqi256",
+ "llvm.x86.avx2.psrli.w" => "__builtin_ia32_psrlwi256",
+ "llvm.x86.avx2.psrlv.d" => "__builtin_ia32_psrlv4si",
+ "llvm.x86.avx2.psrlv.d.256" => "__builtin_ia32_psrlv8si",
+ "llvm.x86.avx2.psrlv.q" => "__builtin_ia32_psrlv2di",
+ "llvm.x86.avx2.psrlv.q.256" => "__builtin_ia32_psrlv4di",
+ "llvm.x86.avx2.psubs.b" => "__builtin_ia32_psubsb256",
+ "llvm.x86.avx2.psubs.w" => "__builtin_ia32_psubsw256",
+ "llvm.x86.avx2.psubus.b" => "__builtin_ia32_psubusb256",
+ "llvm.x86.avx2.psubus.w" => "__builtin_ia32_psubusw256",
+ "llvm.x86.avx2.vbroadcast.sd.pd.256" => "__builtin_ia32_vbroadcastsd_pd256",
+ "llvm.x86.avx2.vbroadcast.ss.ps" => "__builtin_ia32_vbroadcastss_ps",
+ "llvm.x86.avx2.vbroadcast.ss.ps.256" => "__builtin_ia32_vbroadcastss_ps256",
+ "llvm.x86.avx2.vextracti128" => "__builtin_ia32_extract128i256",
+ "llvm.x86.avx2.vinserti128" => "__builtin_ia32_insert128i256",
+ "llvm.x86.avx2.vperm2i128" => "__builtin_ia32_permti256",
+ "llvm.x86.avx512.add.pd.512" => "__builtin_ia32_addpd512",
+ "llvm.x86.avx512.add.ps.512" => "__builtin_ia32_addps512",
+ "llvm.x86.avx512.broadcastmb.128" => "__builtin_ia32_broadcastmb128",
+ "llvm.x86.avx512.broadcastmb.256" => "__builtin_ia32_broadcastmb256",
+ "llvm.x86.avx512.broadcastmb.512" => "__builtin_ia32_broadcastmb512",
+ "llvm.x86.avx512.broadcastmw.128" => "__builtin_ia32_broadcastmw128",
+ "llvm.x86.avx512.broadcastmw.256" => "__builtin_ia32_broadcastmw256",
+ "llvm.x86.avx512.broadcastmw.512" => "__builtin_ia32_broadcastmw512",
+ "llvm.x86.avx512.conflict.d.128" => "__builtin_ia32_vpconflictsi_128",
+ "llvm.x86.avx512.conflict.d.256" => "__builtin_ia32_vpconflictsi_256",
+ "llvm.x86.avx512.conflict.d.512" => "__builtin_ia32_vpconflictsi_512",
+ "llvm.x86.avx512.conflict.q.128" => "__builtin_ia32_vpconflictdi_128",
+ "llvm.x86.avx512.conflict.q.256" => "__builtin_ia32_vpconflictdi_256",
+ "llvm.x86.avx512.conflict.q.512" => "__builtin_ia32_vpconflictdi_512",
+ "llvm.x86.avx512.cvtb2mask.128" => "__builtin_ia32_cvtb2mask128",
+ "llvm.x86.avx512.cvtb2mask.256" => "__builtin_ia32_cvtb2mask256",
+ "llvm.x86.avx512.cvtb2mask.512" => "__builtin_ia32_cvtb2mask512",
+ "llvm.x86.avx512.cvtd2mask.128" => "__builtin_ia32_cvtd2mask128",
+ "llvm.x86.avx512.cvtd2mask.256" => "__builtin_ia32_cvtd2mask256",
+ "llvm.x86.avx512.cvtd2mask.512" => "__builtin_ia32_cvtd2mask512",
+ "llvm.x86.avx512.cvtmask2b.128" => "__builtin_ia32_cvtmask2b128",
+ "llvm.x86.avx512.cvtmask2b.256" => "__builtin_ia32_cvtmask2b256",
+ "llvm.x86.avx512.cvtmask2b.512" => "__builtin_ia32_cvtmask2b512",
+ "llvm.x86.avx512.cvtmask2d.128" => "__builtin_ia32_cvtmask2d128",
+ "llvm.x86.avx512.cvtmask2d.256" => "__builtin_ia32_cvtmask2d256",
+ "llvm.x86.avx512.cvtmask2d.512" => "__builtin_ia32_cvtmask2d512",
+ "llvm.x86.avx512.cvtmask2q.128" => "__builtin_ia32_cvtmask2q128",
+ "llvm.x86.avx512.cvtmask2q.256" => "__builtin_ia32_cvtmask2q256",
+ "llvm.x86.avx512.cvtmask2q.512" => "__builtin_ia32_cvtmask2q512",
+ "llvm.x86.avx512.cvtmask2w.128" => "__builtin_ia32_cvtmask2w128",
+ "llvm.x86.avx512.cvtmask2w.256" => "__builtin_ia32_cvtmask2w256",
+ "llvm.x86.avx512.cvtmask2w.512" => "__builtin_ia32_cvtmask2w512",
+ "llvm.x86.avx512.cvtq2mask.128" => "__builtin_ia32_cvtq2mask128",
+ "llvm.x86.avx512.cvtq2mask.256" => "__builtin_ia32_cvtq2mask256",
+ "llvm.x86.avx512.cvtq2mask.512" => "__builtin_ia32_cvtq2mask512",
+ "llvm.x86.avx512.cvtsd2usi" => "__builtin_ia32_cvtsd2usi",
+ "llvm.x86.avx512.cvtsd2usi64" => "__builtin_ia32_cvtsd2usi64",
+ "llvm.x86.avx512.cvtsi2sd32" => "__builtin_ia32_cvtsi2sd32",
+ "llvm.x86.avx512.cvtsi2sd64" => "__builtin_ia32_cvtsi2sd64",
+ "llvm.x86.avx512.cvtsi2ss32" => "__builtin_ia32_cvtsi2ss32",
+ "llvm.x86.avx512.cvtsi2ss64" => "__builtin_ia32_cvtsi2ss64",
+ "llvm.x86.avx512.cvtss2usi" => "__builtin_ia32_cvtss2usi",
+ "llvm.x86.avx512.cvtss2usi64" => "__builtin_ia32_cvtss2usi64",
+ "llvm.x86.avx512.cvttsd2si" => "__builtin_ia32_vcvttsd2si32",
+ "llvm.x86.avx512.cvttsd2si64" => "__builtin_ia32_vcvttsd2si64",
+ "llvm.x86.avx512.cvttsd2usi" => "__builtin_ia32_vcvttsd2usi32",
+ // [DUPLICATE]: "llvm.x86.avx512.cvttsd2usi" => "__builtin_ia32_cvttsd2usi",
+ "llvm.x86.avx512.cvttsd2usi64" => "__builtin_ia32_vcvttsd2usi64",
+ // [DUPLICATE]: "llvm.x86.avx512.cvttsd2usi64" => "__builtin_ia32_cvttsd2usi64",
+ "llvm.x86.avx512.cvttss2si" => "__builtin_ia32_vcvttss2si32",
+ "llvm.x86.avx512.cvttss2si64" => "__builtin_ia32_vcvttss2si64",
+ "llvm.x86.avx512.cvttss2usi" => "__builtin_ia32_vcvttss2usi32",
+ // [DUPLICATE]: "llvm.x86.avx512.cvttss2usi" => "__builtin_ia32_cvttss2usi",
+ "llvm.x86.avx512.cvttss2usi64" => "__builtin_ia32_vcvttss2usi64",
+ // [DUPLICATE]: "llvm.x86.avx512.cvttss2usi64" => "__builtin_ia32_cvttss2usi64",
+ "llvm.x86.avx512.cvtusi2sd" => "__builtin_ia32_cvtusi2sd",
+ // [DUPLICATE]: "llvm.x86.avx512.cvtusi2sd" => "__builtin_ia32_cvtusi2sd32",
+ "llvm.x86.avx512.cvtusi2ss" => "__builtin_ia32_cvtusi2ss32",
+ // [DUPLICATE]: "llvm.x86.avx512.cvtusi2ss" => "__builtin_ia32_cvtusi2ss",
+ "llvm.x86.avx512.cvtusi642sd" => "__builtin_ia32_cvtusi2sd64",
+ // [DUPLICATE]: "llvm.x86.avx512.cvtusi642sd" => "__builtin_ia32_cvtusi642sd",
+ "llvm.x86.avx512.cvtusi642ss" => "__builtin_ia32_cvtusi2ss64",
+ // [DUPLICATE]: "llvm.x86.avx512.cvtusi642ss" => "__builtin_ia32_cvtusi642ss",
+ "llvm.x86.avx512.cvtw2mask.128" => "__builtin_ia32_cvtw2mask128",
+ "llvm.x86.avx512.cvtw2mask.256" => "__builtin_ia32_cvtw2mask256",
+ "llvm.x86.avx512.cvtw2mask.512" => "__builtin_ia32_cvtw2mask512",
+ "llvm.x86.avx512.dbpsadbw.128" => "__builtin_ia32_dbpsadbw128",
+ "llvm.x86.avx512.dbpsadbw.256" => "__builtin_ia32_dbpsadbw256",
+ "llvm.x86.avx512.dbpsadbw.512" => "__builtin_ia32_dbpsadbw512",
+ "llvm.x86.avx512.div.pd.512" => "__builtin_ia32_divpd512",
+ "llvm.x86.avx512.div.ps.512" => "__builtin_ia32_divps512",
+ "llvm.x86.avx512.exp2.pd" => "__builtin_ia32_exp2pd_mask",
+ "llvm.x86.avx512.exp2.ps" => "__builtin_ia32_exp2ps_mask",
+ "llvm.x86.avx512.gather.dpd.512" => "__builtin_ia32_gathersiv8df",
+ "llvm.x86.avx512.gather.dpi.512" => "__builtin_ia32_gathersiv16si",
+ "llvm.x86.avx512.gather.dpq.512" => "__builtin_ia32_gathersiv8di",
+ "llvm.x86.avx512.gather.dps.512" => "__builtin_ia32_gathersiv16sf",
+ "llvm.x86.avx512.gather.qpd.512" => "__builtin_ia32_gatherdiv8df",
+ "llvm.x86.avx512.gather.qpi.512" => "__builtin_ia32_gatherdiv16si",
+ "llvm.x86.avx512.gather.qpq.512" => "__builtin_ia32_gatherdiv8di",
+ "llvm.x86.avx512.gather.qps.512" => "__builtin_ia32_gatherdiv16sf",
+ "llvm.x86.avx512.gather3div2.df" => "__builtin_ia32_gather3div2df",
+ "llvm.x86.avx512.gather3div2.di" => "__builtin_ia32_gather3div2di",
+ "llvm.x86.avx512.gather3div4.df" => "__builtin_ia32_gather3div4df",
+ "llvm.x86.avx512.gather3div4.di" => "__builtin_ia32_gather3div4di",
+ "llvm.x86.avx512.gather3div4.sf" => "__builtin_ia32_gather3div4sf",
+ "llvm.x86.avx512.gather3div4.si" => "__builtin_ia32_gather3div4si",
+ "llvm.x86.avx512.gather3div8.sf" => "__builtin_ia32_gather3div8sf",
+ "llvm.x86.avx512.gather3div8.si" => "__builtin_ia32_gather3div8si",
+ "llvm.x86.avx512.gather3siv2.df" => "__builtin_ia32_gather3siv2df",
+ "llvm.x86.avx512.gather3siv2.di" => "__builtin_ia32_gather3siv2di",
+ "llvm.x86.avx512.gather3siv4.df" => "__builtin_ia32_gather3siv4df",
+ "llvm.x86.avx512.gather3siv4.di" => "__builtin_ia32_gather3siv4di",
+ "llvm.x86.avx512.gather3siv4.sf" => "__builtin_ia32_gather3siv4sf",
+ "llvm.x86.avx512.gather3siv4.si" => "__builtin_ia32_gather3siv4si",
+ "llvm.x86.avx512.gather3siv8.sf" => "__builtin_ia32_gather3siv8sf",
+ "llvm.x86.avx512.gather3siv8.si" => "__builtin_ia32_gather3siv8si",
+ "llvm.x86.avx512.gatherpf.dpd.512" => "__builtin_ia32_gatherpfdpd",
+ "llvm.x86.avx512.gatherpf.dps.512" => "__builtin_ia32_gatherpfdps",
+ "llvm.x86.avx512.gatherpf.qpd.512" => "__builtin_ia32_gatherpfqpd",
+ "llvm.x86.avx512.gatherpf.qps.512" => "__builtin_ia32_gatherpfqps",
+ "llvm.x86.avx512.kand.w" => "__builtin_ia32_kandhi",
+ "llvm.x86.avx512.kandn.w" => "__builtin_ia32_kandnhi",
+ "llvm.x86.avx512.knot.w" => "__builtin_ia32_knothi",
+ "llvm.x86.avx512.kor.w" => "__builtin_ia32_korhi",
+ "llvm.x86.avx512.kortestc.w" => "__builtin_ia32_kortestchi",
+ "llvm.x86.avx512.kortestz.w" => "__builtin_ia32_kortestzhi",
+ "llvm.x86.avx512.kunpck.bw" => "__builtin_ia32_kunpckhi",
+ "llvm.x86.avx512.kunpck.dq" => "__builtin_ia32_kunpckdi",
+ "llvm.x86.avx512.kunpck.wd" => "__builtin_ia32_kunpcksi",
+ "llvm.x86.avx512.kxnor.w" => "__builtin_ia32_kxnorhi",
+ "llvm.x86.avx512.kxor.w" => "__builtin_ia32_kxorhi",
+ "llvm.x86.avx512.mask.add.pd.128" => "__builtin_ia32_addpd128_mask",
+ "llvm.x86.avx512.mask.add.pd.256" => "__builtin_ia32_addpd256_mask",
+ "llvm.x86.avx512.mask.add.pd.512" => "__builtin_ia32_addpd512_mask",
+ "llvm.x86.avx512.mask.add.ps.128" => "__builtin_ia32_addps128_mask",
+ "llvm.x86.avx512.mask.add.ps.256" => "__builtin_ia32_addps256_mask",
+ "llvm.x86.avx512.mask.add.ps.512" => "__builtin_ia32_addps512_mask",
+ "llvm.x86.avx512.mask.add.sd.round" => "__builtin_ia32_addsd_round_mask",
+ "llvm.x86.avx512.mask.add.ss.round" => "__builtin_ia32_addss_round_mask",
+ "llvm.x86.avx512.mask.and.pd.128" => "__builtin_ia32_andpd128_mask",
+ "llvm.x86.avx512.mask.and.pd.256" => "__builtin_ia32_andpd256_mask",
+ "llvm.x86.avx512.mask.and.pd.512" => "__builtin_ia32_andpd512_mask",
+ "llvm.x86.avx512.mask.and.ps.128" => "__builtin_ia32_andps128_mask",
+ "llvm.x86.avx512.mask.and.ps.256" => "__builtin_ia32_andps256_mask",
+ "llvm.x86.avx512.mask.and.ps.512" => "__builtin_ia32_andps512_mask",
+ "llvm.x86.avx512.mask.andn.pd.128" => "__builtin_ia32_andnpd128_mask",
+ "llvm.x86.avx512.mask.andn.pd.256" => "__builtin_ia32_andnpd256_mask",
+ "llvm.x86.avx512.mask.andn.pd.512" => "__builtin_ia32_andnpd512_mask",
+ "llvm.x86.avx512.mask.andn.ps.128" => "__builtin_ia32_andnps128_mask",
+ "llvm.x86.avx512.mask.andn.ps.256" => "__builtin_ia32_andnps256_mask",
+ "llvm.x86.avx512.mask.andn.ps.512" => "__builtin_ia32_andnps512_mask",
+ "llvm.x86.avx512.mask.blend.d.512" => "__builtin_ia32_blendmd_512_mask",
+ "llvm.x86.avx512.mask.blend.pd.512" => "__builtin_ia32_blendmpd_512_mask",
+ "llvm.x86.avx512.mask.blend.ps.512" => "__builtin_ia32_blendmps_512_mask",
+ "llvm.x86.avx512.mask.blend.q.512" => "__builtin_ia32_blendmq_512_mask",
+ "llvm.x86.avx512.mask.broadcastf32x2.256" => "__builtin_ia32_broadcastf32x2_256_mask",
+ "llvm.x86.avx512.mask.broadcastf32x2.512" => "__builtin_ia32_broadcastf32x2_512_mask",
+ "llvm.x86.avx512.mask.broadcastf32x4.256" => "__builtin_ia32_broadcastf32x4_256_mask",
+ "llvm.x86.avx512.mask.broadcastf32x4.512" => "__builtin_ia32_broadcastf32x4_512",
+ "llvm.x86.avx512.mask.broadcastf32x8.512" => "__builtin_ia32_broadcastf32x8_512_mask",
+ "llvm.x86.avx512.mask.broadcastf64x2.256" => "__builtin_ia32_broadcastf64x2_256_mask",
+ "llvm.x86.avx512.mask.broadcastf64x2.512" => "__builtin_ia32_broadcastf64x2_512_mask",
+ "llvm.x86.avx512.mask.broadcastf64x4.512" => "__builtin_ia32_broadcastf64x4_512",
+ "llvm.x86.avx512.mask.broadcasti32x2.128" => "__builtin_ia32_broadcasti32x2_128_mask",
+ "llvm.x86.avx512.mask.broadcasti32x2.256" => "__builtin_ia32_broadcasti32x2_256_mask",
+ "llvm.x86.avx512.mask.broadcasti32x2.512" => "__builtin_ia32_broadcasti32x2_512_mask",
+ "llvm.x86.avx512.mask.broadcasti32x4.256" => "__builtin_ia32_broadcasti32x4_256_mask",
+ "llvm.x86.avx512.mask.broadcasti32x4.512" => "__builtin_ia32_broadcasti32x4_512",
+ "llvm.x86.avx512.mask.broadcasti32x8.512" => "__builtin_ia32_broadcasti32x8_512_mask",
+ "llvm.x86.avx512.mask.broadcasti64x2.256" => "__builtin_ia32_broadcasti64x2_256_mask",
+ "llvm.x86.avx512.mask.broadcasti64x2.512" => "__builtin_ia32_broadcasti64x2_512_mask",
+ "llvm.x86.avx512.mask.broadcasti64x4.512" => "__builtin_ia32_broadcasti64x4_512",
+ "llvm.x86.avx512.mask.cmp.pd.128" => "__builtin_ia32_cmppd128_mask",
+ "llvm.x86.avx512.mask.cmp.pd.256" => "__builtin_ia32_cmppd256_mask",
+ "llvm.x86.avx512.mask.cmp.pd.512" => "__builtin_ia32_cmppd512_mask",
+ "llvm.x86.avx512.mask.cmp.ps.128" => "__builtin_ia32_cmpps128_mask",
+ "llvm.x86.avx512.mask.cmp.ps.256" => "__builtin_ia32_cmpps256_mask",
+ "llvm.x86.avx512.mask.cmp.ps.512" => "__builtin_ia32_cmpps512_mask",
+ "llvm.x86.avx512.mask.cmp.sd" => "__builtin_ia32_cmpsd_mask",
+ "llvm.x86.avx512.mask.cmp.ss" => "__builtin_ia32_cmpss_mask",
+ "llvm.x86.avx512.mask.compress.d.128" => "__builtin_ia32_compresssi128_mask",
+ "llvm.x86.avx512.mask.compress.d.256" => "__builtin_ia32_compresssi256_mask",
+ "llvm.x86.avx512.mask.compress.d.512" => "__builtin_ia32_compresssi512_mask",
+ "llvm.x86.avx512.mask.compress.pd.128" => "__builtin_ia32_compressdf128_mask",
+ "llvm.x86.avx512.mask.compress.pd.256" => "__builtin_ia32_compressdf256_mask",
+ "llvm.x86.avx512.mask.compress.pd.512" => "__builtin_ia32_compressdf512_mask",
+ "llvm.x86.avx512.mask.compress.ps.128" => "__builtin_ia32_compresssf128_mask",
+ "llvm.x86.avx512.mask.compress.ps.256" => "__builtin_ia32_compresssf256_mask",
+ "llvm.x86.avx512.mask.compress.ps.512" => "__builtin_ia32_compresssf512_mask",
+ "llvm.x86.avx512.mask.compress.q.128" => "__builtin_ia32_compressdi128_mask",
+ "llvm.x86.avx512.mask.compress.q.256" => "__builtin_ia32_compressdi256_mask",
+ "llvm.x86.avx512.mask.compress.q.512" => "__builtin_ia32_compressdi512_mask",
+ "llvm.x86.avx512.mask.compress.store.d.128" => "__builtin_ia32_compressstoresi128_mask",
+ "llvm.x86.avx512.mask.compress.store.d.256" => "__builtin_ia32_compressstoresi256_mask",
+ "llvm.x86.avx512.mask.compress.store.d.512" => "__builtin_ia32_compressstoresi512_mask",
+ "llvm.x86.avx512.mask.compress.store.pd.128" => "__builtin_ia32_compressstoredf128_mask",
+ "llvm.x86.avx512.mask.compress.store.pd.256" => "__builtin_ia32_compressstoredf256_mask",
+ "llvm.x86.avx512.mask.compress.store.pd.512" => "__builtin_ia32_compressstoredf512_mask",
+ "llvm.x86.avx512.mask.compress.store.ps.128" => "__builtin_ia32_compressstoresf128_mask",
+ "llvm.x86.avx512.mask.compress.store.ps.256" => "__builtin_ia32_compressstoresf256_mask",
+ "llvm.x86.avx512.mask.compress.store.ps.512" => "__builtin_ia32_compressstoresf512_mask",
+ "llvm.x86.avx512.mask.compress.store.q.128" => "__builtin_ia32_compressstoredi128_mask",
+ "llvm.x86.avx512.mask.compress.store.q.256" => "__builtin_ia32_compressstoredi256_mask",
+ "llvm.x86.avx512.mask.compress.store.q.512" => "__builtin_ia32_compressstoredi512_mask",
+ "llvm.x86.avx512.mask.conflict.d.128" => "__builtin_ia32_vpconflictsi_128_mask",
+ "llvm.x86.avx512.mask.conflict.d.256" => "__builtin_ia32_vpconflictsi_256_mask",
+ "llvm.x86.avx512.mask.conflict.d.512" => "__builtin_ia32_vpconflictsi_512_mask",
+ "llvm.x86.avx512.mask.conflict.q.128" => "__builtin_ia32_vpconflictdi_128_mask",
+ "llvm.x86.avx512.mask.conflict.q.256" => "__builtin_ia32_vpconflictdi_256_mask",
+ "llvm.x86.avx512.mask.conflict.q.512" => "__builtin_ia32_vpconflictdi_512_mask",
+ "llvm.x86.avx512.mask.cvtdq2pd.128" => "__builtin_ia32_cvtdq2pd128_mask",
+ "llvm.x86.avx512.mask.cvtdq2pd.256" => "__builtin_ia32_cvtdq2pd256_mask",
+ "llvm.x86.avx512.mask.cvtdq2pd.512" => "__builtin_ia32_cvtdq2pd512_mask",
+ "llvm.x86.avx512.mask.cvtdq2ps.128" => "__builtin_ia32_cvtdq2ps128_mask",
+ "llvm.x86.avx512.mask.cvtdq2ps.256" => "__builtin_ia32_cvtdq2ps256_mask",
+ "llvm.x86.avx512.mask.cvtdq2ps.512" => "__builtin_ia32_cvtdq2ps512_mask",
+ "llvm.x86.avx512.mask.cvtpd2dq.128" => "__builtin_ia32_cvtpd2dq128_mask",
+ "llvm.x86.avx512.mask.cvtpd2dq.256" => "__builtin_ia32_cvtpd2dq256_mask",
+ "llvm.x86.avx512.mask.cvtpd2dq.512" => "__builtin_ia32_cvtpd2dq512_mask",
+ "llvm.x86.avx512.mask.cvtpd2ps" => "__builtin_ia32_cvtpd2ps_mask",
+ "llvm.x86.avx512.mask.cvtpd2ps.256" => "__builtin_ia32_cvtpd2ps256_mask",
+ "llvm.x86.avx512.mask.cvtpd2ps.512" => "__builtin_ia32_cvtpd2ps512_mask",
+ "llvm.x86.avx512.mask.cvtpd2qq.128" => "__builtin_ia32_cvtpd2qq128_mask",
+ "llvm.x86.avx512.mask.cvtpd2qq.256" => "__builtin_ia32_cvtpd2qq256_mask",
+ "llvm.x86.avx512.mask.cvtpd2qq.512" => "__builtin_ia32_cvtpd2qq512_mask",
+ "llvm.x86.avx512.mask.cvtpd2udq.128" => "__builtin_ia32_cvtpd2udq128_mask",
+ "llvm.x86.avx512.mask.cvtpd2udq.256" => "__builtin_ia32_cvtpd2udq256_mask",
+ "llvm.x86.avx512.mask.cvtpd2udq.512" => "__builtin_ia32_cvtpd2udq512_mask",
+ "llvm.x86.avx512.mask.cvtpd2uqq.128" => "__builtin_ia32_cvtpd2uqq128_mask",
+ "llvm.x86.avx512.mask.cvtpd2uqq.256" => "__builtin_ia32_cvtpd2uqq256_mask",
+ "llvm.x86.avx512.mask.cvtpd2uqq.512" => "__builtin_ia32_cvtpd2uqq512_mask",
+ "llvm.x86.avx512.mask.cvtps2dq.128" => "__builtin_ia32_cvtps2dq128_mask",
+ "llvm.x86.avx512.mask.cvtps2dq.256" => "__builtin_ia32_cvtps2dq256_mask",
+ "llvm.x86.avx512.mask.cvtps2dq.512" => "__builtin_ia32_cvtps2dq512_mask",
+ "llvm.x86.avx512.mask.cvtps2pd.128" => "__builtin_ia32_cvtps2pd128_mask",
+ "llvm.x86.avx512.mask.cvtps2pd.256" => "__builtin_ia32_cvtps2pd256_mask",
+ "llvm.x86.avx512.mask.cvtps2pd.512" => "__builtin_ia32_cvtps2pd512_mask",
+ "llvm.x86.avx512.mask.cvtps2qq.128" => "__builtin_ia32_cvtps2qq128_mask",
+ "llvm.x86.avx512.mask.cvtps2qq.256" => "__builtin_ia32_cvtps2qq256_mask",
+ "llvm.x86.avx512.mask.cvtps2qq.512" => "__builtin_ia32_cvtps2qq512_mask",
+ "llvm.x86.avx512.mask.cvtps2udq.128" => "__builtin_ia32_cvtps2udq128_mask",
+ "llvm.x86.avx512.mask.cvtps2udq.256" => "__builtin_ia32_cvtps2udq256_mask",
+ "llvm.x86.avx512.mask.cvtps2udq.512" => "__builtin_ia32_cvtps2udq512_mask",
+ "llvm.x86.avx512.mask.cvtps2uqq.128" => "__builtin_ia32_cvtps2uqq128_mask",
+ "llvm.x86.avx512.mask.cvtps2uqq.256" => "__builtin_ia32_cvtps2uqq256_mask",
+ "llvm.x86.avx512.mask.cvtps2uqq.512" => "__builtin_ia32_cvtps2uqq512_mask",
+ "llvm.x86.avx512.mask.cvtqq2pd.128" => "__builtin_ia32_cvtqq2pd128_mask",
+ "llvm.x86.avx512.mask.cvtqq2pd.256" => "__builtin_ia32_cvtqq2pd256_mask",
+ "llvm.x86.avx512.mask.cvtqq2pd.512" => "__builtin_ia32_cvtqq2pd512_mask",
+ "llvm.x86.avx512.mask.cvtqq2ps.128" => "__builtin_ia32_cvtqq2ps128_mask",
+ "llvm.x86.avx512.mask.cvtqq2ps.256" => "__builtin_ia32_cvtqq2ps256_mask",
+ "llvm.x86.avx512.mask.cvtqq2ps.512" => "__builtin_ia32_cvtqq2ps512_mask",
+ "llvm.x86.avx512.mask.cvtsd2ss.round" => "__builtin_ia32_cvtsd2ss_round_mask",
+ "llvm.x86.avx512.mask.cvtss2sd.round" => "__builtin_ia32_cvtss2sd_round_mask",
+ "llvm.x86.avx512.mask.cvttpd2dq.128" => "__builtin_ia32_cvttpd2dq128_mask",
+ "llvm.x86.avx512.mask.cvttpd2dq.256" => "__builtin_ia32_cvttpd2dq256_mask",
+ "llvm.x86.avx512.mask.cvttpd2dq.512" => "__builtin_ia32_cvttpd2dq512_mask",
+ "llvm.x86.avx512.mask.cvttpd2qq.128" => "__builtin_ia32_cvttpd2qq128_mask",
+ "llvm.x86.avx512.mask.cvttpd2qq.256" => "__builtin_ia32_cvttpd2qq256_mask",
+ "llvm.x86.avx512.mask.cvttpd2qq.512" => "__builtin_ia32_cvttpd2qq512_mask",
+ "llvm.x86.avx512.mask.cvttpd2udq.128" => "__builtin_ia32_cvttpd2udq128_mask",
+ "llvm.x86.avx512.mask.cvttpd2udq.256" => "__builtin_ia32_cvttpd2udq256_mask",
+ "llvm.x86.avx512.mask.cvttpd2udq.512" => "__builtin_ia32_cvttpd2udq512_mask",
+ "llvm.x86.avx512.mask.cvttpd2uqq.128" => "__builtin_ia32_cvttpd2uqq128_mask",
+ "llvm.x86.avx512.mask.cvttpd2uqq.256" => "__builtin_ia32_cvttpd2uqq256_mask",
+ "llvm.x86.avx512.mask.cvttpd2uqq.512" => "__builtin_ia32_cvttpd2uqq512_mask",
+ "llvm.x86.avx512.mask.cvttps2dq.128" => "__builtin_ia32_cvttps2dq128_mask",
+ "llvm.x86.avx512.mask.cvttps2dq.256" => "__builtin_ia32_cvttps2dq256_mask",
+ "llvm.x86.avx512.mask.cvttps2dq.512" => "__builtin_ia32_cvttps2dq512_mask",
+ "llvm.x86.avx512.mask.cvttps2qq.128" => "__builtin_ia32_cvttps2qq128_mask",
+ "llvm.x86.avx512.mask.cvttps2qq.256" => "__builtin_ia32_cvttps2qq256_mask",
+ "llvm.x86.avx512.mask.cvttps2qq.512" => "__builtin_ia32_cvttps2qq512_mask",
+ "llvm.x86.avx512.mask.cvttps2udq.128" => "__builtin_ia32_cvttps2udq128_mask",
+ "llvm.x86.avx512.mask.cvttps2udq.256" => "__builtin_ia32_cvttps2udq256_mask",
+ "llvm.x86.avx512.mask.cvttps2udq.512" => "__builtin_ia32_cvttps2udq512_mask",
+ "llvm.x86.avx512.mask.cvttps2uqq.128" => "__builtin_ia32_cvttps2uqq128_mask",
+ "llvm.x86.avx512.mask.cvttps2uqq.256" => "__builtin_ia32_cvttps2uqq256_mask",
+ "llvm.x86.avx512.mask.cvttps2uqq.512" => "__builtin_ia32_cvttps2uqq512_mask",
+ "llvm.x86.avx512.mask.cvtudq2pd.128" => "__builtin_ia32_cvtudq2pd128_mask",
+ "llvm.x86.avx512.mask.cvtudq2pd.256" => "__builtin_ia32_cvtudq2pd256_mask",
+ "llvm.x86.avx512.mask.cvtudq2pd.512" => "__builtin_ia32_cvtudq2pd512_mask",
+ "llvm.x86.avx512.mask.cvtudq2ps.128" => "__builtin_ia32_cvtudq2ps128_mask",
+ "llvm.x86.avx512.mask.cvtudq2ps.256" => "__builtin_ia32_cvtudq2ps256_mask",
+ "llvm.x86.avx512.mask.cvtudq2ps.512" => "__builtin_ia32_cvtudq2ps512_mask",
+ "llvm.x86.avx512.mask.cvtuqq2pd.128" => "__builtin_ia32_cvtuqq2pd128_mask",
+ "llvm.x86.avx512.mask.cvtuqq2pd.256" => "__builtin_ia32_cvtuqq2pd256_mask",
+ "llvm.x86.avx512.mask.cvtuqq2pd.512" => "__builtin_ia32_cvtuqq2pd512_mask",
+ "llvm.x86.avx512.mask.cvtuqq2ps.128" => "__builtin_ia32_cvtuqq2ps128_mask",
+ "llvm.x86.avx512.mask.cvtuqq2ps.256" => "__builtin_ia32_cvtuqq2ps256_mask",
+ "llvm.x86.avx512.mask.cvtuqq2ps.512" => "__builtin_ia32_cvtuqq2ps512_mask",
+ "llvm.x86.avx512.mask.dbpsadbw.128" => "__builtin_ia32_dbpsadbw128_mask",
+ "llvm.x86.avx512.mask.dbpsadbw.256" => "__builtin_ia32_dbpsadbw256_mask",
+ "llvm.x86.avx512.mask.dbpsadbw.512" => "__builtin_ia32_dbpsadbw512_mask",
+ "llvm.x86.avx512.mask.div.pd.128" => "__builtin_ia32_divpd_mask",
+ "llvm.x86.avx512.mask.div.pd.256" => "__builtin_ia32_divpd256_mask",
+ "llvm.x86.avx512.mask.div.pd.512" => "__builtin_ia32_divpd512_mask",
+ "llvm.x86.avx512.mask.div.ps.128" => "__builtin_ia32_divps_mask",
+ "llvm.x86.avx512.mask.div.ps.256" => "__builtin_ia32_divps256_mask",
+ "llvm.x86.avx512.mask.div.ps.512" => "__builtin_ia32_divps512_mask",
+ "llvm.x86.avx512.mask.div.sd.round" => "__builtin_ia32_divsd_round_mask",
+ "llvm.x86.avx512.mask.div.ss.round" => "__builtin_ia32_divss_round_mask",
+ "llvm.x86.avx512.mask.expand.d.128" => "__builtin_ia32_expandsi128_mask",
+ "llvm.x86.avx512.mask.expand.d.256" => "__builtin_ia32_expandsi256_mask",
+ "llvm.x86.avx512.mask.expand.d.512" => "__builtin_ia32_expandsi512_mask",
+ "llvm.x86.avx512.mask.expand.load.d.128" => "__builtin_ia32_expandloadsi128_mask",
+ "llvm.x86.avx512.mask.expand.load.d.256" => "__builtin_ia32_expandloadsi256_mask",
+ "llvm.x86.avx512.mask.expand.load.d.512" => "__builtin_ia32_expandloadsi512_mask",
+ "llvm.x86.avx512.mask.expand.load.pd.128" => "__builtin_ia32_expandloaddf128_mask",
+ "llvm.x86.avx512.mask.expand.load.pd.256" => "__builtin_ia32_expandloaddf256_mask",
+ "llvm.x86.avx512.mask.expand.load.pd.512" => "__builtin_ia32_expandloaddf512_mask",
+ "llvm.x86.avx512.mask.expand.load.ps.128" => "__builtin_ia32_expandloadsf128_mask",
+ "llvm.x86.avx512.mask.expand.load.ps.256" => "__builtin_ia32_expandloadsf256_mask",
+ "llvm.x86.avx512.mask.expand.load.ps.512" => "__builtin_ia32_expandloadsf512_mask",
+ "llvm.x86.avx512.mask.expand.load.q.128" => "__builtin_ia32_expandloaddi128_mask",
+ "llvm.x86.avx512.mask.expand.load.q.256" => "__builtin_ia32_expandloaddi256_mask",
+ "llvm.x86.avx512.mask.expand.load.q.512" => "__builtin_ia32_expandloaddi512_mask",
+ "llvm.x86.avx512.mask.expand.pd.128" => "__builtin_ia32_expanddf128_mask",
+ "llvm.x86.avx512.mask.expand.pd.256" => "__builtin_ia32_expanddf256_mask",
+ "llvm.x86.avx512.mask.expand.pd.512" => "__builtin_ia32_expanddf512_mask",
+ "llvm.x86.avx512.mask.expand.ps.128" => "__builtin_ia32_expandsf128_mask",
+ "llvm.x86.avx512.mask.expand.ps.256" => "__builtin_ia32_expandsf256_mask",
+ "llvm.x86.avx512.mask.expand.ps.512" => "__builtin_ia32_expandsf512_mask",
+ "llvm.x86.avx512.mask.expand.q.128" => "__builtin_ia32_expanddi128_mask",
+ "llvm.x86.avx512.mask.expand.q.256" => "__builtin_ia32_expanddi256_mask",
+ "llvm.x86.avx512.mask.expand.q.512" => "__builtin_ia32_expanddi512_mask",
+ "llvm.x86.avx512.mask.fixupimm.pd.128" => "__builtin_ia32_fixupimmpd128_mask",
+ "llvm.x86.avx512.mask.fixupimm.pd.256" => "__builtin_ia32_fixupimmpd256_mask",
+ "llvm.x86.avx512.mask.fixupimm.pd.512" => "__builtin_ia32_fixupimmpd512_mask",
+ "llvm.x86.avx512.mask.fixupimm.ps.128" => "__builtin_ia32_fixupimmps128_mask",
+ "llvm.x86.avx512.mask.fixupimm.ps.256" => "__builtin_ia32_fixupimmps256_mask",
+ "llvm.x86.avx512.mask.fixupimm.ps.512" => "__builtin_ia32_fixupimmps512_mask",
+ "llvm.x86.avx512.mask.fixupimm.sd" => "__builtin_ia32_fixupimmsd_mask",
+ "llvm.x86.avx512.mask.fixupimm.ss" => "__builtin_ia32_fixupimmss_mask",
+ "llvm.x86.avx512.mask.fpclass.pd.128" => "__builtin_ia32_fpclasspd128_mask",
+ "llvm.x86.avx512.mask.fpclass.pd.256" => "__builtin_ia32_fpclasspd256_mask",
+ "llvm.x86.avx512.mask.fpclass.pd.512" => "__builtin_ia32_fpclasspd512_mask",
+ "llvm.x86.avx512.mask.fpclass.ps.128" => "__builtin_ia32_fpclassps128_mask",
+ "llvm.x86.avx512.mask.fpclass.ps.256" => "__builtin_ia32_fpclassps256_mask",
+ "llvm.x86.avx512.mask.fpclass.ps.512" => "__builtin_ia32_fpclassps512_mask",
+ "llvm.x86.avx512.mask.fpclass.sd" => "__builtin_ia32_fpclasssd_mask",
+ "llvm.x86.avx512.mask.fpclass.ss" => "__builtin_ia32_fpclassss_mask",
+ "llvm.x86.avx512.mask.getexp.pd.128" => "__builtin_ia32_getexppd128_mask",
+ "llvm.x86.avx512.mask.getexp.pd.256" => "__builtin_ia32_getexppd256_mask",
+ "llvm.x86.avx512.mask.getexp.pd.512" => "__builtin_ia32_getexppd512_mask",
+ "llvm.x86.avx512.mask.getexp.ps.128" => "__builtin_ia32_getexpps128_mask",
+ "llvm.x86.avx512.mask.getexp.ps.256" => "__builtin_ia32_getexpps256_mask",
+ "llvm.x86.avx512.mask.getexp.ps.512" => "__builtin_ia32_getexpps512_mask",
+ "llvm.x86.avx512.mask.getexp.sd" => "__builtin_ia32_getexpsd128_round_mask",
+ "llvm.x86.avx512.mask.getexp.ss" => "__builtin_ia32_getexpss128_round_mask",
+ "llvm.x86.avx512.mask.getmant.pd.128" => "__builtin_ia32_getmantpd128_mask",
+ "llvm.x86.avx512.mask.getmant.pd.256" => "__builtin_ia32_getmantpd256_mask",
+ "llvm.x86.avx512.mask.getmant.pd.512" => "__builtin_ia32_getmantpd512_mask",
+ "llvm.x86.avx512.mask.getmant.ps.128" => "__builtin_ia32_getmantps128_mask",
+ "llvm.x86.avx512.mask.getmant.ps.256" => "__builtin_ia32_getmantps256_mask",
+ "llvm.x86.avx512.mask.getmant.ps.512" => "__builtin_ia32_getmantps512_mask",
+ "llvm.x86.avx512.mask.getmant.sd" => "__builtin_ia32_getmantsd_round_mask",
+ "llvm.x86.avx512.mask.getmant.ss" => "__builtin_ia32_getmantss_round_mask",
+ "llvm.x86.avx512.mask.insertf32x4.256" => "__builtin_ia32_insertf32x4_256_mask",
+ "llvm.x86.avx512.mask.insertf32x4.512" => "__builtin_ia32_insertf32x4_mask",
+ "llvm.x86.avx512.mask.insertf32x8.512" => "__builtin_ia32_insertf32x8_mask",
+ "llvm.x86.avx512.mask.insertf64x2.256" => "__builtin_ia32_insertf64x2_256_mask",
+ "llvm.x86.avx512.mask.insertf64x2.512" => "__builtin_ia32_insertf64x2_512_mask",
+ "llvm.x86.avx512.mask.insertf64x4.512" => "__builtin_ia32_insertf64x4_mask",
+ "llvm.x86.avx512.mask.inserti32x4.256" => "__builtin_ia32_inserti32x4_256_mask",
+ "llvm.x86.avx512.mask.inserti32x4.512" => "__builtin_ia32_inserti32x4_mask",
+ "llvm.x86.avx512.mask.inserti32x8.512" => "__builtin_ia32_inserti32x8_mask",
+ "llvm.x86.avx512.mask.inserti64x2.256" => "__builtin_ia32_inserti64x2_256_mask",
+ "llvm.x86.avx512.mask.inserti64x2.512" => "__builtin_ia32_inserti64x2_512_mask",
+ "llvm.x86.avx512.mask.inserti64x4.512" => "__builtin_ia32_inserti64x4_mask",
+ "llvm.x86.avx512.mask.loadu.d.512" => "__builtin_ia32_loaddqusi512_mask",
+ "llvm.x86.avx512.mask.loadu.pd.512" => "__builtin_ia32_loadupd512_mask",
+ "llvm.x86.avx512.mask.loadu.ps.512" => "__builtin_ia32_loadups512_mask",
+ "llvm.x86.avx512.mask.loadu.q.512" => "__builtin_ia32_loaddqudi512_mask",
+ "llvm.x86.avx512.mask.lzcnt.d.512" => "__builtin_ia32_vplzcntd_512_mask",
+ "llvm.x86.avx512.mask.lzcnt.q.512" => "__builtin_ia32_vplzcntq_512_mask",
+ "llvm.x86.avx512.mask.max.pd.128" => "__builtin_ia32_maxpd_mask",
+ "llvm.x86.avx512.mask.max.pd.256" => "__builtin_ia32_maxpd256_mask",
+ "llvm.x86.avx512.mask.max.pd.512" => "__builtin_ia32_maxpd512_mask",
+ "llvm.x86.avx512.mask.max.ps.128" => "__builtin_ia32_maxps_mask",
+ "llvm.x86.avx512.mask.max.ps.256" => "__builtin_ia32_maxps256_mask",
+ "llvm.x86.avx512.mask.max.ps.512" => "__builtin_ia32_maxps512_mask",
+ "llvm.x86.avx512.mask.max.sd.round" => "__builtin_ia32_maxsd_round_mask",
+ "llvm.x86.avx512.mask.max.ss.round" => "__builtin_ia32_maxss_round_mask",
+ "llvm.x86.avx512.mask.min.pd.128" => "__builtin_ia32_minpd_mask",
+ "llvm.x86.avx512.mask.min.pd.256" => "__builtin_ia32_minpd256_mask",
+ "llvm.x86.avx512.mask.min.pd.512" => "__builtin_ia32_minpd512_mask",
+ "llvm.x86.avx512.mask.min.ps.128" => "__builtin_ia32_minps_mask",
+ "llvm.x86.avx512.mask.min.ps.256" => "__builtin_ia32_minps256_mask",
+ "llvm.x86.avx512.mask.min.ps.512" => "__builtin_ia32_minps512_mask",
+ "llvm.x86.avx512.mask.min.sd.round" => "__builtin_ia32_minsd_round_mask",
+ "llvm.x86.avx512.mask.min.ss.round" => "__builtin_ia32_minss_round_mask",
+ "llvm.x86.avx512.mask.move.sd" => "__builtin_ia32_movsd_mask",
+ "llvm.x86.avx512.mask.move.ss" => "__builtin_ia32_movss_mask",
+ "llvm.x86.avx512.mask.mul.pd.128" => "__builtin_ia32_mulpd_mask",
+ "llvm.x86.avx512.mask.mul.pd.256" => "__builtin_ia32_mulpd256_mask",
+ "llvm.x86.avx512.mask.mul.pd.512" => "__builtin_ia32_mulpd512_mask",
+ "llvm.x86.avx512.mask.mul.ps.128" => "__builtin_ia32_mulps_mask",
+ "llvm.x86.avx512.mask.mul.ps.256" => "__builtin_ia32_mulps256_mask",
+ "llvm.x86.avx512.mask.mul.ps.512" => "__builtin_ia32_mulps512_mask",
+ "llvm.x86.avx512.mask.mul.sd.round" => "__builtin_ia32_mulsd_round_mask",
+ "llvm.x86.avx512.mask.mul.ss.round" => "__builtin_ia32_mulss_round_mask",
+ "llvm.x86.avx512.mask.or.pd.128" => "__builtin_ia32_orpd128_mask",
+ "llvm.x86.avx512.mask.or.pd.256" => "__builtin_ia32_orpd256_mask",
+ "llvm.x86.avx512.mask.or.pd.512" => "__builtin_ia32_orpd512_mask",
+ "llvm.x86.avx512.mask.or.ps.128" => "__builtin_ia32_orps128_mask",
+ "llvm.x86.avx512.mask.or.ps.256" => "__builtin_ia32_orps256_mask",
+ "llvm.x86.avx512.mask.or.ps.512" => "__builtin_ia32_orps512_mask",
+ "llvm.x86.avx512.mask.pabs.b.128" => "__builtin_ia32_pabsb128_mask",
+ "llvm.x86.avx512.mask.pabs.b.256" => "__builtin_ia32_pabsb256_mask",
+ "llvm.x86.avx512.mask.pabs.b.512" => "__builtin_ia32_pabsb512_mask",
+ "llvm.x86.avx512.mask.pabs.d.128" => "__builtin_ia32_pabsd128_mask",
+ "llvm.x86.avx512.mask.pabs.d.256" => "__builtin_ia32_pabsd256_mask",
+ "llvm.x86.avx512.mask.pabs.d.512" => "__builtin_ia32_pabsd512_mask",
+ "llvm.x86.avx512.mask.pabs.q.128" => "__builtin_ia32_pabsq128_mask",
+ "llvm.x86.avx512.mask.pabs.q.256" => "__builtin_ia32_pabsq256_mask",
+ "llvm.x86.avx512.mask.pabs.q.512" => "__builtin_ia32_pabsq512_mask",
+ "llvm.x86.avx512.mask.pabs.w.128" => "__builtin_ia32_pabsw128_mask",
+ "llvm.x86.avx512.mask.pabs.w.256" => "__builtin_ia32_pabsw256_mask",
+ "llvm.x86.avx512.mask.pabs.w.512" => "__builtin_ia32_pabsw512_mask",
+ "llvm.x86.avx512.mask.packssdw.128" => "__builtin_ia32_packssdw128_mask",
+ "llvm.x86.avx512.mask.packssdw.256" => "__builtin_ia32_packssdw256_mask",
+ "llvm.x86.avx512.mask.packssdw.512" => "__builtin_ia32_packssdw512_mask",
+ "llvm.x86.avx512.mask.packsswb.128" => "__builtin_ia32_packsswb128_mask",
+ "llvm.x86.avx512.mask.packsswb.256" => "__builtin_ia32_packsswb256_mask",
+ "llvm.x86.avx512.mask.packsswb.512" => "__builtin_ia32_packsswb512_mask",
+ "llvm.x86.avx512.mask.packusdw.128" => "__builtin_ia32_packusdw128_mask",
+ "llvm.x86.avx512.mask.packusdw.256" => "__builtin_ia32_packusdw256_mask",
+ "llvm.x86.avx512.mask.packusdw.512" => "__builtin_ia32_packusdw512_mask",
+ "llvm.x86.avx512.mask.packuswb.128" => "__builtin_ia32_packuswb128_mask",
+ "llvm.x86.avx512.mask.packuswb.256" => "__builtin_ia32_packuswb256_mask",
+ "llvm.x86.avx512.mask.packuswb.512" => "__builtin_ia32_packuswb512_mask",
+ "llvm.x86.avx512.mask.padd.b.128" => "__builtin_ia32_paddb128_mask",
+ "llvm.x86.avx512.mask.padd.b.256" => "__builtin_ia32_paddb256_mask",
+ "llvm.x86.avx512.mask.padd.b.512" => "__builtin_ia32_paddb512_mask",
+ "llvm.x86.avx512.mask.padd.d.128" => "__builtin_ia32_paddd128_mask",
+ "llvm.x86.avx512.mask.padd.d.256" => "__builtin_ia32_paddd256_mask",
+ "llvm.x86.avx512.mask.padd.d.512" => "__builtin_ia32_paddd512_mask",
+ "llvm.x86.avx512.mask.padd.q.128" => "__builtin_ia32_paddq128_mask",
+ "llvm.x86.avx512.mask.padd.q.256" => "__builtin_ia32_paddq256_mask",
+ "llvm.x86.avx512.mask.padd.q.512" => "__builtin_ia32_paddq512_mask",
+ "llvm.x86.avx512.mask.padd.w.128" => "__builtin_ia32_paddw128_mask",
+ "llvm.x86.avx512.mask.padd.w.256" => "__builtin_ia32_paddw256_mask",
+ "llvm.x86.avx512.mask.padd.w.512" => "__builtin_ia32_paddw512_mask",
+ "llvm.x86.avx512.mask.padds.b.128" => "__builtin_ia32_paddsb128_mask",
+ "llvm.x86.avx512.mask.padds.b.256" => "__builtin_ia32_paddsb256_mask",
+ "llvm.x86.avx512.mask.padds.b.512" => "__builtin_ia32_paddsb512_mask",
+ "llvm.x86.avx512.mask.padds.w.128" => "__builtin_ia32_paddsw128_mask",
+ "llvm.x86.avx512.mask.padds.w.256" => "__builtin_ia32_paddsw256_mask",
+ "llvm.x86.avx512.mask.padds.w.512" => "__builtin_ia32_paddsw512_mask",
+ "llvm.x86.avx512.mask.paddus.b.128" => "__builtin_ia32_paddusb128_mask",
+ "llvm.x86.avx512.mask.paddus.b.256" => "__builtin_ia32_paddusb256_mask",
+ "llvm.x86.avx512.mask.paddus.b.512" => "__builtin_ia32_paddusb512_mask",
+ "llvm.x86.avx512.mask.paddus.w.128" => "__builtin_ia32_paddusw128_mask",
+ "llvm.x86.avx512.mask.paddus.w.256" => "__builtin_ia32_paddusw256_mask",
+ "llvm.x86.avx512.mask.paddus.w.512" => "__builtin_ia32_paddusw512_mask",
+ "llvm.x86.avx512.mask.pand.d.512" => "__builtin_ia32_pandd512_mask",
+ "llvm.x86.avx512.mask.pand.q.512" => "__builtin_ia32_pandq512_mask",
+ "llvm.x86.avx512.mask.pavg.b.128" => "__builtin_ia32_pavgb128_mask",
+ "llvm.x86.avx512.mask.pavg.b.256" => "__builtin_ia32_pavgb256_mask",
+ "llvm.x86.avx512.mask.pavg.b.512" => "__builtin_ia32_pavgb512_mask",
+ "llvm.x86.avx512.mask.pavg.w.128" => "__builtin_ia32_pavgw128_mask",
+ "llvm.x86.avx512.mask.pavg.w.256" => "__builtin_ia32_pavgw256_mask",
+ "llvm.x86.avx512.mask.pavg.w.512" => "__builtin_ia32_pavgw512_mask",
+ "llvm.x86.avx512.mask.pbroadcast.b.gpr.128" => "__builtin_ia32_pbroadcastb128_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.b.gpr.256" => "__builtin_ia32_pbroadcastb256_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.b.gpr.512" => "__builtin_ia32_pbroadcastb512_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.d.gpr.128" => "__builtin_ia32_pbroadcastd128_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.d.gpr.256" => "__builtin_ia32_pbroadcastd256_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.d.gpr.512" => "__builtin_ia32_pbroadcastd512_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.q.gpr.128" => "__builtin_ia32_pbroadcastq128_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.q.gpr.256" => "__builtin_ia32_pbroadcastq256_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.q.gpr.512" => "__builtin_ia32_pbroadcastq512_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.q.mem.512" => "__builtin_ia32_pbroadcastq512_mem_mask",
+ "llvm.x86.avx512.mask.pbroadcast.w.gpr.128" => "__builtin_ia32_pbroadcastw128_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.w.gpr.256" => "__builtin_ia32_pbroadcastw256_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.w.gpr.512" => "__builtin_ia32_pbroadcastw512_gpr_mask",
+ "llvm.x86.avx512.mask.pcmpeq.b.128" => "__builtin_ia32_pcmpeqb128_mask",
+ "llvm.x86.avx512.mask.pcmpeq.b.256" => "__builtin_ia32_pcmpeqb256_mask",
+ "llvm.x86.avx512.mask.pcmpeq.b.512" => "__builtin_ia32_pcmpeqb512_mask",
+ "llvm.x86.avx512.mask.pcmpeq.d.128" => "__builtin_ia32_pcmpeqd128_mask",
+ "llvm.x86.avx512.mask.pcmpeq.d.256" => "__builtin_ia32_pcmpeqd256_mask",
+ "llvm.x86.avx512.mask.pcmpeq.d.512" => "__builtin_ia32_pcmpeqd512_mask",
+ "llvm.x86.avx512.mask.pcmpeq.q.128" => "__builtin_ia32_pcmpeqq128_mask",
+ "llvm.x86.avx512.mask.pcmpeq.q.256" => "__builtin_ia32_pcmpeqq256_mask",
+ "llvm.x86.avx512.mask.pcmpeq.q.512" => "__builtin_ia32_pcmpeqq512_mask",
+ "llvm.x86.avx512.mask.pcmpeq.w.128" => "__builtin_ia32_pcmpeqw128_mask",
+ "llvm.x86.avx512.mask.pcmpeq.w.256" => "__builtin_ia32_pcmpeqw256_mask",
+ "llvm.x86.avx512.mask.pcmpeq.w.512" => "__builtin_ia32_pcmpeqw512_mask",
+ "llvm.x86.avx512.mask.pcmpgt.b.128" => "__builtin_ia32_pcmpgtb128_mask",
+ "llvm.x86.avx512.mask.pcmpgt.b.256" => "__builtin_ia32_pcmpgtb256_mask",
+ "llvm.x86.avx512.mask.pcmpgt.b.512" => "__builtin_ia32_pcmpgtb512_mask",
+ "llvm.x86.avx512.mask.pcmpgt.d.128" => "__builtin_ia32_pcmpgtd128_mask",
+ "llvm.x86.avx512.mask.pcmpgt.d.256" => "__builtin_ia32_pcmpgtd256_mask",
+ "llvm.x86.avx512.mask.pcmpgt.d.512" => "__builtin_ia32_pcmpgtd512_mask",
+ "llvm.x86.avx512.mask.pcmpgt.q.128" => "__builtin_ia32_pcmpgtq128_mask",
+ "llvm.x86.avx512.mask.pcmpgt.q.256" => "__builtin_ia32_pcmpgtq256_mask",
+ "llvm.x86.avx512.mask.pcmpgt.q.512" => "__builtin_ia32_pcmpgtq512_mask",
+ "llvm.x86.avx512.mask.pcmpgt.w.128" => "__builtin_ia32_pcmpgtw128_mask",
+ "llvm.x86.avx512.mask.pcmpgt.w.256" => "__builtin_ia32_pcmpgtw256_mask",
+ "llvm.x86.avx512.mask.pcmpgt.w.512" => "__builtin_ia32_pcmpgtw512_mask",
+ "llvm.x86.avx512.mask.permvar.df.256" => "__builtin_ia32_permvardf256_mask",
+ "llvm.x86.avx512.mask.permvar.df.512" => "__builtin_ia32_permvardf512_mask",
+ "llvm.x86.avx512.mask.permvar.di.256" => "__builtin_ia32_permvardi256_mask",
+ "llvm.x86.avx512.mask.permvar.di.512" => "__builtin_ia32_permvardi512_mask",
+ "llvm.x86.avx512.mask.permvar.hi.128" => "__builtin_ia32_permvarhi128_mask",
+ "llvm.x86.avx512.mask.permvar.hi.256" => "__builtin_ia32_permvarhi256_mask",
+ "llvm.x86.avx512.mask.permvar.hi.512" => "__builtin_ia32_permvarhi512_mask",
+ "llvm.x86.avx512.mask.permvar.qi.128" => "__builtin_ia32_permvarqi128_mask",
+ "llvm.x86.avx512.mask.permvar.qi.256" => "__builtin_ia32_permvarqi256_mask",
+ "llvm.x86.avx512.mask.permvar.qi.512" => "__builtin_ia32_permvarqi512_mask",
+ "llvm.x86.avx512.mask.permvar.sf.256" => "__builtin_ia32_permvarsf256_mask",
+ "llvm.x86.avx512.mask.permvar.sf.512" => "__builtin_ia32_permvarsf512_mask",
+ "llvm.x86.avx512.mask.permvar.si.256" => "__builtin_ia32_permvarsi256_mask",
+ "llvm.x86.avx512.mask.permvar.si.512" => "__builtin_ia32_permvarsi512_mask",
+ "llvm.x86.avx512.mask.pmaddubs.w.128" => "__builtin_ia32_pmaddubsw128_mask",
+ "llvm.x86.avx512.mask.pmaddubs.w.256" => "__builtin_ia32_pmaddubsw256_mask",
+ "llvm.x86.avx512.mask.pmaddubs.w.512" => "__builtin_ia32_pmaddubsw512_mask",
+ "llvm.x86.avx512.mask.pmaddw.d.128" => "__builtin_ia32_pmaddwd128_mask",
+ "llvm.x86.avx512.mask.pmaddw.d.256" => "__builtin_ia32_pmaddwd256_mask",
+ "llvm.x86.avx512.mask.pmaddw.d.512" => "__builtin_ia32_pmaddwd512_mask",
+ "llvm.x86.avx512.mask.pmaxs.b.128" => "__builtin_ia32_pmaxsb128_mask",
+ "llvm.x86.avx512.mask.pmaxs.b.256" => "__builtin_ia32_pmaxsb256_mask",
+ "llvm.x86.avx512.mask.pmaxs.b.512" => "__builtin_ia32_pmaxsb512_mask",
+ "llvm.x86.avx512.mask.pmaxs.d.128" => "__builtin_ia32_pmaxsd128_mask",
+ "llvm.x86.avx512.mask.pmaxs.d.256" => "__builtin_ia32_pmaxsd256_mask",
+ "llvm.x86.avx512.mask.pmaxs.d.512" => "__builtin_ia32_pmaxsd512_mask",
+ "llvm.x86.avx512.mask.pmaxs.q.128" => "__builtin_ia32_pmaxsq128_mask",
+ "llvm.x86.avx512.mask.pmaxs.q.256" => "__builtin_ia32_pmaxsq256_mask",
+ "llvm.x86.avx512.mask.pmaxs.q.512" => "__builtin_ia32_pmaxsq512_mask",
+ "llvm.x86.avx512.mask.pmaxs.w.128" => "__builtin_ia32_pmaxsw128_mask",
+ "llvm.x86.avx512.mask.pmaxs.w.256" => "__builtin_ia32_pmaxsw256_mask",
+ "llvm.x86.avx512.mask.pmaxs.w.512" => "__builtin_ia32_pmaxsw512_mask",
+ "llvm.x86.avx512.mask.pmaxu.b.128" => "__builtin_ia32_pmaxub128_mask",
+ "llvm.x86.avx512.mask.pmaxu.b.256" => "__builtin_ia32_pmaxub256_mask",
+ "llvm.x86.avx512.mask.pmaxu.b.512" => "__builtin_ia32_pmaxub512_mask",
+ "llvm.x86.avx512.mask.pmaxu.d.128" => "__builtin_ia32_pmaxud128_mask",
+ "llvm.x86.avx512.mask.pmaxu.d.256" => "__builtin_ia32_pmaxud256_mask",
+ "llvm.x86.avx512.mask.pmaxu.d.512" => "__builtin_ia32_pmaxud512_mask",
+ "llvm.x86.avx512.mask.pmaxu.q.128" => "__builtin_ia32_pmaxuq128_mask",
+ "llvm.x86.avx512.mask.pmaxu.q.256" => "__builtin_ia32_pmaxuq256_mask",
+ "llvm.x86.avx512.mask.pmaxu.q.512" => "__builtin_ia32_pmaxuq512_mask",
+ "llvm.x86.avx512.mask.pmaxu.w.128" => "__builtin_ia32_pmaxuw128_mask",
+ "llvm.x86.avx512.mask.pmaxu.w.256" => "__builtin_ia32_pmaxuw256_mask",
+ "llvm.x86.avx512.mask.pmaxu.w.512" => "__builtin_ia32_pmaxuw512_mask",
+ "llvm.x86.avx512.mask.pmins.b.128" => "__builtin_ia32_pminsb128_mask",
+ "llvm.x86.avx512.mask.pmins.b.256" => "__builtin_ia32_pminsb256_mask",
+ "llvm.x86.avx512.mask.pmins.b.512" => "__builtin_ia32_pminsb512_mask",
+ "llvm.x86.avx512.mask.pmins.d.128" => "__builtin_ia32_pminsd128_mask",
+ "llvm.x86.avx512.mask.pmins.d.256" => "__builtin_ia32_pminsd256_mask",
+ "llvm.x86.avx512.mask.pmins.d.512" => "__builtin_ia32_pminsd512_mask",
+ "llvm.x86.avx512.mask.pmins.q.128" => "__builtin_ia32_pminsq128_mask",
+ "llvm.x86.avx512.mask.pmins.q.256" => "__builtin_ia32_pminsq256_mask",
+ "llvm.x86.avx512.mask.pmins.q.512" => "__builtin_ia32_pminsq512_mask",
+ "llvm.x86.avx512.mask.pmins.w.128" => "__builtin_ia32_pminsw128_mask",
+ "llvm.x86.avx512.mask.pmins.w.256" => "__builtin_ia32_pminsw256_mask",
+ "llvm.x86.avx512.mask.pmins.w.512" => "__builtin_ia32_pminsw512_mask",
+ "llvm.x86.avx512.mask.pminu.b.128" => "__builtin_ia32_pminub128_mask",
+ "llvm.x86.avx512.mask.pminu.b.256" => "__builtin_ia32_pminub256_mask",
+ "llvm.x86.avx512.mask.pminu.b.512" => "__builtin_ia32_pminub512_mask",
+ "llvm.x86.avx512.mask.pminu.d.128" => "__builtin_ia32_pminud128_mask",
+ "llvm.x86.avx512.mask.pminu.d.256" => "__builtin_ia32_pminud256_mask",
+ "llvm.x86.avx512.mask.pminu.d.512" => "__builtin_ia32_pminud512_mask",
+ "llvm.x86.avx512.mask.pminu.q.128" => "__builtin_ia32_pminuq128_mask",
+ "llvm.x86.avx512.mask.pminu.q.256" => "__builtin_ia32_pminuq256_mask",
+ "llvm.x86.avx512.mask.pminu.q.512" => "__builtin_ia32_pminuq512_mask",
+ "llvm.x86.avx512.mask.pminu.w.128" => "__builtin_ia32_pminuw128_mask",
+ "llvm.x86.avx512.mask.pminu.w.256" => "__builtin_ia32_pminuw256_mask",
+ "llvm.x86.avx512.mask.pminu.w.512" => "__builtin_ia32_pminuw512_mask",
+ "llvm.x86.avx512.mask.pmov.db.128" => "__builtin_ia32_pmovdb128_mask",
+ "llvm.x86.avx512.mask.pmov.db.256" => "__builtin_ia32_pmovdb256_mask",
+ "llvm.x86.avx512.mask.pmov.db.512" => "__builtin_ia32_pmovdb512_mask",
+ "llvm.x86.avx512.mask.pmov.db.mem.128" => "__builtin_ia32_pmovdb128mem_mask",
+ "llvm.x86.avx512.mask.pmov.db.mem.256" => "__builtin_ia32_pmovdb256mem_mask",
+ "llvm.x86.avx512.mask.pmov.db.mem.512" => "__builtin_ia32_pmovdb512mem_mask",
+ "llvm.x86.avx512.mask.pmov.dw.128" => "__builtin_ia32_pmovdw128_mask",
+ "llvm.x86.avx512.mask.pmov.dw.256" => "__builtin_ia32_pmovdw256_mask",
+ "llvm.x86.avx512.mask.pmov.dw.512" => "__builtin_ia32_pmovdw512_mask",
+ "llvm.x86.avx512.mask.pmov.dw.mem.128" => "__builtin_ia32_pmovdw128mem_mask",
+ "llvm.x86.avx512.mask.pmov.dw.mem.256" => "__builtin_ia32_pmovdw256mem_mask",
+ "llvm.x86.avx512.mask.pmov.dw.mem.512" => "__builtin_ia32_pmovdw512mem_mask",
+ "llvm.x86.avx512.mask.pmov.qb.128" => "__builtin_ia32_pmovqb128_mask",
+ "llvm.x86.avx512.mask.pmov.qb.256" => "__builtin_ia32_pmovqb256_mask",
+ "llvm.x86.avx512.mask.pmov.qb.512" => "__builtin_ia32_pmovqb512_mask",
+ "llvm.x86.avx512.mask.pmov.qb.mem.128" => "__builtin_ia32_pmovqb128mem_mask",
+ "llvm.x86.avx512.mask.pmov.qb.mem.256" => "__builtin_ia32_pmovqb256mem_mask",
+ "llvm.x86.avx512.mask.pmov.qb.mem.512" => "__builtin_ia32_pmovqb512mem_mask",
+ "llvm.x86.avx512.mask.pmov.qd.128" => "__builtin_ia32_pmovqd128_mask",
+ "llvm.x86.avx512.mask.pmov.qd.256" => "__builtin_ia32_pmovqd256_mask",
+ "llvm.x86.avx512.mask.pmov.qd.512" => "__builtin_ia32_pmovqd512_mask",
+ "llvm.x86.avx512.mask.pmov.qd.mem.128" => "__builtin_ia32_pmovqd128mem_mask",
+ "llvm.x86.avx512.mask.pmov.qd.mem.256" => "__builtin_ia32_pmovqd256mem_mask",
+ "llvm.x86.avx512.mask.pmov.qd.mem.512" => "__builtin_ia32_pmovqd512mem_mask",
+ "llvm.x86.avx512.mask.pmov.qw.128" => "__builtin_ia32_pmovqw128_mask",
+ "llvm.x86.avx512.mask.pmov.qw.256" => "__builtin_ia32_pmovqw256_mask",
+ "llvm.x86.avx512.mask.pmov.qw.512" => "__builtin_ia32_pmovqw512_mask",
+ "llvm.x86.avx512.mask.pmov.qw.mem.128" => "__builtin_ia32_pmovqw128mem_mask",
+ "llvm.x86.avx512.mask.pmov.qw.mem.256" => "__builtin_ia32_pmovqw256mem_mask",
+ "llvm.x86.avx512.mask.pmov.qw.mem.512" => "__builtin_ia32_pmovqw512mem_mask",
+ "llvm.x86.avx512.mask.pmov.wb.128" => "__builtin_ia32_pmovwb128_mask",
+ "llvm.x86.avx512.mask.pmov.wb.256" => "__builtin_ia32_pmovwb256_mask",
+ "llvm.x86.avx512.mask.pmov.wb.512" => "__builtin_ia32_pmovwb512_mask",
+ "llvm.x86.avx512.mask.pmov.wb.mem.128" => "__builtin_ia32_pmovwb128mem_mask",
+ "llvm.x86.avx512.mask.pmov.wb.mem.256" => "__builtin_ia32_pmovwb256mem_mask",
+ "llvm.x86.avx512.mask.pmov.wb.mem.512" => "__builtin_ia32_pmovwb512mem_mask",
+ "llvm.x86.avx512.mask.pmovs.db.128" => "__builtin_ia32_pmovsdb128_mask",
+ "llvm.x86.avx512.mask.pmovs.db.256" => "__builtin_ia32_pmovsdb256_mask",
+ "llvm.x86.avx512.mask.pmovs.db.512" => "__builtin_ia32_pmovsdb512_mask",
+ "llvm.x86.avx512.mask.pmovs.db.mem.128" => "__builtin_ia32_pmovsdb128mem_mask",
+ "llvm.x86.avx512.mask.pmovs.db.mem.256" => "__builtin_ia32_pmovsdb256mem_mask",
+ "llvm.x86.avx512.mask.pmovs.db.mem.512" => "__builtin_ia32_pmovsdb512mem_mask",
+ "llvm.x86.avx512.mask.pmovs.dw.128" => "__builtin_ia32_pmovsdw128_mask",
+ "llvm.x86.avx512.mask.pmovs.dw.256" => "__builtin_ia32_pmovsdw256_mask",
+ "llvm.x86.avx512.mask.pmovs.dw.512" => "__builtin_ia32_pmovsdw512_mask",
+ "llvm.x86.avx512.mask.pmovs.dw.mem.128" => "__builtin_ia32_pmovsdw128mem_mask",
+ "llvm.x86.avx512.mask.pmovs.dw.mem.256" => "__builtin_ia32_pmovsdw256mem_mask",
+ "llvm.x86.avx512.mask.pmovs.dw.mem.512" => "__builtin_ia32_pmovsdw512mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qb.128" => "__builtin_ia32_pmovsqb128_mask",
+ "llvm.x86.avx512.mask.pmovs.qb.256" => "__builtin_ia32_pmovsqb256_mask",
+ "llvm.x86.avx512.mask.pmovs.qb.512" => "__builtin_ia32_pmovsqb512_mask",
+ "llvm.x86.avx512.mask.pmovs.qb.mem.128" => "__builtin_ia32_pmovsqb128mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qb.mem.256" => "__builtin_ia32_pmovsqb256mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qb.mem.512" => "__builtin_ia32_pmovsqb512mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qd.128" => "__builtin_ia32_pmovsqd128_mask",
+ "llvm.x86.avx512.mask.pmovs.qd.256" => "__builtin_ia32_pmovsqd256_mask",
+ "llvm.x86.avx512.mask.pmovs.qd.512" => "__builtin_ia32_pmovsqd512_mask",
+ "llvm.x86.avx512.mask.pmovs.qd.mem.128" => "__builtin_ia32_pmovsqd128mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qd.mem.256" => "__builtin_ia32_pmovsqd256mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qd.mem.512" => "__builtin_ia32_pmovsqd512mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qw.128" => "__builtin_ia32_pmovsqw128_mask",
+ "llvm.x86.avx512.mask.pmovs.qw.256" => "__builtin_ia32_pmovsqw256_mask",
+ "llvm.x86.avx512.mask.pmovs.qw.512" => "__builtin_ia32_pmovsqw512_mask",
+ "llvm.x86.avx512.mask.pmovs.qw.mem.128" => "__builtin_ia32_pmovsqw128mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qw.mem.256" => "__builtin_ia32_pmovsqw256mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qw.mem.512" => "__builtin_ia32_pmovsqw512mem_mask",
+ "llvm.x86.avx512.mask.pmovs.wb.128" => "__builtin_ia32_pmovswb128_mask",
+ "llvm.x86.avx512.mask.pmovs.wb.256" => "__builtin_ia32_pmovswb256_mask",
+ "llvm.x86.avx512.mask.pmovs.wb.512" => "__builtin_ia32_pmovswb512_mask",
+ "llvm.x86.avx512.mask.pmovs.wb.mem.128" => "__builtin_ia32_pmovswb128mem_mask",
+ "llvm.x86.avx512.mask.pmovs.wb.mem.256" => "__builtin_ia32_pmovswb256mem_mask",
+ "llvm.x86.avx512.mask.pmovs.wb.mem.512" => "__builtin_ia32_pmovswb512mem_mask",
+ "llvm.x86.avx512.mask.pmovsxb.d.128" => "__builtin_ia32_pmovsxbd128_mask",
+ "llvm.x86.avx512.mask.pmovsxb.d.256" => "__builtin_ia32_pmovsxbd256_mask",
+ "llvm.x86.avx512.mask.pmovsxb.d.512" => "__builtin_ia32_pmovsxbd512_mask",
+ "llvm.x86.avx512.mask.pmovsxb.q.128" => "__builtin_ia32_pmovsxbq128_mask",
+ "llvm.x86.avx512.mask.pmovsxb.q.256" => "__builtin_ia32_pmovsxbq256_mask",
+ "llvm.x86.avx512.mask.pmovsxb.q.512" => "__builtin_ia32_pmovsxbq512_mask",
+ "llvm.x86.avx512.mask.pmovsxb.w.128" => "__builtin_ia32_pmovsxbw128_mask",
+ "llvm.x86.avx512.mask.pmovsxb.w.256" => "__builtin_ia32_pmovsxbw256_mask",
+ "llvm.x86.avx512.mask.pmovsxb.w.512" => "__builtin_ia32_pmovsxbw512_mask",
+ "llvm.x86.avx512.mask.pmovsxd.q.128" => "__builtin_ia32_pmovsxdq128_mask",
+ "llvm.x86.avx512.mask.pmovsxd.q.256" => "__builtin_ia32_pmovsxdq256_mask",
+ "llvm.x86.avx512.mask.pmovsxd.q.512" => "__builtin_ia32_pmovsxdq512_mask",
+ "llvm.x86.avx512.mask.pmovsxw.d.128" => "__builtin_ia32_pmovsxwd128_mask",
+ "llvm.x86.avx512.mask.pmovsxw.d.256" => "__builtin_ia32_pmovsxwd256_mask",
+ "llvm.x86.avx512.mask.pmovsxw.d.512" => "__builtin_ia32_pmovsxwd512_mask",
+ "llvm.x86.avx512.mask.pmovsxw.q.128" => "__builtin_ia32_pmovsxwq128_mask",
+ "llvm.x86.avx512.mask.pmovsxw.q.256" => "__builtin_ia32_pmovsxwq256_mask",
+ "llvm.x86.avx512.mask.pmovsxw.q.512" => "__builtin_ia32_pmovsxwq512_mask",
+ "llvm.x86.avx512.mask.pmovus.db.128" => "__builtin_ia32_pmovusdb128_mask",
+ "llvm.x86.avx512.mask.pmovus.db.256" => "__builtin_ia32_pmovusdb256_mask",
+ "llvm.x86.avx512.mask.pmovus.db.512" => "__builtin_ia32_pmovusdb512_mask",
+ "llvm.x86.avx512.mask.pmovus.db.mem.128" => "__builtin_ia32_pmovusdb128mem_mask",
+ "llvm.x86.avx512.mask.pmovus.db.mem.256" => "__builtin_ia32_pmovusdb256mem_mask",
+ "llvm.x86.avx512.mask.pmovus.db.mem.512" => "__builtin_ia32_pmovusdb512mem_mask",
+ "llvm.x86.avx512.mask.pmovus.dw.128" => "__builtin_ia32_pmovusdw128_mask",
+ "llvm.x86.avx512.mask.pmovus.dw.256" => "__builtin_ia32_pmovusdw256_mask",
+ "llvm.x86.avx512.mask.pmovus.dw.512" => "__builtin_ia32_pmovusdw512_mask",
+ "llvm.x86.avx512.mask.pmovus.dw.mem.128" => "__builtin_ia32_pmovusdw128mem_mask",
+ "llvm.x86.avx512.mask.pmovus.dw.mem.256" => "__builtin_ia32_pmovusdw256mem_mask",
+ "llvm.x86.avx512.mask.pmovus.dw.mem.512" => "__builtin_ia32_pmovusdw512mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qb.128" => "__builtin_ia32_pmovusqb128_mask",
+ "llvm.x86.avx512.mask.pmovus.qb.256" => "__builtin_ia32_pmovusqb256_mask",
+ "llvm.x86.avx512.mask.pmovus.qb.512" => "__builtin_ia32_pmovusqb512_mask",
+ "llvm.x86.avx512.mask.pmovus.qb.mem.128" => "__builtin_ia32_pmovusqb128mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qb.mem.256" => "__builtin_ia32_pmovusqb256mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qb.mem.512" => "__builtin_ia32_pmovusqb512mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qd.128" => "__builtin_ia32_pmovusqd128_mask",
+ "llvm.x86.avx512.mask.pmovus.qd.256" => "__builtin_ia32_pmovusqd256_mask",
+ "llvm.x86.avx512.mask.pmovus.qd.512" => "__builtin_ia32_pmovusqd512_mask",
+ "llvm.x86.avx512.mask.pmovus.qd.mem.128" => "__builtin_ia32_pmovusqd128mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qd.mem.256" => "__builtin_ia32_pmovusqd256mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qd.mem.512" => "__builtin_ia32_pmovusqd512mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qw.128" => "__builtin_ia32_pmovusqw128_mask",
+ "llvm.x86.avx512.mask.pmovus.qw.256" => "__builtin_ia32_pmovusqw256_mask",
+ "llvm.x86.avx512.mask.pmovus.qw.512" => "__builtin_ia32_pmovusqw512_mask",
+ "llvm.x86.avx512.mask.pmovus.qw.mem.128" => "__builtin_ia32_pmovusqw128mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qw.mem.256" => "__builtin_ia32_pmovusqw256mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qw.mem.512" => "__builtin_ia32_pmovusqw512mem_mask",
+ "llvm.x86.avx512.mask.pmovus.wb.128" => "__builtin_ia32_pmovuswb128_mask",
+ "llvm.x86.avx512.mask.pmovus.wb.256" => "__builtin_ia32_pmovuswb256_mask",
+ "llvm.x86.avx512.mask.pmovus.wb.512" => "__builtin_ia32_pmovuswb512_mask",
+ "llvm.x86.avx512.mask.pmovus.wb.mem.128" => "__builtin_ia32_pmovuswb128mem_mask",
+ "llvm.x86.avx512.mask.pmovus.wb.mem.256" => "__builtin_ia32_pmovuswb256mem_mask",
+ "llvm.x86.avx512.mask.pmovus.wb.mem.512" => "__builtin_ia32_pmovuswb512mem_mask",
+ "llvm.x86.avx512.mask.pmovzxb.d.128" => "__builtin_ia32_pmovzxbd128_mask",
+ "llvm.x86.avx512.mask.pmovzxb.d.256" => "__builtin_ia32_pmovzxbd256_mask",
+ "llvm.x86.avx512.mask.pmovzxb.d.512" => "__builtin_ia32_pmovzxbd512_mask",
+ "llvm.x86.avx512.mask.pmovzxb.q.128" => "__builtin_ia32_pmovzxbq128_mask",
+ "llvm.x86.avx512.mask.pmovzxb.q.256" => "__builtin_ia32_pmovzxbq256_mask",
+ "llvm.x86.avx512.mask.pmovzxb.q.512" => "__builtin_ia32_pmovzxbq512_mask",
+ "llvm.x86.avx512.mask.pmovzxb.w.128" => "__builtin_ia32_pmovzxbw128_mask",
+ "llvm.x86.avx512.mask.pmovzxb.w.256" => "__builtin_ia32_pmovzxbw256_mask",
+ "llvm.x86.avx512.mask.pmovzxb.w.512" => "__builtin_ia32_pmovzxbw512_mask",
+ "llvm.x86.avx512.mask.pmovzxd.q.128" => "__builtin_ia32_pmovzxdq128_mask",
+ "llvm.x86.avx512.mask.pmovzxd.q.256" => "__builtin_ia32_pmovzxdq256_mask",
+ "llvm.x86.avx512.mask.pmovzxd.q.512" => "__builtin_ia32_pmovzxdq512_mask",
+ "llvm.x86.avx512.mask.pmovzxw.d.128" => "__builtin_ia32_pmovzxwd128_mask",
+ "llvm.x86.avx512.mask.pmovzxw.d.256" => "__builtin_ia32_pmovzxwd256_mask",
+ "llvm.x86.avx512.mask.pmovzxw.d.512" => "__builtin_ia32_pmovzxwd512_mask",
+ "llvm.x86.avx512.mask.pmovzxw.q.128" => "__builtin_ia32_pmovzxwq128_mask",
+ "llvm.x86.avx512.mask.pmovzxw.q.256" => "__builtin_ia32_pmovzxwq256_mask",
+ "llvm.x86.avx512.mask.pmovzxw.q.512" => "__builtin_ia32_pmovzxwq512_mask",
+ "llvm.x86.avx512.mask.pmul.dq.128" => "__builtin_ia32_pmuldq128_mask",
+ "llvm.x86.avx512.mask.pmul.dq.256" => "__builtin_ia32_pmuldq256_mask",
+ "llvm.x86.avx512.mask.pmul.dq.512" => "__builtin_ia32_pmuldq512_mask",
+ "llvm.x86.avx512.mask.pmul.hr.sw.128" => "__builtin_ia32_pmulhrsw128_mask",
+ "llvm.x86.avx512.mask.pmul.hr.sw.256" => "__builtin_ia32_pmulhrsw256_mask",
+ "llvm.x86.avx512.mask.pmul.hr.sw.512" => "__builtin_ia32_pmulhrsw512_mask",
+ "llvm.x86.avx512.mask.pmulh.w.128" => "__builtin_ia32_pmulhw128_mask",
+ "llvm.x86.avx512.mask.pmulh.w.256" => "__builtin_ia32_pmulhw256_mask",
+ "llvm.x86.avx512.mask.pmulh.w.512" => "__builtin_ia32_pmulhw512_mask",
+ "llvm.x86.avx512.mask.pmulhu.w.128" => "__builtin_ia32_pmulhuw128_mask",
+ "llvm.x86.avx512.mask.pmulhu.w.256" => "__builtin_ia32_pmulhuw256_mask",
+ "llvm.x86.avx512.mask.pmulhu.w.512" => "__builtin_ia32_pmulhuw512_mask",
+ "llvm.x86.avx512.mask.pmull.d.128" => "__builtin_ia32_pmulld128_mask",
+ "llvm.x86.avx512.mask.pmull.d.256" => "__builtin_ia32_pmulld256_mask",
+ "llvm.x86.avx512.mask.pmull.d.512" => "__builtin_ia32_pmulld512_mask",
+ "llvm.x86.avx512.mask.pmull.q.128" => "__builtin_ia32_pmullq128_mask",
+ "llvm.x86.avx512.mask.pmull.q.256" => "__builtin_ia32_pmullq256_mask",
+ "llvm.x86.avx512.mask.pmull.q.512" => "__builtin_ia32_pmullq512_mask",
+ "llvm.x86.avx512.mask.pmull.w.128" => "__builtin_ia32_pmullw128_mask",
+ "llvm.x86.avx512.mask.pmull.w.256" => "__builtin_ia32_pmullw256_mask",
+ "llvm.x86.avx512.mask.pmull.w.512" => "__builtin_ia32_pmullw512_mask",
+ "llvm.x86.avx512.mask.pmultishift.qb.128" => "__builtin_ia32_vpmultishiftqb128_mask",
+ "llvm.x86.avx512.mask.pmultishift.qb.256" => "__builtin_ia32_vpmultishiftqb256_mask",
+ "llvm.x86.avx512.mask.pmultishift.qb.512" => "__builtin_ia32_vpmultishiftqb512_mask",
+ "llvm.x86.avx512.mask.pmulu.dq.128" => "__builtin_ia32_pmuludq128_mask",
+ "llvm.x86.avx512.mask.pmulu.dq.256" => "__builtin_ia32_pmuludq256_mask",
+ "llvm.x86.avx512.mask.pmulu.dq.512" => "__builtin_ia32_pmuludq512_mask",
+ "llvm.x86.avx512.mask.prol.d.128" => "__builtin_ia32_prold128_mask",
+ "llvm.x86.avx512.mask.prol.d.256" => "__builtin_ia32_prold256_mask",
+ "llvm.x86.avx512.mask.prol.d.512" => "__builtin_ia32_prold512_mask",
+ "llvm.x86.avx512.mask.prol.q.128" => "__builtin_ia32_prolq128_mask",
+ "llvm.x86.avx512.mask.prol.q.256" => "__builtin_ia32_prolq256_mask",
+ "llvm.x86.avx512.mask.prol.q.512" => "__builtin_ia32_prolq512_mask",
+ "llvm.x86.avx512.mask.prolv.d.128" => "__builtin_ia32_prolvd128_mask",
+ "llvm.x86.avx512.mask.prolv.d.256" => "__builtin_ia32_prolvd256_mask",
+ "llvm.x86.avx512.mask.prolv.d.512" => "__builtin_ia32_prolvd512_mask",
+ "llvm.x86.avx512.mask.prolv.q.128" => "__builtin_ia32_prolvq128_mask",
+ "llvm.x86.avx512.mask.prolv.q.256" => "__builtin_ia32_prolvq256_mask",
+ "llvm.x86.avx512.mask.prolv.q.512" => "__builtin_ia32_prolvq512_mask",
+ "llvm.x86.avx512.mask.pror.d.128" => "__builtin_ia32_prord128_mask",
+ "llvm.x86.avx512.mask.pror.d.256" => "__builtin_ia32_prord256_mask",
+ "llvm.x86.avx512.mask.pror.d.512" => "__builtin_ia32_prord512_mask",
+ "llvm.x86.avx512.mask.pror.q.128" => "__builtin_ia32_prorq128_mask",
+ "llvm.x86.avx512.mask.pror.q.256" => "__builtin_ia32_prorq256_mask",
+ "llvm.x86.avx512.mask.pror.q.512" => "__builtin_ia32_prorq512_mask",
+ "llvm.x86.avx512.mask.prorv.d.128" => "__builtin_ia32_prorvd128_mask",
+ "llvm.x86.avx512.mask.prorv.d.256" => "__builtin_ia32_prorvd256_mask",
+ "llvm.x86.avx512.mask.prorv.d.512" => "__builtin_ia32_prorvd512_mask",
+ "llvm.x86.avx512.mask.prorv.q.128" => "__builtin_ia32_prorvq128_mask",
+ "llvm.x86.avx512.mask.prorv.q.256" => "__builtin_ia32_prorvq256_mask",
+ "llvm.x86.avx512.mask.prorv.q.512" => "__builtin_ia32_prorvq512_mask",
+ "llvm.x86.avx512.mask.pshuf.b.128" => "__builtin_ia32_pshufb128_mask",
+ "llvm.x86.avx512.mask.pshuf.b.256" => "__builtin_ia32_pshufb256_mask",
+ "llvm.x86.avx512.mask.pshuf.b.512" => "__builtin_ia32_pshufb512_mask",
+ "llvm.x86.avx512.mask.psll.d" => "__builtin_ia32_pslld512_mask",
+ "llvm.x86.avx512.mask.psll.d.128" => "__builtin_ia32_pslld128_mask",
+ "llvm.x86.avx512.mask.psll.d.256" => "__builtin_ia32_pslld256_mask",
+ "llvm.x86.avx512.mask.psll.di.128" => "__builtin_ia32_pslldi128_mask",
+ "llvm.x86.avx512.mask.psll.di.256" => "__builtin_ia32_pslldi256_mask",
+ "llvm.x86.avx512.mask.psll.di.512" => "__builtin_ia32_pslldi512_mask",
+ "llvm.x86.avx512.mask.psll.q" => "__builtin_ia32_psllq512_mask",
+ "llvm.x86.avx512.mask.psll.q.128" => "__builtin_ia32_psllq128_mask",
+ "llvm.x86.avx512.mask.psll.q.256" => "__builtin_ia32_psllq256_mask",
+ "llvm.x86.avx512.mask.psll.qi.128" => "__builtin_ia32_psllqi128_mask",
+ "llvm.x86.avx512.mask.psll.qi.256" => "__builtin_ia32_psllqi256_mask",
+ "llvm.x86.avx512.mask.psll.qi.512" => "__builtin_ia32_psllqi512_mask",
+ "llvm.x86.avx512.mask.psll.w.128" => "__builtin_ia32_psllw128_mask",
+ "llvm.x86.avx512.mask.psll.w.256" => "__builtin_ia32_psllw256_mask",
+ "llvm.x86.avx512.mask.psll.w.512" => "__builtin_ia32_psllw512_mask",
+ "llvm.x86.avx512.mask.psll.wi.128" => "__builtin_ia32_psllwi128_mask",
+ "llvm.x86.avx512.mask.psll.wi.256" => "__builtin_ia32_psllwi256_mask",
+ "llvm.x86.avx512.mask.psll.wi.512" => "__builtin_ia32_psllwi512_mask",
+ "llvm.x86.avx512.mask.psllv.d" => "__builtin_ia32_psllv16si_mask",
+ "llvm.x86.avx512.mask.psllv.q" => "__builtin_ia32_psllv8di_mask",
+ "llvm.x86.avx512.mask.psllv16.hi" => "__builtin_ia32_psllv16hi_mask",
+ "llvm.x86.avx512.mask.psllv2.di" => "__builtin_ia32_psllv2di_mask",
+ "llvm.x86.avx512.mask.psllv32hi" => "__builtin_ia32_psllv32hi_mask",
+ "llvm.x86.avx512.mask.psllv4.di" => "__builtin_ia32_psllv4di_mask",
+ "llvm.x86.avx512.mask.psllv4.si" => "__builtin_ia32_psllv4si_mask",
+ "llvm.x86.avx512.mask.psllv8.hi" => "__builtin_ia32_psllv8hi_mask",
+ "llvm.x86.avx512.mask.psllv8.si" => "__builtin_ia32_psllv8si_mask",
+ "llvm.x86.avx512.mask.psra.d" => "__builtin_ia32_psrad512_mask",
+ "llvm.x86.avx512.mask.psra.d.128" => "__builtin_ia32_psrad128_mask",
+ "llvm.x86.avx512.mask.psra.d.256" => "__builtin_ia32_psrad256_mask",
+ "llvm.x86.avx512.mask.psra.di.128" => "__builtin_ia32_psradi128_mask",
+ "llvm.x86.avx512.mask.psra.di.256" => "__builtin_ia32_psradi256_mask",
+ "llvm.x86.avx512.mask.psra.di.512" => "__builtin_ia32_psradi512_mask",
+ "llvm.x86.avx512.mask.psra.q" => "__builtin_ia32_psraq512_mask",
+ "llvm.x86.avx512.mask.psra.q.128" => "__builtin_ia32_psraq128_mask",
+ "llvm.x86.avx512.mask.psra.q.256" => "__builtin_ia32_psraq256_mask",
+ "llvm.x86.avx512.mask.psra.qi.128" => "__builtin_ia32_psraqi128_mask",
+ "llvm.x86.avx512.mask.psra.qi.256" => "__builtin_ia32_psraqi256_mask",
+ "llvm.x86.avx512.mask.psra.qi.512" => "__builtin_ia32_psraqi512_mask",
+ "llvm.x86.avx512.mask.psra.w.128" => "__builtin_ia32_psraw128_mask",
+ "llvm.x86.avx512.mask.psra.w.256" => "__builtin_ia32_psraw256_mask",
+ "llvm.x86.avx512.mask.psra.w.512" => "__builtin_ia32_psraw512_mask",
+ "llvm.x86.avx512.mask.psra.wi.128" => "__builtin_ia32_psrawi128_mask",
+ "llvm.x86.avx512.mask.psra.wi.256" => "__builtin_ia32_psrawi256_mask",
+ "llvm.x86.avx512.mask.psra.wi.512" => "__builtin_ia32_psrawi512_mask",
+ "llvm.x86.avx512.mask.psrav.d" => "__builtin_ia32_psrav16si_mask",
+ "llvm.x86.avx512.mask.psrav.q" => "__builtin_ia32_psrav8di_mask",
+ "llvm.x86.avx512.mask.psrav.q.128" => "__builtin_ia32_psravq128_mask",
+ "llvm.x86.avx512.mask.psrav.q.256" => "__builtin_ia32_psravq256_mask",
+ "llvm.x86.avx512.mask.psrav16.hi" => "__builtin_ia32_psrav16hi_mask",
+ "llvm.x86.avx512.mask.psrav32.hi" => "__builtin_ia32_psrav32hi_mask",
+ "llvm.x86.avx512.mask.psrav4.si" => "__builtin_ia32_psrav4si_mask",
+ "llvm.x86.avx512.mask.psrav8.hi" => "__builtin_ia32_psrav8hi_mask",
+ "llvm.x86.avx512.mask.psrav8.si" => "__builtin_ia32_psrav8si_mask",
+ "llvm.x86.avx512.mask.psrl.d" => "__builtin_ia32_psrld512_mask",
+ "llvm.x86.avx512.mask.psrl.d.128" => "__builtin_ia32_psrld128_mask",
+ "llvm.x86.avx512.mask.psrl.d.256" => "__builtin_ia32_psrld256_mask",
+ "llvm.x86.avx512.mask.psrl.di.128" => "__builtin_ia32_psrldi128_mask",
+ "llvm.x86.avx512.mask.psrl.di.256" => "__builtin_ia32_psrldi256_mask",
+ "llvm.x86.avx512.mask.psrl.di.512" => "__builtin_ia32_psrldi512_mask",
+ "llvm.x86.avx512.mask.psrl.q" => "__builtin_ia32_psrlq512_mask",
+ "llvm.x86.avx512.mask.psrl.q.128" => "__builtin_ia32_psrlq128_mask",
+ "llvm.x86.avx512.mask.psrl.q.256" => "__builtin_ia32_psrlq256_mask",
+ "llvm.x86.avx512.mask.psrl.qi.128" => "__builtin_ia32_psrlqi128_mask",
+ "llvm.x86.avx512.mask.psrl.qi.256" => "__builtin_ia32_psrlqi256_mask",
+ "llvm.x86.avx512.mask.psrl.qi.512" => "__builtin_ia32_psrlqi512_mask",
+ "llvm.x86.avx512.mask.psrl.w.128" => "__builtin_ia32_psrlw128_mask",
+ "llvm.x86.avx512.mask.psrl.w.256" => "__builtin_ia32_psrlw256_mask",
+ "llvm.x86.avx512.mask.psrl.w.512" => "__builtin_ia32_psrlw512_mask",
+ "llvm.x86.avx512.mask.psrl.wi.128" => "__builtin_ia32_psrlwi128_mask",
+ "llvm.x86.avx512.mask.psrl.wi.256" => "__builtin_ia32_psrlwi256_mask",
+ "llvm.x86.avx512.mask.psrl.wi.512" => "__builtin_ia32_psrlwi512_mask",
+ "llvm.x86.avx512.mask.psrlv.d" => "__builtin_ia32_psrlv16si_mask",
+ "llvm.x86.avx512.mask.psrlv.q" => "__builtin_ia32_psrlv8di_mask",
+ "llvm.x86.avx512.mask.psrlv16.hi" => "__builtin_ia32_psrlv16hi_mask",
+ "llvm.x86.avx512.mask.psrlv2.di" => "__builtin_ia32_psrlv2di_mask",
+ "llvm.x86.avx512.mask.psrlv32hi" => "__builtin_ia32_psrlv32hi_mask",
+ "llvm.x86.avx512.mask.psrlv4.di" => "__builtin_ia32_psrlv4di_mask",
+ "llvm.x86.avx512.mask.psrlv4.si" => "__builtin_ia32_psrlv4si_mask",
+ "llvm.x86.avx512.mask.psrlv8.hi" => "__builtin_ia32_psrlv8hi_mask",
+ "llvm.x86.avx512.mask.psrlv8.si" => "__builtin_ia32_psrlv8si_mask",
+ "llvm.x86.avx512.mask.psub.b.128" => "__builtin_ia32_psubb128_mask",
+ "llvm.x86.avx512.mask.psub.b.256" => "__builtin_ia32_psubb256_mask",
+ "llvm.x86.avx512.mask.psub.b.512" => "__builtin_ia32_psubb512_mask",
+ "llvm.x86.avx512.mask.psub.d.128" => "__builtin_ia32_psubd128_mask",
+ "llvm.x86.avx512.mask.psub.d.256" => "__builtin_ia32_psubd256_mask",
+ "llvm.x86.avx512.mask.psub.d.512" => "__builtin_ia32_psubd512_mask",
+ "llvm.x86.avx512.mask.psub.q.128" => "__builtin_ia32_psubq128_mask",
+ "llvm.x86.avx512.mask.psub.q.256" => "__builtin_ia32_psubq256_mask",
+ "llvm.x86.avx512.mask.psub.q.512" => "__builtin_ia32_psubq512_mask",
+ "llvm.x86.avx512.mask.psub.w.128" => "__builtin_ia32_psubw128_mask",
+ "llvm.x86.avx512.mask.psub.w.256" => "__builtin_ia32_psubw256_mask",
+ "llvm.x86.avx512.mask.psub.w.512" => "__builtin_ia32_psubw512_mask",
+ "llvm.x86.avx512.mask.psubs.b.128" => "__builtin_ia32_psubsb128_mask",
+ "llvm.x86.avx512.mask.psubs.b.256" => "__builtin_ia32_psubsb256_mask",
+ "llvm.x86.avx512.mask.psubs.b.512" => "__builtin_ia32_psubsb512_mask",
+ "llvm.x86.avx512.mask.psubs.w.128" => "__builtin_ia32_psubsw128_mask",
+ "llvm.x86.avx512.mask.psubs.w.256" => "__builtin_ia32_psubsw256_mask",
+ "llvm.x86.avx512.mask.psubs.w.512" => "__builtin_ia32_psubsw512_mask",
+ "llvm.x86.avx512.mask.psubus.b.128" => "__builtin_ia32_psubusb128_mask",
+ "llvm.x86.avx512.mask.psubus.b.256" => "__builtin_ia32_psubusb256_mask",
+ "llvm.x86.avx512.mask.psubus.b.512" => "__builtin_ia32_psubusb512_mask",
+ "llvm.x86.avx512.mask.psubus.w.128" => "__builtin_ia32_psubusw128_mask",
+ "llvm.x86.avx512.mask.psubus.w.256" => "__builtin_ia32_psubusw256_mask",
+ "llvm.x86.avx512.mask.psubus.w.512" => "__builtin_ia32_psubusw512_mask",
+ "llvm.x86.avx512.mask.pternlog.d.128" => "__builtin_ia32_pternlogd128_mask",
+ "llvm.x86.avx512.mask.pternlog.d.256" => "__builtin_ia32_pternlogd256_mask",
+ "llvm.x86.avx512.mask.pternlog.d.512" => "__builtin_ia32_pternlogd512_mask",
+ "llvm.x86.avx512.mask.pternlog.q.128" => "__builtin_ia32_pternlogq128_mask",
+ "llvm.x86.avx512.mask.pternlog.q.256" => "__builtin_ia32_pternlogq256_mask",
+ "llvm.x86.avx512.mask.pternlog.q.512" => "__builtin_ia32_pternlogq512_mask",
+ "llvm.x86.avx512.mask.ptestm.d.512" => "__builtin_ia32_ptestmd512",
+ "llvm.x86.avx512.mask.ptestm.q.512" => "__builtin_ia32_ptestmq512",
+ "llvm.x86.avx512.mask.range.pd.128" => "__builtin_ia32_rangepd128_mask",
+ "llvm.x86.avx512.mask.range.pd.256" => "__builtin_ia32_rangepd256_mask",
+ "llvm.x86.avx512.mask.range.pd.512" => "__builtin_ia32_rangepd512_mask",
+ "llvm.x86.avx512.mask.range.ps.128" => "__builtin_ia32_rangeps128_mask",
+ "llvm.x86.avx512.mask.range.ps.256" => "__builtin_ia32_rangeps256_mask",
+ "llvm.x86.avx512.mask.range.ps.512" => "__builtin_ia32_rangeps512_mask",
+ "llvm.x86.avx512.mask.range.sd" => "__builtin_ia32_rangesd128_round_mask",
+ "llvm.x86.avx512.mask.range.ss" => "__builtin_ia32_rangess128_round_mask",
+ "llvm.x86.avx512.mask.reduce.pd.128" => "__builtin_ia32_reducepd128_mask",
+ "llvm.x86.avx512.mask.reduce.pd.256" => "__builtin_ia32_reducepd256_mask",
+ "llvm.x86.avx512.mask.reduce.pd.512" => "__builtin_ia32_reducepd512_mask",
+ "llvm.x86.avx512.mask.reduce.ps.128" => "__builtin_ia32_reduceps128_mask",
+ "llvm.x86.avx512.mask.reduce.ps.256" => "__builtin_ia32_reduceps256_mask",
+ "llvm.x86.avx512.mask.reduce.ps.512" => "__builtin_ia32_reduceps512_mask",
+ "llvm.x86.avx512.mask.reduce.sd" => "__builtin_ia32_reducesd_mask",
+ "llvm.x86.avx512.mask.reduce.ss" => "__builtin_ia32_reducess_mask",
+ "llvm.x86.avx512.mask.rndscale.pd.128" => "__builtin_ia32_rndscalepd_128_mask",
+ "llvm.x86.avx512.mask.rndscale.pd.256" => "__builtin_ia32_rndscalepd_256_mask",
+ "llvm.x86.avx512.mask.rndscale.pd.512" => "__builtin_ia32_rndscalepd_mask",
+ "llvm.x86.avx512.mask.rndscale.ps.128" => "__builtin_ia32_rndscaleps_128_mask",
+ "llvm.x86.avx512.mask.rndscale.ps.256" => "__builtin_ia32_rndscaleps_256_mask",
+ "llvm.x86.avx512.mask.rndscale.ps.512" => "__builtin_ia32_rndscaleps_mask",
+ "llvm.x86.avx512.mask.rndscale.sd" => "__builtin_ia32_rndscalesd_round_mask",
+ "llvm.x86.avx512.mask.rndscale.ss" => "__builtin_ia32_rndscaless_round_mask",
+ "llvm.x86.avx512.mask.scalef.pd.128" => "__builtin_ia32_scalefpd128_mask",
+ "llvm.x86.avx512.mask.scalef.pd.256" => "__builtin_ia32_scalefpd256_mask",
+ "llvm.x86.avx512.mask.scalef.pd.512" => "__builtin_ia32_scalefpd512_mask",
+ "llvm.x86.avx512.mask.scalef.ps.128" => "__builtin_ia32_scalefps128_mask",
+ "llvm.x86.avx512.mask.scalef.ps.256" => "__builtin_ia32_scalefps256_mask",
+ "llvm.x86.avx512.mask.scalef.ps.512" => "__builtin_ia32_scalefps512_mask",
+ "llvm.x86.avx512.mask.scalef.sd" => "__builtin_ia32_scalefsd_round_mask",
+ "llvm.x86.avx512.mask.scalef.ss" => "__builtin_ia32_scalefss_round_mask",
+ "llvm.x86.avx512.mask.shuf.f32x4" => "__builtin_ia32_shuf_f32x4_mask",
+ "llvm.x86.avx512.mask.shuf.f32x4.256" => "__builtin_ia32_shuf_f32x4_256_mask",
+ "llvm.x86.avx512.mask.shuf.f64x2" => "__builtin_ia32_shuf_f64x2_mask",
+ "llvm.x86.avx512.mask.shuf.f64x2.256" => "__builtin_ia32_shuf_f64x2_256_mask",
+ "llvm.x86.avx512.mask.shuf.i32x4" => "__builtin_ia32_shuf_i32x4_mask",
+ "llvm.x86.avx512.mask.shuf.i32x4.256" => "__builtin_ia32_shuf_i32x4_256_mask",
+ "llvm.x86.avx512.mask.shuf.i64x2" => "__builtin_ia32_shuf_i64x2_mask",
+ "llvm.x86.avx512.mask.shuf.i64x2.256" => "__builtin_ia32_shuf_i64x2_256_mask",
+ "llvm.x86.avx512.mask.shuf.pd.128" => "__builtin_ia32_shufpd128_mask",
+ "llvm.x86.avx512.mask.shuf.pd.256" => "__builtin_ia32_shufpd256_mask",
+ "llvm.x86.avx512.mask.shuf.pd.512" => "__builtin_ia32_shufpd512_mask",
+ "llvm.x86.avx512.mask.shuf.ps.128" => "__builtin_ia32_shufps128_mask",
+ "llvm.x86.avx512.mask.shuf.ps.256" => "__builtin_ia32_shufps256_mask",
+ "llvm.x86.avx512.mask.shuf.ps.512" => "__builtin_ia32_shufps512_mask",
+ "llvm.x86.avx512.mask.sqrt.pd.128" => "__builtin_ia32_sqrtpd128_mask",
+ "llvm.x86.avx512.mask.sqrt.pd.256" => "__builtin_ia32_sqrtpd256_mask",
+ "llvm.x86.avx512.mask.sqrt.pd.512" => "__builtin_ia32_sqrtpd512_mask",
+ "llvm.x86.avx512.mask.sqrt.ps.128" => "__builtin_ia32_sqrtps128_mask",
+ "llvm.x86.avx512.mask.sqrt.ps.256" => "__builtin_ia32_sqrtps256_mask",
+ "llvm.x86.avx512.mask.sqrt.ps.512" => "__builtin_ia32_sqrtps512_mask",
+ "llvm.x86.avx512.mask.sqrt.sd" => "__builtin_ia32_sqrtsd_round_mask",
+ "llvm.x86.avx512.mask.sqrt.ss" => "__builtin_ia32_sqrtss_round_mask",
+ "llvm.x86.avx512.mask.store.ss" => "__builtin_ia32_storess_mask",
+ "llvm.x86.avx512.mask.storeu.d.512" => "__builtin_ia32_storedqusi512_mask",
+ "llvm.x86.avx512.mask.storeu.pd.512" => "__builtin_ia32_storeupd512_mask",
+ "llvm.x86.avx512.mask.storeu.ps.512" => "__builtin_ia32_storeups512_mask",
+ "llvm.x86.avx512.mask.storeu.q.512" => "__builtin_ia32_storedqudi512_mask",
+ "llvm.x86.avx512.mask.sub.pd.128" => "__builtin_ia32_subpd128_mask",
+ "llvm.x86.avx512.mask.sub.pd.256" => "__builtin_ia32_subpd256_mask",
+ "llvm.x86.avx512.mask.sub.pd.512" => "__builtin_ia32_subpd512_mask",
+ "llvm.x86.avx512.mask.sub.ps.128" => "__builtin_ia32_subps128_mask",
+ "llvm.x86.avx512.mask.sub.ps.256" => "__builtin_ia32_subps256_mask",
+ "llvm.x86.avx512.mask.sub.ps.512" => "__builtin_ia32_subps512_mask",
+ "llvm.x86.avx512.mask.sub.sd.round" => "__builtin_ia32_subsd_round_mask",
+ "llvm.x86.avx512.mask.sub.ss.round" => "__builtin_ia32_subss_round_mask",
+ "llvm.x86.avx512.mask.valign.d.128" => "__builtin_ia32_alignd128_mask",
+ "llvm.x86.avx512.mask.valign.d.256" => "__builtin_ia32_alignd256_mask",
+ "llvm.x86.avx512.mask.valign.d.512" => "__builtin_ia32_alignd512_mask",
+ "llvm.x86.avx512.mask.valign.q.128" => "__builtin_ia32_alignq128_mask",
+ "llvm.x86.avx512.mask.valign.q.256" => "__builtin_ia32_alignq256_mask",
+ "llvm.x86.avx512.mask.valign.q.512" => "__builtin_ia32_alignq512_mask",
+ "llvm.x86.avx512.mask.vcvtph2ps.128" => "__builtin_ia32_vcvtph2ps_mask",
+ "llvm.x86.avx512.mask.vcvtph2ps.256" => "__builtin_ia32_vcvtph2ps256_mask",
+ "llvm.x86.avx512.mask.vcvtph2ps.512" => "__builtin_ia32_vcvtph2ps512_mask",
+ "llvm.x86.avx512.mask.vcvtps2ph.128" => "__builtin_ia32_vcvtps2ph_mask",
+ "llvm.x86.avx512.mask.vcvtps2ph.256" => "__builtin_ia32_vcvtps2ph256_mask",
+ "llvm.x86.avx512.mask.vcvtps2ph.512" => "__builtin_ia32_vcvtps2ph512_mask",
+ "llvm.x86.avx512.mask.vextractf32x4.256" => "__builtin_ia32_extractf32x4_256_mask",
+ "llvm.x86.avx512.mask.vextractf32x4.512" => "__builtin_ia32_extractf32x4_mask",
+ "llvm.x86.avx512.mask.vextractf32x8.512" => "__builtin_ia32_extractf32x8_mask",
+ "llvm.x86.avx512.mask.vextractf64x2.256" => "__builtin_ia32_extractf64x2_256_mask",
+ "llvm.x86.avx512.mask.vextractf64x2.512" => "__builtin_ia32_extractf64x2_512_mask",
+ "llvm.x86.avx512.mask.vextractf64x4.512" => "__builtin_ia32_extractf64x4_mask",
+ "llvm.x86.avx512.mask.vextracti32x4.256" => "__builtin_ia32_extracti32x4_256_mask",
+ "llvm.x86.avx512.mask.vextracti32x4.512" => "__builtin_ia32_extracti32x4_mask",
+ "llvm.x86.avx512.mask.vextracti32x8.512" => "__builtin_ia32_extracti32x8_mask",
+ "llvm.x86.avx512.mask.vextracti64x2.256" => "__builtin_ia32_extracti64x2_256_mask",
+ "llvm.x86.avx512.mask.vextracti64x2.512" => "__builtin_ia32_extracti64x2_512_mask",
+ "llvm.x86.avx512.mask.vextracti64x4.512" => "__builtin_ia32_extracti64x4_mask",
+ "llvm.x86.avx512.mask.vfmadd.pd.128" => "__builtin_ia32_vfmaddpd128_mask",
+ "llvm.x86.avx512.mask.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256_mask",
+ "llvm.x86.avx512.mask.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask",
+ "llvm.x86.avx512.mask.vfmadd.ps.128" => "__builtin_ia32_vfmaddps128_mask",
+ "llvm.x86.avx512.mask.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256_mask",
+ "llvm.x86.avx512.mask.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask",
+ "llvm.x86.avx512.mask.vfmadd.sd" => "__builtin_ia32_vfmaddsd3_mask",
+ "llvm.x86.avx512.mask.vfmadd.ss" => "__builtin_ia32_vfmaddss3_mask",
+ "llvm.x86.avx512.mask.vfmaddsub.pd.128" => "__builtin_ia32_vfmaddsubpd128_mask",
+ "llvm.x86.avx512.mask.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256_mask",
+ "llvm.x86.avx512.mask.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask",
+ "llvm.x86.avx512.mask.vfmaddsub.ps.128" => "__builtin_ia32_vfmaddsubps128_mask",
+ "llvm.x86.avx512.mask.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256_mask",
+ "llvm.x86.avx512.mask.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask",
+ "llvm.x86.avx512.mask.vfnmadd.pd.128" => "__builtin_ia32_vfnmaddpd128_mask",
+ "llvm.x86.avx512.mask.vfnmadd.pd.256" => "__builtin_ia32_vfnmaddpd256_mask",
+ "llvm.x86.avx512.mask.vfnmadd.pd.512" => "__builtin_ia32_vfnmaddpd512_mask",
+ "llvm.x86.avx512.mask.vfnmadd.ps.128" => "__builtin_ia32_vfnmaddps128_mask",
+ "llvm.x86.avx512.mask.vfnmadd.ps.256" => "__builtin_ia32_vfnmaddps256_mask",
+ "llvm.x86.avx512.mask.vfnmadd.ps.512" => "__builtin_ia32_vfnmaddps512_mask",
+ "llvm.x86.avx512.mask.vfnmsub.pd.128" => "__builtin_ia32_vfnmsubpd128_mask",
+ "llvm.x86.avx512.mask.vfnmsub.pd.256" => "__builtin_ia32_vfnmsubpd256_mask",
+ "llvm.x86.avx512.mask.vfnmsub.pd.512" => "__builtin_ia32_vfnmsubpd512_mask",
+ "llvm.x86.avx512.mask.vfnmsub.ps.128" => "__builtin_ia32_vfnmsubps128_mask",
+ "llvm.x86.avx512.mask.vfnmsub.ps.256" => "__builtin_ia32_vfnmsubps256_mask",
+ "llvm.x86.avx512.mask.vfnmsub.ps.512" => "__builtin_ia32_vfnmsubps512_mask",
+ "llvm.x86.avx512.mask.vpermi2var.d.128" => "__builtin_ia32_vpermi2vard128_mask",
+ "llvm.x86.avx512.mask.vpermi2var.d.256" => "__builtin_ia32_vpermi2vard256_mask",
+ "llvm.x86.avx512.mask.vpermi2var.d.512" => "__builtin_ia32_vpermi2vard512_mask",
+ "llvm.x86.avx512.mask.vpermi2var.hi.128" => "__builtin_ia32_vpermi2varhi128_mask",
+ "llvm.x86.avx512.mask.vpermi2var.hi.256" => "__builtin_ia32_vpermi2varhi256_mask",
+ "llvm.x86.avx512.mask.vpermi2var.hi.512" => "__builtin_ia32_vpermi2varhi512_mask",
+ "llvm.x86.avx512.mask.vpermi2var.pd.128" => "__builtin_ia32_vpermi2varpd128_mask",
+ "llvm.x86.avx512.mask.vpermi2var.pd.256" => "__builtin_ia32_vpermi2varpd256_mask",
+ "llvm.x86.avx512.mask.vpermi2var.pd.512" => "__builtin_ia32_vpermi2varpd512_mask",
+ "llvm.x86.avx512.mask.vpermi2var.ps.128" => "__builtin_ia32_vpermi2varps128_mask",
+ "llvm.x86.avx512.mask.vpermi2var.ps.256" => "__builtin_ia32_vpermi2varps256_mask",
+ "llvm.x86.avx512.mask.vpermi2var.ps.512" => "__builtin_ia32_vpermi2varps512_mask",
+ "llvm.x86.avx512.mask.vpermi2var.q.128" => "__builtin_ia32_vpermi2varq128_mask",
+ "llvm.x86.avx512.mask.vpermi2var.q.256" => "__builtin_ia32_vpermi2varq256_mask",
+ "llvm.x86.avx512.mask.vpermi2var.q.512" => "__builtin_ia32_vpermi2varq512_mask",
+ "llvm.x86.avx512.mask.vpermi2var.qi.128" => "__builtin_ia32_vpermi2varqi128_mask",
+ "llvm.x86.avx512.mask.vpermi2var.qi.256" => "__builtin_ia32_vpermi2varqi256_mask",
+ "llvm.x86.avx512.mask.vpermi2var.qi.512" => "__builtin_ia32_vpermi2varqi512_mask",
+ "llvm.x86.avx512.mask.vpermilvar.pd.128" => "__builtin_ia32_vpermilvarpd_mask",
+ "llvm.x86.avx512.mask.vpermilvar.pd.256" => "__builtin_ia32_vpermilvarpd256_mask",
+ "llvm.x86.avx512.mask.vpermilvar.pd.512" => "__builtin_ia32_vpermilvarpd512_mask",
+ "llvm.x86.avx512.mask.vpermilvar.ps.128" => "__builtin_ia32_vpermilvarps_mask",
+ "llvm.x86.avx512.mask.vpermilvar.ps.256" => "__builtin_ia32_vpermilvarps256_mask",
+ "llvm.x86.avx512.mask.vpermilvar.ps.512" => "__builtin_ia32_vpermilvarps512_mask",
+ "llvm.x86.avx512.mask.vpermt.d.512" => "__builtin_ia32_vpermt2vard512_mask",
+ "llvm.x86.avx512.mask.vpermt.pd.512" => "__builtin_ia32_vpermt2varpd512_mask",
+ "llvm.x86.avx512.mask.vpermt.ps.512" => "__builtin_ia32_vpermt2varps512_mask",
+ "llvm.x86.avx512.mask.vpermt.q.512" => "__builtin_ia32_vpermt2varq512_mask",
+ "llvm.x86.avx512.mask.vpermt2var.d.128" => "__builtin_ia32_vpermt2vard128_mask",
+ "llvm.x86.avx512.mask.vpermt2var.d.256" => "__builtin_ia32_vpermt2vard256_mask",
+ "llvm.x86.avx512.mask.vpermt2var.d.512" => "__builtin_ia32_vpermt2vard512_mask",
+ "llvm.x86.avx512.mask.vpermt2var.hi.128" => "__builtin_ia32_vpermt2varhi128_mask",
+ "llvm.x86.avx512.mask.vpermt2var.hi.256" => "__builtin_ia32_vpermt2varhi256_mask",
+ "llvm.x86.avx512.mask.vpermt2var.hi.512" => "__builtin_ia32_vpermt2varhi512_mask",
+ "llvm.x86.avx512.mask.vpermt2var.pd.128" => "__builtin_ia32_vpermt2varpd128_mask",
+ "llvm.x86.avx512.mask.vpermt2var.pd.256" => "__builtin_ia32_vpermt2varpd256_mask",
+ "llvm.x86.avx512.mask.vpermt2var.pd.512" => "__builtin_ia32_vpermt2varpd512_mask",
+ "llvm.x86.avx512.mask.vpermt2var.ps.128" => "__builtin_ia32_vpermt2varps128_mask",
+ "llvm.x86.avx512.mask.vpermt2var.ps.256" => "__builtin_ia32_vpermt2varps256_mask",
+ "llvm.x86.avx512.mask.vpermt2var.ps.512" => "__builtin_ia32_vpermt2varps512_mask",
+ "llvm.x86.avx512.mask.vpermt2var.q.128" => "__builtin_ia32_vpermt2varq128_mask",
+ "llvm.x86.avx512.mask.vpermt2var.q.256" => "__builtin_ia32_vpermt2varq256_mask",
+ "llvm.x86.avx512.mask.vpermt2var.q.512" => "__builtin_ia32_vpermt2varq512_mask",
+ "llvm.x86.avx512.mask.vpermt2var.qi.128" => "__builtin_ia32_vpermt2varqi128_mask",
+ "llvm.x86.avx512.mask.vpermt2var.qi.256" => "__builtin_ia32_vpermt2varqi256_mask",
+ "llvm.x86.avx512.mask.vpermt2var.qi.512" => "__builtin_ia32_vpermt2varqi512_mask",
+ "llvm.x86.avx512.mask.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128_mask",
+ "llvm.x86.avx512.mask.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256_mask",
+ "llvm.x86.avx512.mask.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512_mask",
+ "llvm.x86.avx512.mask.vpmadd52l.uq.128" => "__builtin_ia32_vpmadd52luq128_mask",
+ "llvm.x86.avx512.mask.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256_mask",
+ "llvm.x86.avx512.mask.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512_mask",
+ "llvm.x86.avx512.mask.xor.pd.128" => "__builtin_ia32_xorpd128_mask",
+ "llvm.x86.avx512.mask.xor.pd.256" => "__builtin_ia32_xorpd256_mask",
+ "llvm.x86.avx512.mask.xor.pd.512" => "__builtin_ia32_xorpd512_mask",
+ "llvm.x86.avx512.mask.xor.ps.128" => "__builtin_ia32_xorps128_mask",
+ "llvm.x86.avx512.mask.xor.ps.256" => "__builtin_ia32_xorps256_mask",
+ "llvm.x86.avx512.mask.xor.ps.512" => "__builtin_ia32_xorps512_mask",
+ "llvm.x86.avx512.mask3.vfmadd.pd.128" => "__builtin_ia32_vfmaddpd128_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.ps.128" => "__builtin_ia32_vfmaddps128_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.sd" => "__builtin_ia32_vfmaddsd3_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.ss" => "__builtin_ia32_vfmaddss3_mask3",
+ "llvm.x86.avx512.mask3.vfmaddsub.pd.128" => "__builtin_ia32_vfmaddsubpd128_mask3",
+ "llvm.x86.avx512.mask3.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256_mask3",
+ "llvm.x86.avx512.mask3.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask3",
+ "llvm.x86.avx512.mask3.vfmaddsub.ps.128" => "__builtin_ia32_vfmaddsubps128_mask3",
+ "llvm.x86.avx512.mask3.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256_mask3",
+ "llvm.x86.avx512.mask3.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask3",
+ "llvm.x86.avx512.mask3.vfmsub.pd.128" => "__builtin_ia32_vfmsubpd128_mask3",
+ "llvm.x86.avx512.mask3.vfmsub.pd.256" => "__builtin_ia32_vfmsubpd256_mask3",
+ "llvm.x86.avx512.mask3.vfmsub.pd.512" => "__builtin_ia32_vfmsubpd512_mask3",
+ "llvm.x86.avx512.mask3.vfmsub.ps.128" => "__builtin_ia32_vfmsubps128_mask3",
+ "llvm.x86.avx512.mask3.vfmsub.ps.256" => "__builtin_ia32_vfmsubps256_mask3",
+ "llvm.x86.avx512.mask3.vfmsub.ps.512" => "__builtin_ia32_vfmsubps512_mask3",
+ "llvm.x86.avx512.mask3.vfmsubadd.pd.128" => "__builtin_ia32_vfmsubaddpd128_mask3",
+ "llvm.x86.avx512.mask3.vfmsubadd.pd.256" => "__builtin_ia32_vfmsubaddpd256_mask3",
+ "llvm.x86.avx512.mask3.vfmsubadd.pd.512" => "__builtin_ia32_vfmsubaddpd512_mask3",
+ "llvm.x86.avx512.mask3.vfmsubadd.ps.128" => "__builtin_ia32_vfmsubaddps128_mask3",
+ "llvm.x86.avx512.mask3.vfmsubadd.ps.256" => "__builtin_ia32_vfmsubaddps256_mask3",
+ "llvm.x86.avx512.mask3.vfmsubadd.ps.512" => "__builtin_ia32_vfmsubaddps512_mask3",
+ "llvm.x86.avx512.mask3.vfnmsub.pd.128" => "__builtin_ia32_vfnmsubpd128_mask3",
+ "llvm.x86.avx512.mask3.vfnmsub.pd.256" => "__builtin_ia32_vfnmsubpd256_mask3",
+ "llvm.x86.avx512.mask3.vfnmsub.pd.512" => "__builtin_ia32_vfnmsubpd512_mask3",
+ "llvm.x86.avx512.mask3.vfnmsub.ps.128" => "__builtin_ia32_vfnmsubps128_mask3",
+ "llvm.x86.avx512.mask3.vfnmsub.ps.256" => "__builtin_ia32_vfnmsubps256_mask3",
+ "llvm.x86.avx512.mask3.vfnmsub.ps.512" => "__builtin_ia32_vfnmsubps512_mask3",
+ "llvm.x86.avx512.maskz.fixupimm.pd.128" => "__builtin_ia32_fixupimmpd128_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.pd.256" => "__builtin_ia32_fixupimmpd256_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.pd.512" => "__builtin_ia32_fixupimmpd512_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.ps.128" => "__builtin_ia32_fixupimmps128_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.ps.256" => "__builtin_ia32_fixupimmps256_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.ps.512" => "__builtin_ia32_fixupimmps512_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.sd" => "__builtin_ia32_fixupimmsd_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.ss" => "__builtin_ia32_fixupimmss_maskz",
+ "llvm.x86.avx512.maskz.pternlog.d.128" => "__builtin_ia32_pternlogd128_maskz",
+ "llvm.x86.avx512.maskz.pternlog.d.256" => "__builtin_ia32_pternlogd256_maskz",
+ "llvm.x86.avx512.maskz.pternlog.d.512" => "__builtin_ia32_pternlogd512_maskz",
+ "llvm.x86.avx512.maskz.pternlog.q.128" => "__builtin_ia32_pternlogq128_maskz",
+ "llvm.x86.avx512.maskz.pternlog.q.256" => "__builtin_ia32_pternlogq256_maskz",
+ "llvm.x86.avx512.maskz.pternlog.q.512" => "__builtin_ia32_pternlogq512_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.pd.128" => "__builtin_ia32_vfmaddpd128_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.ps.128" => "__builtin_ia32_vfmaddps128_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.sd" => "__builtin_ia32_vfmaddsd3_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.ss" => "__builtin_ia32_vfmaddss3_maskz",
+ "llvm.x86.avx512.maskz.vfmaddsub.pd.128" => "__builtin_ia32_vfmaddsubpd128_maskz",
+ "llvm.x86.avx512.maskz.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256_maskz",
+ "llvm.x86.avx512.maskz.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_maskz",
+ "llvm.x86.avx512.maskz.vfmaddsub.ps.128" => "__builtin_ia32_vfmaddsubps128_maskz",
+ "llvm.x86.avx512.maskz.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256_maskz",
+ "llvm.x86.avx512.maskz.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.d.128" => "__builtin_ia32_vpermt2vard128_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.d.256" => "__builtin_ia32_vpermt2vard256_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.d.512" => "__builtin_ia32_vpermt2vard512_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.hi.128" => "__builtin_ia32_vpermt2varhi128_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.hi.256" => "__builtin_ia32_vpermt2varhi256_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.hi.512" => "__builtin_ia32_vpermt2varhi512_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.pd.128" => "__builtin_ia32_vpermt2varpd128_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.pd.256" => "__builtin_ia32_vpermt2varpd256_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.pd.512" => "__builtin_ia32_vpermt2varpd512_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.ps.128" => "__builtin_ia32_vpermt2varps128_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.ps.256" => "__builtin_ia32_vpermt2varps256_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.ps.512" => "__builtin_ia32_vpermt2varps512_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.q.128" => "__builtin_ia32_vpermt2varq128_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.q.256" => "__builtin_ia32_vpermt2varq256_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.q.512" => "__builtin_ia32_vpermt2varq512_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.qi.128" => "__builtin_ia32_vpermt2varqi128_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.qi.256" => "__builtin_ia32_vpermt2varqi256_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.qi.512" => "__builtin_ia32_vpermt2varqi512_maskz",
+ "llvm.x86.avx512.maskz.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128_maskz",
+ "llvm.x86.avx512.maskz.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256_maskz",
+ "llvm.x86.avx512.maskz.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512_maskz",
+ "llvm.x86.avx512.maskz.vpmadd52l.uq.128" => "__builtin_ia32_vpmadd52luq128_maskz",
+ "llvm.x86.avx512.maskz.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256_maskz",
+ "llvm.x86.avx512.maskz.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512_maskz",
+ "llvm.x86.avx512.max.pd.512" => "__builtin_ia32_maxpd512",
+ "llvm.x86.avx512.max.ps.512" => "__builtin_ia32_maxps512",
+ "llvm.x86.avx512.min.pd.512" => "__builtin_ia32_minpd512",
+ "llvm.x86.avx512.min.ps.512" => "__builtin_ia32_minps512",
+ "llvm.x86.avx512.movntdqa" => "__builtin_ia32_movntdqa512",
+ "llvm.x86.avx512.mul.pd.512" => "__builtin_ia32_mulpd512",
+ "llvm.x86.avx512.mul.ps.512" => "__builtin_ia32_mulps512",
+ "llvm.x86.avx512.packssdw.512" => "__builtin_ia32_packssdw512",
+ "llvm.x86.avx512.packsswb.512" => "__builtin_ia32_packsswb512",
+ "llvm.x86.avx512.packusdw.512" => "__builtin_ia32_packusdw512",
+ "llvm.x86.avx512.packuswb.512" => "__builtin_ia32_packuswb512",
+ "llvm.x86.avx512.pavg.b.512" => "__builtin_ia32_pavgb512",
+ "llvm.x86.avx512.pavg.w.512" => "__builtin_ia32_pavgw512",
+ "llvm.x86.avx512.pbroadcastd.512" => "__builtin_ia32_pbroadcastd512",
+ "llvm.x86.avx512.pbroadcastq.512" => "__builtin_ia32_pbroadcastq512",
+ "llvm.x86.avx512.permvar.df.256" => "__builtin_ia32_permvardf256",
+ "llvm.x86.avx512.permvar.df.512" => "__builtin_ia32_permvardf512",
+ "llvm.x86.avx512.permvar.di.256" => "__builtin_ia32_permvardi256",
+ "llvm.x86.avx512.permvar.di.512" => "__builtin_ia32_permvardi512",
+ "llvm.x86.avx512.permvar.hi.128" => "__builtin_ia32_permvarhi128",
+ "llvm.x86.avx512.permvar.hi.256" => "__builtin_ia32_permvarhi256",
+ "llvm.x86.avx512.permvar.hi.512" => "__builtin_ia32_permvarhi512",
+ "llvm.x86.avx512.permvar.qi.128" => "__builtin_ia32_permvarqi128",
+ "llvm.x86.avx512.permvar.qi.256" => "__builtin_ia32_permvarqi256",
+ "llvm.x86.avx512.permvar.qi.512" => "__builtin_ia32_permvarqi512",
+ "llvm.x86.avx512.permvar.sf.512" => "__builtin_ia32_permvarsf512",
+ "llvm.x86.avx512.permvar.si.512" => "__builtin_ia32_permvarsi512",
+ "llvm.x86.avx512.pmaddubs.w.512" => "__builtin_ia32_pmaddubsw512",
+ "llvm.x86.avx512.pmaddw.d.512" => "__builtin_ia32_pmaddwd512",
+ "llvm.x86.avx512.pmovzxbd" => "__builtin_ia32_pmovzxbd512",
+ "llvm.x86.avx512.pmovzxbq" => "__builtin_ia32_pmovzxbq512",
+ "llvm.x86.avx512.pmovzxdq" => "__builtin_ia32_pmovzxdq512",
+ "llvm.x86.avx512.pmovzxwd" => "__builtin_ia32_pmovzxwd512",
+ "llvm.x86.avx512.pmovzxwq" => "__builtin_ia32_pmovzxwq512",
+ "llvm.x86.avx512.pmul.hr.sw.512" => "__builtin_ia32_pmulhrsw512",
+ "llvm.x86.avx512.pmulh.w.512" => "__builtin_ia32_pmulhw512",
+ "llvm.x86.avx512.pmulhu.w.512" => "__builtin_ia32_pmulhuw512",
+ "llvm.x86.avx512.pmultishift.qb.128" => "__builtin_ia32_vpmultishiftqb128",
+ "llvm.x86.avx512.pmultishift.qb.256" => "__builtin_ia32_vpmultishiftqb256",
+ "llvm.x86.avx512.pmultishift.qb.512" => "__builtin_ia32_vpmultishiftqb512",
+ "llvm.x86.avx512.psad.bw.512" => "__builtin_ia32_psadbw512",
+ "llvm.x86.avx512.pshuf.b.512" => "__builtin_ia32_pshufb512",
+ "llvm.x86.avx512.psll.d.512" => "__builtin_ia32_pslld512",
+ "llvm.x86.avx512.psll.dq" => "__builtin_ia32_pslldqi512",
+ "llvm.x86.avx512.psll.dq.bs" => "__builtin_ia32_pslldqi512_byteshift",
+ "llvm.x86.avx512.psll.q.512" => "__builtin_ia32_psllq512",
+ "llvm.x86.avx512.psll.w.512" => "__builtin_ia32_psllw512",
+ "llvm.x86.avx512.pslli.d.512" => "__builtin_ia32_pslldi512",
+ "llvm.x86.avx512.pslli.q.512" => "__builtin_ia32_psllqi512",
+ "llvm.x86.avx512.pslli.w.512" => "__builtin_ia32_psllwi512",
+ "llvm.x86.avx512.psllv.d.512" => "__builtin_ia32_psllv16si",
+ "llvm.x86.avx512.psllv.q.512" => "__builtin_ia32_psllv8di",
+ "llvm.x86.avx512.psllv.w.128" => "__builtin_ia32_psllv8hi",
+ "llvm.x86.avx512.psllv.w.256" => "__builtin_ia32_psllv16hi",
+ "llvm.x86.avx512.psllv.w.512" => "__builtin_ia32_psllv32hi",
+ "llvm.x86.avx512.psra.d.512" => "__builtin_ia32_psrad512",
+ "llvm.x86.avx512.psra.q.128" => "__builtin_ia32_psraq128",
+ "llvm.x86.avx512.psra.q.256" => "__builtin_ia32_psraq256",
+ "llvm.x86.avx512.psra.q.512" => "__builtin_ia32_psraq512",
+ "llvm.x86.avx512.psra.w.512" => "__builtin_ia32_psraw512",
+ "llvm.x86.avx512.psrai.d.512" => "__builtin_ia32_psradi512",
+ "llvm.x86.avx512.psrai.q.128" => "__builtin_ia32_psraqi128",
+ "llvm.x86.avx512.psrai.q.256" => "__builtin_ia32_psraqi256",
+ "llvm.x86.avx512.psrai.q.512" => "__builtin_ia32_psraqi512",
+ "llvm.x86.avx512.psrai.w.512" => "__builtin_ia32_psrawi512",
+ "llvm.x86.avx512.psrav.d.512" => "__builtin_ia32_psrav16si",
+ "llvm.x86.avx512.psrav.q.128" => "__builtin_ia32_psravq128",
+ "llvm.x86.avx512.psrav.q.256" => "__builtin_ia32_psravq256",
+ "llvm.x86.avx512.psrav.q.512" => "__builtin_ia32_psrav8di",
+ "llvm.x86.avx512.psrav.w.128" => "__builtin_ia32_psrav8hi",
+ "llvm.x86.avx512.psrav.w.256" => "__builtin_ia32_psrav16hi",
+ "llvm.x86.avx512.psrav.w.512" => "__builtin_ia32_psrav32hi",
+ "llvm.x86.avx512.psrl.d.512" => "__builtin_ia32_psrld512",
+ "llvm.x86.avx512.psrl.dq" => "__builtin_ia32_psrldqi512",
+ "llvm.x86.avx512.psrl.dq.bs" => "__builtin_ia32_psrldqi512_byteshift",
+ "llvm.x86.avx512.psrl.q.512" => "__builtin_ia32_psrlq512",
+ "llvm.x86.avx512.psrl.w.512" => "__builtin_ia32_psrlw512",
+ "llvm.x86.avx512.psrli.d.512" => "__builtin_ia32_psrldi512",
+ "llvm.x86.avx512.psrli.q.512" => "__builtin_ia32_psrlqi512",
+ "llvm.x86.avx512.psrli.w.512" => "__builtin_ia32_psrlwi512",
+ "llvm.x86.avx512.psrlv.d.512" => "__builtin_ia32_psrlv16si",
+ "llvm.x86.avx512.psrlv.q.512" => "__builtin_ia32_psrlv8di",
+ "llvm.x86.avx512.psrlv.w.128" => "__builtin_ia32_psrlv8hi",
+ "llvm.x86.avx512.psrlv.w.256" => "__builtin_ia32_psrlv16hi",
+ "llvm.x86.avx512.psrlv.w.512" => "__builtin_ia32_psrlv32hi",
+ "llvm.x86.avx512.pternlog.d.128" => "__builtin_ia32_pternlogd128",
+ "llvm.x86.avx512.pternlog.d.256" => "__builtin_ia32_pternlogd256",
+ "llvm.x86.avx512.pternlog.d.512" => "__builtin_ia32_pternlogd512",
+ "llvm.x86.avx512.pternlog.q.128" => "__builtin_ia32_pternlogq128",
+ "llvm.x86.avx512.pternlog.q.256" => "__builtin_ia32_pternlogq256",
+ "llvm.x86.avx512.pternlog.q.512" => "__builtin_ia32_pternlogq512",
+ "llvm.x86.avx512.ptestm.b.128" => "__builtin_ia32_ptestmb128",
+ "llvm.x86.avx512.ptestm.b.256" => "__builtin_ia32_ptestmb256",
+ "llvm.x86.avx512.ptestm.b.512" => "__builtin_ia32_ptestmb512",
+ "llvm.x86.avx512.ptestm.d.128" => "__builtin_ia32_ptestmd128",
+ "llvm.x86.avx512.ptestm.d.256" => "__builtin_ia32_ptestmd256",
+ "llvm.x86.avx512.ptestm.d.512" => "__builtin_ia32_ptestmd512",
+ "llvm.x86.avx512.ptestm.q.128" => "__builtin_ia32_ptestmq128",
+ "llvm.x86.avx512.ptestm.q.256" => "__builtin_ia32_ptestmq256",
+ "llvm.x86.avx512.ptestm.q.512" => "__builtin_ia32_ptestmq512",
+ "llvm.x86.avx512.ptestm.w.128" => "__builtin_ia32_ptestmw128",
+ "llvm.x86.avx512.ptestm.w.256" => "__builtin_ia32_ptestmw256",
+ "llvm.x86.avx512.ptestm.w.512" => "__builtin_ia32_ptestmw512",
+ "llvm.x86.avx512.ptestnm.b.128" => "__builtin_ia32_ptestnmb128",
+ "llvm.x86.avx512.ptestnm.b.256" => "__builtin_ia32_ptestnmb256",
+ "llvm.x86.avx512.ptestnm.b.512" => "__builtin_ia32_ptestnmb512",
+ "llvm.x86.avx512.ptestnm.d.128" => "__builtin_ia32_ptestnmd128",
+ "llvm.x86.avx512.ptestnm.d.256" => "__builtin_ia32_ptestnmd256",
+ "llvm.x86.avx512.ptestnm.d.512" => "__builtin_ia32_ptestnmd512",
+ "llvm.x86.avx512.ptestnm.q.128" => "__builtin_ia32_ptestnmq128",
+ "llvm.x86.avx512.ptestnm.q.256" => "__builtin_ia32_ptestnmq256",
+ "llvm.x86.avx512.ptestnm.q.512" => "__builtin_ia32_ptestnmq512",
+ "llvm.x86.avx512.ptestnm.w.128" => "__builtin_ia32_ptestnmw128",
+ "llvm.x86.avx512.ptestnm.w.256" => "__builtin_ia32_ptestnmw256",
+ "llvm.x86.avx512.ptestnm.w.512" => "__builtin_ia32_ptestnmw512",
+ "llvm.x86.avx512.rcp14.pd.128" => "__builtin_ia32_rcp14pd128_mask",
+ "llvm.x86.avx512.rcp14.pd.256" => "__builtin_ia32_rcp14pd256_mask",
+ "llvm.x86.avx512.rcp14.pd.512" => "__builtin_ia32_rcp14pd512_mask",
+ "llvm.x86.avx512.rcp14.ps.128" => "__builtin_ia32_rcp14ps128_mask",
+ "llvm.x86.avx512.rcp14.ps.256" => "__builtin_ia32_rcp14ps256_mask",
+ "llvm.x86.avx512.rcp14.ps.512" => "__builtin_ia32_rcp14ps512_mask",
+ "llvm.x86.avx512.rcp14.sd" => "__builtin_ia32_rcp14sd_mask",
+ "llvm.x86.avx512.rcp14.ss" => "__builtin_ia32_rcp14ss_mask",
+ "llvm.x86.avx512.rcp28.pd" => "__builtin_ia32_rcp28pd_mask",
+ "llvm.x86.avx512.rcp28.ps" => "__builtin_ia32_rcp28ps_mask",
+ "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_round_mask",
+ // [DUPLICATE]: "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_mask",
+ "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_round_mask",
+ // [DUPLICATE]: "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_mask",
+ "llvm.x86.avx512.rndscale.sd" => "__builtin_ia32_rndscalesd",
+ "llvm.x86.avx512.rndscale.ss" => "__builtin_ia32_rndscaless",
+ "llvm.x86.avx512.rsqrt14.pd.128" => "__builtin_ia32_rsqrt14pd128_mask",
+ "llvm.x86.avx512.rsqrt14.pd.256" => "__builtin_ia32_rsqrt14pd256_mask",
+ "llvm.x86.avx512.rsqrt14.pd.512" => "__builtin_ia32_rsqrt14pd512_mask",
+ "llvm.x86.avx512.rsqrt14.ps.128" => "__builtin_ia32_rsqrt14ps128_mask",
+ "llvm.x86.avx512.rsqrt14.ps.256" => "__builtin_ia32_rsqrt14ps256_mask",
+ "llvm.x86.avx512.rsqrt14.ps.512" => "__builtin_ia32_rsqrt14ps512_mask",
+ "llvm.x86.avx512.rsqrt14.sd" => "__builtin_ia32_rsqrt14sd_mask",
+ "llvm.x86.avx512.rsqrt14.ss" => "__builtin_ia32_rsqrt14ss_mask",
+ "llvm.x86.avx512.rsqrt28.pd" => "__builtin_ia32_rsqrt28pd_mask",
+ "llvm.x86.avx512.rsqrt28.ps" => "__builtin_ia32_rsqrt28ps_mask",
+ "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_round_mask",
+ // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_mask",
+ "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_round_mask",
+ // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_mask",
+ "llvm.x86.avx512.scatter.dpd.512" => "__builtin_ia32_scattersiv8df",
+ "llvm.x86.avx512.scatter.dpi.512" => "__builtin_ia32_scattersiv16si",
+ "llvm.x86.avx512.scatter.dpq.512" => "__builtin_ia32_scattersiv8di",
+ "llvm.x86.avx512.scatter.dps.512" => "__builtin_ia32_scattersiv16sf",
+ "llvm.x86.avx512.scatter.qpd.512" => "__builtin_ia32_scatterdiv8df",
+ "llvm.x86.avx512.scatter.qpi.512" => "__builtin_ia32_scatterdiv16si",
+ "llvm.x86.avx512.scatter.qpq.512" => "__builtin_ia32_scatterdiv8di",
+ "llvm.x86.avx512.scatter.qps.512" => "__builtin_ia32_scatterdiv16sf",
+ "llvm.x86.avx512.scatterdiv2.df" => "__builtin_ia32_scatterdiv2df",
+ "llvm.x86.avx512.scatterdiv2.di" => "__builtin_ia32_scatterdiv2di",
+ "llvm.x86.avx512.scatterdiv4.df" => "__builtin_ia32_scatterdiv4df",
+ "llvm.x86.avx512.scatterdiv4.di" => "__builtin_ia32_scatterdiv4di",
+ "llvm.x86.avx512.scatterdiv4.sf" => "__builtin_ia32_scatterdiv4sf",
+ "llvm.x86.avx512.scatterdiv4.si" => "__builtin_ia32_scatterdiv4si",
+ "llvm.x86.avx512.scatterdiv8.sf" => "__builtin_ia32_scatterdiv8sf",
+ "llvm.x86.avx512.scatterdiv8.si" => "__builtin_ia32_scatterdiv8si",
+ "llvm.x86.avx512.scatterpf.dpd.512" => "__builtin_ia32_scatterpfdpd",
+ "llvm.x86.avx512.scatterpf.dps.512" => "__builtin_ia32_scatterpfdps",
+ "llvm.x86.avx512.scatterpf.qpd.512" => "__builtin_ia32_scatterpfqpd",
+ "llvm.x86.avx512.scatterpf.qps.512" => "__builtin_ia32_scatterpfqps",
+ "llvm.x86.avx512.scattersiv2.df" => "__builtin_ia32_scattersiv2df",
+ "llvm.x86.avx512.scattersiv2.di" => "__builtin_ia32_scattersiv2di",
+ "llvm.x86.avx512.scattersiv4.df" => "__builtin_ia32_scattersiv4df",
+ "llvm.x86.avx512.scattersiv4.di" => "__builtin_ia32_scattersiv4di",
+ "llvm.x86.avx512.scattersiv4.sf" => "__builtin_ia32_scattersiv4sf",
+ "llvm.x86.avx512.scattersiv4.si" => "__builtin_ia32_scattersiv4si",
+ "llvm.x86.avx512.scattersiv8.sf" => "__builtin_ia32_scattersiv8sf",
+ "llvm.x86.avx512.scattersiv8.si" => "__builtin_ia32_scattersiv8si",
+ "llvm.x86.avx512.sqrt.pd.512" => "__builtin_ia32_sqrtpd512_mask",
+ "llvm.x86.avx512.sqrt.ps.512" => "__builtin_ia32_sqrtps512_mask",
+ "llvm.x86.avx512.sqrt.sd" => "__builtin_ia32_sqrtrndsd",
+ "llvm.x86.avx512.sqrt.ss" => "__builtin_ia32_sqrtrndss",
+ "llvm.x86.avx512.sub.pd.512" => "__builtin_ia32_subpd512",
+ "llvm.x86.avx512.sub.ps.512" => "__builtin_ia32_subps512",
+ "llvm.x86.avx512.vbroadcast.sd.512" => "__builtin_ia32_vbroadcastsd512",
+ "llvm.x86.avx512.vbroadcast.sd.pd.512" => "__builtin_ia32_vbroadcastsd_pd512",
+ "llvm.x86.avx512.vbroadcast.ss.512" => "__builtin_ia32_vbroadcastss512",
+ "llvm.x86.avx512.vbroadcast.ss.ps.512" => "__builtin_ia32_vbroadcastss_ps512",
+ "llvm.x86.avx512.vcomi.sd" => "__builtin_ia32_vcomisd",
+ "llvm.x86.avx512.vcomi.ss" => "__builtin_ia32_vcomiss",
+ "llvm.x86.avx512.vcvtsd2si32" => "__builtin_ia32_vcvtsd2si32",
+ "llvm.x86.avx512.vcvtsd2si64" => "__builtin_ia32_vcvtsd2si64",
+ "llvm.x86.avx512.vcvtsd2usi32" => "__builtin_ia32_vcvtsd2usi32",
+ "llvm.x86.avx512.vcvtsd2usi64" => "__builtin_ia32_vcvtsd2usi64",
+ "llvm.x86.avx512.vcvtss2si32" => "__builtin_ia32_vcvtss2si32",
+ "llvm.x86.avx512.vcvtss2si64" => "__builtin_ia32_vcvtss2si64",
+ "llvm.x86.avx512.vcvtss2usi32" => "__builtin_ia32_vcvtss2usi32",
+ "llvm.x86.avx512.vcvtss2usi64" => "__builtin_ia32_vcvtss2usi64",
+ "llvm.x86.avx512.vpdpbusd.128" => "__builtin_ia32_vpdpbusd128",
+ "llvm.x86.avx512.vpdpbusd.256" => "__builtin_ia32_vpdpbusd256",
+ "llvm.x86.avx512.vpdpbusd.512" => "__builtin_ia32_vpdpbusd512",
+ "llvm.x86.avx512.vpdpbusds.128" => "__builtin_ia32_vpdpbusds128",
+ "llvm.x86.avx512.vpdpbusds.256" => "__builtin_ia32_vpdpbusds256",
+ "llvm.x86.avx512.vpdpbusds.512" => "__builtin_ia32_vpdpbusds512",
+ "llvm.x86.avx512.vpdpwssd.128" => "__builtin_ia32_vpdpwssd128",
+ "llvm.x86.avx512.vpdpwssd.256" => "__builtin_ia32_vpdpwssd256",
+ "llvm.x86.avx512.vpdpwssd.512" => "__builtin_ia32_vpdpwssd512",
+ "llvm.x86.avx512.vpdpwssds.128" => "__builtin_ia32_vpdpwssds128",
+ "llvm.x86.avx512.vpdpwssds.256" => "__builtin_ia32_vpdpwssds256",
+ "llvm.x86.avx512.vpdpwssds.512" => "__builtin_ia32_vpdpwssds512",
+ "llvm.x86.avx512.vpermi2var.d.128" => "__builtin_ia32_vpermi2vard128",
+ "llvm.x86.avx512.vpermi2var.d.256" => "__builtin_ia32_vpermi2vard256",
+ "llvm.x86.avx512.vpermi2var.d.512" => "__builtin_ia32_vpermi2vard512",
+ "llvm.x86.avx512.vpermi2var.hi.128" => "__builtin_ia32_vpermi2varhi128",
+ "llvm.x86.avx512.vpermi2var.hi.256" => "__builtin_ia32_vpermi2varhi256",
+ "llvm.x86.avx512.vpermi2var.hi.512" => "__builtin_ia32_vpermi2varhi512",
+ "llvm.x86.avx512.vpermi2var.pd.128" => "__builtin_ia32_vpermi2varpd128",
+ "llvm.x86.avx512.vpermi2var.pd.256" => "__builtin_ia32_vpermi2varpd256",
+ "llvm.x86.avx512.vpermi2var.pd.512" => "__builtin_ia32_vpermi2varpd512",
+ "llvm.x86.avx512.vpermi2var.ps.128" => "__builtin_ia32_vpermi2varps128",
+ "llvm.x86.avx512.vpermi2var.ps.256" => "__builtin_ia32_vpermi2varps256",
+ "llvm.x86.avx512.vpermi2var.ps.512" => "__builtin_ia32_vpermi2varps512",
+ "llvm.x86.avx512.vpermi2var.q.128" => "__builtin_ia32_vpermi2varq128",
+ "llvm.x86.avx512.vpermi2var.q.256" => "__builtin_ia32_vpermi2varq256",
+ "llvm.x86.avx512.vpermi2var.q.512" => "__builtin_ia32_vpermi2varq512",
+ "llvm.x86.avx512.vpermi2var.qi.128" => "__builtin_ia32_vpermi2varqi128",
+ "llvm.x86.avx512.vpermi2var.qi.256" => "__builtin_ia32_vpermi2varqi256",
+ "llvm.x86.avx512.vpermi2var.qi.512" => "__builtin_ia32_vpermi2varqi512",
+ "llvm.x86.avx512.vpermilvar.pd.512" => "__builtin_ia32_vpermilvarpd512",
+ "llvm.x86.avx512.vpermilvar.ps.512" => "__builtin_ia32_vpermilvarps512",
+ "llvm.x86.avx512.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128",
+ "llvm.x86.avx512.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256",
+ "llvm.x86.avx512.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512",
+ "llvm.x86.avx512.vpmadd52l.uq.128" => "__builtin_ia32_vpmadd52luq128",
+ "llvm.x86.avx512.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256",
+ "llvm.x86.avx512.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512",
+ "llvm.x86.avx512bf16.cvtne2ps2bf16.128" => "__builtin_ia32_cvtne2ps2bf16_128",
+ "llvm.x86.avx512bf16.cvtne2ps2bf16.256" => "__builtin_ia32_cvtne2ps2bf16_256",
+ "llvm.x86.avx512bf16.cvtne2ps2bf16.512" => "__builtin_ia32_cvtne2ps2bf16_512",
+ "llvm.x86.avx512bf16.cvtneps2bf16.256" => "__builtin_ia32_cvtneps2bf16_256",
+ "llvm.x86.avx512bf16.cvtneps2bf16.512" => "__builtin_ia32_cvtneps2bf16_512",
+ "llvm.x86.avx512bf16.dpbf16ps.128" => "__builtin_ia32_dpbf16ps_128",
+ "llvm.x86.avx512bf16.dpbf16ps.256" => "__builtin_ia32_dpbf16ps_256",
+ "llvm.x86.avx512bf16.dpbf16ps.512" => "__builtin_ia32_dpbf16ps_512",
+ "llvm.x86.avx512fp16.add.ph.512" => "__builtin_ia32_addph512",
+ "llvm.x86.avx512fp16.div.ph.512" => "__builtin_ia32_divph512",
+ "llvm.x86.avx512fp16.mask.add.sh.round" => "__builtin_ia32_addsh_round_mask",
+ "llvm.x86.avx512fp16.mask.cmp.sh" => "__builtin_ia32_cmpsh_mask",
+ "llvm.x86.avx512fp16.mask.div.sh.round" => "__builtin_ia32_divsh_round_mask",
+ "llvm.x86.avx512fp16.mask.fpclass.sh" => "__builtin_ia32_fpclasssh_mask",
+ "llvm.x86.avx512fp16.mask.getexp.ph.128" => "__builtin_ia32_getexpph128_mask",
+ "llvm.x86.avx512fp16.mask.getexp.ph.256" => "__builtin_ia32_getexpph256_mask",
+ "llvm.x86.avx512fp16.mask.getexp.ph.512" => "__builtin_ia32_getexpph512_mask",
+ "llvm.x86.avx512fp16.mask.getexp.sh" => "__builtin_ia32_getexpsh128_round_mask",
+ "llvm.x86.avx512fp16.mask.getmant.ph.128" => "__builtin_ia32_getmantph128_mask",
+ "llvm.x86.avx512fp16.mask.getmant.ph.256" => "__builtin_ia32_getmantph256_mask",
+ "llvm.x86.avx512fp16.mask.getmant.ph.512" => "__builtin_ia32_getmantph512_mask",
+ "llvm.x86.avx512fp16.mask.getmant.sh" => "__builtin_ia32_getmantsh_round_mask",
+ "llvm.x86.avx512fp16.mask.max.sh.round" => "__builtin_ia32_maxsh_round_mask",
+ "llvm.x86.avx512fp16.mask.min.sh.round" => "__builtin_ia32_minsh_round_mask",
+ "llvm.x86.avx512fp16.mask.mul.sh.round" => "__builtin_ia32_mulsh_round_mask",
+ "llvm.x86.avx512fp16.mask.rcp.ph.128" => "__builtin_ia32_rcpph128_mask",
+ "llvm.x86.avx512fp16.mask.rcp.ph.256" => "__builtin_ia32_rcpph256_mask",
+ "llvm.x86.avx512fp16.mask.rcp.ph.512" => "__builtin_ia32_rcpph512_mask",
+ "llvm.x86.avx512fp16.mask.rcp.sh" => "__builtin_ia32_rcpsh_mask",
+ "llvm.x86.avx512fp16.mask.reduce.ph.128" => "__builtin_ia32_reduceph128_mask",
+ "llvm.x86.avx512fp16.mask.reduce.ph.256" => "__builtin_ia32_reduceph256_mask",
+ "llvm.x86.avx512fp16.mask.reduce.ph.512" => "__builtin_ia32_reduceph512_mask",
+ "llvm.x86.avx512fp16.mask.reduce.sh" => "__builtin_ia32_reducesh_mask",
+ "llvm.x86.avx512fp16.mask.rndscale.ph.128" => "__builtin_ia32_rndscaleph_128_mask",
+ "llvm.x86.avx512fp16.mask.rndscale.ph.256" => "__builtin_ia32_rndscaleph_256_mask",
+ "llvm.x86.avx512fp16.mask.rndscale.ph.512" => "__builtin_ia32_rndscaleph_mask",
+ "llvm.x86.avx512fp16.mask.rndscale.sh" => "__builtin_ia32_rndscalesh_round_mask",
+ "llvm.x86.avx512fp16.mask.rsqrt.ph.128" => "__builtin_ia32_rsqrtph128_mask",
+ "llvm.x86.avx512fp16.mask.rsqrt.ph.256" => "__builtin_ia32_rsqrtph256_mask",
+ "llvm.x86.avx512fp16.mask.rsqrt.ph.512" => "__builtin_ia32_rsqrtph512_mask",
+ "llvm.x86.avx512fp16.mask.rsqrt.sh" => "__builtin_ia32_rsqrtsh_mask",
+ "llvm.x86.avx512fp16.mask.scalef.ph.128" => "__builtin_ia32_scalefph128_mask",
+ "llvm.x86.avx512fp16.mask.scalef.ph.256" => "__builtin_ia32_scalefph256_mask",
+ "llvm.x86.avx512fp16.mask.scalef.ph.512" => "__builtin_ia32_scalefph512_mask",
+ "llvm.x86.avx512fp16.mask.scalef.sh" => "__builtin_ia32_scalefsh_round_mask",
+ "llvm.x86.avx512fp16.mask.sub.sh.round" => "__builtin_ia32_subsh_round_mask",
+ "llvm.x86.avx512fp16.mask.vcvtdq2ph.128" => "__builtin_ia32_vcvtdq2ph128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtpd2ph.128" => "__builtin_ia32_vcvtpd2ph128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtpd2ph.256" => "__builtin_ia32_vcvtpd2ph256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtpd2ph.512" => "__builtin_ia32_vcvtpd2ph512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2dq.128" => "__builtin_ia32_vcvtph2dq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2dq.256" => "__builtin_ia32_vcvtph2dq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2dq.512" => "__builtin_ia32_vcvtph2dq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2pd.128" => "__builtin_ia32_vcvtph2pd128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2pd.256" => "__builtin_ia32_vcvtph2pd256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2pd.512" => "__builtin_ia32_vcvtph2pd512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2psx.128" => "__builtin_ia32_vcvtph2psx128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2psx.256" => "__builtin_ia32_vcvtph2psx256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2psx.512" => "__builtin_ia32_vcvtph2psx512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2qq.128" => "__builtin_ia32_vcvtph2qq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2qq.256" => "__builtin_ia32_vcvtph2qq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2qq.512" => "__builtin_ia32_vcvtph2qq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2udq.128" => "__builtin_ia32_vcvtph2udq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2udq.256" => "__builtin_ia32_vcvtph2udq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2udq.512" => "__builtin_ia32_vcvtph2udq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2uqq.128" => "__builtin_ia32_vcvtph2uqq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2uqq.256" => "__builtin_ia32_vcvtph2uqq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2uqq.512" => "__builtin_ia32_vcvtph2uqq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2uw.128" => "__builtin_ia32_vcvtph2uw128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2uw.256" => "__builtin_ia32_vcvtph2uw256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2uw.512" => "__builtin_ia32_vcvtph2uw512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2w.128" => "__builtin_ia32_vcvtph2w128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2w.256" => "__builtin_ia32_vcvtph2w256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2w.512" => "__builtin_ia32_vcvtph2w512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtps2phx.128" => "__builtin_ia32_vcvtps2phx128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtps2phx.256" => "__builtin_ia32_vcvtps2phx256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtps2phx.512" => "__builtin_ia32_vcvtps2phx512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtqq2ph.128" => "__builtin_ia32_vcvtqq2ph128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtqq2ph.256" => "__builtin_ia32_vcvtqq2ph256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtsd2sh.round" => "__builtin_ia32_vcvtsd2sh_round_mask",
+ "llvm.x86.avx512fp16.mask.vcvtsh2sd.round" => "__builtin_ia32_vcvtsh2sd_round_mask",
+ "llvm.x86.avx512fp16.mask.vcvtsh2ss.round" => "__builtin_ia32_vcvtsh2ss_round_mask",
+ "llvm.x86.avx512fp16.mask.vcvtss2sh.round" => "__builtin_ia32_vcvtss2sh_round_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2dq.128" => "__builtin_ia32_vcvttph2dq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2dq.256" => "__builtin_ia32_vcvttph2dq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2dq.512" => "__builtin_ia32_vcvttph2dq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2qq.128" => "__builtin_ia32_vcvttph2qq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2qq.256" => "__builtin_ia32_vcvttph2qq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2qq.512" => "__builtin_ia32_vcvttph2qq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2udq.128" => "__builtin_ia32_vcvttph2udq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2udq.256" => "__builtin_ia32_vcvttph2udq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2udq.512" => "__builtin_ia32_vcvttph2udq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2uqq.128" => "__builtin_ia32_vcvttph2uqq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2uqq.256" => "__builtin_ia32_vcvttph2uqq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2uqq.512" => "__builtin_ia32_vcvttph2uqq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2uw.128" => "__builtin_ia32_vcvttph2uw128_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2uw.256" => "__builtin_ia32_vcvttph2uw256_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2uw.512" => "__builtin_ia32_vcvttph2uw512_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2w.128" => "__builtin_ia32_vcvttph2w128_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2w.256" => "__builtin_ia32_vcvttph2w256_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2w.512" => "__builtin_ia32_vcvttph2w512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtudq2ph.128" => "__builtin_ia32_vcvtudq2ph128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtuqq2ph.128" => "__builtin_ia32_vcvtuqq2ph128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtuqq2ph.256" => "__builtin_ia32_vcvtuqq2ph256_mask",
+ "llvm.x86.avx512fp16.mask.vfcmadd.cph.128" => "__builtin_ia32_vfcmaddcph128_mask",
+ "llvm.x86.avx512fp16.mask.vfcmadd.cph.256" => "__builtin_ia32_vfcmaddcph256_mask",
+ "llvm.x86.avx512fp16.mask.vfcmadd.cph.512" => "__builtin_ia32_vfcmaddcph512_mask3",
+ "llvm.x86.avx512fp16.mask.vfcmadd.csh" => "__builtin_ia32_vfcmaddcsh_mask",
+ "llvm.x86.avx512fp16.mask.vfcmul.cph.128" => "__builtin_ia32_vfcmulcph128_mask",
+ "llvm.x86.avx512fp16.mask.vfcmul.cph.256" => "__builtin_ia32_vfcmulcph256_mask",
+ "llvm.x86.avx512fp16.mask.vfcmul.cph.512" => "__builtin_ia32_vfcmulcph512_mask",
+ "llvm.x86.avx512fp16.mask.vfcmul.csh" => "__builtin_ia32_vfcmulcsh_mask",
+ "llvm.x86.avx512fp16.mask.vfmadd.cph.128" => "__builtin_ia32_vfmaddcph128_mask",
+ "llvm.x86.avx512fp16.mask.vfmadd.cph.256" => "__builtin_ia32_vfmaddcph256_mask",
+ "llvm.x86.avx512fp16.mask.vfmadd.cph.512" => "__builtin_ia32_vfmaddcph512_mask3",
+ "llvm.x86.avx512fp16.mask.vfmadd.csh" => "__builtin_ia32_vfmaddcsh_mask",
+ "llvm.x86.avx512fp16.mask.vfmul.cph.128" => "__builtin_ia32_vfmulcph128_mask",
+ "llvm.x86.avx512fp16.mask.vfmul.cph.256" => "__builtin_ia32_vfmulcph256_mask",
+ "llvm.x86.avx512fp16.mask.vfmul.cph.512" => "__builtin_ia32_vfmulcph512_mask",
+ "llvm.x86.avx512fp16.mask.vfmul.csh" => "__builtin_ia32_vfmulcsh_mask",
+ "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128" => "__builtin_ia32_vfcmaddcph128_maskz",
+ "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256" => "__builtin_ia32_vfcmaddcph256_maskz",
+ "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512" => "__builtin_ia32_vfcmaddcph512_maskz",
+ "llvm.x86.avx512fp16.maskz.vfcmadd.csh" => "__builtin_ia32_vfcmaddcsh_maskz",
+ "llvm.x86.avx512fp16.maskz.vfmadd.cph.128" => "__builtin_ia32_vfmaddcph128_maskz",
+ "llvm.x86.avx512fp16.maskz.vfmadd.cph.256" => "__builtin_ia32_vfmaddcph256_maskz",
+ "llvm.x86.avx512fp16.maskz.vfmadd.cph.512" => "__builtin_ia32_vfmaddcph512_maskz",
+ "llvm.x86.avx512fp16.maskz.vfmadd.csh" => "__builtin_ia32_vfmaddcsh_maskz",
+ "llvm.x86.avx512fp16.max.ph.128" => "__builtin_ia32_maxph128",
+ "llvm.x86.avx512fp16.max.ph.256" => "__builtin_ia32_maxph256",
+ "llvm.x86.avx512fp16.max.ph.512" => "__builtin_ia32_maxph512",
+ "llvm.x86.avx512fp16.min.ph.128" => "__builtin_ia32_minph128",
+ "llvm.x86.avx512fp16.min.ph.256" => "__builtin_ia32_minph256",
+ "llvm.x86.avx512fp16.min.ph.512" => "__builtin_ia32_minph512",
+ "llvm.x86.avx512fp16.mul.ph.512" => "__builtin_ia32_mulph512",
+ "llvm.x86.avx512fp16.sub.ph.512" => "__builtin_ia32_subph512",
+ "llvm.x86.avx512fp16.vcomi.sh" => "__builtin_ia32_vcomish",
+ "llvm.x86.avx512fp16.vcvtsh2si32" => "__builtin_ia32_vcvtsh2si32",
+ "llvm.x86.avx512fp16.vcvtsh2si64" => "__builtin_ia32_vcvtsh2si64",
+ "llvm.x86.avx512fp16.vcvtsh2usi32" => "__builtin_ia32_vcvtsh2usi32",
+ "llvm.x86.avx512fp16.vcvtsh2usi64" => "__builtin_ia32_vcvtsh2usi64",
+ "llvm.x86.avx512fp16.vcvtsi2sh" => "__builtin_ia32_vcvtsi2sh",
+ "llvm.x86.avx512fp16.vcvtsi642sh" => "__builtin_ia32_vcvtsi642sh",
+ "llvm.x86.avx512fp16.vcvttsh2si32" => "__builtin_ia32_vcvttsh2si32",
+ "llvm.x86.avx512fp16.vcvttsh2si64" => "__builtin_ia32_vcvttsh2si64",
+ "llvm.x86.avx512fp16.vcvttsh2usi32" => "__builtin_ia32_vcvttsh2usi32",
+ "llvm.x86.avx512fp16.vcvttsh2usi64" => "__builtin_ia32_vcvttsh2usi64",
+ "llvm.x86.avx512fp16.vcvtusi2sh" => "__builtin_ia32_vcvtusi2sh",
+ "llvm.x86.avx512fp16.vcvtusi642sh" => "__builtin_ia32_vcvtusi642sh",
+ "llvm.x86.avx512fp16.vfmaddsub.ph.128" => "__builtin_ia32_vfmaddsubph",
+ "llvm.x86.avx512fp16.vfmaddsub.ph.256" => "__builtin_ia32_vfmaddsubph256",
+ "llvm.x86.bmi.bextr.32" => "__builtin_ia32_bextr_u32",
+ "llvm.x86.bmi.bextr.64" => "__builtin_ia32_bextr_u64",
+ "llvm.x86.bmi.bzhi.32" => "__builtin_ia32_bzhi_si",
+ "llvm.x86.bmi.bzhi.64" => "__builtin_ia32_bzhi_di",
+ "llvm.x86.bmi.pdep.32" => "__builtin_ia32_pdep_si",
+ "llvm.x86.bmi.pdep.64" => "__builtin_ia32_pdep_di",
+ "llvm.x86.bmi.pext.32" => "__builtin_ia32_pext_si",
+ "llvm.x86.bmi.pext.64" => "__builtin_ia32_pext_di",
+ "llvm.x86.cldemote" => "__builtin_ia32_cldemote",
+ "llvm.x86.clflushopt" => "__builtin_ia32_clflushopt",
+ "llvm.x86.clrssbsy" => "__builtin_ia32_clrssbsy",
+ "llvm.x86.clui" => "__builtin_ia32_clui",
+ "llvm.x86.clwb" => "__builtin_ia32_clwb",
+ "llvm.x86.clzero" => "__builtin_ia32_clzero",
+ "llvm.x86.directstore32" => "__builtin_ia32_directstore_u32",
+ "llvm.x86.directstore64" => "__builtin_ia32_directstore_u64",
+ "llvm.x86.enqcmd" => "__builtin_ia32_enqcmd",
+ "llvm.x86.enqcmds" => "__builtin_ia32_enqcmds",
+ "llvm.x86.flags.read.u32" => "__builtin_ia32_readeflags_u32",
+ "llvm.x86.flags.read.u64" => "__builtin_ia32_readeflags_u64",
+ "llvm.x86.flags.write.u32" => "__builtin_ia32_writeeflags_u32",
+ "llvm.x86.flags.write.u64" => "__builtin_ia32_writeeflags_u64",
+ "llvm.x86.fma.mask.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask",
+ "llvm.x86.fma.mask.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask",
+ "llvm.x86.fma.mask.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask",
+ "llvm.x86.fma.mask.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask",
+ "llvm.x86.fma.mask.vfmsub.pd.512" => "__builtin_ia32_vfmsubpd512_mask",
+ "llvm.x86.fma.mask.vfmsub.ps.512" => "__builtin_ia32_vfmsubps512_mask",
+ "llvm.x86.fma.mask.vfmsubadd.pd.512" => "__builtin_ia32_vfmsubaddpd512_mask",
+ "llvm.x86.fma.mask.vfmsubadd.ps.512" => "__builtin_ia32_vfmsubaddps512_mask",
+ "llvm.x86.fma.mask.vfnmadd.pd.512" => "__builtin_ia32_vfnmaddpd512_mask",
+ "llvm.x86.fma.mask.vfnmadd.ps.512" => "__builtin_ia32_vfnmaddps512_mask",
+ "llvm.x86.fma.mask.vfnmsub.pd.512" => "__builtin_ia32_vfnmsubpd512_mask",
+ "llvm.x86.fma.mask.vfnmsub.ps.512" => "__builtin_ia32_vfnmsubps512_mask",
+ "llvm.x86.fma.vfmadd.pd" => "__builtin_ia32_vfmaddpd",
+ "llvm.x86.fma.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256",
+ "llvm.x86.fma.vfmadd.ps" => "__builtin_ia32_vfmaddps",
+ "llvm.x86.fma.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256",
+ "llvm.x86.fma.vfmadd.sd" => "__builtin_ia32_vfmaddsd",
+ "llvm.x86.fma.vfmadd.ss" => "__builtin_ia32_vfmaddss",
+ "llvm.x86.fma.vfmaddsub.pd" => "__builtin_ia32_vfmaddsubpd",
+ "llvm.x86.fma.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256",
+ "llvm.x86.fma.vfmaddsub.ps" => "__builtin_ia32_vfmaddsubps",
+ "llvm.x86.fma.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256",
+ "llvm.x86.fma.vfmsub.pd" => "__builtin_ia32_vfmsubpd",
+ "llvm.x86.fma.vfmsub.pd.256" => "__builtin_ia32_vfmsubpd256",
+ "llvm.x86.fma.vfmsub.ps" => "__builtin_ia32_vfmsubps",
+ "llvm.x86.fma.vfmsub.ps.256" => "__builtin_ia32_vfmsubps256",
+ "llvm.x86.fma.vfmsub.sd" => "__builtin_ia32_vfmsubsd",
+ "llvm.x86.fma.vfmsub.ss" => "__builtin_ia32_vfmsubss",
+ "llvm.x86.fma.vfmsubadd.pd" => "__builtin_ia32_vfmsubaddpd",
+ "llvm.x86.fma.vfmsubadd.pd.256" => "__builtin_ia32_vfmsubaddpd256",
+ "llvm.x86.fma.vfmsubadd.ps" => "__builtin_ia32_vfmsubaddps",
+ "llvm.x86.fma.vfmsubadd.ps.256" => "__builtin_ia32_vfmsubaddps256",
+ "llvm.x86.fma.vfnmadd.pd" => "__builtin_ia32_vfnmaddpd",
+ "llvm.x86.fma.vfnmadd.pd.256" => "__builtin_ia32_vfnmaddpd256",
+ "llvm.x86.fma.vfnmadd.ps" => "__builtin_ia32_vfnmaddps",
+ "llvm.x86.fma.vfnmadd.ps.256" => "__builtin_ia32_vfnmaddps256",
+ "llvm.x86.fma.vfnmadd.sd" => "__builtin_ia32_vfnmaddsd",
+ "llvm.x86.fma.vfnmadd.ss" => "__builtin_ia32_vfnmaddss",
+ "llvm.x86.fma.vfnmsub.pd" => "__builtin_ia32_vfnmsubpd",
+ "llvm.x86.fma.vfnmsub.pd.256" => "__builtin_ia32_vfnmsubpd256",
+ "llvm.x86.fma.vfnmsub.ps" => "__builtin_ia32_vfnmsubps",
+ "llvm.x86.fma.vfnmsub.ps.256" => "__builtin_ia32_vfnmsubps256",
+ "llvm.x86.fma.vfnmsub.sd" => "__builtin_ia32_vfnmsubsd",
+ "llvm.x86.fma.vfnmsub.ss" => "__builtin_ia32_vfnmsubss",
+ "llvm.x86.fxrstor" => "__builtin_ia32_fxrstor",
+ "llvm.x86.fxrstor64" => "__builtin_ia32_fxrstor64",
+ "llvm.x86.fxsave" => "__builtin_ia32_fxsave",
+ "llvm.x86.fxsave64" => "__builtin_ia32_fxsave64",
+ "llvm.x86.incsspd" => "__builtin_ia32_incsspd",
+ "llvm.x86.incsspq" => "__builtin_ia32_incsspq",
+ "llvm.x86.invpcid" => "__builtin_ia32_invpcid",
+ "llvm.x86.ldtilecfg" => "__builtin_ia32_tile_loadconfig",
+ "llvm.x86.ldtilecfg.internal" => "__builtin_ia32_tile_loadconfig_internal",
+ "llvm.x86.llwpcb" => "__builtin_ia32_llwpcb",
+ "llvm.x86.loadiwkey" => "__builtin_ia32_loadiwkey",
+ "llvm.x86.lwpins32" => "__builtin_ia32_lwpins32",
+ "llvm.x86.lwpins64" => "__builtin_ia32_lwpins64",
+ "llvm.x86.lwpval32" => "__builtin_ia32_lwpval32",
+ "llvm.x86.lwpval64" => "__builtin_ia32_lwpval64",
+ "llvm.x86.mmx.emms" => "__builtin_ia32_emms",
+ "llvm.x86.mmx.femms" => "__builtin_ia32_femms",
+ "llvm.x86.mmx.maskmovq" => "__builtin_ia32_maskmovq",
+ "llvm.x86.mmx.movnt.dq" => "__builtin_ia32_movntq",
+ "llvm.x86.mmx.packssdw" => "__builtin_ia32_packssdw",
+ "llvm.x86.mmx.packsswb" => "__builtin_ia32_packsswb",
+ "llvm.x86.mmx.packuswb" => "__builtin_ia32_packuswb",
+ "llvm.x86.mmx.padd.b" => "__builtin_ia32_paddb",
+ "llvm.x86.mmx.padd.d" => "__builtin_ia32_paddd",
+ "llvm.x86.mmx.padd.q" => "__builtin_ia32_paddq",
+ "llvm.x86.mmx.padd.w" => "__builtin_ia32_paddw",
+ "llvm.x86.mmx.padds.b" => "__builtin_ia32_paddsb",
+ "llvm.x86.mmx.padds.w" => "__builtin_ia32_paddsw",
+ "llvm.x86.mmx.paddus.b" => "__builtin_ia32_paddusb",
+ "llvm.x86.mmx.paddus.w" => "__builtin_ia32_paddusw",
+ "llvm.x86.mmx.palignr.b" => "__builtin_ia32_palignr",
+ "llvm.x86.mmx.pand" => "__builtin_ia32_pand",
+ "llvm.x86.mmx.pandn" => "__builtin_ia32_pandn",
+ "llvm.x86.mmx.pavg.b" => "__builtin_ia32_pavgb",
+ "llvm.x86.mmx.pavg.w" => "__builtin_ia32_pavgw",
+ "llvm.x86.mmx.pcmpeq.b" => "__builtin_ia32_pcmpeqb",
+ "llvm.x86.mmx.pcmpeq.d" => "__builtin_ia32_pcmpeqd",
+ "llvm.x86.mmx.pcmpeq.w" => "__builtin_ia32_pcmpeqw",
+ "llvm.x86.mmx.pcmpgt.b" => "__builtin_ia32_pcmpgtb",
+ "llvm.x86.mmx.pcmpgt.d" => "__builtin_ia32_pcmpgtd",
+ "llvm.x86.mmx.pcmpgt.w" => "__builtin_ia32_pcmpgtw",
+ "llvm.x86.mmx.pextr.w" => "__builtin_ia32_vec_ext_v4hi",
+ "llvm.x86.mmx.pinsr.w" => "__builtin_ia32_vec_set_v4hi",
+ "llvm.x86.mmx.pmadd.wd" => "__builtin_ia32_pmaddwd",
+ "llvm.x86.mmx.pmaxs.w" => "__builtin_ia32_pmaxsw",
+ "llvm.x86.mmx.pmaxu.b" => "__builtin_ia32_pmaxub",
+ "llvm.x86.mmx.pmins.w" => "__builtin_ia32_pminsw",
+ "llvm.x86.mmx.pminu.b" => "__builtin_ia32_pminub",
+ "llvm.x86.mmx.pmovmskb" => "__builtin_ia32_pmovmskb",
+ "llvm.x86.mmx.pmulh.w" => "__builtin_ia32_pmulhw",
+ "llvm.x86.mmx.pmulhu.w" => "__builtin_ia32_pmulhuw",
+ "llvm.x86.mmx.pmull.w" => "__builtin_ia32_pmullw",
+ "llvm.x86.mmx.pmulu.dq" => "__builtin_ia32_pmuludq",
+ "llvm.x86.mmx.por" => "__builtin_ia32_por",
+ "llvm.x86.mmx.psad.bw" => "__builtin_ia32_psadbw",
+ "llvm.x86.mmx.psll.d" => "__builtin_ia32_pslld",
+ "llvm.x86.mmx.psll.q" => "__builtin_ia32_psllq",
+ "llvm.x86.mmx.psll.w" => "__builtin_ia32_psllw",
+ "llvm.x86.mmx.pslli.d" => "__builtin_ia32_pslldi",
+ "llvm.x86.mmx.pslli.q" => "__builtin_ia32_psllqi",
+ "llvm.x86.mmx.pslli.w" => "__builtin_ia32_psllwi",
+ "llvm.x86.mmx.psra.d" => "__builtin_ia32_psrad",
+ "llvm.x86.mmx.psra.w" => "__builtin_ia32_psraw",
+ "llvm.x86.mmx.psrai.d" => "__builtin_ia32_psradi",
+ "llvm.x86.mmx.psrai.w" => "__builtin_ia32_psrawi",
+ "llvm.x86.mmx.psrl.d" => "__builtin_ia32_psrld",
+ "llvm.x86.mmx.psrl.q" => "__builtin_ia32_psrlq",
+ "llvm.x86.mmx.psrl.w" => "__builtin_ia32_psrlw",
+ "llvm.x86.mmx.psrli.d" => "__builtin_ia32_psrldi",
+ "llvm.x86.mmx.psrli.q" => "__builtin_ia32_psrlqi",
+ "llvm.x86.mmx.psrli.w" => "__builtin_ia32_psrlwi",
+ "llvm.x86.mmx.psub.b" => "__builtin_ia32_psubb",
+ "llvm.x86.mmx.psub.d" => "__builtin_ia32_psubd",
+ "llvm.x86.mmx.psub.q" => "__builtin_ia32_psubq",
+ "llvm.x86.mmx.psub.w" => "__builtin_ia32_psubw",
+ "llvm.x86.mmx.psubs.b" => "__builtin_ia32_psubsb",
+ "llvm.x86.mmx.psubs.w" => "__builtin_ia32_psubsw",
+ "llvm.x86.mmx.psubus.b" => "__builtin_ia32_psubusb",
+ "llvm.x86.mmx.psubus.w" => "__builtin_ia32_psubusw",
+ "llvm.x86.mmx.punpckhbw" => "__builtin_ia32_punpckhbw",
+ "llvm.x86.mmx.punpckhdq" => "__builtin_ia32_punpckhdq",
+ "llvm.x86.mmx.punpckhwd" => "__builtin_ia32_punpckhwd",
+ "llvm.x86.mmx.punpcklbw" => "__builtin_ia32_punpcklbw",
+ "llvm.x86.mmx.punpckldq" => "__builtin_ia32_punpckldq",
+ "llvm.x86.mmx.punpcklwd" => "__builtin_ia32_punpcklwd",
+ "llvm.x86.mmx.pxor" => "__builtin_ia32_pxor",
+ "llvm.x86.monitorx" => "__builtin_ia32_monitorx",
+ "llvm.x86.movdir64b" => "__builtin_ia32_movdir64b",
+ "llvm.x86.mwaitx" => "__builtin_ia32_mwaitx",
+ "llvm.x86.pclmulqdq" => "__builtin_ia32_pclmulqdq128",
+ "llvm.x86.pclmulqdq.256" => "__builtin_ia32_pclmulqdq256",
+ "llvm.x86.pclmulqdq.512" => "__builtin_ia32_pclmulqdq512",
+ "llvm.x86.ptwrite32" => "__builtin_ia32_ptwrite32",
+ "llvm.x86.ptwrite64" => "__builtin_ia32_ptwrite64",
+ "llvm.x86.rdfsbase.32" => "__builtin_ia32_rdfsbase32",
+ "llvm.x86.rdfsbase.64" => "__builtin_ia32_rdfsbase64",
+ "llvm.x86.rdgsbase.32" => "__builtin_ia32_rdgsbase32",
+ "llvm.x86.rdgsbase.64" => "__builtin_ia32_rdgsbase64",
+ "llvm.x86.rdpid" => "__builtin_ia32_rdpid",
+ "llvm.x86.rdpkru" => "__builtin_ia32_rdpkru",
+ "llvm.x86.rdpmc" => "__builtin_ia32_rdpmc",
+ "llvm.x86.rdsspd" => "__builtin_ia32_rdsspd",
+ "llvm.x86.rdsspq" => "__builtin_ia32_rdsspq",
+ "llvm.x86.rdtsc" => "__builtin_ia32_rdtsc",
+ "llvm.x86.rdtscp" => "__builtin_ia32_rdtscp",
+ "llvm.x86.rstorssp" => "__builtin_ia32_rstorssp",
+ "llvm.x86.saveprevssp" => "__builtin_ia32_saveprevssp",
+ "llvm.x86.senduipi" => "__builtin_ia32_senduipi",
+ "llvm.x86.serialize" => "__builtin_ia32_serialize",
+ "llvm.x86.setssbsy" => "__builtin_ia32_setssbsy",
+ "llvm.x86.sha1msg1" => "__builtin_ia32_sha1msg1",
+ "llvm.x86.sha1msg2" => "__builtin_ia32_sha1msg2",
+ "llvm.x86.sha1nexte" => "__builtin_ia32_sha1nexte",
+ "llvm.x86.sha1rnds4" => "__builtin_ia32_sha1rnds4",
+ "llvm.x86.sha256msg1" => "__builtin_ia32_sha256msg1",
+ "llvm.x86.sha256msg2" => "__builtin_ia32_sha256msg2",
+ "llvm.x86.sha256rnds2" => "__builtin_ia32_sha256rnds2",
+ "llvm.x86.slwpcb" => "__builtin_ia32_slwpcb",
+ "llvm.x86.sse.add.ss" => "__builtin_ia32_addss",
+ "llvm.x86.sse.cmp.ps" => "__builtin_ia32_cmpps",
+ "llvm.x86.sse.cmp.ss" => "__builtin_ia32_cmpss",
+ "llvm.x86.sse.comieq.ss" => "__builtin_ia32_comieq",
+ "llvm.x86.sse.comige.ss" => "__builtin_ia32_comige",
+ "llvm.x86.sse.comigt.ss" => "__builtin_ia32_comigt",
+ "llvm.x86.sse.comile.ss" => "__builtin_ia32_comile",
+ "llvm.x86.sse.comilt.ss" => "__builtin_ia32_comilt",
+ "llvm.x86.sse.comineq.ss" => "__builtin_ia32_comineq",
+ "llvm.x86.sse.cvtpd2pi" => "__builtin_ia32_cvtpd2pi",
+ "llvm.x86.sse.cvtpi2pd" => "__builtin_ia32_cvtpi2pd",
+ "llvm.x86.sse.cvtpi2ps" => "__builtin_ia32_cvtpi2ps",
+ "llvm.x86.sse.cvtps2pi" => "__builtin_ia32_cvtps2pi",
+ "llvm.x86.sse.cvtsi2ss" => "__builtin_ia32_cvtsi2ss",
+ "llvm.x86.sse.cvtsi642ss" => "__builtin_ia32_cvtsi642ss",
+ "llvm.x86.sse.cvtss2si" => "__builtin_ia32_cvtss2si",
+ "llvm.x86.sse.cvtss2si64" => "__builtin_ia32_cvtss2si64",
+ "llvm.x86.sse.cvttpd2pi" => "__builtin_ia32_cvttpd2pi",
+ "llvm.x86.sse.cvttps2pi" => "__builtin_ia32_cvttps2pi",
+ "llvm.x86.sse.cvttss2si" => "__builtin_ia32_cvttss2si",
+ "llvm.x86.sse.cvttss2si64" => "__builtin_ia32_cvttss2si64",
+ "llvm.x86.sse.div.ss" => "__builtin_ia32_divss",
+ "llvm.x86.sse.max.ps" => "__builtin_ia32_maxps",
+ "llvm.x86.sse.max.ss" => "__builtin_ia32_maxss",
+ "llvm.x86.sse.min.ps" => "__builtin_ia32_minps",
+ "llvm.x86.sse.min.ss" => "__builtin_ia32_minss",
+ "llvm.x86.sse.movmsk.ps" => "__builtin_ia32_movmskps",
+ "llvm.x86.sse.mul.ss" => "__builtin_ia32_mulss",
+ "llvm.x86.sse.pshuf.w" => "__builtin_ia32_pshufw",
+ "llvm.x86.sse.rcp.ps" => "__builtin_ia32_rcpps",
+ "llvm.x86.sse.rcp.ss" => "__builtin_ia32_rcpss",
+ "llvm.x86.sse.rsqrt.ps" => "__builtin_ia32_rsqrtps",
+ "llvm.x86.sse.rsqrt.ss" => "__builtin_ia32_rsqrtss",
+ "llvm.x86.sse.sfence" => "__builtin_ia32_sfence",
+ "llvm.x86.sse.sqrt.ps" => "__builtin_ia32_sqrtps",
+ "llvm.x86.sse.sqrt.ss" => "__builtin_ia32_sqrtss",
+ "llvm.x86.sse.storeu.ps" => "__builtin_ia32_storeups",
+ "llvm.x86.sse.sub.ss" => "__builtin_ia32_subss",
+ "llvm.x86.sse.ucomieq.ss" => "__builtin_ia32_ucomieq",
+ "llvm.x86.sse.ucomige.ss" => "__builtin_ia32_ucomige",
+ "llvm.x86.sse.ucomigt.ss" => "__builtin_ia32_ucomigt",
+ "llvm.x86.sse.ucomile.ss" => "__builtin_ia32_ucomile",
+ "llvm.x86.sse.ucomilt.ss" => "__builtin_ia32_ucomilt",
+ "llvm.x86.sse.ucomineq.ss" => "__builtin_ia32_ucomineq",
+ "llvm.x86.sse2.add.sd" => "__builtin_ia32_addsd",
+ "llvm.x86.sse2.clflush" => "__builtin_ia32_clflush",
+ "llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd",
+ "llvm.x86.sse2.cmp.sd" => "__builtin_ia32_cmpsd",
+ "llvm.x86.sse2.comieq.sd" => "__builtin_ia32_comisdeq",
+ "llvm.x86.sse2.comige.sd" => "__builtin_ia32_comisdge",
+ "llvm.x86.sse2.comigt.sd" => "__builtin_ia32_comisdgt",
+ "llvm.x86.sse2.comile.sd" => "__builtin_ia32_comisdle",
+ "llvm.x86.sse2.comilt.sd" => "__builtin_ia32_comisdlt",
+ "llvm.x86.sse2.comineq.sd" => "__builtin_ia32_comisdneq",
+ "llvm.x86.sse2.cvtdq2pd" => "__builtin_ia32_cvtdq2pd",
+ "llvm.x86.sse2.cvtdq2ps" => "__builtin_ia32_cvtdq2ps",
+ "llvm.x86.sse2.cvtpd2dq" => "__builtin_ia32_cvtpd2dq",
+ "llvm.x86.sse2.cvtpd2ps" => "__builtin_ia32_cvtpd2ps",
+ "llvm.x86.sse2.cvtps2dq" => "__builtin_ia32_cvtps2dq",
+ "llvm.x86.sse2.cvtps2pd" => "__builtin_ia32_cvtps2pd",
+ "llvm.x86.sse2.cvtsd2si" => "__builtin_ia32_cvtsd2si",
+ "llvm.x86.sse2.cvtsd2si64" => "__builtin_ia32_cvtsd2si64",
+ "llvm.x86.sse2.cvtsd2ss" => "__builtin_ia32_cvtsd2ss",
+ "llvm.x86.sse2.cvtsi2sd" => "__builtin_ia32_cvtsi2sd",
+ "llvm.x86.sse2.cvtsi642sd" => "__builtin_ia32_cvtsi642sd",
+ "llvm.x86.sse2.cvtss2sd" => "__builtin_ia32_cvtss2sd",
+ "llvm.x86.sse2.cvttpd2dq" => "__builtin_ia32_cvttpd2dq",
+ "llvm.x86.sse2.cvttps2dq" => "__builtin_ia32_cvttps2dq",
+ "llvm.x86.sse2.cvttsd2si" => "__builtin_ia32_cvttsd2si",
+ "llvm.x86.sse2.cvttsd2si64" => "__builtin_ia32_cvttsd2si64",
+ "llvm.x86.sse2.div.sd" => "__builtin_ia32_divsd",
+ "llvm.x86.sse2.lfence" => "__builtin_ia32_lfence",
+ "llvm.x86.sse2.maskmov.dqu" => "__builtin_ia32_maskmovdqu",
+ "llvm.x86.sse2.max.pd" => "__builtin_ia32_maxpd",
+ "llvm.x86.sse2.max.sd" => "__builtin_ia32_maxsd",
+ "llvm.x86.sse2.mfence" => "__builtin_ia32_mfence",
+ "llvm.x86.sse2.min.pd" => "__builtin_ia32_minpd",
+ "llvm.x86.sse2.min.sd" => "__builtin_ia32_minsd",
+ "llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd",
+ "llvm.x86.sse2.mul.sd" => "__builtin_ia32_mulsd",
+ "llvm.x86.sse2.packssdw.128" => "__builtin_ia32_packssdw128",
+ "llvm.x86.sse2.packsswb.128" => "__builtin_ia32_packsswb128",
+ "llvm.x86.sse2.packuswb.128" => "__builtin_ia32_packuswb128",
+ "llvm.x86.sse2.padds.b" => "__builtin_ia32_paddsb128",
+ "llvm.x86.sse2.padds.w" => "__builtin_ia32_paddsw128",
+ "llvm.x86.sse2.paddus.b" => "__builtin_ia32_paddusb128",
+ "llvm.x86.sse2.paddus.w" => "__builtin_ia32_paddusw128",
+ "llvm.x86.sse2.pause" => "__builtin_ia32_pause",
+ "llvm.x86.sse2.pavg.b" => "__builtin_ia32_pavgb128",
+ "llvm.x86.sse2.pavg.w" => "__builtin_ia32_pavgw128",
+ "llvm.x86.sse2.pmadd.wd" => "__builtin_ia32_pmaddwd128",
+ "llvm.x86.sse2.pmaxs.w" => "__builtin_ia32_pmaxsw128",
+ "llvm.x86.sse2.pmaxu.b" => "__builtin_ia32_pmaxub128",
+ "llvm.x86.sse2.pmins.w" => "__builtin_ia32_pminsw128",
+ "llvm.x86.sse2.pminu.b" => "__builtin_ia32_pminub128",
+ "llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128",
+ "llvm.x86.sse2.pmulh.w" => "__builtin_ia32_pmulhw128",
+ "llvm.x86.sse2.pmulhu.w" => "__builtin_ia32_pmulhuw128",
+ "llvm.x86.sse2.pmulu.dq" => "__builtin_ia32_pmuludq128",
+ "llvm.x86.sse2.psad.bw" => "__builtin_ia32_psadbw128",
+ "llvm.x86.sse2.pshuf.d" => "__builtin_ia32_pshufd",
+ "llvm.x86.sse2.pshufh.w" => "__builtin_ia32_pshufhw",
+ "llvm.x86.sse2.pshufl.w" => "__builtin_ia32_pshuflw",
+ "llvm.x86.sse2.psll.d" => "__builtin_ia32_pslld128",
+ "llvm.x86.sse2.psll.dq" => "__builtin_ia32_pslldqi128",
+ "llvm.x86.sse2.psll.dq.bs" => "__builtin_ia32_pslldqi128_byteshift",
+ "llvm.x86.sse2.psll.q" => "__builtin_ia32_psllq128",
+ "llvm.x86.sse2.psll.w" => "__builtin_ia32_psllw128",
+ "llvm.x86.sse2.pslli.d" => "__builtin_ia32_pslldi128",
+ "llvm.x86.sse2.pslli.q" => "__builtin_ia32_psllqi128",
+ "llvm.x86.sse2.pslli.w" => "__builtin_ia32_psllwi128",
+ "llvm.x86.sse2.psra.d" => "__builtin_ia32_psrad128",
+ "llvm.x86.sse2.psra.w" => "__builtin_ia32_psraw128",
+ "llvm.x86.sse2.psrai.d" => "__builtin_ia32_psradi128",
+ "llvm.x86.sse2.psrai.w" => "__builtin_ia32_psrawi128",
+ "llvm.x86.sse2.psrl.d" => "__builtin_ia32_psrld128",
+ "llvm.x86.sse2.psrl.dq" => "__builtin_ia32_psrldqi128",
+ "llvm.x86.sse2.psrl.dq.bs" => "__builtin_ia32_psrldqi128_byteshift",
+ "llvm.x86.sse2.psrl.q" => "__builtin_ia32_psrlq128",
+ "llvm.x86.sse2.psrl.w" => "__builtin_ia32_psrlw128",
+ "llvm.x86.sse2.psrli.d" => "__builtin_ia32_psrldi128",
+ "llvm.x86.sse2.psrli.q" => "__builtin_ia32_psrlqi128",
+ "llvm.x86.sse2.psrli.w" => "__builtin_ia32_psrlwi128",
+ "llvm.x86.sse2.psubs.b" => "__builtin_ia32_psubsb128",
+ "llvm.x86.sse2.psubs.w" => "__builtin_ia32_psubsw128",
+ "llvm.x86.sse2.psubus.b" => "__builtin_ia32_psubusb128",
+ "llvm.x86.sse2.psubus.w" => "__builtin_ia32_psubusw128",
+ "llvm.x86.sse2.sqrt.pd" => "__builtin_ia32_sqrtpd",
+ "llvm.x86.sse2.sqrt.sd" => "__builtin_ia32_sqrtsd",
+ "llvm.x86.sse2.storel.dq" => "__builtin_ia32_storelv4si",
+ "llvm.x86.sse2.storeu.dq" => "__builtin_ia32_storedqu",
+ "llvm.x86.sse2.storeu.pd" => "__builtin_ia32_storeupd",
+ "llvm.x86.sse2.sub.sd" => "__builtin_ia32_subsd",
+ "llvm.x86.sse2.ucomieq.sd" => "__builtin_ia32_ucomisdeq",
+ "llvm.x86.sse2.ucomige.sd" => "__builtin_ia32_ucomisdge",
+ "llvm.x86.sse2.ucomigt.sd" => "__builtin_ia32_ucomisdgt",
+ "llvm.x86.sse2.ucomile.sd" => "__builtin_ia32_ucomisdle",
+ "llvm.x86.sse2.ucomilt.sd" => "__builtin_ia32_ucomisdlt",
+ "llvm.x86.sse2.ucomineq.sd" => "__builtin_ia32_ucomisdneq",
+ "llvm.x86.sse3.addsub.pd" => "__builtin_ia32_addsubpd",
+ "llvm.x86.sse3.addsub.ps" => "__builtin_ia32_addsubps",
+ "llvm.x86.sse3.hadd.pd" => "__builtin_ia32_haddpd",
+ "llvm.x86.sse3.hadd.ps" => "__builtin_ia32_haddps",
+ "llvm.x86.sse3.hsub.pd" => "__builtin_ia32_hsubpd",
+ "llvm.x86.sse3.hsub.ps" => "__builtin_ia32_hsubps",
+ "llvm.x86.sse3.ldu.dq" => "__builtin_ia32_lddqu",
+ "llvm.x86.sse3.monitor" => "__builtin_ia32_monitor",
+ "llvm.x86.sse3.mwait" => "__builtin_ia32_mwait",
+ "llvm.x86.sse41.blendpd" => "__builtin_ia32_blendpd",
+ "llvm.x86.sse41.blendps" => "__builtin_ia32_blendps",
+ "llvm.x86.sse41.blendvpd" => "__builtin_ia32_blendvpd",
+ "llvm.x86.sse41.blendvps" => "__builtin_ia32_blendvps",
+ "llvm.x86.sse41.dppd" => "__builtin_ia32_dppd",
+ "llvm.x86.sse41.dpps" => "__builtin_ia32_dpps",
+ "llvm.x86.sse41.extractps" => "__builtin_ia32_extractps128",
+ "llvm.x86.sse41.insertps" => "__builtin_ia32_insertps128",
+ "llvm.x86.sse41.movntdqa" => "__builtin_ia32_movntdqa",
+ "llvm.x86.sse41.mpsadbw" => "__builtin_ia32_mpsadbw128",
+ "llvm.x86.sse41.packusdw" => "__builtin_ia32_packusdw128",
+ "llvm.x86.sse41.pblendvb" => "__builtin_ia32_pblendvb128",
+ "llvm.x86.sse41.pblendw" => "__builtin_ia32_pblendw128",
+ "llvm.x86.sse41.phminposuw" => "__builtin_ia32_phminposuw128",
+ "llvm.x86.sse41.pmaxsb" => "__builtin_ia32_pmaxsb128",
+ "llvm.x86.sse41.pmaxsd" => "__builtin_ia32_pmaxsd128",
+ "llvm.x86.sse41.pmaxud" => "__builtin_ia32_pmaxud128",
+ "llvm.x86.sse41.pmaxuw" => "__builtin_ia32_pmaxuw128",
+ "llvm.x86.sse41.pminsb" => "__builtin_ia32_pminsb128",
+ "llvm.x86.sse41.pminsd" => "__builtin_ia32_pminsd128",
+ "llvm.x86.sse41.pminud" => "__builtin_ia32_pminud128",
+ "llvm.x86.sse41.pminuw" => "__builtin_ia32_pminuw128",
+ "llvm.x86.sse41.pmovsxbd" => "__builtin_ia32_pmovsxbd128",
+ "llvm.x86.sse41.pmovsxbq" => "__builtin_ia32_pmovsxbq128",
+ "llvm.x86.sse41.pmovsxbw" => "__builtin_ia32_pmovsxbw128",
+ "llvm.x86.sse41.pmovsxdq" => "__builtin_ia32_pmovsxdq128",
+ "llvm.x86.sse41.pmovsxwd" => "__builtin_ia32_pmovsxwd128",
+ "llvm.x86.sse41.pmovsxwq" => "__builtin_ia32_pmovsxwq128",
+ "llvm.x86.sse41.pmovzxbd" => "__builtin_ia32_pmovzxbd128",
+ "llvm.x86.sse41.pmovzxbq" => "__builtin_ia32_pmovzxbq128",
+ "llvm.x86.sse41.pmovzxbw" => "__builtin_ia32_pmovzxbw128",
+ "llvm.x86.sse41.pmovzxdq" => "__builtin_ia32_pmovzxdq128",
+ "llvm.x86.sse41.pmovzxwd" => "__builtin_ia32_pmovzxwd128",
+ "llvm.x86.sse41.pmovzxwq" => "__builtin_ia32_pmovzxwq128",
+ "llvm.x86.sse41.pmuldq" => "__builtin_ia32_pmuldq128",
+ "llvm.x86.sse41.ptestc" => "__builtin_ia32_ptestc128",
+ "llvm.x86.sse41.ptestnzc" => "__builtin_ia32_ptestnzc128",
+ "llvm.x86.sse41.ptestz" => "__builtin_ia32_ptestz128",
+ "llvm.x86.sse41.round.pd" => "__builtin_ia32_roundpd",
+ "llvm.x86.sse41.round.ps" => "__builtin_ia32_roundps",
+ "llvm.x86.sse41.round.sd" => "__builtin_ia32_roundsd",
+ "llvm.x86.sse41.round.ss" => "__builtin_ia32_roundss",
+ "llvm.x86.sse42.crc32.32.16" => "__builtin_ia32_crc32hi",
+ "llvm.x86.sse42.crc32.32.32" => "__builtin_ia32_crc32si",
+ "llvm.x86.sse42.crc32.32.8" => "__builtin_ia32_crc32qi",
+ "llvm.x86.sse42.crc32.64.64" => "__builtin_ia32_crc32di",
+ "llvm.x86.sse42.pcmpestri128" => "__builtin_ia32_pcmpestri128",
+ "llvm.x86.sse42.pcmpestria128" => "__builtin_ia32_pcmpestria128",
+ "llvm.x86.sse42.pcmpestric128" => "__builtin_ia32_pcmpestric128",
+ "llvm.x86.sse42.pcmpestrio128" => "__builtin_ia32_pcmpestrio128",
+ "llvm.x86.sse42.pcmpestris128" => "__builtin_ia32_pcmpestris128",
+ "llvm.x86.sse42.pcmpestriz128" => "__builtin_ia32_pcmpestriz128",
+ "llvm.x86.sse42.pcmpestrm128" => "__builtin_ia32_pcmpestrm128",
+ "llvm.x86.sse42.pcmpistri128" => "__builtin_ia32_pcmpistri128",
+ "llvm.x86.sse42.pcmpistria128" => "__builtin_ia32_pcmpistria128",
+ "llvm.x86.sse42.pcmpistric128" => "__builtin_ia32_pcmpistric128",
+ "llvm.x86.sse42.pcmpistrio128" => "__builtin_ia32_pcmpistrio128",
+ "llvm.x86.sse42.pcmpistris128" => "__builtin_ia32_pcmpistris128",
+ "llvm.x86.sse42.pcmpistriz128" => "__builtin_ia32_pcmpistriz128",
+ "llvm.x86.sse42.pcmpistrm128" => "__builtin_ia32_pcmpistrm128",
+ "llvm.x86.sse4a.extrq" => "__builtin_ia32_extrq",
+ "llvm.x86.sse4a.extrqi" => "__builtin_ia32_extrqi",
+ "llvm.x86.sse4a.insertq" => "__builtin_ia32_insertq",
+ "llvm.x86.sse4a.insertqi" => "__builtin_ia32_insertqi",
+ "llvm.x86.sse4a.movnt.sd" => "__builtin_ia32_movntsd",
+ "llvm.x86.sse4a.movnt.ss" => "__builtin_ia32_movntss",
+ "llvm.x86.ssse3.pabs.b" => "__builtin_ia32_pabsb",
+ "llvm.x86.ssse3.pabs.b.128" => "__builtin_ia32_pabsb128",
+ "llvm.x86.ssse3.pabs.d" => "__builtin_ia32_pabsd",
+ "llvm.x86.ssse3.pabs.d.128" => "__builtin_ia32_pabsd128",
+ "llvm.x86.ssse3.pabs.w" => "__builtin_ia32_pabsw",
+ "llvm.x86.ssse3.pabs.w.128" => "__builtin_ia32_pabsw128",
+ "llvm.x86.ssse3.phadd.d" => "__builtin_ia32_phaddd",
+ "llvm.x86.ssse3.phadd.d.128" => "__builtin_ia32_phaddd128",
+ "llvm.x86.ssse3.phadd.sw" => "__builtin_ia32_phaddsw",
+ "llvm.x86.ssse3.phadd.sw.128" => "__builtin_ia32_phaddsw128",
+ "llvm.x86.ssse3.phadd.w" => "__builtin_ia32_phaddw",
+ "llvm.x86.ssse3.phadd.w.128" => "__builtin_ia32_phaddw128",
+ "llvm.x86.ssse3.phsub.d" => "__builtin_ia32_phsubd",
+ "llvm.x86.ssse3.phsub.d.128" => "__builtin_ia32_phsubd128",
+ "llvm.x86.ssse3.phsub.sw" => "__builtin_ia32_phsubsw",
+ "llvm.x86.ssse3.phsub.sw.128" => "__builtin_ia32_phsubsw128",
+ "llvm.x86.ssse3.phsub.w" => "__builtin_ia32_phsubw",
+ "llvm.x86.ssse3.phsub.w.128" => "__builtin_ia32_phsubw128",
+ "llvm.x86.ssse3.pmadd.ub.sw" => "__builtin_ia32_pmaddubsw",
+ "llvm.x86.ssse3.pmadd.ub.sw.128" => "__builtin_ia32_pmaddubsw128",
+ "llvm.x86.ssse3.pmul.hr.sw" => "__builtin_ia32_pmulhrsw",
+ "llvm.x86.ssse3.pmul.hr.sw.128" => "__builtin_ia32_pmulhrsw128",
+ "llvm.x86.ssse3.pshuf.b" => "__builtin_ia32_pshufb",
+ "llvm.x86.ssse3.pshuf.b.128" => "__builtin_ia32_pshufb128",
+ "llvm.x86.ssse3.psign.b" => "__builtin_ia32_psignb",
+ "llvm.x86.ssse3.psign.b.128" => "__builtin_ia32_psignb128",
+ "llvm.x86.ssse3.psign.d" => "__builtin_ia32_psignd",
+ "llvm.x86.ssse3.psign.d.128" => "__builtin_ia32_psignd128",
+ "llvm.x86.ssse3.psign.w" => "__builtin_ia32_psignw",
+ "llvm.x86.ssse3.psign.w.128" => "__builtin_ia32_psignw128",
+ "llvm.x86.sttilecfg" => "__builtin_ia32_tile_storeconfig",
+ "llvm.x86.stui" => "__builtin_ia32_stui",
+ "llvm.x86.subborrow.u32" => "__builtin_ia32_subborrow_u32",
+ "llvm.x86.subborrow.u64" => "__builtin_ia32_subborrow_u64",
+ "llvm.x86.tbm.bextri.u32" => "__builtin_ia32_bextri_u32",
+ "llvm.x86.tbm.bextri.u64" => "__builtin_ia32_bextri_u64",
+ "llvm.x86.tdpbf16ps" => "__builtin_ia32_tdpbf16ps",
+ "llvm.x86.tdpbf16ps.internal" => "__builtin_ia32_tdpbf16ps_internal",
+ "llvm.x86.tdpbssd" => "__builtin_ia32_tdpbssd",
+ "llvm.x86.tdpbssd.internal" => "__builtin_ia32_tdpbssd_internal",
+ "llvm.x86.tdpbsud" => "__builtin_ia32_tdpbsud",
+ "llvm.x86.tdpbsud.internal" => "__builtin_ia32_tdpbsud_internal",
+ "llvm.x86.tdpbusd" => "__builtin_ia32_tdpbusd",
+ "llvm.x86.tdpbusd.internal" => "__builtin_ia32_tdpbusd_internal",
+ "llvm.x86.tdpbuud" => "__builtin_ia32_tdpbuud",
+ "llvm.x86.tdpbuud.internal" => "__builtin_ia32_tdpbuud_internal",
+ "llvm.x86.testui" => "__builtin_ia32_testui",
+ "llvm.x86.tileloadd64" => "__builtin_ia32_tileloadd64",
+ "llvm.x86.tileloadd64.internal" => "__builtin_ia32_tileloadd64_internal",
+ "llvm.x86.tileloaddt164" => "__builtin_ia32_tileloaddt164",
+ "llvm.x86.tileloaddt164.internal" => "__builtin_ia32_tileloaddt164_internal",
+ "llvm.x86.tilerelease" => "__builtin_ia32_tilerelease",
+ "llvm.x86.tilestored64" => "__builtin_ia32_tilestored64",
+ "llvm.x86.tilestored64.internal" => "__builtin_ia32_tilestored64_internal",
+ "llvm.x86.tilezero" => "__builtin_ia32_tilezero",
+ "llvm.x86.tilezero.internal" => "__builtin_ia32_tilezero_internal",
+ "llvm.x86.tpause" => "__builtin_ia32_tpause",
+ "llvm.x86.umonitor" => "__builtin_ia32_umonitor",
+ "llvm.x86.umwait" => "__builtin_ia32_umwait",
+ "llvm.x86.vcvtph2ps.128" => "__builtin_ia32_vcvtph2ps",
+ "llvm.x86.vcvtph2ps.256" => "__builtin_ia32_vcvtph2ps256",
+ "llvm.x86.vcvtps2ph.128" => "__builtin_ia32_vcvtps2ph",
+ "llvm.x86.vcvtps2ph.256" => "__builtin_ia32_vcvtps2ph256",
+ "llvm.x86.vgf2p8affineinvqb.128" => "__builtin_ia32_vgf2p8affineinvqb_v16qi",
+ "llvm.x86.vgf2p8affineinvqb.256" => "__builtin_ia32_vgf2p8affineinvqb_v32qi",
+ "llvm.x86.vgf2p8affineinvqb.512" => "__builtin_ia32_vgf2p8affineinvqb_v64qi",
+ "llvm.x86.vgf2p8affineqb.128" => "__builtin_ia32_vgf2p8affineqb_v16qi",
+ "llvm.x86.vgf2p8affineqb.256" => "__builtin_ia32_vgf2p8affineqb_v32qi",
+ "llvm.x86.vgf2p8affineqb.512" => "__builtin_ia32_vgf2p8affineqb_v64qi",
+ "llvm.x86.vgf2p8mulb.128" => "__builtin_ia32_vgf2p8mulb_v16qi",
+ "llvm.x86.vgf2p8mulb.256" => "__builtin_ia32_vgf2p8mulb_v32qi",
+ "llvm.x86.vgf2p8mulb.512" => "__builtin_ia32_vgf2p8mulb_v64qi",
+ "llvm.x86.wbinvd" => "__builtin_ia32_wbinvd",
+ "llvm.x86.wbnoinvd" => "__builtin_ia32_wbnoinvd",
+ "llvm.x86.wrfsbase.32" => "__builtin_ia32_wrfsbase32",
+ "llvm.x86.wrfsbase.64" => "__builtin_ia32_wrfsbase64",
+ "llvm.x86.wrgsbase.32" => "__builtin_ia32_wrgsbase32",
+ "llvm.x86.wrgsbase.64" => "__builtin_ia32_wrgsbase64",
+ "llvm.x86.wrpkru" => "__builtin_ia32_wrpkru",
+ "llvm.x86.wrssd" => "__builtin_ia32_wrssd",
+ "llvm.x86.wrssq" => "__builtin_ia32_wrssq",
+ "llvm.x86.wrussd" => "__builtin_ia32_wrussd",
+ "llvm.x86.wrussq" => "__builtin_ia32_wrussq",
+ "llvm.x86.xabort" => "__builtin_ia32_xabort",
+ "llvm.x86.xbegin" => "__builtin_ia32_xbegin",
+ "llvm.x86.xend" => "__builtin_ia32_xend",
+ "llvm.x86.xop.vfrcz.pd" => "__builtin_ia32_vfrczpd",
+ "llvm.x86.xop.vfrcz.pd.256" => "__builtin_ia32_vfrczpd256",
+ "llvm.x86.xop.vfrcz.ps" => "__builtin_ia32_vfrczps",
+ "llvm.x86.xop.vfrcz.ps.256" => "__builtin_ia32_vfrczps256",
+ "llvm.x86.xop.vfrcz.sd" => "__builtin_ia32_vfrczsd",
+ "llvm.x86.xop.vfrcz.ss" => "__builtin_ia32_vfrczss",
+ "llvm.x86.xop.vpcmov" => "__builtin_ia32_vpcmov",
+ "llvm.x86.xop.vpcmov.256" => "__builtin_ia32_vpcmov_256",
+ "llvm.x86.xop.vpcomb" => "__builtin_ia32_vpcomb",
+ "llvm.x86.xop.vpcomd" => "__builtin_ia32_vpcomd",
+ "llvm.x86.xop.vpcomq" => "__builtin_ia32_vpcomq",
+ "llvm.x86.xop.vpcomub" => "__builtin_ia32_vpcomub",
+ "llvm.x86.xop.vpcomud" => "__builtin_ia32_vpcomud",
+ "llvm.x86.xop.vpcomuq" => "__builtin_ia32_vpcomuq",
+ "llvm.x86.xop.vpcomuw" => "__builtin_ia32_vpcomuw",
+ "llvm.x86.xop.vpcomw" => "__builtin_ia32_vpcomw",
+ "llvm.x86.xop.vpermil2pd" => "__builtin_ia32_vpermil2pd",
+ "llvm.x86.xop.vpermil2pd.256" => "__builtin_ia32_vpermil2pd256",
+ "llvm.x86.xop.vpermil2ps" => "__builtin_ia32_vpermil2ps",
+ "llvm.x86.xop.vpermil2ps.256" => "__builtin_ia32_vpermil2ps256",
+ "llvm.x86.xop.vphaddbd" => "__builtin_ia32_vphaddbd",
+ "llvm.x86.xop.vphaddbq" => "__builtin_ia32_vphaddbq",
+ "llvm.x86.xop.vphaddbw" => "__builtin_ia32_vphaddbw",
+ "llvm.x86.xop.vphadddq" => "__builtin_ia32_vphadddq",
+ "llvm.x86.xop.vphaddubd" => "__builtin_ia32_vphaddubd",
+ "llvm.x86.xop.vphaddubq" => "__builtin_ia32_vphaddubq",
+ "llvm.x86.xop.vphaddubw" => "__builtin_ia32_vphaddubw",
+ "llvm.x86.xop.vphaddudq" => "__builtin_ia32_vphaddudq",
+ "llvm.x86.xop.vphadduwd" => "__builtin_ia32_vphadduwd",
+ "llvm.x86.xop.vphadduwq" => "__builtin_ia32_vphadduwq",
+ "llvm.x86.xop.vphaddwd" => "__builtin_ia32_vphaddwd",
+ "llvm.x86.xop.vphaddwq" => "__builtin_ia32_vphaddwq",
+ "llvm.x86.xop.vphsubbw" => "__builtin_ia32_vphsubbw",
+ "llvm.x86.xop.vphsubdq" => "__builtin_ia32_vphsubdq",
+ "llvm.x86.xop.vphsubwd" => "__builtin_ia32_vphsubwd",
+ "llvm.x86.xop.vpmacsdd" => "__builtin_ia32_vpmacsdd",
+ "llvm.x86.xop.vpmacsdqh" => "__builtin_ia32_vpmacsdqh",
+ "llvm.x86.xop.vpmacsdql" => "__builtin_ia32_vpmacsdql",
+ "llvm.x86.xop.vpmacssdd" => "__builtin_ia32_vpmacssdd",
+ "llvm.x86.xop.vpmacssdqh" => "__builtin_ia32_vpmacssdqh",
+ "llvm.x86.xop.vpmacssdql" => "__builtin_ia32_vpmacssdql",
+ "llvm.x86.xop.vpmacsswd" => "__builtin_ia32_vpmacsswd",
+ "llvm.x86.xop.vpmacssww" => "__builtin_ia32_vpmacssww",
+ "llvm.x86.xop.vpmacswd" => "__builtin_ia32_vpmacswd",
+ "llvm.x86.xop.vpmacsww" => "__builtin_ia32_vpmacsww",
+ "llvm.x86.xop.vpmadcsswd" => "__builtin_ia32_vpmadcsswd",
+ "llvm.x86.xop.vpmadcswd" => "__builtin_ia32_vpmadcswd",
+ "llvm.x86.xop.vpperm" => "__builtin_ia32_vpperm",
+ "llvm.x86.xop.vprotb" => "__builtin_ia32_vprotb",
+ "llvm.x86.xop.vprotbi" => "__builtin_ia32_vprotbi",
+ "llvm.x86.xop.vprotd" => "__builtin_ia32_vprotd",
+ "llvm.x86.xop.vprotdi" => "__builtin_ia32_vprotdi",
+ "llvm.x86.xop.vprotq" => "__builtin_ia32_vprotq",
+ "llvm.x86.xop.vprotqi" => "__builtin_ia32_vprotqi",
+ "llvm.x86.xop.vprotw" => "__builtin_ia32_vprotw",
+ "llvm.x86.xop.vprotwi" => "__builtin_ia32_vprotwi",
+ "llvm.x86.xop.vpshab" => "__builtin_ia32_vpshab",
+ "llvm.x86.xop.vpshad" => "__builtin_ia32_vpshad",
+ "llvm.x86.xop.vpshaq" => "__builtin_ia32_vpshaq",
+ "llvm.x86.xop.vpshaw" => "__builtin_ia32_vpshaw",
+ "llvm.x86.xop.vpshlb" => "__builtin_ia32_vpshlb",
+ "llvm.x86.xop.vpshld" => "__builtin_ia32_vpshld",
+ "llvm.x86.xop.vpshlq" => "__builtin_ia32_vpshlq",
+ "llvm.x86.xop.vpshlw" => "__builtin_ia32_vpshlw",
+ "llvm.x86.xresldtrk" => "__builtin_ia32_xresldtrk",
+ "llvm.x86.xsusldtrk" => "__builtin_ia32_xsusldtrk",
+ "llvm.x86.xtest" => "__builtin_ia32_xtest",
+ // xcore
+ "llvm.xcore.bitrev" => "__builtin_bitrev",
+ "llvm.xcore.getid" => "__builtin_getid",
+ "llvm.xcore.getps" => "__builtin_getps",
+ "llvm.xcore.setps" => "__builtin_setps",
+ _ => unimplemented!("unsupported LLVM intrinsic {}", name),
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
new file mode 100644
index 000000000..1b089f08f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
@@ -0,0 +1,250 @@
+use std::borrow::Cow;
+
+use gccjit::{Function, FunctionPtrType, RValue, ToRValue};
+
+use crate::{context::CodegenCx, builder::Builder};
+
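+ // Sketch of a hypothetical call site for the function below (illustrative
+ // only; the actual caller lives in the intrinsic call lowering and may
+ // differ):
+ //     let args = adjust_intrinsic_arguments(&builder, gcc_func, args.into(), func_name);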
+pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, gcc_func: FunctionPtrType<'gcc>, mut args: Cow<'b, [RValue<'gcc>]>, func_name: &str) -> Cow<'b, [RValue<'gcc>]> {
+ // Some LLVM intrinsics do not map 1-to-1 to GCC intrinsics, so we add the missing
+ // arguments here.
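+ // For instance (an illustration derived from the handling below):
+ // llvm.x86.avx512.max.ps.512 passes 3 arguments (a, b, rounding) while
+ // __builtin_ia32_maxps512_mask expects 5 (a, b, merge source, write mask,
+ // rounding), so an undefined merge source and an all-ones (-1) mask are
+ // synthesized before the last argument.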
+ if gcc_func.get_param_count() != args.len() {
+ match &*func_name {
+ "__builtin_ia32_pmuldq512_mask" | "__builtin_ia32_pmuludq512_mask"
+ // FIXME(antoyo): the following intrinsics have 4 (or 5) arguments according to the doc, but are defined with 2 (or 3) arguments in library/stdarch/crates/core_arch/src/x86/avx512f.rs.
+ | "__builtin_ia32_pmaxsd512_mask" | "__builtin_ia32_pmaxsq512_mask" | "__builtin_ia32_pmaxsq256_mask"
+ | "__builtin_ia32_pmaxsq128_mask" | "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask"
+ | "__builtin_ia32_pmaxud512_mask" | "__builtin_ia32_pmaxuq512_mask" | "__builtin_ia32_pmaxuq256_mask"
+ | "__builtin_ia32_pmaxuq128_mask"
+ | "__builtin_ia32_pminsd512_mask" | "__builtin_ia32_pminsq512_mask" | "__builtin_ia32_pminsq256_mask"
+ | "__builtin_ia32_pminsq128_mask" | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask"
+ | "__builtin_ia32_pminud512_mask" | "__builtin_ia32_pminuq512_mask" | "__builtin_ia32_pminuq256_mask"
+ | "__builtin_ia32_pminuq128_mask" | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask"
+ => {
+ // TODO: refactor by moving those intrinsics out of this branch into their own arms.
+ let add_before_last_arg =
+ match &*func_name {
+ "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask"
+ | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask"
+ | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => true,
+ _ => false,
+ };
+ let new_first_arg_is_zero =
+ match &*func_name {
+ "__builtin_ia32_pmaxuq256_mask" | "__builtin_ia32_pmaxuq128_mask"
+ | "__builtin_ia32_pminuq256_mask" | "__builtin_ia32_pminuq128_mask" => true,
+ _ => false
+ };
+ let arg3_index =
+ match &*func_name {
+ "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => 1,
+ _ => 2,
+ };
+ let mut new_args = args.to_vec();
+ let arg3_type = gcc_func.get_param_type(arg3_index);
+ let first_arg =
+ if new_first_arg_is_zero {
+ let vector_type = arg3_type.dyncast_vector().expect("vector type");
+ let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
+ let num_units = vector_type.get_num_units();
+ builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units])
+ }
+ else {
+ builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue()
+ };
+ if add_before_last_arg {
+ new_args.insert(new_args.len() - 1, first_arg);
+ }
+ else {
+ new_args.push(first_arg);
+ }
+ let arg4_index =
+ match &*func_name {
+ "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => 2,
+ _ => 3,
+ };
+ let arg4_type = gcc_func.get_param_type(arg4_index);
+ let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+ if add_before_last_arg {
+ new_args.insert(new_args.len() - 1, minus_one);
+ }
+ else {
+ new_args.push(minus_one);
+ }
+ args = new_args.into();
+ },
+ "__builtin_ia32_pternlogd512_mask" | "__builtin_ia32_pternlogd256_mask"
+ | "__builtin_ia32_pternlogd128_mask" | "__builtin_ia32_pternlogq512_mask"
+ | "__builtin_ia32_pternlogq256_mask" | "__builtin_ia32_pternlogq128_mask" => {
+ let mut new_args = args.to_vec();
+ let arg5_type = gcc_func.get_param_type(4);
+ let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1);
+ new_args.push(minus_one);
+ args = new_args.into();
+ },
+ "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => {
+ let mut new_args = args.to_vec();
+
+ let mut last_arg = None;
+ if args.len() == 4 {
+ last_arg = new_args.pop();
+ }
+
+ let arg4_type = gcc_func.get_param_type(3);
+ let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+ new_args.push(minus_one);
+
+ if args.len() == 3 {
+ // Both llvm.fma.v16f32 and llvm.x86.avx512.vfmadd.ps.512 map to
+ // the same GCC intrinsic, but the former has 3 parameters and the
+ // latter has 4, so the latter doesn't require this additional argument.
+ let arg5_type = gcc_func.get_param_type(4);
+ new_args.push(builder.context.new_rvalue_from_int(arg5_type, 4));
+ }
+
+ if let Some(last_arg) = last_arg {
+ new_args.push(last_arg);
+ }
+
+ args = new_args.into();
+ },
+ "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask"
+ | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask"
+ | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask"
+ | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask" => {
+ let mut new_args = args.to_vec();
+ let last_arg = new_args.pop().expect("last arg");
+ let arg3_type = gcc_func.get_param_type(2);
+ let undefined = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue();
+ new_args.push(undefined);
+ let arg4_type = gcc_func.get_param_type(3);
+ let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+ new_args.push(minus_one);
+ new_args.push(last_arg);
+ args = new_args.into();
+ },
+ "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => {
+ let mut new_args = args.to_vec();
+ let last_arg = new_args.pop().expect("last arg");
+ let arg4_type = gcc_func.get_param_type(3);
+ let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+ new_args.push(minus_one);
+ new_args.push(last_arg);
+ args = new_args.into();
+ },
+ _ => (),
+ }
+ }
+
+ args
+}
+
+pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool {
+ // NOTE: these intrinsics have missing parameters before the last one, so ignore the
+ // last argument type check.
+ // FIXME(antoyo): find a way to refactor in order to avoid this hack.
+ match func_name {
+ "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask"
+ | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" | "__builtin_ia32_sqrtps512_mask"
+ | "__builtin_ia32_sqrtpd512_mask" | "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask"
+ | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask"
+ | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask"
+ | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask"
+ | "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => {
+ if index == args_len - 1 {
+ return true;
+ }
+ },
+ "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => {
+ // Since there are two LLVM intrinsics that map to each of these GCC builtins and only
+ // one of them has a missing parameter before the last one, we check the number of
+ // arguments to distinguish those cases.
+ if args_len == 4 && index == args_len - 1 {
+ return true;
+ }
+ },
+ _ => (),
+ }
+
+ false
+}
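+
+// Hypothetical call-site shape (a sketch, not upstream code): the function-call
+// lowering is expected to consult `ignore_arg_cast` while casting arguments to
+// the GCC parameter types, along these lines:
+//
+//     for (index, arg) in args.iter().enumerate() {
+//         if ignore_arg_cast(func_name, index, args.len()) {
+//             continue; // leave the argument as-is
+//         }
+//         // otherwise cast `arg` to `gcc_func.get_param_type(index)`
+//     }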
+
+#[cfg(not(feature="master"))]
+pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
+ match name {
+ "llvm.x86.xgetbv" => {
+ let gcc_name = "__builtin_trap";
+ let func = cx.context.get_builtin_function(gcc_name);
+ cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+ return func;
+ },
+ _ => unimplemented!("unsupported LLVM intrinsic {}", name),
+ }
+}
+
+#[cfg(feature="master")]
+pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
+ let gcc_name = match name {
+ "llvm.x86.xgetbv" => "__builtin_ia32_xgetbv",
+ // NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
+ "llvm.sqrt.v2f64" => "__builtin_ia32_sqrtpd",
+ "llvm.x86.avx512.pmul.dq.512" => "__builtin_ia32_pmuldq512_mask",
+ "llvm.x86.avx512.pmulu.dq.512" => "__builtin_ia32_pmuludq512_mask",
+ "llvm.x86.avx512.mask.pmaxs.q.256" => "__builtin_ia32_pmaxsq256_mask",
+ "llvm.x86.avx512.mask.pmaxs.q.128" => "__builtin_ia32_pmaxsq128_mask",
+ "llvm.x86.avx512.max.ps.512" => "__builtin_ia32_maxps512_mask",
+ "llvm.x86.avx512.max.pd.512" => "__builtin_ia32_maxpd512_mask",
+ "llvm.x86.avx512.mask.pmaxu.q.256" => "__builtin_ia32_pmaxuq256_mask",
+ "llvm.x86.avx512.mask.pmaxu.q.128" => "__builtin_ia32_pmaxuq128_mask",
+ "llvm.x86.avx512.mask.pmins.q.256" => "__builtin_ia32_pminsq256_mask",
+ "llvm.x86.avx512.mask.pmins.q.128" => "__builtin_ia32_pminsq128_mask",
+ "llvm.x86.avx512.min.ps.512" => "__builtin_ia32_minps512_mask",
+ "llvm.x86.avx512.min.pd.512" => "__builtin_ia32_minpd512_mask",
+ "llvm.x86.avx512.mask.pminu.q.256" => "__builtin_ia32_pminuq256_mask",
+ "llvm.x86.avx512.mask.pminu.q.128" => "__builtin_ia32_pminuq128_mask",
+ "llvm.fma.v16f32" => "__builtin_ia32_vfmaddps512_mask",
+ "llvm.fma.v8f64" => "__builtin_ia32_vfmaddpd512_mask",
+ "llvm.x86.avx512.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask",
+ "llvm.x86.avx512.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask",
+ "llvm.x86.avx512.pternlog.d.512" => "__builtin_ia32_pternlogd512_mask",
+ "llvm.x86.avx512.pternlog.d.256" => "__builtin_ia32_pternlogd256_mask",
+ "llvm.x86.avx512.pternlog.d.128" => "__builtin_ia32_pternlogd128_mask",
+ "llvm.x86.avx512.pternlog.q.512" => "__builtin_ia32_pternlogq512_mask",
+ "llvm.x86.avx512.pternlog.q.256" => "__builtin_ia32_pternlogq256_mask",
+ "llvm.x86.avx512.pternlog.q.128" => "__builtin_ia32_pternlogq128_mask",
+ "llvm.x86.avx512.add.ps.512" => "__builtin_ia32_addps512_mask",
+ "llvm.x86.avx512.add.pd.512" => "__builtin_ia32_addpd512_mask",
+ "llvm.x86.avx512.sub.ps.512" => "__builtin_ia32_subps512_mask",
+ "llvm.x86.avx512.sub.pd.512" => "__builtin_ia32_subpd512_mask",
+ "llvm.x86.avx512.mul.ps.512" => "__builtin_ia32_mulps512_mask",
+ "llvm.x86.avx512.mul.pd.512" => "__builtin_ia32_mulpd512_mask",
+ "llvm.x86.avx512.div.ps.512" => "__builtin_ia32_divps512_mask",
+ "llvm.x86.avx512.div.pd.512" => "__builtin_ia32_divpd512_mask",
+ "llvm.x86.avx512.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask",
+ "llvm.x86.avx512.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask",
+
+ // The above doc points to unknown builtins for the following, so override them:
+ "llvm.x86.avx2.gather.d.d" => "__builtin_ia32_gathersiv4si",
+ "llvm.x86.avx2.gather.d.d.256" => "__builtin_ia32_gathersiv8si",
+ "llvm.x86.avx2.gather.d.ps" => "__builtin_ia32_gathersiv4sf",
+ "llvm.x86.avx2.gather.d.ps.256" => "__builtin_ia32_gathersiv8sf",
+ "llvm.x86.avx2.gather.d.q" => "__builtin_ia32_gathersiv2di",
+ "llvm.x86.avx2.gather.d.q.256" => "__builtin_ia32_gathersiv4di",
+ "llvm.x86.avx2.gather.d.pd" => "__builtin_ia32_gathersiv2df",
+ "llvm.x86.avx2.gather.d.pd.256" => "__builtin_ia32_gathersiv4df",
+ "llvm.x86.avx2.gather.q.d" => "__builtin_ia32_gatherdiv4si",
+ "llvm.x86.avx2.gather.q.d.256" => "__builtin_ia32_gatherdiv4si256",
+ "llvm.x86.avx2.gather.q.ps" => "__builtin_ia32_gatherdiv4sf",
+ "llvm.x86.avx2.gather.q.ps.256" => "__builtin_ia32_gatherdiv4sf256",
+ "llvm.x86.avx2.gather.q.q" => "__builtin_ia32_gatherdiv2di",
+ "llvm.x86.avx2.gather.q.q.256" => "__builtin_ia32_gatherdiv4di",
+ "llvm.x86.avx2.gather.q.pd" => "__builtin_ia32_gatherdiv2df",
+ "llvm.x86.avx2.gather.q.pd.256" => "__builtin_ia32_gatherdiv4df",
+ "" => "",
+ // NOTE: this file is generated by https://github.com/GuillaumeGomez/llvmint/blob/master/generate_list.py
+ _ => include!("archs.rs"),
+ };
+
+ let func = cx.context.get_target_builtin_function(gcc_name);
+ cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+ func
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
new file mode 100644
index 000000000..5fbdedac0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -0,0 +1,1134 @@
+pub mod llvm;
+mod simd;
+
+use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp, FunctionType};
+use rustc_codegen_ssa::MemFlags;
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
+use rustc_middle::bug;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_span::{Span, Symbol, symbol::kw, sym};
+use rustc_target::abi::HasDataLayout;
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::spec::PanicStrategy;
+
+use crate::abi::GccType;
+use crate::builder::Builder;
+use crate::common::{SignType, TypeReflection};
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+use crate::intrinsic::simd::generic_simd_intrinsic;
+
+fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option<Function<'gcc>> {
+ let gcc_name = match name {
+ sym::sqrtf32 => "sqrtf",
+ sym::sqrtf64 => "sqrt",
+ sym::powif32 => "__builtin_powif",
+ sym::powif64 => "__builtin_powi",
+ sym::sinf32 => "sinf",
+ sym::sinf64 => "sin",
+ sym::cosf32 => "cosf",
+ sym::cosf64 => "cos",
+ sym::powf32 => "powf",
+ sym::powf64 => "pow",
+ sym::expf32 => "expf",
+ sym::expf64 => "exp",
+ sym::exp2f32 => "exp2f",
+ sym::exp2f64 => "exp2",
+ sym::logf32 => "logf",
+ sym::logf64 => "log",
+ sym::log10f32 => "log10f",
+ sym::log10f64 => "log10",
+ sym::log2f32 => "log2f",
+ sym::log2f64 => "log2",
+ sym::fmaf32 => "fmaf",
+ sym::fmaf64 => "fma",
+ sym::fabsf32 => "fabsf",
+ sym::fabsf64 => "fabs",
+ sym::minnumf32 => "fminf",
+ sym::minnumf64 => "fmin",
+ sym::maxnumf32 => "fmaxf",
+ sym::maxnumf64 => "fmax",
+ sym::copysignf32 => "copysignf",
+ sym::copysignf64 => "copysign",
+ sym::floorf32 => "floorf",
+ sym::floorf64 => "floor",
+ sym::ceilf32 => "ceilf",
+ sym::ceilf64 => "ceil",
+ sym::truncf32 => "truncf",
+ sym::truncf64 => "trunc",
+ sym::rintf32 => "rintf",
+ sym::rintf64 => "rint",
+ sym::nearbyintf32 => "nearbyintf",
+ sym::nearbyintf64 => "nearbyint",
+ sym::roundf32 => "roundf",
+ sym::roundf64 => "round",
+ sym::abort => "abort",
+ _ => return None,
+ };
+ Some(cx.context.get_builtin_function(&gcc_name))
+}
+
+impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
+ let tcx = self.tcx;
+ let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
+
+ let (def_id, substs) = match *callee_ty.kind() {
+ ty::FnDef(def_id, substs) => (def_id, substs),
+ _ => bug!("expected fn item type, found {}", callee_ty),
+ };
+
+ let sig = callee_ty.fn_sig(tcx);
+ let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
+ let arg_tys = sig.inputs();
+ let ret_ty = sig.output();
+ let name = tcx.item_name(def_id);
+ let name_str = name.as_str();
+
+ let llret_ty = self.layout_of(ret_ty).gcc_type(self, true);
+ let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+
+ let simple = get_simple_intrinsic(self, name);
+ let llval =
+ match name {
+ _ if simple.is_some() => {
+                    // FIXME(antoyo): remove this cast when the API supports functions.
+ let func = unsafe { std::mem::transmute(simple.expect("simple")) };
+ self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
+ },
+ sym::likely => {
+ self.expect(args[0].immediate(), true)
+ }
+ sym::unlikely => {
+ self.expect(args[0].immediate(), false)
+ }
+ kw::Try => {
+ try_intrinsic(
+ self,
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ llresult,
+ );
+ return;
+ }
+ sym::breakpoint => {
+ unimplemented!();
+ }
+ sym::va_copy => {
+ unimplemented!();
+ }
+ sym::va_arg => {
+ unimplemented!();
+ }
+
+ sym::volatile_load | sym::unaligned_volatile_load => {
+ let tp_ty = substs.type_at(0);
+ let mut ptr = args[0].immediate();
+ if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
+ }
+ let load = self.volatile_load(ptr.get_type(), ptr);
+ // TODO(antoyo): set alignment.
+ self.to_immediate(load, self.layout_of(tp_ty))
+ }
+ sym::volatile_store => {
+ let dst = args[0].deref(self.cx());
+ args[1].val.volatile_store(self, dst);
+ return;
+ }
+ sym::unaligned_volatile_store => {
+ let dst = args[0].deref(self.cx());
+ args[1].val.unaligned_volatile_store(self, dst);
+ return;
+ }
+ sym::prefetch_read_data
+ | sym::prefetch_write_data
+ | sym::prefetch_read_instruction
+ | sym::prefetch_write_instruction => {
+ unimplemented!();
+ }
+ sym::ctlz
+ | sym::ctlz_nonzero
+ | sym::cttz
+ | sym::cttz_nonzero
+ | sym::ctpop
+ | sym::bswap
+ | sym::bitreverse
+ | sym::rotate_left
+ | sym::rotate_right
+ | sym::saturating_add
+ | sym::saturating_sub => {
+ let ty = arg_tys[0];
+ match int_type_width_signed(ty, self) {
+ Some((width, signed)) => match name {
+ sym::ctlz | sym::cttz => {
+ let func = self.current_func.borrow().expect("func");
+ let then_block = func.new_block("then");
+ let else_block = func.new_block("else");
+ let after_block = func.new_block("after");
+
+ let arg = args[0].immediate();
+ let result = func.new_local(None, arg.get_type(), "zeros");
+ let zero = self.cx.gcc_zero(arg.get_type());
+ let cond = self.gcc_icmp(IntPredicate::IntEQ, arg, zero);
+ self.llbb().end_with_conditional(None, cond, then_block, else_block);
+
+ let zero_result = self.cx.gcc_uint(arg.get_type(), width);
+ then_block.add_assignment(None, result, zero_result);
+ then_block.end_with_jump(None, after_block);
+
+ // NOTE: since jumps were added in a place
+ // count_leading_zeroes() does not expect, the current block
+                            // in the state needs to be updated.
+ self.switch_to_block(else_block);
+
+ let zeros =
+ match name {
+ sym::ctlz => self.count_leading_zeroes(width, arg),
+ sym::cttz => self.count_trailing_zeroes(width, arg),
+ _ => unreachable!(),
+ };
+ self.llbb().add_assignment(None, result, zeros);
+ self.llbb().end_with_jump(None, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not
+                            // expect, the current block in the state needs to be updated.
+ self.switch_to_block(after_block);
+
+ result.to_rvalue()
+ }
+ sym::ctlz_nonzero => {
+ self.count_leading_zeroes(width, args[0].immediate())
+ },
+ sym::cttz_nonzero => {
+ self.count_trailing_zeroes(width, args[0].immediate())
+ }
+ sym::ctpop => self.pop_count(args[0].immediate()),
+ sym::bswap => {
+ if width == 8 {
+                                    args[0].immediate() // byte swapping a u8/i8 is just a no-op
+ }
+ else {
+ self.gcc_bswap(args[0].immediate(), width)
+ }
+ },
+ sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
+ sym::rotate_left | sym::rotate_right => {
+ // TODO(antoyo): implement using algorithm from:
+ // https://blog.regehr.org/archives/1063
+ // for other platforms.
+ let is_left = name == sym::rotate_left;
+ let val = args[0].immediate();
+ let raw_shift = args[1].immediate();
+ if is_left {
+ self.rotate_left(val, raw_shift, width)
+ }
+ else {
+ self.rotate_right(val, raw_shift, width)
+ }
+ },
+ sym::saturating_add => {
+ self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width)
+ },
+ sym::saturating_sub => {
+ self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width)
+ },
+ _ => bug!(),
+ },
+ None => {
+ span_invalid_monomorphization_error(
+ tcx.sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic integer type, found `{}`",
+ name, ty
+ ),
+ );
+ return;
+ }
+ }
+ }
+
+ sym::raw_eq => {
+ use rustc_target::abi::Abi::*;
+ let tp_ty = substs.type_at(0);
+ let layout = self.layout_of(tp_ty).layout;
+ let _use_integer_compare = match layout.abi() {
+ Scalar(_) | ScalarPair(_, _) => true,
+ Uninhabited | Vector { .. } => false,
+ Aggregate { .. } => {
+ // For rusty ABIs, small aggregates are actually passed
+ // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
+ // so we re-use that same threshold here.
+ layout.size() <= self.data_layout().pointer_size * 2
+ }
+ };
+
+ let a = args[0].immediate();
+ let b = args[1].immediate();
+ if layout.size().bytes() == 0 {
+ self.const_bool(true)
+ }
+ /*else if use_integer_compare {
+ let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
+ let ptr_ty = self.type_ptr_to(integer_ty);
+ let a_ptr = self.bitcast(a, ptr_ty);
+ let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
+ let b_ptr = self.bitcast(b, ptr_ty);
+ let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
+ self.icmp(IntPredicate::IntEQ, a_val, b_val)
+ }*/
+ else {
+ let void_ptr_type = self.context.new_type::<*const ()>();
+ let a_ptr = self.bitcast(a, void_ptr_type);
+ let b_ptr = self.bitcast(b, void_ptr_type);
+ let n = self.context.new_cast(None, self.const_usize(layout.size().bytes()), self.sizet_type);
+ let builtin = self.context.get_builtin_function("memcmp");
+ let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
+ self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
+ }
+ }
+
+ sym::black_box => {
+ args[0].val.store(self, result);
+
+ let block = self.llbb();
+ let extended_asm = block.add_extended_asm(None, "");
+ extended_asm.add_input_operand(None, "r", result.llval);
+ extended_asm.add_clobber("memory");
+ extended_asm.set_volatile_flag(true);
+
+ // We have copied the value to `result` already.
+ return;
+ }
+
+ _ if name_str.starts_with("simd_") => {
+ match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+ Ok(llval) => llval,
+ Err(()) => return,
+ }
+ }
+
+ _ => bug!("unknown intrinsic '{}'", name),
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
+ let ptr = self.pointercast(result.llval, ptr_llty);
+ self.store(llval, ptr, result.align);
+ }
+ else {
+ OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
+ .val
+ .store(self, result);
+ }
+ }
+ }
+
+ fn abort(&mut self) {
+ let func = self.context.get_builtin_function("abort");
+ let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
+ self.call(self.type_void(), func, &[], None);
+ }
+
+ fn assume(&mut self, value: Self::Value) {
+ // TODO(antoyo): switch to assume when it exists.
+ // Or use something like this:
+ // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
+ self.expect(value, true);
+ }
+
+ fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
+ // TODO(antoyo)
+ cond
+ }
+
+ fn type_test(&mut self, _pointer: Self::Value, _typeid: Self::Value) -> Self::Value {
+ // Unsupported.
+ self.context.new_rvalue_from_int(self.int_type, 0)
+ }
+
+ fn type_checked_load(
+ &mut self,
+ _llvtable: Self::Value,
+ _vtable_byte_offset: u64,
+ _typeid: Self::Value,
+ ) -> Self::Value {
+ // Unsupported.
+ self.context.new_rvalue_from_int(self.int_type, 0)
+ }
+
+ fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+}
+
+impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) {
+ arg_abi.store_fn_arg(self, idx, dst)
+ }
+
+ fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+ arg_abi.store(self, val, dst)
+ }
+
+ fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+ arg_abi.memory_ty(self)
+ }
+}
+
+pub trait ArgAbiExt<'gcc, 'tcx> {
+ fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+ fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>);
+ fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>);
+}
+
+impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+    /// Gets the GCC type for a place of the original Rust type of
+ /// this argument/return, i.e., the result of `type_of::type_of`.
+ fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ self.layout.gcc_type(cx, true)
+ }
+
+ /// Stores a direct/indirect value described by this ArgAbi into a
+ /// place for the original Rust type of this argument/return.
+ /// Can be used for both storing formal arguments into Rust variables
+ /// or results of call/invoke instructions into their destinations.
+ fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+ if self.is_ignore() {
+ return;
+ }
+ if self.is_sized_indirect() {
+ OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
+ }
+ else if self.is_unsized_indirect() {
+ bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
+ }
+ else if let PassMode::Cast(cast) = self.mode {
+            // FIXME(eddyb): Figure out when the simpler Store is safe; clang
+            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
+ let can_store_through_cast_ptr = false;
+ if can_store_through_cast_ptr {
+ let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
+ let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
+ bx.store(val, cast_dst, self.layout.align.abi);
+ }
+ else {
+ // The actual return type is a struct, but the ABI
+ // adaptation code has cast it into some scalar type. The
+ // code that follows is the only reliable way I have
+ // found to do a transform like i64 -> {i32,i32}.
+ // Basically we dump the data onto the stack then memcpy it.
+ //
+ // Other approaches I tried:
+ // - Casting rust ret pointer to the foreign type and using Store
+ // is (a) unsafe if size of foreign type > size of rust type and
+ // (b) runs afoul of strict aliasing rules, yielding invalid
+ // assembly under -O (specifically, the store gets removed).
+ // - Truncating foreign type to correct integral type and then
+ // bitcasting to the struct type yields invalid cast errors.
+
+ // We instead thus allocate some scratch space...
+ let scratch_size = cast.size(bx);
+ let scratch_align = cast.align(bx);
+ let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
+ bx.lifetime_start(llscratch, scratch_size);
+
+ // ... where we first store the value...
+ bx.store(val, llscratch, scratch_align);
+
+ // ... and then memcpy it to the intended destination.
+ bx.memcpy(
+ dst.llval,
+ self.layout.align.abi,
+ llscratch,
+ scratch_align,
+ bx.const_usize(self.layout.size.bytes()),
+ MemFlags::empty(),
+ );
+
+ bx.lifetime_end(llscratch, scratch_size);
+ }
+ }
+ else {
+ OperandValue::Immediate(val).store(bx, dst);
+ }
+ }
+
+ fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+ let mut next = || {
+ let val = bx.current_func().get_param(*idx as i32);
+ *idx += 1;
+ val.to_rvalue()
+ };
+ match self.mode {
+ PassMode::Ignore => {},
+ PassMode::Pair(..) => {
+ OperandValue::Pair(next(), next()).store(bx, dst);
+ },
+ PassMode::Indirect { extra_attrs: Some(_), .. } => {
+ OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+ },
+ PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => {
+ let next_arg = next();
+ self.store(bx, next_arg, dst);
+ },
+ }
+ }
+}
+
+fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> {
+ match ty.kind() {
+ ty::Int(t) => Some((
+ match t {
+ rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width),
+ rustc_middle::ty::IntTy::I8 => 8,
+ rustc_middle::ty::IntTy::I16 => 16,
+ rustc_middle::ty::IntTy::I32 => 32,
+ rustc_middle::ty::IntTy::I64 => 64,
+ rustc_middle::ty::IntTy::I128 => 128,
+ },
+ true,
+ )),
+ ty::Uint(t) => Some((
+ match t {
+ rustc_middle::ty::UintTy::Usize => u64::from(cx.tcx.sess.target.pointer_width),
+ rustc_middle::ty::UintTy::U8 => 8,
+ rustc_middle::ty::UintTy::U16 => 16,
+ rustc_middle::ty::UintTy::U32 => 32,
+ rustc_middle::ty::UintTy::U64 => 64,
+ rustc_middle::ty::UintTy::U128 => 128,
+ },
+ false,
+ )),
+ _ => None,
+ }
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+ fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> {
+ let result_type = value.get_type();
+ let typ = result_type.to_unsigned(self.cx);
+
+ let value =
+ if result_type.is_signed(self.cx) {
+ self.gcc_int_cast(value, typ)
+ }
+ else {
+ value
+ };
+
+ let context = &self.cx.context;
+ let result =
+ match width {
+ 8 => {
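+                    // Worked example (illustrative): reversing 0b1011_0010 (0xB2):
+                    // swap nibbles       -> 0b0010_1011
+                    // swap bit pairs     -> 0b1000_1110
+                    // swap adjacent bits -> 0b0100_1101, the bit-reversal of 0xB2.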
+ // First step.
+ let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0));
+ let left = self.lshr(left, context.new_rvalue_from_int(typ, 4));
+ let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F));
+ let right = self.shl(right, context.new_rvalue_from_int(typ, 4));
+ let step1 = self.or(left, right);
+
+ // Second step.
+ let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC));
+ let left = self.lshr(left, context.new_rvalue_from_int(typ, 2));
+ let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33));
+ let right = self.shl(right, context.new_rvalue_from_int(typ, 2));
+ let step2 = self.or(left, right);
+
+ // Third step.
+ let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA));
+ let left = self.lshr(left, context.new_rvalue_from_int(typ, 1));
+ let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55));
+ let right = self.shl(right, context.new_rvalue_from_int(typ, 1));
+ let step3 = self.or(left, right);
+
+ step3
+ },
+ 16 => {
+ // First step.
+ let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555));
+ let left = self.shl(left, context.new_rvalue_from_int(typ, 1));
+ let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA));
+ let right = self.lshr(right, context.new_rvalue_from_int(typ, 1));
+ let step1 = self.or(left, right);
+
+ // Second step.
+ let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333));
+ let left = self.shl(left, context.new_rvalue_from_int(typ, 2));
+ let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC));
+ let right = self.lshr(right, context.new_rvalue_from_int(typ, 2));
+ let step2 = self.or(left, right);
+
+ // Third step.
+ let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F));
+ let left = self.shl(left, context.new_rvalue_from_int(typ, 4));
+ let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0));
+ let right = self.lshr(right, context.new_rvalue_from_int(typ, 4));
+ let step3 = self.or(left, right);
+
+ // Fourth step.
+ let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF));
+ let left = self.shl(left, context.new_rvalue_from_int(typ, 8));
+ let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00));
+ let right = self.lshr(right, context.new_rvalue_from_int(typ, 8));
+ let step4 = self.or(left, right);
+
+ step4
+ },
+ 32 => {
+ // TODO(antoyo): Refactor with other implementations.
+ // First step.
+ let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
+ let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 1));
+ let step1 = self.or(left, right);
+
+ // Second step.
+ let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 2));
+ let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 2));
+ let step2 = self.or(left, right);
+
+ // Third step.
+ let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 4));
+ let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 4));
+ let step3 = self.or(left, right);
+
+ // Fourth step.
+ let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 8));
+ let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 8));
+ let step4 = self.or(left, right);
+
+ // Fifth step.
+ let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 16));
+ let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 16));
+ let step5 = self.or(left, right);
+
+ step5
+ },
+ 64 => {
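+                    // NOTE (descriptive, not upstream): step one is a 32-bit rotation,
+                    // step two a masked shift pair, and the remaining steps are "delta
+                    // swaps", a standard bit-permutation idiom: with
+                    // t = (x ^ (x >> d)) & m, and m disjoint from m << d,
+                    // x ^ (t | (t << d)) exchanges the bit groups selected by m with
+                    // the groups d positions above them.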
+ // First step.
+ let left = self.shl(value, context.new_rvalue_from_long(typ, 32));
+ let right = self.lshr(value, context.new_rvalue_from_long(typ, 32));
+ let step1 = self.or(left, right);
+
+ // Second step.
+ let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
+ let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead?
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
+ let step2 = self.or(left, right);
+
+ // Third step.
+ let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10));
+ let left = self.xor(step2, left);
+ let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F));
+
+ let left = self.shl(temp, context.new_rvalue_from_long(typ, 10));
+ let left = self.or(temp, left);
+ let step3 = self.xor(left, step2);
+
+ // Fourth step.
+ let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4));
+ let left = self.xor(step3, left);
+ let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421));
+
+ let left = self.shl(temp, context.new_rvalue_from_long(typ, 4));
+ let left = self.or(temp, left);
+ let step4 = self.xor(left, step3);
+
+ // Fifth step.
+ let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2));
+ let left = self.xor(step4, left);
+ let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842));
+
+ let left = self.shl(temp, context.new_rvalue_from_long(typ, 2));
+ let left = self.or(temp, left);
+ let step5 = self.xor(left, step4);
+
+ step5
+ },
+ 128 => {
+ // TODO(antoyo): find a more efficient implementation?
+ let sixty_four = self.gcc_int(typ, 64);
+ let right_shift = self.gcc_lshr(value, sixty_four);
+ let high = self.gcc_int_cast(right_shift, self.u64_type);
+ let low = self.gcc_int_cast(value, self.u64_type);
+
+ let reversed_high = self.bit_reverse(64, high);
+ let reversed_low = self.bit_reverse(64, low);
+
+ let new_low = self.gcc_int_cast(reversed_high, typ);
+ let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four);
+
+ self.gcc_or(new_low, new_high)
+ },
+ _ => {
+ panic!("cannot bit reverse with width = {}", width);
+ },
+ };
+
+ self.gcc_int_cast(result, result_type)
+ }
+
+ fn count_leading_zeroes(&mut self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): use width?
+ let arg_type = arg.get_type();
+ let count_leading_zeroes =
+ // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
+ // instead of using is_uint().
+ if arg_type.is_uint(&self.cx) {
+ "__builtin_clz"
+ }
+ else if arg_type.is_ulong(&self.cx) {
+ "__builtin_clzl"
+ }
+ else if arg_type.is_ulonglong(&self.cx) {
+ "__builtin_clzll"
+ }
+ else if width == 128 {
+ // Algorithm from: https://stackoverflow.com/a/28433850/389119
+ let array_type = self.context.new_array_type(None, arg_type, 3);
+ let result = self.current_func()
+ .new_local(None, array_type, "count_loading_zeroes_results");
+
+ let sixty_four = self.const_uint(arg_type, 64);
+ let shift = self.lshr(arg, sixty_four);
+ let high = self.gcc_int_cast(shift, self.u64_type);
+ let low = self.gcc_int_cast(arg, self.u64_type);
+
+ let zero = self.context.new_rvalue_zero(self.usize_type);
+ let one = self.context.new_rvalue_one(self.usize_type);
+ let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+ let clzll = self.context.get_builtin_function("__builtin_clzll");
+
+ let first_elem = self.context.new_array_access(None, result, zero);
+ let first_value = self.gcc_int_cast(self.context.new_call(None, clzll, &[high]), arg_type);
+ self.llbb()
+ .add_assignment(None, first_elem, first_value);
+
+ let second_elem = self.context.new_array_access(None, result, one);
+ let cast = self.gcc_int_cast(self.context.new_call(None, clzll, &[low]), arg_type);
+ let second_value = self.add(cast, sixty_four);
+ self.llbb()
+ .add_assignment(None, second_elem, second_value);
+
+ let third_elem = self.context.new_array_access(None, result, two);
+ let third_value = self.const_uint(arg_type, 128);
+ self.llbb()
+ .add_assignment(None, third_elem, third_value);
+
+ let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+ let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+ let not_low_and_not_high = not_low & not_high;
+ let index = not_high + not_low_and_not_high;
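+            // Branchless selection (illustrative): high != 0 gives index 0, so we use
+            // clz(high); high == 0 && low != 0 gives index 1, i.e. 64 + clz(low);
+            // both zero gives index 2, i.e. the full width, 128.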
+ // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
+ // gcc.
+ // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
+ // compilation stage.
+ let index = self.context.new_cast(None, index, self.i32_type);
+
+ let res = self.context.new_array_access(None, result, index);
+
+ return self.gcc_int_cast(res.to_rvalue(), arg_type);
+ }
+ else {
+ let count_leading_zeroes = self.context.get_builtin_function("__builtin_clzll");
+ let arg = self.context.new_cast(None, arg, self.ulonglong_type);
+ let diff = self.ulonglong_type.get_size() as i64 - arg_type.get_size() as i64;
+ let diff = self.context.new_rvalue_from_long(self.int_type, diff * 8);
+ let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
+ return self.context.new_cast(None, res, arg_type);
+ };
+ let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
+ let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
+ self.context.new_cast(None, res, arg_type)
+ }
+
+ fn count_trailing_zeroes(&mut self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+ let result_type = arg.get_type();
+ let arg =
+ if result_type.is_signed(self.cx) {
+ let new_type = result_type.to_unsigned(self.cx);
+ self.gcc_int_cast(arg, new_type)
+ }
+ else {
+ arg
+ };
+ let arg_type = arg.get_type();
+ let (count_trailing_zeroes, expected_type) =
+ // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
+ // instead of using is_uint().
+ if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
+            // NOTE: we don't need to mask with 0xFF for uchar because the result is undefined for zero.
+ ("__builtin_ctz", self.cx.uint_type)
+ }
+ else if arg_type.is_ulong(&self.cx) {
+ ("__builtin_ctzl", self.cx.ulong_type)
+ }
+ else if arg_type.is_ulonglong(&self.cx) {
+ ("__builtin_ctzll", self.cx.ulonglong_type)
+ }
+ else if arg_type.is_u128(&self.cx) {
+ // Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
+ let array_type = self.context.new_array_type(None, arg_type, 3);
+ let result = self.current_func()
+ .new_local(None, array_type, "count_loading_zeroes_results");
+
+ let sixty_four = self.gcc_int(arg_type, 64);
+ let shift = self.gcc_lshr(arg, sixty_four);
+ let high = self.gcc_int_cast(shift, self.u64_type);
+ let low = self.gcc_int_cast(arg, self.u64_type);
+
+ let zero = self.context.new_rvalue_zero(self.usize_type);
+ let one = self.context.new_rvalue_one(self.usize_type);
+ let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+ let ctzll = self.context.get_builtin_function("__builtin_ctzll");
+
+ let first_elem = self.context.new_array_access(None, result, zero);
+ let first_value = self.gcc_int_cast(self.context.new_call(None, ctzll, &[low]), arg_type);
+ self.llbb()
+ .add_assignment(None, first_elem, first_value);
+
+ let second_elem = self.context.new_array_access(None, result, one);
+ let second_value = self.gcc_add(self.gcc_int_cast(self.context.new_call(None, ctzll, &[high]), arg_type), sixty_four);
+ self.llbb()
+ .add_assignment(None, second_elem, second_value);
+
+ let third_elem = self.context.new_array_access(None, result, two);
+ let third_value = self.gcc_int(arg_type, 128);
+ self.llbb()
+ .add_assignment(None, third_elem, third_value);
+
+ let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+ let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+ let not_low_and_not_high = not_low & not_high;
+ let index = not_low + not_low_and_not_high;
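+            // Branchless selection, mirroring count_leading_zeroes: low != 0 gives
+            // index 0, i.e. ctz(low); low == 0 && high != 0 gives index 1, i.e.
+            // 64 + ctz(high); both zero gives index 2, i.e. 128.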
+ // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
+ // gcc.
+ // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
+ // compilation stage.
+ let index = self.context.new_cast(None, index, self.i32_type);
+
+ let res = self.context.new_array_access(None, result, index);
+
+ return self.gcc_int_cast(res.to_rvalue(), result_type);
+ }
+ else {
+ let count_trailing_zeroes = self.context.get_builtin_function("__builtin_ctzll");
+ let arg_size = arg_type.get_size();
+ let casted_arg = self.context.new_cast(None, arg, self.ulonglong_type);
+ let byte_diff = self.ulonglong_type.get_size() as i64 - arg_size as i64;
+ let diff = self.context.new_rvalue_from_long(self.int_type, byte_diff * 8);
+ let mask = self.context.new_rvalue_from_long(arg_type, -1); // To get the value with all bits set.
+ let masked = mask & self.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, arg);
+ let cond = self.context.new_comparison(None, ComparisonOp::Equals, masked, mask);
+ let diff = diff * self.context.new_cast(None, cond, self.int_type);
+ let res = self.context.new_call(None, count_trailing_zeroes, &[casted_arg]) - diff;
+ return self.context.new_cast(None, res, result_type);
+ };
+ let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
+ let arg =
+ if arg_type != expected_type {
+ self.context.new_cast(None, arg, expected_type)
+ }
+ else {
+ arg
+ };
+ let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
+ self.context.new_cast(None, res, result_type)
+ }
+
+ fn pop_count(&mut self, value: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): use the optimized version with fewer operations.
+ let result_type = value.get_type();
+ let value_type = result_type.to_unsigned(self.cx);
+
+ let value =
+ if result_type.is_signed(self.cx) {
+ self.gcc_int_cast(value, value_type)
+ }
+ else {
+ value
+ };
+
+ if value_type.is_u128(&self.cx) {
+            // TODO(antoyo): handle this case in the normal algorithm below to get a
+            // more efficient implementation (one that does not require a call to
+            // __popcountdi2).
+ let popcount = self.context.get_builtin_function("__builtin_popcountll");
+ let sixty_four = self.gcc_int(value_type, 64);
+ let right_shift = self.gcc_lshr(value, sixty_four);
+ let high = self.gcc_int_cast(right_shift, self.cx.ulonglong_type);
+ let high = self.context.new_call(None, popcount, &[high]);
+ let low = self.gcc_int_cast(value, self.cx.ulonglong_type);
+ let low = self.context.new_call(None, popcount, &[low]);
+ let res = high + low;
+ return self.gcc_int_cast(res, result_type);
+ }
+
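+        // Worked example (illustrative) on the 8-bit value 0b1101_0110 (five bits set):
+        // pair sums   -> 0b1001_0101 (2, 1, 1, 1 per 2-bit field)
+        // nibble sums -> 0b0011_0010 (3, 2 per 4-bit field)
+        // byte sum    -> 0b0000_0101 = 5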
+ // First step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x5555555555555555);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 1);
+ let right = shifted & mask;
+ let value = left + right;
+
+ // Second step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x3333333333333333);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 2);
+ let right = shifted & mask;
+ let value = left + right;
+
+ // Third step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 4);
+ let right = shifted & mask;
+ let value = left + right;
+
+ if value_type.is_u8(&self.cx) {
+ return self.context.new_cast(None, value, result_type);
+ }
+
+ // Fourth step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 8);
+ let right = shifted & mask;
+ let value = left + right;
+
+ if value_type.is_u16(&self.cx) {
+ return self.context.new_cast(None, value, result_type);
+ }
+
+ // Fifth step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 16);
+ let right = shifted & mask;
+ let value = left + right;
+
+ if value_type.is_u32(&self.cx) {
+ return self.context.new_cast(None, value, result_type);
+ }
+
+ // Sixth step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x00000000FFFFFFFF);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 32);
+ let right = shifted & mask;
+ let value = left + right;
+
+ self.context.new_cast(None, value, result_type)
+ }
+
+ // Algorithm from: https://blog.regehr.org/archives/1063
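+    // Illustrative (not upstream prose): for an 8-bit value, rotate_left(0b1011_0001, 3)
+    // computes (v << 3) | (v >> ((-3) & 7)) = 0b1000_1000 | 0b0000_0101 = 0b1000_1101.
+    // Masking the negated shift with `width - 1` keeps the right-shift amount in
+    // range, and a shift of 0 degenerates to v | v.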
+ fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+ let max = self.const_uint(shift.get_type(), width);
+ let shift = self.urem(shift, max);
+ let lhs = self.shl(value, shift);
+ let result_neg = self.neg(shift);
+ let result_and =
+ self.and(
+ result_neg,
+ self.const_uint(shift.get_type(), width - 1),
+ );
+ let rhs = self.lshr(value, result_and);
+ self.or(lhs, rhs)
+ }
+
+ // Algorithm from: https://blog.regehr.org/archives/1063
+ fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+ let max = self.const_uint(shift.get_type(), width);
+ let shift = self.urem(shift, max);
+ let lhs = self.lshr(value, shift);
+ let result_neg = self.neg(shift);
+ let result_and =
+ self.and(
+ result_neg,
+ self.const_uint(shift.get_type(), width - 1),
+ );
+ let rhs = self.shl(value, result_and);
+ self.or(lhs, rhs)
+ }
+
+ fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
+ let result_type = lhs.get_type();
+ if signed {
+ // Based on algorithm from: https://stackoverflow.com/a/56531252/389119
+ let func = self.current_func.borrow().expect("func");
+ let res = func.new_local(None, result_type, "saturating_sum");
+ let supports_native_type = self.is_native_int_type(result_type);
+ let overflow =
+ if supports_native_type {
+ let func_name =
+ match width {
+ 8 => "__builtin_add_overflow",
+ 16 => "__builtin_add_overflow",
+ 32 => "__builtin_sadd_overflow",
+ 64 => "__builtin_saddll_overflow",
+ 128 => "__builtin_add_overflow",
+ _ => unreachable!(),
+ };
+ let overflow_func = self.context.get_builtin_function(func_name);
+ self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
+ }
+ else {
+ let func_name =
+ match width {
+ 128 => "__rust_i128_addo",
+ _ => unreachable!(),
+ };
+ let param_a = self.context.new_parameter(None, result_type, "a");
+ let param_b = self.context.new_parameter(None, result_type, "b");
+ let result_field = self.context.new_field(None, result_type, "result");
+ let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
+ let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
+ let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
+ let result = self.context.new_call(None, func, &[lhs, rhs]);
+ let overflow = result.access_field(None, overflow_field);
+ let int_result = result.access_field(None, result_field);
+ self.llbb().add_assignment(None, res, int_result);
+ overflow
+ };
+
+ let then_block = func.new_block("then");
+ let after_block = func.new_block("after");
+
+ // Return `result_type`'s maximum or minimum value on overflow
+ // NOTE: convert the type to unsigned to have an unsigned shift.
+ let unsigned_type = result_type.to_unsigned(&self.cx);
+ let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
+ let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
+ let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
+ then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
+ then_block.end_with_jump(None, after_block);
+
+ self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not
+            // expect, the current block in the state needs to be updated.
+ self.switch_to_block(after_block);
+
+ res.to_rvalue()
+ }
+ else {
+            // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
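+            // Sketch of the trick (illustrative): the add wraps on overflow and
+            // `res < lhs` detects it; negating that boolean gives an all-ones value,
+            // so `res | value` saturates to the maximum. E.g. for u8: 200 + 100 wraps
+            // to 44, and 44 < 200, so 44 | 0xFF == 255.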
+ let res = self.gcc_add(lhs, rhs);
+ let cond = self.gcc_icmp(IntPredicate::IntULT, res, lhs);
+ let value = self.gcc_neg(self.gcc_int_cast(cond, result_type));
+ self.gcc_or(res, value)
+ }
+ }
+
+ // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
+ fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
+ let result_type = lhs.get_type();
+ if signed {
+ // Based on algorithm from: https://stackoverflow.com/a/56531252/389119
+ let func = self.current_func.borrow().expect("func");
+ let res = func.new_local(None, result_type, "saturating_diff");
+ let supports_native_type = self.is_native_int_type(result_type);
+ let overflow =
+ if supports_native_type {
+ let func_name =
+ match width {
+ 8 => "__builtin_sub_overflow",
+ 16 => "__builtin_sub_overflow",
+ 32 => "__builtin_ssub_overflow",
+ 64 => "__builtin_ssubll_overflow",
+ 128 => "__builtin_sub_overflow",
+ _ => unreachable!(),
+ };
+ let overflow_func = self.context.get_builtin_function(func_name);
+ self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
+ }
+ else {
+ let func_name =
+ match width {
+ 128 => "__rust_i128_subo",
+ _ => unreachable!(),
+ };
+ let param_a = self.context.new_parameter(None, result_type, "a");
+ let param_b = self.context.new_parameter(None, result_type, "b");
+ let result_field = self.context.new_field(None, result_type, "result");
+ let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
+ let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
+ let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
+ let result = self.context.new_call(None, func, &[lhs, rhs]);
+ let overflow = result.access_field(None, overflow_field);
+ let int_result = result.access_field(None, result_field);
+ self.llbb().add_assignment(None, res, int_result);
+ overflow
+ };
+
+ let then_block = func.new_block("then");
+ let after_block = func.new_block("after");
+
+ // Return `result_type`'s maximum or minimum value on overflow
+ // NOTE: convert the type to unsigned to have an unsigned shift.
+ let unsigned_type = result_type.to_unsigned(&self.cx);
+ let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
+ let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
+ let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
+ then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
+ then_block.end_with_jump(None, after_block);
+
+ self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not
+            // expect, the current block in the state needs to be updated.
+ self.switch_to_block(after_block);
+
+ res.to_rvalue()
+ }
+ else {
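+            // Illustrative note (not upstream prose): `res <= lhs` means no borrow
+            // occurred; negating that boolean gives all-ones, so `res & value` keeps
+            // `res`, while on borrow it is zero and the result clamps to 0.
+            // E.g. for u8: 5 - 10 wraps to 251, 251 <= 5 is false, so 251 & 0 == 0.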
+ let res = self.gcc_sub(lhs, rhs);
+ let comparison = self.gcc_icmp(IntPredicate::IntULE, res, lhs);
+ let value = self.gcc_neg(self.gcc_int_cast(comparison, result_type));
+ self.gcc_and(res, value)
+ }
+ }
+}
+
+fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
+    // NOTE: the `|| true` here makes us use the panic=abort strategy even with panic=unwind.
+ if bx.sess().panic_strategy() == PanicStrategy::Abort || true {
+ // TODO(bjorn3): Properly implement unwinding and remove the `|| true` once this is done.
+ bx.call(bx.type_void(), try_func, &[data], None);
+ // Return 0 unconditionally from the intrinsic call;
+ // we can never unwind.
+ let ret_align = bx.tcx.data_layout.i32_align.abi;
+ bx.store(bx.const_i32(0), dest, ret_align);
+ }
+ else if wants_msvc_seh(bx.sess()) {
+ unimplemented!();
+ }
+ else {
+ unimplemented!();
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
new file mode 100644
index 000000000..2401f3350
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
@@ -0,0 +1,751 @@
+use std::cmp::Ordering;
+
+use gccjit::{BinaryOp, RValue, Type, ToRValue};
+use rustc_codegen_ssa::base::compare_simd_types;
+use rustc_codegen_ssa::common::{TypeKind, span_invalid_monomorphization_error};
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods};
+use rustc_hir as hir;
+use rustc_middle::span_bug;
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::{Span, Symbol, sym};
+use rustc_target::abi::Align;
+
+use crate::builder::Builder;
+use crate::intrinsic;
+
+pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> {
+ // macros for error handling:
+ #[allow(unused_macro_rules)]
+ macro_rules! emit_error {
+ ($msg: tt) => {
+ emit_error!($msg, )
+ };
+ ($msg: tt, $($fmt: tt)*) => {
+ span_invalid_monomorphization_error(
+ bx.sess(), span,
+ &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
+ name, $($fmt)*));
+ }
+ }
+
+ macro_rules! return_error {
+ ($($fmt: tt)*) => {
+ {
+ emit_error!($($fmt)*);
+ return Err(());
+ }
+ }
+ }
+
+ macro_rules! require {
+ ($cond: expr, $($fmt: tt)*) => {
+ if !$cond {
+ return_error!($($fmt)*);
+ }
+ };
+ }
+
+ macro_rules! require_simd {
+ ($ty: expr, $position: expr) => {
+ require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
+ };
+ }
+
+ let tcx = bx.tcx();
+ let sig =
+ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
+ let arg_tys = sig.inputs();
+
+ if name == sym::simd_select_bitmask {
+ require_simd!(arg_tys[1], "argument");
+ let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+
+ let expected_int_bits = (len.max(8) - 1).next_power_of_two();
+ let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
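+        // For example (illustrative): a 4-lane mask still expects a `u8` bitmask,
+        // since (4.max(8) - 1).next_power_of_two() == 8, or a `[u8; 1]` array, while
+        // a 16-lane mask expects `u16` or `[u8; 2]`.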
+
+ let mask_ty = arg_tys[0];
+ let mut mask = match mask_ty.kind() {
+ ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
+ ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
+ ty::Array(elem, len)
+ if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
+ && len.try_eval_usize(bx.tcx, ty::ParamEnv::reveal_all())
+ == Some(expected_bytes) =>
+ {
+ let place = PlaceRef::alloca(bx, args[0].layout);
+ args[0].val.store(bx, place);
+ let int_ty = bx.type_ix(expected_bytes * 8);
+ let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
+ bx.load(int_ty, ptr, Align::ONE)
+ }
+ _ => return_error!(
+ "invalid bitmask `{}`, expected `u{}` or `[u8; {}]`",
+ mask_ty,
+ expected_int_bits,
+ expected_bytes
+ ),
+ };
+
+ let arg1 = args[1].immediate();
+ let arg1_type = arg1.get_type();
+ let arg1_vector_type = arg1_type.unqualified().dyncast_vector().expect("vector type");
+ let arg1_element_type = arg1_vector_type.get_element_type();
+
+ let mut elements = vec![];
+ let one = bx.context.new_rvalue_one(mask.get_type());
+ for _ in 0..len {
+ let element = bx.context.new_cast(None, mask & one, arg1_element_type);
+ elements.push(element);
+ mask = mask >> one;
+ }
+ let vector_mask = bx.context.new_rvalue_from_vector(None, arg1_type, &elements);
+
+ return Ok(bx.vector_select(vector_mask, arg1, args[2].immediate()));
+ }
+
+ // every intrinsic below takes a SIMD vector as its first argument
+ require_simd!(arg_tys[0], "input");
+ let in_ty = arg_tys[0];
+
+ let comparison = match name {
+ sym::simd_eq => Some(hir::BinOpKind::Eq),
+ sym::simd_ne => Some(hir::BinOpKind::Ne),
+ sym::simd_lt => Some(hir::BinOpKind::Lt),
+ sym::simd_le => Some(hir::BinOpKind::Le),
+ sym::simd_gt => Some(hir::BinOpKind::Gt),
+ sym::simd_ge => Some(hir::BinOpKind::Ge),
+ _ => None,
+ };
+
+ let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
+ if let Some(cmp_op) = comparison {
+ require_simd!(ret_ty, "return");
+
+ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+ require!(
+ bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
+ "expected return type with integer elements, found `{}` with non-integer `{}`",
+ ret_ty,
+ out_ty
+ );
+
+ return Ok(compare_simd_types(
+ bx,
+ args[0].immediate(),
+ args[1].immediate(),
+ in_elem,
+ llret_ty,
+ cmp_op,
+ ));
+ }
+
+ if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") {
+ let n: u64 =
+ if stripped.is_empty() {
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ match args[2].layout.ty.kind() {
+ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
+ len.try_eval_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(|| {
+ span_bug!(span, "could not evaluate shuffle index array length")
+ })
+ }
+ _ => return_error!(
+ "simd_shuffle index must be an array of `u32`, got `{}`",
+ args[2].layout.ty
+ ),
+ }
+ }
+ else {
+ stripped.parse().unwrap_or_else(|_| {
+ span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
+ })
+ };
+
+ require_simd!(ret_ty, "return");
+
+ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ out_len == n,
+ "expected return type of length {}, found `{}` with length {}",
+ n,
+ ret_ty,
+ out_len
+ );
+ require!(
+ in_elem == out_ty,
+ "expected return element type `{}` (element of input `{}`), \
+ found `{}` with element type `{}`",
+ in_elem,
+ in_ty,
+ ret_ty,
+ out_ty
+ );
+
+ let vector = args[2].immediate();
+
+ return Ok(bx.shuffle_vector(
+ args[0].immediate(),
+ args[1].immediate(),
+ vector,
+ ));
+ }
+
+ #[cfg(feature="master")]
+ if name == sym::simd_insert {
+ require!(
+ in_elem == arg_tys[2],
+ "expected inserted type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ arg_tys[2]
+ );
+ let vector = args[0].immediate();
+ let index = args[1].immediate();
+ let value = args[2].immediate();
+ // TODO(antoyo): use a recursive unqualified() here.
+ let vector_type = vector.get_type().unqualified().dyncast_vector().expect("vector type");
+ let element_type = vector_type.get_element_type();
+ // NOTE: we cannot cast to an array and assign to its element here because the value might
+ // not be an l-value. So, call a builtin to set the element.
+ // TODO(antoyo): perhaps we could create a new vector or maybe there's a GIMPLE instruction for that?
+ // TODO(antoyo): don't use target specific builtins here.
+ let func_name =
+ match in_len {
+ 2 => {
+ if element_type == bx.i64_type {
+ "__builtin_ia32_vec_set_v2di"
+ }
+ else {
+ unimplemented!();
+ }
+ },
+ 4 => {
+ if element_type == bx.i32_type {
+ "__builtin_ia32_vec_set_v4si"
+ }
+ else {
+ unimplemented!();
+ }
+ },
+ 8 => {
+ if element_type == bx.i16_type {
+ "__builtin_ia32_vec_set_v8hi"
+ }
+ else {
+ unimplemented!();
+ }
+ },
+ _ => unimplemented!("Len: {}", in_len),
+ };
+ let builtin = bx.context.get_target_builtin_function(func_name);
+ let param1_type = builtin.get_param(0).to_rvalue().get_type();
+ // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+ let vector = bx.cx.bitcast_if_needed(vector, param1_type);
+ let result = bx.context.new_call(None, builtin, &[vector, value, bx.context.new_cast(None, index, bx.int_type)]);
+ // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+ return Ok(bx.context.new_bitcast(None, result, vector.get_type()));
+ }
+
+ #[cfg(feature="master")]
+ if name == sym::simd_extract {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ let vector = args[0].immediate();
+ return Ok(bx.context.new_vector_access(None, vector, args[1].immediate()).to_rvalue());
+ }
+
+ if name == sym::simd_select {
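+        // Lane-wise select: each lane of the result comes from args[1] or args[2]
+        // depending on the corresponding lane of the integer mask in args[0].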
+ let m_elem_ty = in_elem;
+ let m_len = in_len;
+ require_simd!(arg_tys[1], "argument");
+ let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+ require!(
+ m_len == v_len,
+ "mismatched lengths: mask length `{}` != other vector length `{}`",
+ m_len,
+ v_len
+ );
+ match m_elem_ty.kind() {
+ ty::Int(_) => {}
+ _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
+ }
+ return Ok(bx.vector_select(args[0].immediate(), args[1].immediate(), args[2].immediate()));
+ }
+
+ if name == sym::simd_cast {
+ require_simd!(ret_ty, "return");
+ let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+ // casting cares about nominal type, not just structural type
+ if in_elem == out_elem {
+ return Ok(args[0].immediate());
+ }
+
+ enum Style {
+ Float,
+ Int(/* is signed? */ bool),
+ Unsupported,
+ }
+
+ let (in_style, in_width) = match in_elem.kind() {
+ // vectors of pointer-sized integers should've been
+ // disallowed before here, so this unwrap is safe.
+ ty::Int(i) => (
+ Style::Int(true),
+ i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Uint(u) => (
+ Style::Int(false),
+ u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Float(f) => (Style::Float, f.bit_width()),
+ _ => (Style::Unsupported, 0),
+ };
+ let (out_style, out_width) = match out_elem.kind() {
+ ty::Int(i) => (
+ Style::Int(true),
+ i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Uint(u) => (
+ Style::Int(false),
+ u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Float(f) => (Style::Float, f.bit_width()),
+ _ => (Style::Unsupported, 0),
+ };
+
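+        // Widens each element of an 8-lane vector: reinterpret the vector as an
+        // array, cast every element to the wider output type and rebuild a new
+        // vector. NOTE: this hardcodes 8 lanes.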
+ let extend = |in_type, out_type| {
+ let vector_type = bx.context.new_vector_type(out_type, 8);
+ let vector = args[0].immediate();
+ let array_type = bx.context.new_array_type(None, in_type, 8);
+ // TODO(antoyo): switch to using new_vector_access or __builtin_convertvector for vector casting.
+ let array = bx.context.new_bitcast(None, vector, array_type);
+
+ let cast_vec_element = |index| {
+ let index = bx.context.new_rvalue_from_int(bx.int_type, index);
+ bx.context.new_cast(None, bx.context.new_array_access(None, array, index).to_rvalue(), out_type)
+ };
+
+ bx.context.new_rvalue_from_vector(None, vector_type, &[
+ cast_vec_element(0),
+ cast_vec_element(1),
+ cast_vec_element(2),
+ cast_vec_element(3),
+ cast_vec_element(4),
+ cast_vec_element(5),
+ cast_vec_element(6),
+ cast_vec_element(7),
+ ])
+ };
+
+ match (in_style, out_style) {
+ (Style::Int(in_is_signed), Style::Int(_)) => {
+ return Ok(match in_width.cmp(&out_width) {
+ Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
+ Ordering::Equal => args[0].immediate(),
+ Ordering::Less => {
+ if in_is_signed {
+ match (in_width, out_width) {
+ // FIXME(antoyo): the function _mm_cvtepi8_epi16 should directly
+ // call an intrinsic equivalent to __builtin_ia32_pmovsxbw128 so that
+ // we can generate a call to it.
+ (8, 16) => extend(bx.i8_type, bx.i16_type),
+ (8, 32) => extend(bx.i8_type, bx.i32_type),
+ (8, 64) => extend(bx.i8_type, bx.i64_type),
+ (16, 32) => extend(bx.i16_type, bx.i32_type),
+ (32, 64) => extend(bx.i32_type, bx.i64_type),
+ (16, 64) => extend(bx.i16_type, bx.i64_type),
+ _ => unimplemented!("in: {}, out: {}", in_width, out_width),
+ }
+ } else {
+ match (in_width, out_width) {
+ (8, 16) => extend(bx.u8_type, bx.u16_type),
+ (8, 32) => extend(bx.u8_type, bx.u32_type),
+ (8, 64) => extend(bx.u8_type, bx.u64_type),
+ (16, 32) => extend(bx.u16_type, bx.u32_type),
+ (16, 64) => extend(bx.u16_type, bx.u64_type),
+ (32, 64) => extend(bx.u32_type, bx.u64_type),
+ _ => unimplemented!("in: {}, out: {}", in_width, out_width),
+ }
+ }
+ }
+ });
+ }
+ (Style::Int(_), Style::Float) => {
+                // TODO: add support for internal functions in libgccjit to get access to IFN_VEC_CONVERT, which
+                // behaves like __builtin_convertvector.
+                // Or maybe provide convert_vector as an API, since it might not be easy to get the
+                // types of internal functions.
+ unimplemented!();
+ }
+ (Style::Float, Style::Int(_)) => {
+ unimplemented!();
+ }
+ (Style::Float, Style::Float) => {
+ unimplemented!();
+ }
+ _ => { /* Unsupported. Fallthrough. */ }
+ }
+ require!(
+ false,
+ "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
+ in_ty,
+ in_elem,
+ ret_ty,
+ out_elem
+ );
+ }
+
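+    // Expands to one `if` per binary SIMD intrinsic, dispatching on the element
+    // type to the matching builder method (e.g. `simd_add` uses `add` for
+    // integers and `fadd` for floats).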
+ macro_rules! arith_binary {
+ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+ $(if name == sym::$name {
+ match in_elem.kind() {
+ $($(ty::$p(_))|* => {
+ return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
+ })*
+ _ => {},
+ }
+ require!(false,
+ "unsupported operation on `{}` with element `{}`",
+ in_ty,
+ in_elem)
+ })*
+ }
+ }
+
+ fn simd_simple_float_intrinsic<'gcc, 'tcx>(
+ name: Symbol,
+ in_elem: Ty<'_>,
+ in_ty: Ty<'_>,
+ in_len: u64,
+ bx: &mut Builder<'_, 'gcc, 'tcx>,
+ span: Span,
+ args: &[OperandRef<'tcx, RValue<'gcc>>],
+ ) -> Result<RValue<'gcc>, ()> {
+ macro_rules! emit_error {
+ ($msg: tt, $($fmt: tt)*) => {
+ span_invalid_monomorphization_error(
+ bx.sess(), span,
+ &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
+ name, $($fmt)*));
+ }
+ }
+ macro_rules! return_error {
+ ($($fmt: tt)*) => {
+ {
+ emit_error!($($fmt)*);
+ return Err(());
+ }
+ }
+ }
+
+ let (elem_ty_str, elem_ty) =
+ if let ty::Float(f) = in_elem.kind() {
+ let elem_ty = bx.cx.type_float_from_ty(*f);
+ match f.bit_width() {
+ 32 => ("f32", elem_ty),
+ 64 => ("f64", elem_ty),
+ _ => {
+ return_error!(
+ "unsupported element type `{}` of floating-point vector `{}`",
+ f.name_str(),
+ in_ty
+ );
+ }
+ }
+ }
+ else {
+ return_error!("`{}` is not a floating-point type", in_ty);
+ };
+
+ let vec_ty = bx.cx.type_vector(elem_ty, in_len);
+
+ let (intr_name, fn_ty) =
+ match name {
+ sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)), // TODO(antoyo): pand with 170141183420855150465331762880109871103
+ sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
+ sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
+ sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
+ sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
+ _ => return_error!("unrecognized intrinsic `{}`", name),
+ };
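+        // Build the LLVM-style intrinsic name (e.g. `llvm.sqrt.v4f32`);
+        // `intrinsic::llvm` maps such names to equivalent GCC builtin functions.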
+ let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
+ let function = intrinsic::llvm::intrinsic(llvm_name, &bx.cx);
+ let function: RValue<'gcc> = unsafe { std::mem::transmute(function) };
+ let c = bx.call(fn_ty, function, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
+ Ok(c)
+ }
+
+ if std::matches!(
+ name,
+ sym::simd_ceil
+ | sym::simd_fabs
+ | sym::simd_fcos
+ | sym::simd_fexp2
+ | sym::simd_fexp
+ | sym::simd_flog10
+ | sym::simd_flog2
+ | sym::simd_flog
+ | sym::simd_floor
+ | sym::simd_fma
+ | sym::simd_fpow
+ | sym::simd_fpowi
+ | sym::simd_fsin
+ | sym::simd_fsqrt
+ | sym::simd_round
+ | sym::simd_trunc
+ ) {
+ return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
+ }
+
+ arith_binary! {
+ simd_add: Uint, Int => add, Float => fadd;
+ simd_sub: Uint, Int => sub, Float => fsub;
+ simd_mul: Uint, Int => mul, Float => fmul;
+ simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
+ simd_rem: Uint => urem, Int => srem, Float => frem;
+ simd_shl: Uint, Int => shl;
+ simd_shr: Uint => lshr, Int => ashr;
+ simd_and: Uint, Int => and;
+ simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors.
+ simd_xor: Uint, Int => xor;
+ }
+
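+    // Same shape as `arith_binary!` above, but for unary operations.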
+ macro_rules! arith_unary {
+ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+ $(if name == sym::$name {
+ match in_elem.kind() {
+ $($(ty::$p(_))|* => {
+ return Ok(bx.$call(args[0].immediate()))
+ })*
+ _ => {},
+ }
+ require!(false,
+ "unsupported operation on `{}` with element `{}`",
+ in_ty,
+ in_elem)
+ })*
+ }
+ }
+
+ arith_unary! {
+ simd_neg: Int => neg, Float => fneg;
+ }
+
+ #[cfg(feature="master")]
+ if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
+ let lhs = args[0].immediate();
+ let rhs = args[1].immediate();
+ let is_add = name == sym::simd_saturating_add;
+ let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
+ let (signed, elem_width, elem_ty) = match *in_elem.kind() {
+ ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
+ ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
+ _ => {
+ return_error!(
+ "expected element type `{}` of vector type `{}` \
+ to be a signed or unsigned integer type",
+ arg_tys[0].simd_size_and_type(bx.tcx()).1,
+ arg_tys[0]
+ );
+ }
+ };
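+        // Only the 256-bit AVX2 forms are handled so far: vectors of 32 x i8 or
+        // 16 x i16 lanes, signed or unsigned, add or sub.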
+ let builtin_name =
+ match (signed, is_add, in_len, elem_width) {
+ (true, true, 32, 8) => "__builtin_ia32_paddsb256", // TODO(antoyo): cast arguments to unsigned.
+ (false, true, 32, 8) => "__builtin_ia32_paddusb256",
+ (true, true, 16, 16) => "__builtin_ia32_paddsw256",
+ (false, true, 16, 16) => "__builtin_ia32_paddusw256",
+ (true, false, 16, 16) => "__builtin_ia32_psubsw256",
+ (false, false, 16, 16) => "__builtin_ia32_psubusw256",
+ (true, false, 32, 8) => "__builtin_ia32_psubsb256",
+ (false, false, 32, 8) => "__builtin_ia32_psubusb256",
+ _ => unimplemented!("signed: {}, is_add: {}, in_len: {}, elem_width: {}", signed, is_add, in_len, elem_width),
+ };
+ let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
+
+ let func = bx.context.get_target_builtin_function(builtin_name);
+ let param1_type = func.get_param(0).to_rvalue().get_type();
+ let param2_type = func.get_param(1).to_rvalue().get_type();
+ let lhs = bx.cx.bitcast_if_needed(lhs, param1_type);
+ let rhs = bx.cx.bitcast_if_needed(rhs, param2_type);
+ let result = bx.context.new_call(None, func, &[lhs, rhs]);
+ // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+ return Ok(bx.context.new_bitcast(None, result, vec_ty));
+ }
+
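+    // Expands to a handler for an arithmetic reduction intrinsic: the ordered
+    // form folds the explicit accumulator (args[1]) into the reduced value,
+    // while the unordered form reduces the vector directly.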
+ macro_rules! arith_red {
+ ($name:ident : $vec_op:expr, $float_reduce:ident, $ordered:expr, $op:ident,
+ $identity:expr) => {
+ if name == sym::$name {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ return match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let r = bx.vector_reduce_op(args[0].immediate(), $vec_op);
+ if $ordered {
+ // if overflow occurs, the result is the
+ // mathematical result modulo 2^n:
+ Ok(bx.$op(args[1].immediate(), r))
+ }
+ else {
+                        Ok(r)
+ }
+ }
+ ty::Float(_) => {
+ if $ordered {
+ // ordered arithmetic reductions take an accumulator
+ let acc = args[1].immediate();
+ Ok(bx.$float_reduce(acc, args[0].immediate()))
+ }
+ else {
+ Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op))
+ }
+ }
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
+ arith_red!(
+ simd_reduce_add_unordered: BinaryOp::Plus,
+ vector_reduce_fadd_fast,
+ false,
+ add,
+ 0.0 // TODO: Use this argument.
+ );
+ arith_red!(
+ simd_reduce_mul_unordered: BinaryOp::Mult,
+ vector_reduce_fmul_fast,
+ false,
+ mul,
+ 1.0
+ );
+
+ macro_rules! minmax_red {
+ ($name:ident: $reduction:ident) => {
+ if name == sym::$name {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ return match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) | ty::Float(_) => Ok(bx.$reduction(args[0].immediate())),
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
+ minmax_red!(simd_reduce_min: vector_reduce_min);
+ minmax_red!(simd_reduce_max: vector_reduce_max);
+
+ macro_rules! bitwise_red {
+ ($name:ident : $op:expr, $boolean:expr) => {
+ if name == sym::$name {
+ let input = if !$boolean {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ args[0].immediate()
+ } else {
+ match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {}
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ }
+
+ // boolean reductions operate on vectors of i1s:
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, in_len as u64);
+ bx.trunc(args[0].immediate(), i1xn)
+ };
+ return match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let r = bx.vector_reduce_op(input, $op);
+ Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
+ }
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
+ bitwise_red!(simd_reduce_and: BinaryOp::BitwiseAnd, false);
+ bitwise_red!(simd_reduce_or: BinaryOp::BitwiseOr, false);
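+    // NOTE: only the non-boolean reductions are instantiated here; the
+    // `$boolean` path of `bitwise_red!` is currently unused.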
+
+ unimplemented!("simd {}", name);
+}
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
new file mode 100644
index 000000000..8a206c036
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -0,0 +1,331 @@
+/*
+ * TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?)
+ * TODO(antoyo): support #[inline] attributes.
+ * TODO(antoyo): support LTO (gcc's equivalent to Thin LTO is enabled by -fwhopr: https://stackoverflow.com/questions/64954525/does-gcc-have-thin-lto).
+ *
+ * TODO(antoyo): remove the patches.
+ */
+
+#![feature(
+ rustc_private,
+ decl_macro,
+ associated_type_bounds,
+ never_type,
+ trusted_len,
+ hash_raw_entry
+)]
+#![allow(broken_intra_doc_links)]
+#![recursion_limit="256"]
+#![warn(rust_2018_idioms)]
+#![warn(unused_lifetimes)]
+
+extern crate rustc_ast;
+extern crate rustc_codegen_ssa;
+extern crate rustc_data_structures;
+extern crate rustc_errors;
+extern crate rustc_hir;
+extern crate rustc_metadata;
+extern crate rustc_middle;
+extern crate rustc_session;
+extern crate rustc_span;
+extern crate rustc_target;
+extern crate tempfile;
+
+// This prevents duplicating functions and statics that are already part of the host rustc process.
+#[allow(unused_extern_crates)]
+extern crate rustc_driver;
+
+mod abi;
+mod allocator;
+mod archive;
+mod asm;
+mod back;
+mod base;
+mod builder;
+mod callee;
+mod common;
+mod consts;
+mod context;
+mod coverageinfo;
+mod debuginfo;
+mod declare;
+mod int;
+mod intrinsic;
+mod mono_item;
+mod type_;
+mod type_of;
+
+use std::any::Any;
+use std::sync::{Arc, Mutex};
+
+use gccjit::{Context, OptimizationLevel, CType};
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
+use rustc_codegen_ssa::base::codegen_crate;
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryFn};
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use rustc_codegen_ssa::target_features::supported_target_features;
+use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{ErrorGuaranteed, Handler};
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::query::Providers;
+use rustc_session::config::{Lto, OptLevel, OutputFilenames};
+use rustc_session::Session;
+use rustc_span::Symbol;
+use rustc_span::fatal_error::FatalError;
+use tempfile::TempDir;
+
+pub struct PrintOnPanic<F: Fn() -> String>(pub F);
+
+impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
+ fn drop(&mut self) {
+ if ::std::thread::panicking() {
+ println!("{}", (self.0)());
+ }
+ }
+}
+
+#[derive(Clone)]
+pub struct GccCodegenBackend {
+ supports_128bit_integers: Arc<Mutex<bool>>,
+}
+
+impl CodegenBackend for GccCodegenBackend {
+ fn init(&self, sess: &Session) {
+ if sess.lto() != Lto::No {
+ sess.warn("LTO is not supported. You may get a linker error.");
+ }
+
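+        // Probe whether the libgccjit in use supports 128-bit integers: create a
+        // u128 type, compile to a throwaway assembly file, and check that no
+        // error was reported.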
+ let temp_dir = TempDir::new().expect("cannot create temporary directory");
+ let temp_file = temp_dir.into_path().join("result.asm");
+ let check_context = Context::default();
+ check_context.set_print_errors_to_stderr(false);
+ let _int128_ty = check_context.new_c_type(CType::UInt128t);
+        // NOTE: we cannot just call compile() as this would require files other than libgccjit.so.
+ check_context.compile_to_file(gccjit::OutputKind::Assembler, temp_file.to_str().expect("path to str"));
+ *self.supports_128bit_integers.lock().expect("lock") = check_context.get_last_error() == Ok(None);
+ }
+
+ fn provide(&self, providers: &mut Providers) {
+        // FIXME(antoyo): compute the list of enabled features from the CLI flags.
+ providers.global_backend_features = |_tcx, ()| vec![];
+ }
+
+ fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box<dyn Any> {
+ let target_cpu = target_cpu(tcx.sess);
+ let res = codegen_crate(self.clone(), tcx, target_cpu.to_string(), metadata, need_metadata_module);
+
+ Box::new(res)
+ }
+
+ fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session, _outputs: &OutputFilenames) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+ let (codegen_results, work_products) = ongoing_codegen
+ .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<GccCodegenBackend>>()
+ .expect("Expected GccCodegenBackend's OngoingCodegen, found Box<Any>")
+ .join(sess);
+
+ Ok((codegen_results, work_products))
+ }
+
+ fn link(&self, sess: &Session, codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorGuaranteed> {
+ use rustc_codegen_ssa::back::link::link_binary;
+
+ link_binary(
+ sess,
+ &crate::archive::ArArchiveBuilderBuilder,
+ &codegen_results,
+ outputs,
+ )
+ }
+
+ fn target_features(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
+ target_features(sess, allow_unstable)
+ }
+}
+
+impl ExtraBackendMethods for GccCodegenBackend {
+ fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) -> Self::Module {
+ let mut mods = GccContext {
+ context: Context::default(),
+ };
+ unsafe { allocator::codegen(tcx, &mut mods, module_name, kind, has_alloc_error_handler); }
+ mods
+ }
+
+ fn compile_codegen_unit<'tcx>(&self, tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<Self::Module>, u64) {
+ base::compile_codegen_unit(tcx, cgu_name, *self.supports_128bit_integers.lock().expect("lock"))
+ }
+
+ fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel, _features: &[String]) -> TargetMachineFactoryFn<Self> {
+ // TODO(antoyo): set opt level.
+ Arc::new(|_| {
+ Ok(())
+ })
+ }
+
+ fn target_cpu<'b>(&self, _sess: &'b Session) -> &'b str {
+ unimplemented!();
+ }
+
+ fn tune_cpu<'b>(&self, _sess: &'b Session) -> Option<&'b str> {
+ None
+ // TODO(antoyo)
+ }
+}
+
+pub struct ModuleBuffer;
+
+impl ModuleBufferMethods for ModuleBuffer {
+ fn data(&self) -> &[u8] {
+ unimplemented!();
+ }
+}
+
+pub struct ThinBuffer;
+
+impl ThinBufferMethods for ThinBuffer {
+ fn data(&self) -> &[u8] {
+ unimplemented!();
+ }
+}
+
+pub struct GccContext {
+ context: Context<'static>,
+}
+
+unsafe impl Send for GccContext {}
+// FIXME(antoyo): this shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm"; try disabling it here instead.
+unsafe impl Sync for GccContext {}
+
+impl WriteBackendMethods for GccCodegenBackend {
+ type Module = GccContext;
+ type TargetMachine = ();
+ type ModuleBuffer = ModuleBuffer;
+ type Context = ();
+ type ThinData = ();
+ type ThinBuffer = ThinBuffer;
+
+ fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLTOInput<Self>>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> {
+ // TODO(antoyo): implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins.
+ // NOTE: implemented elsewhere.
+ // TODO(antoyo): what is implemented elsewhere ^ ?
+ let module =
+ match modules.remove(0) {
+ FatLTOInput::InMemory(module) => module,
+ FatLTOInput::Serialized { .. } => {
+ unimplemented!();
+ }
+ };
+ Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: vec![] })
+ }
+
+ fn run_thin_lto(_cgcx: &CodegenContext<Self>, _modules: Vec<(String, Self::ThinBuffer)>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
+ unimplemented!();
+ }
+
+ fn print_pass_timings(&self) {
+ unimplemented!();
+ }
+
+ unsafe fn optimize(_cgcx: &CodegenContext<Self>, _diag_handler: &Handler, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> {
+ module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level));
+ Ok(())
+ }
+
+ fn optimize_fat(_cgcx: &CodegenContext<Self>, _module: &mut ModuleCodegen<Self::Module>) -> Result<(), FatalError> {
+ // TODO(antoyo)
+ Ok(())
+ }
+
+ unsafe fn optimize_thin(_cgcx: &CodegenContext<Self>, _thin: ThinModule<Self>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+ unimplemented!();
+ }
+
+ unsafe fn codegen(cgcx: &CodegenContext<Self>, diag_handler: &Handler, module: ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
+ back::write::codegen(cgcx, diag_handler, module, config)
+ }
+
+ fn prepare_thin(_module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
+ unimplemented!();
+ }
+
+ fn serialize_module(_module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {
+ unimplemented!();
+ }
+
+ fn run_link(cgcx: &CodegenContext<Self>, diag_handler: &Handler, modules: Vec<ModuleCodegen<Self::Module>>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+ back::write::link(cgcx, diag_handler, modules)
+ }
+}
+
+/// This is the entrypoint for a hot-plugged rustc_codegen_gccjit.
+#[no_mangle]
+pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
+ Box::new(GccCodegenBackend {
+ supports_128bit_integers: Arc::new(Mutex::new(false)),
+ })
+}
+
+fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
+ match optlevel {
+ None => OptimizationLevel::None,
+ Some(level) => {
+ match level {
+ OptLevel::No => OptimizationLevel::None,
+ OptLevel::Less => OptimizationLevel::Limited,
+ OptLevel::Default => OptimizationLevel::Standard,
+ OptLevel::Aggressive => OptimizationLevel::Aggressive,
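+                // NOTE: gccjit exposes no dedicated size-optimization level, so
+                // the size levels fall back to Limited.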
+ OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited,
+ }
+ },
+ }
+}
+
+fn handle_native(name: &str) -> &str {
+ if name != "native" {
+ return name;
+ }
+
+ unimplemented!();
+}
+
+pub fn target_cpu(sess: &Session) -> &str {
+ match sess.opts.cg.target_cpu {
+ Some(ref name) => handle_native(name),
+ None => handle_native(sess.target.cpu.as_ref()),
+ }
+}
+
+pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
+ supported_target_features(sess)
+ .iter()
+ .filter_map(
+ |&(feature, gate)| {
+ if sess.is_nightly_build() || allow_unstable || gate.is_none() { Some(feature) } else { None }
+ },
+ )
+ .filter(|_feature| {
+ // TODO(antoyo): implement a way to get enabled feature in libgccjit.
+ // Probably using the equivalent of __builtin_cpu_supports.
+ #[cfg(feature="master")]
+ {
+ _feature.contains("sse") || _feature.contains("avx")
+ }
+ #[cfg(not(feature="master"))]
+ {
+ false
+ }
+ /*
+ adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512gfni,
+ avx512ifma, avx512pf, avx512vaes, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpclmulqdq,
+ avx512vpopcntdq, bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm,
+ sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, xsave, xsavec, xsaveopt, xsaves
+ */
+ //false
+ })
+ .map(|feature| Symbol::intern(feature))
+ .collect()
+}
diff --git a/compiler/rustc_codegen_gcc/src/mono_item.rs b/compiler/rustc_codegen_gcc/src/mono_item.rs
new file mode 100644
index 000000000..9468a1ef4
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/mono_item.rs
@@ -0,0 +1,38 @@
+use rustc_codegen_ssa::traits::PreDefineMethods;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::{self, Instance, TypeVisitable};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
+use rustc_span::def_id::DefId;
+
+use crate::base;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn predefine_static(&self, def_id: DefId, _linkage: Linkage, _visibility: Visibility, symbol_name: &str) {
+ let attrs = self.tcx.codegen_fn_attrs(def_id);
+ let instance = Instance::mono(self.tcx, def_id);
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let gcc_type = self.layout_of(ty).gcc_type(self, true);
+
+ let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+ let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section);
+
+ // TODO(antoyo): set linkage and visibility.
+ self.instances.borrow_mut().insert(instance, global);
+ }
+
+ fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, _visibility: Visibility, symbol_name: &str) {
+ assert!(!instance.substs.needs_infer());
+
+ let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
+ self.linkage.set(base::linkage_to_gcc(linkage));
+ let _decl = self.declare_fn(symbol_name, &fn_abi);
+ //let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+
+ // TODO(antoyo): call set_link_section() to allow initializing argc/argv.
+ // TODO(antoyo): set unique comdat.
+ // TODO(antoyo): use inline attribute from there in linkage.set() above.
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs
new file mode 100644
index 000000000..68bdb8d4e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/type_.rs
@@ -0,0 +1,303 @@
+use std::convert::TryInto;
+
+use gccjit::{RValue, Struct, Type};
+use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, TypeMembershipMethods};
+use rustc_codegen_ssa::common::TypeKind;
+use rustc_middle::{bug, ty};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::{AddressSpace, Align, Integer, Size};
+
+use crate::common::TypeReflection;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn type_ix(&self, num_bits: u64) -> Type<'gcc> {
+        // gcc only supports 1, 2, 4, 8 and 16-byte integers.
+        // FIXME(antoyo): it is misleading to use the next power of two here, as rustc_codegen_ssa
+        // sometimes uses 96-bit numbers and the following code will give an integer of a different
+        // size.
+ let bytes = (num_bits / 8).next_power_of_two() as i32;
+ match bytes {
+ 1 => self.i8_type,
+ 2 => self.i16_type,
+ 4 => self.i32_type,
+ 8 => self.i64_type,
+ 16 => self.i128_type,
+ _ => panic!("unexpected num_bits: {}", num_bits),
+ }
+ }
+
+ pub fn type_void(&self) -> Type<'gcc> {
+ self.context.new_type::<()>()
+ }
+
+ pub fn type_size_t(&self) -> Type<'gcc> {
+ self.context.new_type::<usize>()
+ }
+
+ pub fn type_u8(&self) -> Type<'gcc> {
+ self.u8_type
+ }
+
+ pub fn type_u16(&self) -> Type<'gcc> {
+ self.u16_type
+ }
+
+ pub fn type_u32(&self) -> Type<'gcc> {
+ self.u32_type
+ }
+
+ pub fn type_u64(&self) -> Type<'gcc> {
+ self.u64_type
+ }
+
+ pub fn type_u128(&self) -> Type<'gcc> {
+ self.u128_type
+ }
+
+ pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> {
+ // FIXME(eddyb) We could find a better approximation if ity.align < align.
+ let ity = Integer::approximate_align(self, align);
+ self.type_from_integer(ity)
+ }
+
+ pub fn type_vector(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> {
+ self.context.new_vector_type(ty, len)
+ }
+
+ pub fn type_float_from_ty(&self, t: ty::FloatTy) -> Type<'gcc> {
+ match t {
+ ty::FloatTy::F32 => self.type_f32(),
+ ty::FloatTy::F64 => self.type_f64(),
+ }
+ }
+}
+
+impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn type_i1(&self) -> Type<'gcc> {
+ self.bool_type
+ }
+
+ fn type_i8(&self) -> Type<'gcc> {
+ self.i8_type
+ }
+
+ fn type_i16(&self) -> Type<'gcc> {
+ self.i16_type
+ }
+
+ fn type_i32(&self) -> Type<'gcc> {
+ self.i32_type
+ }
+
+ fn type_i64(&self) -> Type<'gcc> {
+ self.i64_type
+ }
+
+ fn type_i128(&self) -> Type<'gcc> {
+ self.i128_type
+ }
+
+ fn type_isize(&self) -> Type<'gcc> {
+ self.isize_type
+ }
+
+ fn type_f32(&self) -> Type<'gcc> {
+ self.context.new_type::<f32>()
+ }
+
+ fn type_f64(&self) -> Type<'gcc> {
+ self.context.new_type::<f64>()
+ }
+
+ fn type_func(&self, params: &[Type<'gcc>], return_type: Type<'gcc>) -> Type<'gcc> {
+ self.context.new_function_pointer_type(None, return_type, params, false)
+ }
+
+ fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> {
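+        // Struct types are cached by their field types, so identical field lists
+        // share a single GCC struct type.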
+ let types = fields.to_vec();
+ if let Some(typ) = self.struct_types.borrow().get(fields) {
+ return typ.clone();
+ }
+ let fields: Vec<_> = fields.iter().enumerate()
+ .map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index)))
+ .collect();
+ let typ = self.context.new_struct_type(None, "struct", &fields).as_type();
+ if packed {
+ #[cfg(feature="master")]
+ typ.set_packed();
+ }
+ self.struct_types.borrow_mut().insert(types, typ);
+ typ
+ }
+
+ fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
+ if self.is_int_type_or_bool(typ) {
+ TypeKind::Integer
+ }
+ else if typ.is_compatible_with(self.float_type) {
+ TypeKind::Float
+ }
+ else if typ.is_compatible_with(self.double_type) {
+ TypeKind::Double
+ }
+ else if typ.is_vector() {
+ TypeKind::Vector
+ }
+ else {
+ // TODO(antoyo): support other types.
+ TypeKind::Void
+ }
+ }
+
+ fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
+ ty.make_pointer()
+ }
+
+ fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
+ // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE?
+ ty.make_pointer()
+ }
+
+ fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> {
+ if let Some(typ) = ty.dyncast_array() {
+ typ
+ }
+ else if let Some(vector_type) = ty.dyncast_vector() {
+ vector_type.get_element_type()
+ }
+ else if let Some(typ) = ty.get_pointee() {
+ typ
+ }
+ else {
+ unreachable!()
+ }
+ }
+
+ fn vector_length(&self, _ty: Type<'gcc>) -> usize {
+ unimplemented!();
+ }
+
+ fn float_width(&self, typ: Type<'gcc>) -> usize {
+ let f32 = self.context.new_type::<f32>();
+ let f64 = self.context.new_type::<f64>();
+ if typ.is_compatible_with(f32) {
+ 32
+ }
+ else if typ.is_compatible_with(f64) {
+ 64
+ }
+ else {
+ panic!("Cannot get width of float type {:?}", typ);
+ }
+ // TODO(antoyo): support other sizes.
+ }
+
+ fn int_width(&self, typ: Type<'gcc>) -> u64 {
+ self.gcc_int_width(typ)
+ }
+
+ fn val_ty(&self, value: RValue<'gcc>) -> Type<'gcc> {
+ value.get_type()
+ }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn type_padding_filler(&self, size: Size, align: Align) -> Type<'gcc> {
+ let unit = Integer::approximate_align(self, align);
+ let size = size.bytes();
+ let unit_size = unit.size().bytes();
+ assert_eq!(size % unit_size, 0);
+ self.type_array(self.type_from_integer(unit), size / unit_size)
+ }
+
+ pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], packed: bool) {
+ let fields: Vec<_> = fields.iter().enumerate()
+ .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
+ .collect();
+ typ.set_fields(None, &fields);
+ if packed {
+ #[cfg(feature="master")]
+ typ.as_type().set_packed();
+ }
+ }
+
+ pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> {
+ self.context.new_opaque_struct_type(None, name)
+ }
+
+ pub fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> {
+ if let Some(struct_type) = ty.is_struct() {
+ if struct_type.get_field_count() == 0 {
+                // NOTE: since gccjit only supports i32 for the array size and libcore's tests use a
+                // size of usize::MAX in test_binary_search, we work around this by setting the size to
+                // zero for ZSTs.
+ // FIXME(antoyo): fix gccjit API.
+ len = 0;
+ }
+ }
+
+ // NOTE: see note above. Some other test uses usize::MAX.
+ if len == u64::MAX {
+ len = 0;
+ }
+
+ let len: i32 = len.try_into().expect("array len");
+
+ self.context.new_array_type(None, ty, len)
+ }
+
+ pub fn type_bool(&self) -> Type<'gcc> {
+ self.context.new_type::<bool>()
+ }
+}
+
+pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec<Type<'gcc>>, bool) {
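+    // Walk the fields in increasing offset order, materializing an explicit
+    // padding filler before each field and a trailing filler up to the struct
+    // size; a sized struct thus produces `1 + field_count * 2` entries.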
+ let field_count = layout.fields.count();
+
+ let mut packed = false;
+ let mut offset = Size::ZERO;
+ let mut prev_effective_align = layout.align.abi;
+ let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
+ for i in layout.fields.index_by_increasing_offset() {
+ let target_offset = layout.fields.offset(i as usize);
+ let field = layout.field(cx, i);
+ let effective_field_align =
+ layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
+ packed |= effective_field_align < field.align.abi;
+
+ assert!(target_offset >= offset);
+ let padding = target_offset - offset;
+ let padding_align = prev_effective_align.min(effective_field_align);
+ assert_eq!(offset.align_to(padding_align) + padding, target_offset);
+ result.push(cx.type_padding_filler(padding, padding_align));
+
+ result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME(antoyo): might need to check if the type is inside another, like Box<Type>.
+ offset = target_offset + field.size;
+ prev_effective_align = effective_field_align;
+ }
+ if !layout.is_unsized() && field_count > 0 {
+ if offset > layout.size {
+ bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
+ }
+ let padding = layout.size - offset;
+ let padding_align = prev_effective_align;
+ assert_eq!(offset.align_to(padding_align) + padding, layout.size);
+ result.push(cx.type_padding_filler(padding, padding_align));
+ assert_eq!(result.len(), 1 + field_count * 2);
+ }
+
+ (result, packed)
+}
+
+impl<'gcc, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn set_type_metadata(&self, _function: RValue<'gcc>, _typeid: String) {
+ // Unsupported.
+ }
+
+ fn typeid_metadata(&self, _typeid: String) -> RValue<'gcc> {
+ // Unsupported.
+ self.context.new_rvalue_from_int(self.int_type, 0)
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
new file mode 100644
index 000000000..524d10fb5
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/type_of.rs
@@ -0,0 +1,385 @@
+use std::fmt::Write;
+
+use gccjit::{Struct, Type};
+use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods};
+use rustc_middle::bug;
+use rustc_middle::ty::{self, Ty, TypeVisitable};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants};
+use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
+
+use crate::abi::{FnAbiGccExt, GccType};
+use crate::context::CodegenCx;
+use crate::type_::struct_fields;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ fn type_from_unsigned_integer(&self, i: Integer) -> Type<'gcc> {
+ use Integer::*;
+ match i {
+ I8 => self.type_u8(),
+ I16 => self.type_u16(),
+ I32 => self.type_u32(),
+ I64 => self.type_u64(),
+ I128 => self.type_u128(),
+ }
+ }
+
+ #[cfg(feature="master")]
+ pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> {
+ match t {
+ ty::IntTy::Isize => self.type_isize(),
+ ty::IntTy::I8 => self.type_i8(),
+ ty::IntTy::I16 => self.type_i16(),
+ ty::IntTy::I32 => self.type_i32(),
+ ty::IntTy::I64 => self.type_i64(),
+ ty::IntTy::I128 => self.type_i128(),
+ }
+ }
+
+ #[cfg(feature="master")]
+ pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> {
+ match t {
+ ty::UintTy::Usize => self.type_isize(),
+ ty::UintTy::U8 => self.type_i8(),
+ ty::UintTy::U16 => self.type_i16(),
+ ty::UintTy::U32 => self.type_i32(),
+ ty::UintTy::U64 => self.type_i64(),
+ ty::UintTy::U128 => self.type_i128(),
+ }
+ }
+}
+
+pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> {
+ match layout.abi {
+ Abi::Scalar(_) => bug!("handled elsewhere"),
+ Abi::Vector { ref element, count } => {
+ let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO);
+ return cx.context.new_vector_type(element, count);
+ },
+ Abi::ScalarPair(..) => {
+ return cx.type_struct(
+ &[
+ layout.scalar_pair_element_gcc_type(cx, 0, false),
+ layout.scalar_pair_element_gcc_type(cx, 1, false),
+ ],
+ false,
+ );
+ }
+ Abi::Uninhabited | Abi::Aggregate { .. } => {}
+ }
+
+ let name = match layout.ty.kind() {
+ // FIXME(eddyb) producing readable type names for trait objects can result
+ // in problematically distinct types due to HRTB and subtyping (see #47638).
+ // ty::Dynamic(..) |
+ ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
+ if !cx.sess().fewer_names() =>
+ {
+ let mut name = with_no_trimmed_paths!(layout.ty.to_string());
+ if let (&ty::Adt(def, _), &Variants::Single { index }) =
+ (layout.ty.kind(), &layout.variants)
+ {
+ if def.is_enum() && !def.variants().is_empty() {
+ write!(&mut name, "::{}", def.variant(index).name).unwrap();
+ }
+ }
+ if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
+ (layout.ty.kind(), &layout.variants)
+ {
+ write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
+ }
+ Some(name)
+ }
+ ty::Adt(..) => {
+ // If `Some` is returned then a named struct is created in LLVM. Name collisions are
+ // avoided by LLVM (with increasing suffixes). If rustc doesn't generate names then that
+ // can improve perf.
+ // FIXME(antoyo): I don't think that's true for libgccjit.
+ Some(String::new())
+ }
+ _ => None,
+ };
+
+ match layout.fields {
+ FieldsShape::Primitive | FieldsShape::Union(_) => {
+ let fill = cx.type_padding_filler(layout.size, layout.align.abi);
+ let packed = false;
+ match name {
+ None => cx.type_struct(&[fill], packed),
+ Some(ref name) => {
+ let gcc_type = cx.type_named_struct(name);
+ cx.set_struct_body(gcc_type, &[fill], packed);
+ gcc_type.as_type()
+ },
+ }
+ }
+ FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx, true), count),
+ FieldsShape::Arbitrary { .. } =>
+ match name {
+ None => {
+ let (gcc_fields, packed) = struct_fields(cx, layout);
+ cx.type_struct(&gcc_fields, packed)
+ },
+ Some(ref name) => {
+ let gcc_type = cx.type_named_struct(name);
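+                    // Defer filling in the fields so that recursive types can
+                    // refer to this named struct before its body is set.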
+ *defer = Some((gcc_type, layout));
+ gcc_type.as_type()
+ },
+ },
+ }
+}
+
+pub trait LayoutGccExt<'tcx> {
+ fn is_gcc_immediate(&self) -> bool;
+ fn is_gcc_scalar_pair(&self) -> bool;
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc>;
+ fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+ fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>;
+ fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>;
+ fn gcc_field_index(&self, index: usize) -> u64;
+ fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>;
+}
+
+impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
+ fn is_gcc_immediate(&self) -> bool {
+ match self.abi {
+ Abi::Scalar(_) | Abi::Vector { .. } => true,
+ Abi::ScalarPair(..) => false,
+ Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(),
+ }
+ }
+
+ fn is_gcc_scalar_pair(&self) -> bool {
+ match self.abi {
+ Abi::ScalarPair(..) => true,
+ Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+ }
+ }
+
+ /// Gets the GCC type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
+ /// The pointee type of the pointer in `PlaceRef` is always this type.
+ /// For sized types, it is also the right LLVM type for an `alloca`
+ /// containing a value of that type, and most immediates (except `bool`).
+ /// Unsized types, however, are represented by a "minimal unit", e.g.
+ /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
+ /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
+ /// If the type is an unsized struct, the regular layout is generated,
+ /// with the inner-most trailing unsized field using the "minimal unit"
+ /// of that field's type - this is useful for taking the address of
+ /// that field and ensuring the struct has the right alignment.
+ //TODO(antoyo): do we still need the set_fields parameter?
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc> {
+ if let Abi::Scalar(ref scalar) = self.abi {
+ // Use a different cache for scalars because pointers to DSTs
+ // can be either fat or thin (data pointers of fat pointers).
+ if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
+ return ty;
+ }
+ let ty =
+ match *self.ty.kind() {
+ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
+ cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx, set_fields))
+ }
+ ty::Adt(def, _) if def.is_box() => {
+ cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx, true))
+ }
+ ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())),
+ _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
+ };
+ cx.scalar_types.borrow_mut().insert(self.ty, ty);
+ return ty;
+ }
+
+ // Check the cache.
+ let variant_index =
+ match self.variants {
+ Variants::Single { index } => Some(index),
+ _ => None,
+ };
+ let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned();
+ if let Some(ty) = cached_type {
+ let type_to_set_fields = cx.types_with_fields_to_set.borrow_mut().remove(&ty);
+ if let Some((struct_type, layout)) = type_to_set_fields {
+ // Since we might be trying to generate a type containing another type which is not
+ // completely generated yet, we deferred setting the fields until now.
+ let (fields, packed) = struct_fields(cx, layout);
+ cx.set_struct_body(struct_type, &fields, packed);
+ }
+ return ty;
+ }
+
+ assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
+
+ // Make sure lifetimes are erased, to avoid generating distinct LLVM
+ // types for Rust types that only differ in the choice of lifetimes.
+ let normal_ty = cx.tcx.erase_regions(self.ty);
+
+ let mut defer = None;
+ let ty =
+ if self.ty != normal_ty {
+ let mut layout = cx.layout_of(normal_ty);
+ if let Some(v) = variant_index {
+ layout = layout.for_variant(cx, v);
+ }
+ layout.gcc_type(cx, true)
+ }
+ else {
+ uncached_gcc_type(cx, *self, &mut defer)
+ };
+
+ cx.types.borrow_mut().insert((self.ty, variant_index), ty);
+
+ if let Some((ty, layout)) = defer {
+ let (fields, packed) = struct_fields(cx, layout);
+ cx.set_struct_body(ty, &fields, packed);
+ }
+
+ ty
+ }
+
+ fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ if let Abi::Scalar(ref scalar) = self.abi {
+ if scalar.is_bool() {
+ return cx.type_i1();
+ }
+ }
+ self.gcc_type(cx, true)
+ }
+
+ fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> {
+ match scalar.primitive() {
+ Int(i, true) => cx.type_from_integer(i),
+ Int(i, false) => cx.type_from_unsigned_integer(i),
+ F32 => cx.type_f32(),
+ F64 => cx.type_f64(),
+ Pointer => {
+ // If we know the alignment, pick something better than i8.
+ let pointee =
+ if let Some(pointee) = self.pointee_info_at(cx, offset) {
+ cx.type_pointee_for_align(pointee.align)
+ }
+ else {
+ cx.type_i8()
+ };
+ cx.type_ptr_to(pointee)
+ }
+ }
+ }
+
+ fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
+ // TODO(antoyo): remove llvm hack:
+ // HACK(eddyb) special-case fat pointers until LLVM removes
+ // pointee types, to avoid bitcasting every `OperandRef::deref`.
+ match self.ty.kind() {
+ ty::Ref(..) | ty::RawPtr(_) => {
+ return self.field(cx, index).gcc_type(cx, true);
+ }
+ // only wide pointer boxes are handled as pointers
+ // thin pointer boxes with scalar allocators are handled by the general logic below
+ ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
+ let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
+ return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate);
+ }
+ _ => {}
+ }
+
+ let (a, b) = match self.abi {
+ Abi::ScalarPair(ref a, ref b) => (a, b),
+ _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
+ };
+ let scalar = [a, b][index];
+
+ // Make sure to return the same type `immediate_gcc_type` would when
+ // dealing with an immediate pair. This means that `(bool, bool)` is
+ // effectively represented as `{i8, i8}` in memory and two `i1`s as an
+ // immediate, just like `bool` is typically `i8` in memory and only `i1`
+ // when immediate. We need to load/store `bool` as `i8` to avoid
+ // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
+        // TODO(antoyo): these bugs certainly don't happen in this case since the bool type is used instead of i1.
+ if scalar.is_bool() {
+ return cx.type_i1();
+ }
+
+ let offset =
+ if index == 0 {
+ Size::ZERO
+ }
+ else {
+ a.size(cx).align_to(b.align(cx).abi)
+ };
+ self.scalar_gcc_type_at(cx, scalar, offset)
+ }
+
+ fn gcc_field_index(&self, index: usize) -> u64 {
+ match self.abi {
+ Abi::Scalar(_) | Abi::ScalarPair(..) => {
+ bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
+ }
+ _ => {}
+ }
+ match self.fields {
+ FieldsShape::Primitive | FieldsShape::Union(_) => {
+ bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
+ }
+
+ FieldsShape::Array { .. } => index as u64,
+
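+            // `struct_fields` emits a padding filler before every field, so
+            // field `index` lands at GCC field index `1 + memory_index * 2`.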
+ FieldsShape::Arbitrary { .. } => 1 + (self.fields.memory_index(index) as u64) * 2,
+ }
+ }
+
+ fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
+ if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
+ return pointee;
+ }
+
+ let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset);
+
+ cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
+ result
+ }
+}
+
+impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
+ layout.gcc_type(self, true)
+ }
+
+ fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
+ layout.immediate_gcc_type(self)
+ }
+
+ fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool {
+ layout.is_gcc_immediate()
+ }
+
+ fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
+ layout.is_gcc_scalar_pair()
+ }
+
+ fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
+ layout.gcc_field_index(index)
+ }
+
+ fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
+ layout.scalar_pair_element_gcc_type(self, index, immediate)
+ }
+
+ fn cast_backend_type(&self, ty: &CastTarget) -> Type<'gcc> {
+ ty.gcc_type(self)
+ }
+
+ fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+ fn_abi.ptr_to_gcc_type(self)
+ }
+
+ fn reg_backend_type(&self, _ty: &Reg) -> Type<'gcc> {
+ unimplemented!();
+ }
+
+ fn fn_decl_backend_type(&self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+ // FIXME(antoyo): return correct type.
+ self.type_void()
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/test.sh b/compiler/rustc_codegen_gcc/test.sh
new file mode 100755
index 000000000..8b390f95a
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/test.sh
@@ -0,0 +1,291 @@
+#!/usr/bin/env bash
+
+# TODO(antoyo): rewrite with cargo-make (or `just`, or something similar) to only rebuild the sysroot when needed.
+
+set -e
+
+if [ -f ./gcc_path ]; then
+ export GCC_PATH=$(cat gcc_path)
+else
+ echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`, see Readme.md for details'
+ exit 1
+fi
+
+export LD_LIBRARY_PATH="$GCC_PATH"
+export LIBRARY_PATH="$GCC_PATH"
+
+flags=
+gcc_master_branch=1
+channel="debug"
+func=all
+build_only=0
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+    --release)
+        codegen_channel=release
+        channel="release"
+        shift
+        ;;
+ --release-sysroot)
+ sysroot_channel=release
+ shift
+ ;;
+ --no-default-features)
+ gcc_master_branch=0
+ flags="$flags --no-default-features"
+ shift
+ ;;
+ --features)
+ shift
+ flags="$flags --features $1"
+ shift
+ ;;
+ "--test-rustc")
+ func=test_rustc
+ shift
+ ;;
+
+ "--test-libcore")
+ func=test_libcore
+ shift
+ ;;
+
+ "--clean-ui-tests")
+ func=clean_ui_tests
+ shift
+ ;;
+
+ "--std-tests")
+ func=std_tests
+ shift
+ ;;
+
+ "--extended-tests")
+ func=extended_sysroot_tests
+ shift
+ ;;
+
+ "--build-sysroot")
+ func=build_sysroot
+ shift
+ ;;
+ "--build")
+ build_only=1
+ shift
+ ;;
+ *)
+ echo "Unknown option $1"
+ exit 1
+ ;;
+ esac
+done
+
+if [[ $channel == "release" ]]; then
+ export CHANNEL='release'
+ CARGO_INCREMENTAL=1 cargo rustc --release $flags
+else
+ echo $LD_LIBRARY_PATH
+ export CHANNEL='debug'
+ cargo rustc $flags
+fi
+
+if (( $build_only == 1 )); then
+ exit
+fi
+
+source config.sh
+
+function clean() {
+ rm -r target/out || true
+ mkdir -p target/out/gccjit
+}
+
+function mini_tests() {
+ echo "[BUILD] mini_core"
+ $RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target $TARGET_TRIPLE
+
+ echo "[BUILD] example"
+ $RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE
+
+ echo "[AOT] mini_core_hello_world"
+ $RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE
+ $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
+}
+
+function build_sysroot() {
+ echo "[BUILD] sysroot"
+ time ./build_sysroot/build_sysroot.sh
+}
+
+function std_tests() {
+ echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
+ $RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target $TARGET_TRIPLE
+ $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
+
+ echo "[AOT] alloc_system"
+ $RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
+
+ echo "[AOT] alloc_example"
+ $RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE
+ $RUN_WRAPPER ./target/out/alloc_example
+
+ echo "[AOT] dst_field_align"
+ # FIXME(antoyo): Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
+ $RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE
+ $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
+
+ echo "[AOT] std_example"
+ std_flags="--cfg feature=\"master\""
+ if (( $gcc_master_branch == 0 )); then
+ std_flags=""
+ fi
+ $RUSTC example/std_example.rs --crate-type bin --target $TARGET_TRIPLE $std_flags
+ $RUN_WRAPPER ./target/out/std_example --target $TARGET_TRIPLE
+
+ echo "[AOT] subslice-patterns-const-eval"
+ $RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
+ $RUN_WRAPPER ./target/out/subslice-patterns-const-eval
+
+ echo "[AOT] track-caller-attribute"
+ $RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
+ $RUN_WRAPPER ./target/out/track-caller-attribute
+
+ echo "[BUILD] mod_bench"
+ $RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
+}
+
+# FIXME(antoyo): the linker gives a multiple-definition error on Linux
+#echo "[BUILD] sysroot in release mode"
+#./build_sysroot/build_sysroot.sh --release
+
+function test_libcore() {
+ pushd build_sysroot/sysroot_src/library/core/tests
+ echo "[TEST] libcore"
+ rm -r ./target || true
+ ../../../../../cargo.sh test
+ popd
+}
+
+#echo
+#echo "[BENCH COMPILE] mod_bench"
+
+#COMPILE_MOD_BENCH_INLINE="$RUSTC example/mod_bench.rs --crate-type bin -Zmir-opt-level=3 -O --crate-name mod_bench_inline"
+#COMPILE_MOD_BENCH_LLVM_0="rustc example/mod_bench.rs --crate-type bin -Copt-level=0 -o target/out/mod_bench_llvm_0 -Cpanic=abort"
+#COMPILE_MOD_BENCH_LLVM_1="rustc example/mod_bench.rs --crate-type bin -Copt-level=1 -o target/out/mod_bench_llvm_1 -Cpanic=abort"
+#COMPILE_MOD_BENCH_LLVM_2="rustc example/mod_bench.rs --crate-type bin -Copt-level=2 -o target/out/mod_bench_llvm_2 -Cpanic=abort"
+#COMPILE_MOD_BENCH_LLVM_3="rustc example/mod_bench.rs --crate-type bin -Copt-level=3 -o target/out/mod_bench_llvm_3 -Cpanic=abort"
+
+## Use 100 runs, because a single compilation doesn't take more than ~150ms, so it isn't very slow
+#hyperfine --runs ${COMPILE_RUNS:-100} "$COMPILE_MOD_BENCH_INLINE" "$COMPILE_MOD_BENCH_LLVM_0" "$COMPILE_MOD_BENCH_LLVM_1" "$COMPILE_MOD_BENCH_LLVM_2" "$COMPILE_MOD_BENCH_LLVM_3"
+
+#echo
+#echo "[BENCH RUN] mod_bench"
+#hyperfine --runs ${RUN_RUNS:-10} ./target/out/mod_bench{,_inline} ./target/out/mod_bench_llvm_*
+
+function extended_sysroot_tests() {
+ if (( $gcc_master_branch == 0 )); then
+ return
+ fi
+
+ pushd rand
+ cargo clean
+ echo "[TEST] rust-random/rand"
+ ../cargo.sh test --workspace
+ popd
+
+ #pushd simple-raytracer
+ #echo "[BENCH COMPILE] ebobby/simple-raytracer"
+ #hyperfine --runs "${RUN_RUNS:-10}" --warmup 1 --prepare "cargo clean" \
+ #"RUSTC=rustc RUSTFLAGS='' cargo build" \
+ #"../cargo.sh build"
+
+ #echo "[BENCH RUN] ebobby/simple-raytracer"
+ #cp ./target/debug/main ./raytracer_cg_gcc
+ #hyperfine --runs "${RUN_RUNS:-10}" ./raytracer_cg_llvm ./raytracer_cg_gcc
+ #popd
+
+ pushd regex
+ echo "[TEST] rust-lang/regex example shootout-regex-dna"
+ cargo clean
+ export CG_RUSTFLAGS="--cap-lints warn" # newer aho_corasick versions throw a deprecation warning
+ # Make sure `[codegen mono items] start` doesn't poison the diff
+ ../cargo.sh build --example shootout-regex-dna
+ cat examples/regexdna-input.txt \
+ | ../cargo.sh run --example shootout-regex-dna \
+ | grep -v "Spawned thread" > res.txt
+ diff -u res.txt examples/regexdna-output.txt
+
+ echo "[TEST] rust-lang/regex tests"
+ ../cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q
+ popd
+}
+
+function test_rustc() {
+ echo
+ echo "[TEST] rust-lang/rust"
+
+ rust_toolchain=$(cat rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/')
+
+ git clone https://github.com/rust-lang/rust.git || true
+ cd rust
+ git fetch
+ git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(')
+ export RUSTFLAGS=
+
+ git apply ../rustc_patches/compile_test.patch || true
+
+ rm config.toml || true
+
+ cat > config.toml <<EOF
+[rust]
+codegen-backends = []
+deny-warnings = false
+
+[build]
+cargo = "$(which cargo)"
+local-rebuild = true
+rustc = "$HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE/bin/rustc"
+EOF
+
+ rustc -V | cut -d' ' -f3 | tr -d '('
+ git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') src/test
+
+ for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do
+ rm $test
+ done
+
+ git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
+
+ rm -r src/test/ui/{abi*,extern/,panic-runtime/,panics/,unsized-locals/,proc-macro/,threads-sendsync/,thinlto/,borrowck/,test*,*lto*.rs} || true
+ for test in $(rg --files-with-matches "catch_unwind|should_panic|thread|lto" src/test/ui); do
+ rm $test
+ done
+ git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice.rs
+ git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs
+
+ RUSTC_ARGS="-Zpanic-abort-tests -Csymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort"
+
+ echo "[TEST] rustc test suite"
+ COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 src/test/ui/ --rustc-args "$RUSTC_ARGS"
+}
+
+function clean_ui_tests() {
+ find rust/build/x86_64-unknown-linux-gnu/test/ui/ -name stamp -exec rm -rf {} \;
+}
+
+function all() {
+ clean
+ mini_tests
+ build_sysroot
+ std_tests
+ test_libcore
+ extended_sysroot_tests
+ test_rustc
+}
+
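+# Run the test function selected earlier in this script (e.g. `all`).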
+$func
diff --git a/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs b/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs
new file mode 100644
index 000000000..8e378177e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs
@@ -0,0 +1,68 @@
+//! The common code for `tests/lang_tests_*.rs`
+use std::{
+ env::{self, current_dir},
+ path::PathBuf,
+ process::Command,
+};
+
+use lang_tester::LangTester;
+use tempfile::TempDir;
+
+/// Controls the compile options (e.g., optimization level) used to compile
+/// test code.
+#[allow(dead_code)] // Each test crate picks one variant
+pub enum Profile {
+ Debug,
+ Release,
+}
+
+pub fn main_inner(profile: Profile) {
+ let tempdir = TempDir::new().expect("temp dir");
+ let current_dir = current_dir().expect("current dir");
+ let current_dir = current_dir.to_str().expect("current dir").to_string();
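+ // `../gcc_path` is expected to contain the directory of the libgccjit to
+ // test against; it is exposed to the dynamic linker below.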
+ let gcc_path = include_str!("../gcc_path");
+ let gcc_path = gcc_path.trim();
+ env::set_var("LD_LIBRARY_PATH", gcc_path);
+ LangTester::new()
+ .test_dir("tests/run")
+ .test_file_filter(|path| path.extension().expect("extension").to_str().expect("to_str") == "rs")
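+ // The expected behaviour is parsed from each test's leading `//` comment
+ // block (e.g. `// Run-time:`, `// status: 0`).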
+ .test_extract(|source| {
+ let lines =
+ source.lines()
+ .skip_while(|l| !l.starts_with("//"))
+ .take_while(|l| l.starts_with("//"))
+ .map(|l| &l[2..])
+ .collect::<Vec<_>>()
+ .join("\n");
+ Some(lines)
+ })
+ .test_cmds(move |path| {
+ // Test command 1: Compile `x.rs` into `tempdir/x`.
+ let mut exe = PathBuf::new();
+ exe.push(&tempdir);
+ exe.push(path.file_stem().expect("file_stem"));
+ let mut compiler = Command::new("rustc");
+ compiler.args(&[
+ &format!("-Zcodegen-backend={}/target/debug/librustc_codegen_gcc.so", current_dir),
+ "--sysroot", &format!("{}/build_sysroot/sysroot/", current_dir),
+ "-Zno-parallel-llvm",
+ "-C", "panic=abort",
+ "-C", "link-arg=-lc",
+ "-o", exe.to_str().expect("to_str"),
+ path.to_str().expect("to_str"),
+ ]);
+ match profile {
+ Profile::Debug => {}
+ Profile::Release => {
+ compiler.args(&[
+ "-C", "opt-level=3",
+ "-C", "lto=no",
+ ]);
+ }
+ }
+ // Test command 2: run `tempdir/x`.
+ let runtime = Command::new(exe);
+ vec![("Compiler", compiler), ("Run-time", runtime)]
+ })
+ .run();
+}
diff --git a/compiler/rustc_codegen_gcc/tests/lang_tests_debug.rs b/compiler/rustc_codegen_gcc/tests/lang_tests_debug.rs
new file mode 100644
index 000000000..96bd74883
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/lang_tests_debug.rs
@@ -0,0 +1,5 @@
+mod lang_tests_common;
+
+fn main() {
+ lang_tests_common::main_inner(lang_tests_common::Profile::Debug);
+}
diff --git a/compiler/rustc_codegen_gcc/tests/lang_tests_release.rs b/compiler/rustc_codegen_gcc/tests/lang_tests_release.rs
new file mode 100644
index 000000000..35d5d60c3
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/lang_tests_release.rs
@@ -0,0 +1,5 @@
+mod lang_tests_common;
+
+fn main() {
+ lang_tests_common::main_inner(lang_tests_common::Profile::Release);
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/abort1.rs b/compiler/rustc_codegen_gcc/tests/run/abort1.rs
new file mode 100644
index 000000000..291af5993
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/abort1.rs
@@ -0,0 +1,51 @@
+// Compiler:
+//
+// Run-time:
+// status: signal
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod intrinsics {
+ use super::Sized;
+
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+/*
+ * Code
+ */
+
+fn test_fail() -> ! {
+ unsafe { intrinsics::abort() };
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ test_fail();
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/abort2.rs b/compiler/rustc_codegen_gcc/tests/run/abort2.rs
new file mode 100644
index 000000000..3c87c5678
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/abort2.rs
@@ -0,0 +1,53 @@
+// Compiler:
+//
+// Run-time:
+// status: signal
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod intrinsics {
+ use super::Sized;
+
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+/*
+ * Code
+ */
+
+fn fail() -> i32 {
+ unsafe { intrinsics::abort() };
+ 0
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ fail();
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/array.rs b/compiler/rustc_codegen_gcc/tests/run/array.rs
new file mode 100644
index 000000000..8b621d8a3
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/array.rs
@@ -0,0 +1,229 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 42
+// 7
+// 5
+// 10
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i16 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn puts(s: *const u8) -> i32;
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // The body here does not matter: the compiler replaces this
+ // function with the real drop glue.
+ drop_in_place(to_drop);
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for isize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+
+/*
+ * Code
+ */
+
+static mut ONE: usize = 1;
+
+fn make_array() -> [u8; 3] {
+ [42, 10, 5]
+}
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+ let array = [42, 7, 5];
+ let array2 = make_array();
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE - 1]);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE]);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE + 1]);
+
+ libc::printf(b"%d\n\0" as *const u8 as *const i8, array2[argc as usize] as u32);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/asm.rs b/compiler/rustc_codegen_gcc/tests/run/asm.rs
new file mode 100644
index 000000000..46abbb553
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/asm.rs
@@ -0,0 +1,172 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+
+#![feature(asm_const, asm_sym)]
+
+use std::arch::{asm, global_asm};
+
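+// Module-level assembly (Intel syntax): `add_asm` returns rdi + rsi, the
+// first two integer arguments of the SysV calling convention.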
+global_asm!("
+ .global add_asm
+add_asm:
+ mov rax, rdi
+ add rax, rsi
+ ret"
+);
+
+extern "C" {
+ fn add_asm(a: i64, b: i64) -> i64;
+}
+
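+/// Byte-wise copy with `rep movsb`; the instruction consumes rdi, rsi and
+/// rcx, hence the `=> _` discarding the outputs.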
+pub unsafe fn mem_cpy(dst: *mut u8, src: *const u8, len: usize) {
+ asm!(
+ "rep movsb",
+ inout("rdi") dst => _,
+ inout("rsi") src => _,
+ inout("rcx") len => _,
+ options(preserves_flags, nostack)
+ );
+}
+
+fn main() {
+ unsafe {
+ asm!("nop");
+ }
+
+ let x: u64;
+ unsafe {
+ asm!("mov $5, {}",
+ out(reg) x,
+ options(att_syntax)
+ );
+ }
+ assert_eq!(x, 5);
+
+ let x: u64;
+ let input: u64 = 42;
+ unsafe {
+ asm!("mov {input}, {output}",
+ "add $1, {output}",
+ input = in(reg) input,
+ output = out(reg) x,
+ options(att_syntax)
+ );
+ }
+ assert_eq!(x, 43);
+
+ let x: u64;
+ unsafe {
+ asm!("mov {}, 6",
+ out(reg) x,
+ );
+ }
+ assert_eq!(x, 6);
+
+ let x: u64;
+ let input: u64 = 42;
+ unsafe {
+ asm!("mov {output}, {input}",
+ "add {output}, 1",
+ input = in(reg) input,
+ output = out(reg) x,
+ );
+ }
+ assert_eq!(x, 43);
+
+ // check inout(reg_class) x
+ let mut x: u64 = 42;
+ unsafe {
+ asm!("add {0}, {0}",
+ inout(reg) x
+ );
+ }
+ assert_eq!(x, 84);
+
+ // check inout("reg") x
+ let mut x: u64 = 42;
+ unsafe {
+ asm!("add r11, r11",
+ inout("r11") x
+ );
+ }
+ assert_eq!(x, 84);
+
+ // check a mix of
+ // in("reg")
+ // inout(class) x => y
+ // inout(class) x
+ let x: u64 = 702;
+ let y: u64 = 100;
+ let res: u64;
+ let mut rem: u64 = 0;
+ unsafe {
+ asm!("div r11",
+ in("r11") y,
+ inout("eax") x => res,
+ inout("edx") rem,
+ );
+ }
+ assert_eq!(res, 7);
+ assert_eq!(rem, 2);
+
+ // check const
+ let mut x: u64 = 42;
+ unsafe {
+ asm!("add {}, {}",
+ inout(reg) x,
+ const 1
+ );
+ }
+ assert_eq!(x, 43);
+
+ // check const (ATT syntax)
+ let mut x: u64 = 42;
+ unsafe {
+ asm!("add {}, {}",
+ const 1,
+ inout(reg) x,
+ options(att_syntax)
+ );
+ }
+ assert_eq!(x, 43);
+
+ // check sym fn
+ extern "C" fn foo() -> u64 { 42 }
+ let x: u64;
+ unsafe {
+ asm!("call {}", sym foo, lateout("rax") x);
+ }
+ assert_eq!(x, 42);
+
+ // check sym fn (ATT syntax)
+ let x: u64;
+ unsafe {
+ asm!("call {}", sym foo, lateout("rax") x, options(att_syntax));
+ }
+ assert_eq!(x, 42);
+
+ // check sym static
+ static FOO: u64 = 42;
+ let x: u64;
+ unsafe {
+ asm!("mov {1}, qword ptr [rip + {0}]", sym FOO, lateout(reg) x);
+ }
+ assert_eq!(x, 42);
+
+ // check sym static (ATT syntax)
+ let x: u64;
+ unsafe {
+ asm!("movq {0}(%rip), {1}", sym FOO, lateout(reg) x, options(att_syntax));
+ }
+ assert_eq!(x, 42);
+
+ assert_eq!(unsafe { add_asm(40, 2) }, 42);
+
+ let array1 = [1u8, 2, 3];
+ let mut array2 = [0u8, 0, 0];
+ unsafe {
+ mem_cpy(array2.as_mut_ptr(), array1.as_ptr(), 3);
+ }
+ assert_eq!(array1, array2);
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/assign.rs b/compiler/rustc_codegen_gcc/tests/run/assign.rs
new file mode 100644
index 000000000..eb38a8a38
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/assign.rs
@@ -0,0 +1,153 @@
+// Compiler:
+//
+// Run-time:
+// stdout: 2
+// 7 8
+// 10
+
+#![allow(unused_attributes)]
+#![feature(auto_traits, lang_items, no_core, start, intrinsics, track_caller)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for *mut i32 {}
+impl Copy for usize {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i32 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn puts(s: *const u8) -> i32;
+ pub fn fflush(stream: *mut i32) -> i32;
+ pub fn printf(format: *const i8, ...) -> i32;
+
+ pub static stdout: *mut i32;
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ libc::fflush(libc::stdout);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+/*
+ * Code
+ */
+
+fn inc_ref(num: &mut isize) -> isize {
+ *num = *num + 5;
+ *num + 1
+}
+
+fn inc(num: isize) -> isize {
+ num + 1
+}
+
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ argc = inc(argc);
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
+ }
+
+ let b = inc_ref(&mut argc);
+ unsafe {
+ libc::printf(b"%ld %ld\n\0" as *const u8 as *const i8, argc, b);
+ }
+
+ argc = 10;
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/closure.rs b/compiler/rustc_codegen_gcc/tests/run/closure.rs
new file mode 100644
index 000000000..7121a5f0d
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/closure.rs
@@ -0,0 +1,230 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: Arg: 1
+// Argument: 1
+// String arg: 1
+// Int argument: 2
+// Both args: 11
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics,
+ unboxed_closures)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn puts(s: *const u8) -> i32;
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // The body here does not matter: the compiler replaces this
+ // function with the real drop glue.
+ drop_in_place(to_drop);
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "fn_once"]
+#[rustc_paren_sugar]
+pub trait FnOnce<Args> {
+ #[lang = "fn_once_output"]
+ type Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+#[lang = "fn_mut"]
+#[rustc_paren_sugar]
+pub trait FnMut<Args>: FnOnce<Args> {
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ intrinsics::abort();
+ }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ let string = "Arg: %d\n\0";
+ let mut closure = || {
+ unsafe {
+ libc::printf(string as *const str as *const i8, argc);
+ }
+ };
+ closure();
+
+ let mut closure = || {
+ unsafe {
+ libc::printf("Argument: %d\n\0" as *const str as *const i8, argc);
+ }
+ };
+ closure();
+
+ let mut closure = |string| {
+ unsafe {
+ libc::printf(string as *const str as *const i8, argc);
+ }
+ };
+ closure("String arg: %d\n\0");
+
+ let mut closure = |arg: isize| {
+ unsafe {
+ libc::printf("Int argument: %d\n\0" as *const str as *const i8, arg);
+ }
+ };
+ closure(argc + 1);
+
+ let mut closure = |string, arg: isize| {
+ unsafe {
+ libc::printf(string as *const str as *const i8, arg);
+ }
+ };
+ closure("Both args: %d\n\0", argc + 10);
+
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/condition.rs b/compiler/rustc_codegen_gcc/tests/run/condition.rs
new file mode 100644
index 000000000..6a2e2d5bb
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/condition.rs
@@ -0,0 +1,320 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: true
+// 1
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for u64 {}
+impl Copy for i32 {}
+impl Copy for u32 {}
+impl Copy for bool {}
+impl Copy for u16 {}
+impl Copy for i16 {}
+impl Copy for char {}
+impl Copy for i8 {}
+impl Copy for u8 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn puts(s: *const u8) -> i32;
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // The body here does not matter: the compiler replaces this
+ // function with the real drop glue.
+ drop_in_place(to_drop);
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for isize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+#[lang = "eq"]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+ fn eq(&self, other: &Rhs) -> bool;
+ fn ne(&self, other: &Rhs) -> bool;
+}
+
+impl PartialEq for u8 {
+ fn eq(&self, other: &u8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u16 {
+ fn eq(&self, other: &u16) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u16) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u32 {
+ fn eq(&self, other: &u32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+
+impl PartialEq for u64 {
+ fn eq(&self, other: &u64) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u64) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for usize {
+ fn eq(&self, other: &usize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &usize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i8 {
+ fn eq(&self, other: &i8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i32 {
+ fn eq(&self, other: &i32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for isize {
+ fn eq(&self, other: &isize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &isize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for char {
+ fn eq(&self, other: &char) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &char) -> bool {
+ (*self) != (*other)
+ }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+ unsafe {
+ if argc == 1 {
+ libc::printf(b"true\n\0" as *const u8 as *const i8);
+ }
+
+ let string =
+ match argc {
+ 1 => b"1\n\0",
+ 2 => b"2\n\0",
+ 3 => b"3\n\0",
+ 4 => b"4\n\0",
+ 5 => b"5\n\0",
+ _ => b"_\n\0",
+ };
+ libc::printf(string as *const u8 as *const i8);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/empty_main.rs b/compiler/rustc_codegen_gcc/tests/run/empty_main.rs
new file mode 100644
index 000000000..c02cfd2a8
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/empty_main.rs
@@ -0,0 +1,39 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+
+#![feature(auto_traits, lang_items, no_core, start)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/exit.rs b/compiler/rustc_codegen_gcc/tests/run/exit.rs
new file mode 100644
index 000000000..956e53dd4
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/exit.rs
@@ -0,0 +1,49 @@
+// Compiler:
+//
+// Run-time:
+// status: 2
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn exit(status: i32);
+ }
+}
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ unsafe {
+ libc::exit(2);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/exit_code.rs b/compiler/rustc_codegen_gcc/tests/run/exit_code.rs
new file mode 100644
index 000000000..eeab35209
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/exit_code.rs
@@ -0,0 +1,39 @@
+// Compiler:
+//
+// Run-time:
+// status: 1
+
+#![feature(auto_traits, lang_items, no_core, start)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ 1
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs b/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs
new file mode 100644
index 000000000..a226fff79
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs
@@ -0,0 +1,223 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 1
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i16 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn puts(s: *const u8) -> i32;
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // The body here does not matter: the compiler replaces this
+ // function with the real drop glue.
+ drop_in_place(to_drop);
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for isize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+
+/*
+ * Code
+ */
+
+fn i16_as_i8(a: i16) -> i8 {
+ a as i8
+}
+
+fn call_func(func: fn(i16) -> i8, param: i16) -> i8 {
+ func(param)
+}
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+ unsafe {
+ let result = call_func(i16_as_i8, argc as i16) as isize;
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, result);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/int.rs b/compiler/rustc_codegen_gcc/tests/run/int.rs
new file mode 100644
index 000000000..2b90e4ae8
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/int.rs
@@ -0,0 +1,340 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+
+#![feature(bench_black_box, const_black_box, core_intrinsics, start)]
+
+#![no_std]
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+ core::intrinsics::abort();
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ use core::hint::black_box;
+
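+ // Each `check!` evaluates its expression twice: once at compile time as a
+ // const and once at run time (the `black_box` calls prevent const-folding),
+ // and asserts that both results agree.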
+ macro_rules! check {
+ ($ty:ty, $expr:expr) => {
+ {
+ const EXPECTED: $ty = $expr;
+ assert_eq!($expr, EXPECTED);
+ }
+ };
+ }
+
+ check!(u32, (2220326408_u32 + black_box(1)) >> (32 - 6));
+
+ /// Generate `check!` tests for integer types at least as wide as 128 bits.
+ macro_rules! check_ops128 {
+ () => {
+ check_ops64!();
+
+ // Shifts.
+ check!(T, VAL1 << black_box(64));
+ check!(T, VAL1 << black_box(81));
+ check!(T, VAL3 << black_box(63));
+ check!(T, VAL3 << black_box(64));
+
+ check!(T, VAL1 >> black_box(64));
+ check!(T, VAL2 >> black_box(64));
+ check!(T, VAL3 >> black_box(64));
+ check!(T, VAL3 >> black_box(81));
+ };
+ }
+
+ /// Generate `check!` tests for integer types at least as wide as 64 bits.
+ macro_rules! check_ops64 {
+ () => {
+ check_ops32!();
+
+ // Shifts.
+ check!(T, VAL2 << black_box(33));
+ check!(T, VAL2 << black_box(49));
+ check!(T, VAL2 << black_box(61));
+ check!(T, VAL2 << black_box(63));
+
+ check!(T, VAL3 << black_box(33));
+ check!(T, VAL3 << black_box(49));
+ check!(T, VAL3 << black_box(61));
+
+ check!(T, VAL1 >> black_box(33));
+ check!(T, VAL1 >> black_box(49));
+ check!(T, VAL1 >> black_box(61));
+ check!(T, VAL1 >> black_box(63));
+
+ check!(T, VAL2 >> black_box(33));
+ check!(T, VAL2 >> black_box(49));
+ check!(T, VAL2 >> black_box(61));
+ check!(T, VAL2 >> black_box(63));
+
+ check!(T, VAL3 >> black_box(33));
+ check!(T, VAL3 >> black_box(49));
+ check!(T, VAL3 >> black_box(61));
+ check!(T, VAL3 >> black_box(63));
+ };
+ }
+
+ /// Generate `check!` tests for integer types at least as wide as 32 bits.
+ macro_rules! check_ops32 {
+ () => {
+ // Shifts.
+ check!(T, VAL2 << black_box(1));
+ check!(T, VAL2 << black_box(0));
+
+ check!(T, VAL3 << black_box(1));
+ check!(T, VAL3 << black_box(0));
+
+ check!(T, VAL1.wrapping_shl(black_box(0)));
+ check!(T, VAL1.wrapping_shl(black_box(1)));
+ check!(T, VAL1.wrapping_shl(black_box(33)));
+ check!(T, VAL1.wrapping_shl(black_box(49)));
+ check!(T, VAL1.wrapping_shl(black_box(61)));
+ check!(T, VAL1.wrapping_shl(black_box(63)));
+ check!(T, VAL1.wrapping_shl(black_box(64)));
+ check!(T, VAL1.wrapping_shl(black_box(81)));
+
+ check!(Option<T>, VAL1.checked_shl(black_box(0)));
+ check!(Option<T>, VAL1.checked_shl(black_box(1)));
+ check!(Option<T>, VAL1.checked_shl(black_box(33)));
+ check!(Option<T>, VAL1.checked_shl(black_box(49)));
+ check!(Option<T>, VAL1.checked_shl(black_box(61)));
+ check!(Option<T>, VAL1.checked_shl(black_box(63)));
+ check!(Option<T>, VAL1.checked_shl(black_box(64)));
+ check!(Option<T>, VAL1.checked_shl(black_box(81)));
+
+ check!(T, VAL1 >> black_box(0));
+ check!(T, VAL1 >> black_box(1));
+
+ check!(T, VAL2 >> black_box(1));
+ check!(T, VAL2 >> black_box(0));
+
+ check!(T, VAL3 >> black_box(0));
+ check!(T, VAL3 >> black_box(1));
+
+ check!(T, VAL1.wrapping_shr(black_box(0)));
+ check!(T, VAL1.wrapping_shr(black_box(1)));
+ check!(T, VAL1.wrapping_shr(black_box(33)));
+ check!(T, VAL1.wrapping_shr(black_box(49)));
+ check!(T, VAL1.wrapping_shr(black_box(61)));
+ check!(T, VAL1.wrapping_shr(black_box(63)));
+ check!(T, VAL1.wrapping_shr(black_box(64)));
+ check!(T, VAL1.wrapping_shr(black_box(81)));
+
+ check!(Option<T>, VAL1.checked_shr(black_box(0)));
+ check!(Option<T>, VAL1.checked_shr(black_box(1)));
+ check!(Option<T>, VAL1.checked_shr(black_box(33)));
+ check!(Option<T>, VAL1.checked_shr(black_box(49)));
+ check!(Option<T>, VAL1.checked_shr(black_box(61)));
+ check!(Option<T>, VAL1.checked_shr(black_box(63)));
+ check!(Option<T>, VAL1.checked_shr(black_box(64)));
+ check!(Option<T>, VAL1.checked_shr(black_box(81)));
+
+ // Casts
+ check!(u64, (VAL1 >> black_box(1)) as u64);
+
+ // Addition.
+ check!(T, VAL1 + black_box(1));
+ check!(T, VAL2 + black_box(1));
+ check!(T, VAL2 + (VAL2 + black_box(1)));
+ check!(T, VAL3 + black_box(1));
+
+ check!(Option<T>, VAL1.checked_add(black_box(1)));
+ check!(Option<T>, VAL2.checked_add(black_box(1)));
+ check!(Option<T>, VAL2.checked_add(VAL2 + black_box(1)));
+ check!(Option<T>, VAL3.checked_add(T::MAX));
+ check!(Option<T>, VAL3.checked_add(T::MIN));
+
+ check!(T, VAL1.wrapping_add(black_box(1)));
+ check!(T, VAL2.wrapping_add(black_box(1)));
+ check!(T, VAL2.wrapping_add(VAL2 + black_box(1)));
+ check!(T, VAL3.wrapping_add(T::MAX));
+ check!(T, VAL3.wrapping_add(T::MIN));
+
+ check!((T, bool), VAL1.overflowing_add(black_box(1)));
+ check!((T, bool), VAL2.overflowing_add(black_box(1)));
+ check!((T, bool), VAL2.overflowing_add(VAL2 + black_box(1)));
+ check!((T, bool), VAL3.overflowing_add(T::MAX));
+ check!((T, bool), VAL3.overflowing_add(T::MIN));
+
+ check!(T, VAL1.saturating_add(black_box(1)));
+ check!(T, VAL2.saturating_add(black_box(1)));
+ check!(T, VAL2.saturating_add(VAL2 + black_box(1)));
+ check!(T, VAL3.saturating_add(T::MAX));
+ check!(T, VAL3.saturating_add(T::MIN));
+
+ // Subtraction
+ check!(T, VAL1 - black_box(1));
+ check!(T, VAL2 - black_box(1));
+ check!(T, VAL3 - black_box(1));
+
+ check!(Option<T>, VAL1.checked_sub(black_box(1)));
+ check!(Option<T>, VAL2.checked_sub(black_box(1)));
+ check!(Option<T>, VAL2.checked_sub(VAL2 + black_box(1)));
+ check!(Option<T>, VAL3.checked_sub(T::MAX));
+ check!(Option<T>, VAL3.checked_sub(T::MIN));
+
+ check!(T, VAL1.wrapping_sub(black_box(1)));
+ check!(T, VAL2.wrapping_sub(black_box(1)));
+ check!(T, VAL2.wrapping_sub(VAL2 + black_box(1)));
+ check!(T, VAL3.wrapping_sub(T::MAX));
+ check!(T, VAL3.wrapping_sub(T::MIN));
+
+ check!((T, bool), VAL1.overflowing_sub(black_box(1)));
+ check!((T, bool), VAL2.overflowing_sub(black_box(1)));
+ check!((T, bool), VAL2.overflowing_sub(VAL2 + black_box(1)));
+ check!((T, bool), VAL3.overflowing_sub(T::MAX));
+ check!((T, bool), VAL3.overflowing_sub(T::MIN));
+
+ check!(T, VAL1.saturating_sub(black_box(1)));
+ check!(T, VAL2.saturating_sub(black_box(1)));
+ check!(T, VAL2.saturating_sub(VAL2 + black_box(1)));
+ check!(T, VAL3.saturating_sub(T::MAX));
+ check!(T, VAL3.saturating_sub(T::MIN));
+
+ // Multiplication
+ check!(T, VAL1 * black_box(2));
+ check!(T, VAL1 * (black_box(1) + VAL2));
+ check!(T, VAL2 * black_box(2));
+ check!(T, VAL2 * (black_box(1) + VAL2));
+ check!(T, VAL3 * black_box(1));
+ check!(T, VAL4 * black_box(2));
+ check!(T, VAL5 * black_box(2));
+
+ check!(Option<T>, VAL1.checked_mul(black_box(2)));
+ check!(Option<T>, VAL1.checked_mul(black_box(1) + VAL2));
+ check!(Option<T>, VAL3.checked_mul(VAL3));
+ check!(Option<T>, VAL4.checked_mul(black_box(2)));
+ check!(Option<T>, VAL5.checked_mul(black_box(2)));
+
+ check!(T, VAL1.wrapping_mul(black_box(2)));
+ check!(T, VAL1.wrapping_mul((black_box(1) + VAL2)));
+ check!(T, VAL3.wrapping_mul(VAL3));
+ check!(T, VAL4.wrapping_mul(black_box(2)));
+ check!(T, VAL5.wrapping_mul(black_box(2)));
+
+ check!((T, bool), VAL1.overflowing_mul(black_box(2)));
+ check!((T, bool), VAL1.overflowing_mul(black_box(1) + VAL2));
+ check!((T, bool), VAL3.overflowing_mul(VAL3));
+ check!((T, bool), VAL4.overflowing_mul(black_box(2)));
+ check!((T, bool), VAL5.overflowing_mul(black_box(2)));
+
+ check!(T, VAL1.saturating_mul(black_box(2)));
+ check!(T, VAL1.saturating_mul(black_box(1) + VAL2));
+ check!(T, VAL3.saturating_mul(VAL3));
+ check!(T, VAL4.saturating_mul(black_box(2)));
+ check!(T, VAL5.saturating_mul(black_box(2)));
+
+ // Division.
+ check!(T, VAL1 / black_box(2));
+ check!(T, VAL1 / black_box(3));
+
+ check!(T, VAL2 / black_box(2));
+ check!(T, VAL2 / black_box(3));
+
+ check!(T, VAL3 / black_box(2));
+ check!(T, VAL3 / black_box(3));
+ check!(T, VAL3 / (black_box(1) + VAL4));
+ check!(T, VAL3 / (black_box(1) + VAL2));
+
+ check!(T, VAL4 / black_box(2));
+ check!(T, VAL4 / black_box(3));
+
+ check!(Option<T>, VAL1.checked_div(black_box(2)));
+ check!(Option<T>, VAL1.checked_div(black_box(1) + VAL2));
+ check!(Option<T>, VAL3.checked_div(VAL3));
+ check!(Option<T>, VAL4.checked_div(black_box(2)));
+ check!(Option<T>, VAL5.checked_div(black_box(2)));
+ check!(Option<T>, (T::MIN).checked_div(black_box(0 as T).wrapping_sub(1)));
+ check!(Option<T>, VAL5.checked_div(black_box(0))); // var5 / 0
+
+ check!(T, VAL1.wrapping_div(black_box(2)));
+ check!(T, VAL1.wrapping_div(black_box(1) + VAL2));
+ check!(T, VAL3.wrapping_div(VAL3));
+ check!(T, VAL4.wrapping_div(black_box(2)));
+ check!(T, VAL5.wrapping_div(black_box(2)));
+ check!(T, (T::MIN).wrapping_div(black_box(0 as T).wrapping_sub(1)));
+
+ check!((T, bool), VAL1.overflowing_div(black_box(2)));
+ check!((T, bool), VAL1.overflowing_div(black_box(1) + VAL2));
+ check!((T, bool), VAL3.overflowing_div(VAL3));
+ check!((T, bool), VAL4.overflowing_div(black_box(2)));
+ check!((T, bool), VAL5.overflowing_div(black_box(2)));
+ check!((T, bool), (T::MIN).overflowing_div(black_box(0 as T).wrapping_sub(1)));
+
+ check!(T, VAL1.saturating_div(black_box(2)));
+ check!(T, VAL1.saturating_div((black_box(1) + VAL2)));
+ check!(T, VAL3.saturating_div(VAL3));
+ check!(T, VAL4.saturating_div(black_box(2)));
+ check!(T, VAL5.saturating_div(black_box(2)));
+ check!(T, (T::MIN).saturating_div((0 as T).wrapping_sub(black_box(1))));
+ };
+ }
+
+ {
+ type T = u32;
+ const VAL1: T = 14162_u32;
+ const VAL2: T = 14556_u32;
+ const VAL3: T = 323656954_u32;
+ const VAL4: T = 2023651954_u32;
+ const VAL5: T = 1323651954_u32;
+ check_ops32!();
+ }
+
+ {
+ type T = i32;
+ const VAL1: T = 13456_i32;
+ const VAL2: T = 10475_i32;
+ const VAL3: T = 923653954_i32;
+ const VAL4: T = 993198738_i32;
+ const VAL5: T = 1023653954_i32;
+ check_ops32!();
+ }
+
+ {
+ type T = u64;
+ const VAL1: T = 134217856_u64;
+ const VAL2: T = 104753732_u64;
+ const VAL3: T = 12323651988970863954_u64;
+ const VAL4: T = 7323651988970863954_u64;
+ const VAL5: T = 8323651988970863954_u64;
+ check_ops64!();
+ }
+
+ {
+ type T = i64;
+ const VAL1: T = 134217856_i64;
+ const VAL2: T = 104753732_i64;
+ const VAL3: T = 6323651988970863954_i64;
+ const VAL4: T = 2323651988970863954_i64;
+ const VAL5: T = 3323651988970863954_i64;
+ check_ops64!();
+ }
+
+ {
+ type T = u128;
+ const VAL1: T = 134217856_u128;
+ const VAL2: T = 10475372733397991552_u128;
+ const VAL3: T = 193236519889708027473620326106273939584_u128;
+ const VAL4: T = 123236519889708027473620326106273939584_u128;
+ const VAL5: T = 153236519889708027473620326106273939584_u128;
+ check_ops128!();
+ }
+ {
+ type T = i128;
+ const VAL1: T = 134217856_i128;
+ const VAL2: T = 10475372733397991552_i128;
+ const VAL3: T = 83236519889708027473620326106273939584_i128;
+ const VAL4: T = 63236519889708027473620326106273939584_i128;
+ const VAL5: T = 73236519889708027473620326106273939584_i128;
+ check_ops128!();
+ }
+
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/int_overflow.rs b/compiler/rustc_codegen_gcc/tests/run/int_overflow.rs
new file mode 100644
index 000000000..ea2c5add9
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/int_overflow.rs
@@ -0,0 +1,140 @@
+// Compiler:
+//
+// Run-time:
+// stdout: Success
+// status: signal
+
+#![allow(unused_attributes)]
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for *mut i32 {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn puts(s: *const u8) -> i32;
+ pub fn fflush(stream: *mut i32) -> i32;
+
+ pub static stdout: *mut i32;
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ // Panicking is expected iff overflow checking is enabled.
+ #[cfg(debug_assertions)]
+ libc::puts("Success\0" as *const str as *const u8);
+ libc::fflush(libc::stdout);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ let int = 9223372036854775807isize;
+ let int = int + argc; // overflow
+
+ // If overflow checking is disabled, we should reach here.
+ #[cfg(not(debug_assertions))]
+ unsafe {
+ libc::puts("Success\0" as *const str as *const u8);
+ libc::fflush(libc::stdout);
+ intrinsics::abort();
+ }
+
+ int
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/mut_ref.rs b/compiler/rustc_codegen_gcc/tests/run/mut_ref.rs
new file mode 100644
index 000000000..52de20021
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/mut_ref.rs
@@ -0,0 +1,165 @@
+
+// Compiler:
+//
+// Run-time:
+// stdout: 2
+// 7
+// 6
+// 11
+
+#![allow(unused_attributes)]
+#![feature(auto_traits, lang_items, no_core, start, intrinsics, track_caller)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for *mut i32 {}
+impl Copy for usize {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i32 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn puts(s: *const u8) -> i32;
+ pub fn fflush(stream: *mut i32) -> i32;
+ pub fn printf(format: *const i8, ...) -> i32;
+
+ pub static stdout: *mut i32;
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ libc::fflush(libc::stdout);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+/*
+ * Code
+ */
+
+struct Test {
+ field: isize,
+}
+
+fn test(num: isize) -> Test {
+ Test {
+ field: num + 1,
+ }
+}
+
+fn update_num(num: &mut isize) {
+ *num = *num + 5;
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ let mut test = test(argc);
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field);
+ }
+ update_num(&mut test.field);
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field);
+ }
+
+ update_num(&mut argc);
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
+ }
+
+ let refe = &mut argc;
+ *refe = *refe + 5;
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
+ }
+
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/operations.rs b/compiler/rustc_codegen_gcc/tests/run/operations.rs
new file mode 100644
index 000000000..e078b37b4
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/operations.rs
@@ -0,0 +1,221 @@
+// Compiler:
+//
+// Run-time:
+// stdout: 41
+// 39
+// 10
+
+#![allow(unused_attributes)]
+#![feature(auto_traits, lang_items, no_core, start, intrinsics, arbitrary_self_types)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for *mut i32 {}
+impl Copy for usize {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i16 {}
+impl Copy for i32 {}
+
+#[lang = "deref"]
+pub trait Deref {
+ type Target: ?Sized;
+
+ fn deref(&self) -> &Self::Target;
+}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn puts(s: *const u8) -> i32;
+ pub fn fflush(stream: *mut i32) -> i32;
+
+ pub static stdout: *mut i32;
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ libc::fflush(libc::stdout);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for isize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+#[lang = "mul"]
+pub trait Mul<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn mul(self, rhs: RHS) -> Self::Output;
+}
+
+impl Mul for u8 {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+impl Mul for usize {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+impl Mul for isize {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, 40 + argc);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, 40 - argc);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, 10 * argc);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs b/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs
new file mode 100644
index 000000000..6ac099ea1
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs
@@ -0,0 +1,222 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 1
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i16 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn puts(s: *const u8) -> i32;
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // The body here does not matter: the compiler replaces this
+ // function with the real drop glue.
+ drop_in_place(to_drop);
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for isize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+
+/*
+ * Code
+ */
+
+static mut ONE: usize = 1;
+
+fn make_array() -> [u8; 3] {
+ [42, 10, 5]
+}
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+ unsafe {
+ let ptr = ONE as *mut usize;
+ let value = ptr as usize;
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, value);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/return-tuple.rs b/compiler/rustc_codegen_gcc/tests/run/return-tuple.rs
new file mode 100644
index 000000000..6fa10dca0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/return-tuple.rs
@@ -0,0 +1,72 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 10
+// 10
+// 42
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+#[lang = "copy"]
+pub unsafe trait Copy {}
+
+unsafe impl Copy for bool {}
+unsafe impl Copy for u8 {}
+unsafe impl Copy for u16 {}
+unsafe impl Copy for u32 {}
+unsafe impl Copy for u64 {}
+unsafe impl Copy for usize {}
+unsafe impl Copy for i8 {}
+unsafe impl Copy for i16 {}
+unsafe impl Copy for i32 {}
+unsafe impl Copy for isize {}
+unsafe impl Copy for f32 {}
+unsafe impl Copy for char {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+}
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+/*
+ * Code
+ */
+
+fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) {
+ (
+ a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8,
+ b as u32,
+ )
+}
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+ let (a, b, c, d, e, f, g, h, i, j) = int_cast(10, 42);
+ unsafe {
+ libc::printf(b"%d\n\0" as *const u8 as *const i8, c);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, d);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, j);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/slice.rs b/compiler/rustc_codegen_gcc/tests/run/slice.rs
new file mode 100644
index 000000000..ad9258ed0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/slice.rs
@@ -0,0 +1,128 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 5
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u32 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+mod intrinsics {
+ use super::Sized;
+
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+/*
+ * Code
+ */
+
+static mut TWO: usize = 2;
+
+fn index_slice(s: &[u32]) -> u32 {
+ unsafe {
+ s[TWO]
+ }
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ let array = [42, 7, 5];
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, index_slice(&array));
+ }
+ 0
+}
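
slice.rs is mostly a test of unsizing: the call `index_slice(&array)` compiles only because the `Unsize`/`CoerceUnsized` lang items let `&[u32; 3]` coerce to `&[u32]`, and the indexing in `s[TWO]` is bounds-checked through `panic_bounds_check`. The same coercion in ordinary Rust, as a standalone sketch:

fn index_slice(s: &[u32]) -> u32 {
    // An out-of-range index here would route through panic_bounds_check.
    s[2]
}

fn main() {
    let array: [u32; 3] = [42, 7, 5];
    // `&array` has type `&[u32; 3]` and coerces to `&[u32]` at the call.
    println!("{}", index_slice(&array)); // prints 5
}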
diff --git a/compiler/rustc_codegen_gcc/tests/run/static.rs b/compiler/rustc_codegen_gcc/tests/run/static.rs
new file mode 100644
index 000000000..294add968
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/static.rs
@@ -0,0 +1,112 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 10
+// 14
+// 1
+// 12
+// 12
+// 1
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "destruct"]
+pub trait Destruct {}
+
+#[lang = "drop"]
+pub trait Drop {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod intrinsics {
+ use super::Sized;
+
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+}
+
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {}
+
+#[lang = "structural_teq"]
+pub trait StructuralEq {}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+/*
+ * Code
+ */
+
+struct Test {
+ field: isize,
+}
+
+struct WithRef {
+ refe: &'static Test,
+}
+
+static mut CONSTANT: isize = 10;
+
+static mut TEST: Test = Test {
+ field: 12,
+};
+
+static mut TEST2: Test = Test {
+ field: 14,
+};
+
+static mut WITH_REF: WithRef = WithRef {
+ refe: unsafe { &TEST },
+};
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, CONSTANT);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST2.field);
+ TEST2.field = argc;
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST2.field);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, WITH_REF.refe.field);
+ WITH_REF.refe = &TEST2;
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST.field);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, WITH_REF.refe.field);
+ }
+ 0
+}
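
static.rs covers reads of a `static mut`, a run-time write (`TEST2.field = argc`, so the third line of output equals argc, i.e. 1 when the binary is run without arguments), and a static whose initializer takes a `&'static` reference to another static, which can later be re-pointed. A standalone sketch of the reference-between-statics pattern, using immutable statics for brevity:

struct Test {
    field: isize,
}

static TEST: Test = Test { field: 12 };

// A static may hold a `&'static` reference to another static; the
// initializer is evaluated at compile time.
static WITH_REF: &Test = &TEST;

fn main() {
    println!("{}", WITH_REF.field); // prints 12
}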
diff --git a/compiler/rustc_codegen_gcc/tests/run/structs.rs b/compiler/rustc_codegen_gcc/tests/run/structs.rs
new file mode 100644
index 000000000..6c8884855
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/structs.rs
@@ -0,0 +1,70 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 1
+// 2
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+}
+
+/*
+ * Code
+ */
+
+struct Test {
+ field: isize,
+}
+
+struct Two {
+ two: isize,
+}
+
+fn one() -> isize {
+ 1
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ let test = Test {
+ field: one(),
+ };
+ let two = Two {
+ two: 2,
+ };
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, two.two);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/tuple.rs b/compiler/rustc_codegen_gcc/tests/run/tuple.rs
new file mode 100644
index 000000000..0b670bf26
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/tuple.rs
@@ -0,0 +1,51 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 3
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ let test: (isize, isize, isize) = (3, 1, 4);
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.0);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tools/generate_intrinsics.py b/compiler/rustc_codegen_gcc/tools/generate_intrinsics.py
new file mode 100644
index 000000000..849c6e9c9
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tools/generate_intrinsics.py
@@ -0,0 +1,238 @@
+import json
+import os
+import re
+import sys
+import subprocess
+from os import walk
+
+
+def run_command(command, cwd=None):
+ p = subprocess.Popen(command, cwd=cwd)
+ if p.wait() != 0:
+ print("command `{}` failed...".format(" ".join(command)))
+ sys.exit(1)
+
+
+def clone_repository(repo_name, path, repo_url, sub_path=None):
+ if os.path.exists(path):
+ while True:
+ choice = input("There is already a `{}` folder, do you want to update it? [y/N]".format(path))
+ if choice == "" or choice.lower() == "n":
+ print("Skipping repository update.")
+ return
+ elif choice.lower() == "y":
+ print("Updating repository...")
+ run_command(["git", "pull", "origin"], cwd=path)
+ return
+ else:
+ print("Didn't understand answer...")
+ print("Cloning {} repository...".format(repo_name))
+ if sub_path is None:
+ run_command(["git", "clone", repo_url, "--depth", "1", path])
+ else:
+ run_command(["git", "clone", repo_url, "--filter=tree:0", "--no-checkout", path])
+ run_command(["git", "sparse-checkout", "init"], cwd=path)
+ run_command(["git", "sparse-checkout", "set", "add", sub_path], cwd=path)
+ run_command(["git", "checkout"], cwd=path)
+
+
+def append_intrinsic(array, intrinsic_name, translation):
+ array.append((intrinsic_name, translation))
+
+
+def extract_intrinsics(intrinsics, file):
+ print("Extracting intrinsics from `{}`...".format(file))
+ with open(file, "r", encoding="utf8") as f:
+ content = f.read()
+
+ lines = content.splitlines()
+ pos = 0
+ current_arch = None
+ while pos < len(lines):
+ line = lines[pos].strip()
+ if line.startswith("let TargetPrefix ="):
+ current_arch = line.split('"')[1].strip()
+ if len(current_arch) == 0:
+ current_arch = None
+ elif current_arch is None:
+ pass
+ elif line == "}":
+ current_arch = None
+ elif line.startswith("def "):
+ content = ""
+ while not content.endswith(";") and not content.endswith("}") and pos < len(lines):
+ line = lines[pos].split(" // ")[0].strip()
+ content += line
+ pos += 1
+ entries = re.findall(r'GCCBuiltin<"(\w+)">', content)
+ if len(entries) > 0:
+ intrinsic = content.split("def ")[1].strip().split(":")[0].strip()
+ intrinsic = intrinsic.split("_")
+ if len(intrinsic) < 2 or intrinsic[0] != "int":
+ continue
+ intrinsic[0] = "llvm"
+ intrinsic = ".".join(intrinsic)
+ if current_arch not in intrinsics:
+ intrinsics[current_arch] = []
+ for entry in entries:
+ append_intrinsic(intrinsics[current_arch], intrinsic, entry)
+ continue
+ pos += 1
+ continue
+ print("Done!")
+
+
+def extract_intrinsics_from_llvm(llvm_path, intrinsics):
+ files = []
+ intrinsics_path = os.path.join(llvm_path, "llvm/include/llvm/IR")
+ for dirpath, _dirnames, filenames in walk(intrinsics_path):
+ files.extend([os.path.join(dirpath, f) for f in filenames if f.endswith(".td")])
+
+ for file in files:
+ extract_intrinsics(intrinsics, file)
+
+
+def append_translation(json_data, p, array):
+ it = json_data["index"][p]
+ content = it["docs"].split('`')
+ if len(content) != 5:
+ return
+ append_intrinsic(array, content[1], content[3])
+
+
+def extract_intrinsics_from_llvmint(llvmint, intrinsics):
+ archs = [
+ "AMDGPU",
+ "aarch64",
+ "arm",
+ "cuda",
+ "hexagon",
+ "mips",
+ "nvvm",
+ "ppc",
+ "ptx",
+ "x86",
+ "xcore",
+ ]
+
+ json_file = os.path.join(llvmint, "target/doc/llvmint.json")
+ # We need to regenerate the documentation!
+ run_command(
+ ["cargo", "rustdoc", "--", "-Zunstable-options", "--output-format", "json"],
+ cwd=llvmint,
+ )
+ with open(json_file, "r", encoding="utf8") as f:
+ json_data = json.loads(f.read())
+ for p in json_data["paths"]:
+ it = json_data["paths"][p]
+ if it["crate_id"] != 0:
+ # This is from an external crate.
+ continue
+ if it["kind"] != "function":
+ # We're only looking for functions.
+ continue
+ # if len(it["path"]) == 2:
+ # # This is a "general" intrinsic, not bound to a specific arch.
+ # append_translation(json_data, p, general)
+ # continue
+ if len(it["path"]) != 3 or it["path"][1] not in archs:
+ continue
+ arch = it["path"][1]
+ if arch not in intrinsics:
+ intrinsics[arch] = []
+ append_translation(json_data, p, intrinsics[arch])
+
+
+def fill_intrinsics(intrinsics, from_intrinsics, all_intrinsics):
+ for arch in from_intrinsics:
+ if arch not in intrinsics:
+ intrinsics[arch] = []
+ for entry in from_intrinsics[arch]:
+ if entry[0] in all_intrinsics:
+ if all_intrinsics[entry[0]] == entry[1]:
+ # This is a "full" duplicate, both the LLVM instruction and the GCC
+ # translation are the same.
+ continue
+ intrinsics[arch].append((entry[0], entry[1], True))
+ else:
+ intrinsics[arch].append((entry[0], entry[1], False))
+ all_intrinsics[entry[0]] = entry[1]
+
+
+def update_intrinsics(llvm_path, llvmint, llvmint2):
+ intrinsics_llvm = {}
+ intrinsics_llvmint = {}
+ all_intrinsics = {}
+
+ extract_intrinsics_from_llvm(llvm_path, intrinsics_llvm)
+ extract_intrinsics_from_llvmint(llvmint, intrinsics_llvmint)
+ extract_intrinsics_from_llvmint(llvmint2, intrinsics_llvmint)
+
+ intrinsics = {}
+ # We give priority to translations from LLVM over the ones from llvmint.
+ fill_intrinsics(intrinsics, intrinsics_llvm, all_intrinsics)
+ fill_intrinsics(intrinsics, intrinsics_llvmint, all_intrinsics)
+
+ archs = [arch for arch in intrinsics]
+ archs.sort()
+
+ output_file = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "../src/intrinsic/archs.rs",
+ )
+ print("Updating content of `{}`...".format(output_file))
+ with open(output_file, "w", encoding="utf8") as out:
+ out.write("// File generated by `rustc_codegen_gcc/tools/generate_intrinsics.py`\n")
+ out.write("// DO NOT EDIT IT!\n")
+ out.write("match name {\n")
+ for arch in archs:
+ if len(intrinsics[arch]) == 0:
+ continue
+ intrinsics[arch].sort(key=lambda x: (x[0], x[2]))
+ out.write(' // {}\n'.format(arch))
+ for entry in intrinsics[arch]:
+ if entry[2]: # if it is a duplicate
+ out.write(' // [DUPLICATE]: "{}" => "{}",\n'.format(entry[0], entry[1]))
+ else:
+ out.write(' "{}" => "{}",\n'.format(entry[0], entry[1]))
+ out.write(' _ => unimplemented!("***** unsupported LLVM intrinsic {}", name),\n')
+ out.write("}\n")
+ print("Done!")
+
+
+def main():
+ llvm_path = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "llvm-project",
+ )
+ llvmint_path = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "llvmint",
+ )
+ llvmint2_path = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "llvmint-2",
+ )
+
+ # First, we clone the LLVM repository if it's not already here.
+ clone_repository(
+ "llvm-project",
+ llvm_path,
+ "https://github.com/llvm/llvm-project",
+ sub_path="llvm/include/llvm/IR",
+ )
+ clone_repository(
+ "llvmint",
+ llvmint_path,
+ "https://github.com/GuillaumeGomez/llvmint",
+ )
+ clone_repository(
+ "llvmint2",
+ llvmint2_path,
+ "https://github.com/antoyo/llvmint",
+ )
+ update_intrinsics(llvm_path, llvmint_path, llvmint2_path)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
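
For reference, the file this script regenerates, src/intrinsic/archs.rs, is the body of a single `match` mapping LLVM intrinsic names to GCC builtin names, with conflicting llvmint translations kept as `[DUPLICATE]` comments. A sketch of its shape, wrapped in a function so it compiles on its own (the mapping shown is illustrative, not taken from the real table):

// Hypothetical entry; real entries come from the LLVM .td files and
// the llvmint rustdoc JSON, with LLVM translations taking priority.
fn llvm_to_gcc_builtin(name: &str) -> &'static str {
    match name {
        // x86
        "llvm.x86.sse2.pause" => "__builtin_ia32_pause",
        _ => unimplemented!("***** unsupported LLVM intrinsic {}", name),
    }
}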