summaryrefslogtreecommitdiffstats
path: root/src/tools/compiletest
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 12:02:58 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 12:02:58 +0000
commit698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree173a775858bd501c378080a10dca74132f05bc50 /src/tools/compiletest
parentInitial commit. (diff)
downloadrustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz
rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip
Adding upstream version 1.64.0+dfsg1.upstream/1.64.0+dfsg1
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/tools/compiletest')
-rw-r--r--src/tools/compiletest/Cargo.toml26
-rw-r--r--src/tools/compiletest/src/common.rs473
-rw-r--r--src/tools/compiletest/src/compute_diff.rs157
-rw-r--r--src/tools/compiletest/src/errors.rs179
-rw-r--r--src/tools/compiletest/src/header.rs1060
-rw-r--r--src/tools/compiletest/src/header/tests.rs283
-rw-r--r--src/tools/compiletest/src/json.rs321
-rw-r--r--src/tools/compiletest/src/main.rs1014
-rw-r--r--src/tools/compiletest/src/raise_fd_limit.rs54
-rw-r--r--src/tools/compiletest/src/read2.rs317
-rw-r--r--src/tools/compiletest/src/read2/tests.rs123
-rw-r--r--src/tools/compiletest/src/runtest.rs3948
-rw-r--r--src/tools/compiletest/src/runtest/debugger.rs122
-rw-r--r--src/tools/compiletest/src/runtest/tests.rs50
-rw-r--r--src/tools/compiletest/src/tests.rs78
-rw-r--r--src/tools/compiletest/src/util.rs259
-rw-r--r--src/tools/compiletest/src/util/tests.rs51
17 files changed, 8515 insertions, 0 deletions
diff --git a/src/tools/compiletest/Cargo.toml b/src/tools/compiletest/Cargo.toml
new file mode 100644
index 000000000..23e495399
--- /dev/null
+++ b/src/tools/compiletest/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "compiletest"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+colored = "2"
+diff = "0.1.10"
+unified-diff = "0.2.1"
+getopts = "0.2"
+tracing = "0.1"
+tracing-subscriber = { version = "0.3.3", default-features = false, features = ["fmt", "env-filter", "smallvec", "parking_lot", "ansi"] }
+regex = "1.0"
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+rustfix = "0.6.0"
+lazy_static = "1.0"
+walkdir = "2"
+glob = "0.3.0"
+
+[target.'cfg(unix)'.dependencies]
+libc = "0.2"
+
+[target.'cfg(windows)'.dependencies]
+miow = "0.3"
+winapi = { version = "0.3", features = ["winerror"] }
diff --git a/src/tools/compiletest/src/common.rs b/src/tools/compiletest/src/common.rs
new file mode 100644
index 000000000..be81ff881
--- /dev/null
+++ b/src/tools/compiletest/src/common.rs
@@ -0,0 +1,473 @@
+pub use self::Mode::*;
+
+use std::ffi::OsString;
+use std::fmt;
+use std::path::{Path, PathBuf};
+use std::str::FromStr;
+
+use crate::util::PathBufExt;
+use test::ColorConfig;
+
/// The test-suite mode: controls how compiletest compiles, runs, and
/// checks each test.
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum Mode {
    RunPassValgrind,
    Pretty,
    DebugInfo,
    Codegen,
    Rustdoc,
    RustdocJson,
    CodegenUnits,
    Incremental,
    RunMake,
    Ui,
    JsDocTest,
    MirOpt,
    Assembly,
}

impl Mode {
    /// Suffix used to keep a mode's output segregated from other modes.
    ///
    /// Pretty-printing tests could run concurrently with other suites,
    /// and if they do, they need to keep their output segregated; every
    /// other mode gets no suffix.
    pub fn disambiguator(self) -> &'static str {
        if self == Mode::Pretty { ".pretty" } else { "" }
    }
}

impl FromStr for Mode {
    type Err = ();

    /// Parses a mode from its command-line name (e.g. `"ui"`).
    fn from_str(s: &str) -> Result<Mode, ()> {
        let mode = match s {
            "run-pass-valgrind" => Mode::RunPassValgrind,
            "pretty" => Mode::Pretty,
            "debuginfo" => Mode::DebugInfo,
            "codegen" => Mode::Codegen,
            "rustdoc" => Mode::Rustdoc,
            "rustdoc-json" => Mode::RustdocJson,
            "codegen-units" => Mode::CodegenUnits,
            "incremental" => Mode::Incremental,
            "run-make" => Mode::RunMake,
            "ui" => Mode::Ui,
            "js-doc-test" => Mode::JsDocTest,
            "mir-opt" => Mode::MirOpt,
            "assembly" => Mode::Assembly,
            _ => return Err(()),
        };
        Ok(mode)
    }
}

impl fmt::Display for Mode {
    /// Formats the mode as its command-line name (the inverse of `FromStr`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match *self {
            Mode::RunPassValgrind => "run-pass-valgrind",
            Mode::Pretty => "pretty",
            Mode::DebugInfo => "debuginfo",
            Mode::Codegen => "codegen",
            Mode::Rustdoc => "rustdoc",
            Mode::RustdocJson => "rustdoc-json",
            Mode::CodegenUnits => "codegen-units",
            Mode::Incremental => "incremental",
            Mode::RunMake => "run-make",
            Mode::Ui => "ui",
            Mode::JsDocTest => "js-doc-test",
            Mode::MirOpt => "mir-opt",
            Mode::Assembly => "assembly",
        };
        f.write_str(name)
    }
}
+
/// How far a test should get: type-check only, compile to a binary, or
/// compile and execute.
#[derive(Clone, Copy, PartialEq, Debug, Hash)]
pub enum PassMode {
    Check,
    Build,
    Run,
}

impl FromStr for PassMode {
    type Err = ();

    /// Parses a pass mode as written in a test header ("check", "build"
    /// or "run").
    fn from_str(s: &str) -> Result<Self, ()> {
        let mode = match s {
            "check" => PassMode::Check,
            "build" => PassMode::Build,
            "run" => PassMode::Run,
            _ => return Err(()),
        };
        Ok(mode)
    }
}

impl fmt::Display for PassMode {
    /// Formats the pass mode as its header name (the inverse of `FromStr`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            PassMode::Check => "check",
            PassMode::Build => "build",
            PassMode::Run => "run",
        })
    }
}
+
/// For tests expected to fail, the phase in which the failure must occur.
/// `PartialOrd` is derived so later phases compare greater than earlier ones.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd)]
pub enum FailMode {
    Check,
    Build,
    Run,
}
+
/// An alternate configuration whose UI output is compared against
/// mode-specific reference files instead of the default ones.
#[derive(Clone, Debug, PartialEq)]
pub enum CompareMode {
    Polonius,
    Chalk,
    SplitDwarf,
    SplitDwarfSingle,
}

impl CompareMode {
    /// The mode's name, as used on the command line and in file names.
    pub(crate) fn to_str(&self) -> &'static str {
        match self {
            CompareMode::Polonius => "polonius",
            CompareMode::Chalk => "chalk",
            CompareMode::SplitDwarf => "split-dwarf",
            CompareMode::SplitDwarfSingle => "split-dwarf-single",
        }
    }

    /// Parses a `--compare-mode` value, panicking on an unknown name
    /// (this is only called on operator-supplied command-line input).
    pub fn parse(s: String) -> CompareMode {
        match &*s {
            "polonius" => CompareMode::Polonius,
            "chalk" => CompareMode::Chalk,
            "split-dwarf" => CompareMode::SplitDwarf,
            "split-dwarf-single" => CompareMode::SplitDwarfSingle,
            other => panic!("unknown --compare-mode option: {}", other),
        }
    }
}
+
/// The debugger a debuginfo test runs under.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Debugger {
    Cdb,
    Gdb,
    Lldb,
}

impl Debugger {
    /// The lowercase name of the debugger.
    fn to_str(&self) -> &'static str {
        match self {
            Debugger::Cdb => "cdb",
            Debugger::Gdb => "gdb",
            Debugger::Lldb => "lldb",
        }
    }
}

impl fmt::Display for Debugger {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.to_str())
    }
}
+
/// The panic strategy the target is built with (see `Config::target_panic`).
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PanicStrategy {
    Unwind,
    Abort,
}
+
/// Configuration for compiletest
#[derive(Debug, Clone)]
pub struct Config {
    /// `true` to overwrite stderr/stdout files instead of complaining about changes in output.
    pub bless: bool,

    /// The library paths required for running the compiler.
    pub compile_lib_path: PathBuf,

    /// The library paths required for running compiled programs.
    pub run_lib_path: PathBuf,

    /// The rustc executable.
    pub rustc_path: PathBuf,

    /// The rustdoc executable.
    pub rustdoc_path: Option<PathBuf>,

    /// The rust-demangler executable.
    pub rust_demangler_path: Option<PathBuf>,

    /// The Python executable to use for LLDB and htmldocck.
    pub python: String,

    /// The jsondocck executable.
    pub jsondocck_path: Option<String>,

    /// The LLVM `FileCheck` binary path.
    pub llvm_filecheck: Option<PathBuf>,

    /// Path to LLVM's bin directory.
    pub llvm_bin_dir: Option<PathBuf>,

    /// The valgrind path.
    pub valgrind_path: Option<String>,

    /// Whether to fail if we can't run run-pass-valgrind tests under valgrind
    /// (or, alternatively, to silently run them like regular run-pass tests).
    pub force_valgrind: bool,

    /// The path to the Clang executable to run Clang-based tests with. If
    /// `None` then these tests will be ignored.
    pub run_clang_based_tests_with: Option<String>,

    /// The directory containing the tests to run.
    pub src_base: PathBuf,

    /// The directory where programs should be built.
    pub build_base: PathBuf,

    /// The name of the stage being built (stage1, etc).
    pub stage_id: String,

    /// The test mode, e.g. ui or debuginfo.
    pub mode: Mode,

    /// The test suite (essentially which directory is running, but without the
    /// directory prefix such as src/test).
    pub suite: String,

    /// The debugger to use in debuginfo mode. Unset otherwise.
    pub debugger: Option<Debugger>,

    /// Run ignored tests.
    pub run_ignored: bool,

    /// Only run tests that match these filters.
    pub filters: Vec<String>,

    /// Skip tests matching these substrings. Corresponds to
    /// `test::TestOpts::skip`. `filter_exact` does not apply to these flags.
    pub skip: Vec<String>,

    /// Exactly match the filter, rather than a substring.
    pub filter_exact: bool,

    /// Force the pass mode of a check/build/run-pass test to this mode.
    pub force_pass_mode: Option<PassMode>,

    /// Explicitly enable or disable running the compiled test programs.
    /// `None` means auto-detect (see `Config::run_enabled`).
    pub run: Option<bool>,

    /// Write out a parseable log of tests that were run.
    pub logfile: Option<PathBuf>,

    /// A command line to prefix program execution with,
    /// for running under valgrind.
    pub runtool: Option<String>,

    /// Flags to pass to the compiler when building for the host.
    pub host_rustcflags: Option<String>,

    /// Flags to pass to the compiler when building for the target.
    pub target_rustcflags: Option<String>,

    /// Whether tests should be optimized by default. Individual test-suites and test files may
    /// override this setting.
    pub optimize_tests: bool,

    /// What panic strategy the target is built with. Unwind supports Abort, but
    /// not vice versa.
    pub target_panic: PanicStrategy,

    /// Target system to be tested.
    pub target: String,

    /// Host triple for the compiler being invoked.
    pub host: String,

    /// Path to / name of the Microsoft Console Debugger (CDB) executable.
    pub cdb: Option<OsString>,

    /// Version of CDB.
    pub cdb_version: Option<[u16; 4]>,

    /// Path to / name of the GDB executable.
    pub gdb: Option<String>,

    /// Version of GDB, encoded as ((major * 1000) + minor) * 1000 + patch.
    pub gdb_version: Option<u32>,

    /// Whether GDB has native rust support.
    pub gdb_native_rust: bool,

    /// Version of LLDB.
    pub lldb_version: Option<u32>,

    /// Whether LLDB has native rust support.
    pub lldb_native_rust: bool,

    /// Version of LLVM.
    pub llvm_version: Option<u32>,

    /// Whether the LLVM in use is a system LLVM (rather than one built in-tree).
    pub system_llvm: bool,

    /// Path to the android tools.
    pub android_cross_path: PathBuf,

    /// Extra parameter to run adb on arm-linux-androideabi.
    pub adb_path: String,

    /// Extra parameter to run test suite on arm-linux-androideabi.
    pub adb_test_dir: String,

    /// Whether an android device is available or not.
    pub adb_device_status: bool,

    /// The path containing LLDB's Python module.
    pub lldb_python_dir: Option<String>,

    /// Explain what's going on.
    pub verbose: bool,

    /// Print one character per test instead of one line.
    pub quiet: bool,

    /// Whether to use colors in test output.
    pub color: ColorConfig,

    /// Where to find the remote test client process, if we're using it.
    pub remote_test_client: Option<PathBuf>,

    /// Mode describing what file the actual ui output will be compared to.
    pub compare_mode: Option<CompareMode>,

    /// If true, this will generate a coverage file with UI test files that run `MachineApplicable`
    /// diagnostics but are missing `run-rustfix` annotations. The generated coverage file is
    /// created in `/<build_base>/rustfix_missing_coverage.txt`
    pub rustfix_coverage: bool,

    /// Whether to run `tidy` when a rustdoc test fails.
    pub has_tidy: bool,

    /// The current Rust channel.
    pub channel: String,

    /// The default Rust edition.
    pub edition: Option<String>,

    // Configuration for various run-make tests frobbing things like C compilers
    // or querying about various LLVM component information.
    pub cc: String,
    pub cxx: String,
    pub cflags: String,
    pub cxxflags: String,
    pub ar: String,
    pub linker: Option<String>,
    pub llvm_components: String,

    /// Path to a NodeJS executable. Used for JS doctests, emscripten and WASM tests.
    pub nodejs: Option<String>,
    /// Path to a npm executable. Used for rustdoc GUI tests.
    pub npm: Option<String>,

    /// Whether to rerun tests even if the inputs are unchanged.
    pub force_rerun: bool,
}
+
+impl Config {
+ pub fn run_enabled(&self) -> bool {
+ self.run.unwrap_or_else(|| {
+ // Auto-detect whether to run based on the platform.
+ !self.target.ends_with("-fuchsia")
+ })
+ }
+}
+
/// The paths identifying a single test file within the test tree.
#[derive(Debug, Clone)]
pub struct TestPaths {
    pub file: PathBuf, // e.g., compile-test/foo/bar/baz.rs
    pub relative_dir: PathBuf, // e.g., foo/bar
}
+
+/// Used by `ui` tests to generate things like `foo.stderr` from `foo.rs`.
+pub fn expected_output_path(
+ testpaths: &TestPaths,
+ revision: Option<&str>,
+ compare_mode: &Option<CompareMode>,
+ kind: &str,
+) -> PathBuf {
+ assert!(UI_EXTENSIONS.contains(&kind));
+ let mut parts = Vec::new();
+
+ if let Some(x) = revision {
+ parts.push(x);
+ }
+ if let Some(ref x) = *compare_mode {
+ parts.push(x.to_str());
+ }
+ parts.push(kind);
+
+ let extension = parts.join(".");
+ testpaths.file.with_extension(extension)
+}
+
/// All file extensions that UI tests may use for expected-output files;
/// `expected_output_path` asserts that the `kind` it is given is listed here.
pub const UI_EXTENSIONS: &[&str] = &[
    UI_STDERR,
    UI_STDOUT,
    UI_FIXED,
    UI_RUN_STDERR,
    UI_RUN_STDOUT,
    UI_STDERR_64,
    UI_STDERR_32,
    UI_STDERR_16,
];
pub const UI_STDERR: &str = "stderr";
pub const UI_STDOUT: &str = "stdout";
pub const UI_FIXED: &str = "fixed";
pub const UI_RUN_STDERR: &str = "run.stderr";
pub const UI_RUN_STDOUT: &str = "run.stdout";
// Pointer-width-specific stderr snapshots.
pub const UI_STDERR_64: &str = "64bit.stderr";
pub const UI_STDERR_32: &str = "32bit.stderr";
pub const UI_STDERR_16: &str = "16bit.stderr";
+
/// Absolute path to the directory where all output for all tests in the given
/// `relative_dir` group should reside. Example:
/// /path/to/build/host-triple/test/ui/relative/
/// This is created early when tests are collected to avoid race conditions.
pub fn output_relative_path(config: &Config, relative_dir: &Path) -> PathBuf {
    config.build_base.join(relative_dir)
}
+
/// Generates a unique name for the test, such as `testname.revision.mode`.
pub fn output_testname_unique(
    config: &Config,
    testpaths: &TestPaths,
    revision: Option<&str>,
) -> PathBuf {
    // Unset options contribute an empty string. NOTE(review): presumably
    // `with_extra_extension` (crate::util::PathBufExt) skips empty
    // components so they don't appear in the name -- confirm against its
    // implementation.
    let mode = config.compare_mode.as_ref().map_or("", |m| m.to_str());
    let debugger = config.debugger.as_ref().map_or("", |m| m.to_str());
    PathBuf::from(&testpaths.file.file_stem().unwrap())
        .with_extra_extension(revision.unwrap_or(""))
        .with_extra_extension(mode)
        .with_extra_extension(debugger)
}
+
+/// Absolute path to the directory where all output for the given
+/// test/revision should reside. Example:
+/// /path/to/build/host-triple/test/ui/relative/testname.revision.mode/
+pub fn output_base_dir(config: &Config, testpaths: &TestPaths, revision: Option<&str>) -> PathBuf {
+ output_relative_path(config, &testpaths.relative_dir)
+ .join(output_testname_unique(config, testpaths, revision))
+}
+
+/// Absolute path to the base filename used as output for the given
+/// test/revision. Example:
+/// /path/to/build/host-triple/test/ui/relative/testname.revision.mode/testname
+pub fn output_base_name(config: &Config, testpaths: &TestPaths, revision: Option<&str>) -> PathBuf {
+ output_base_dir(config, testpaths, revision).join(testpaths.file.file_stem().unwrap())
+}
+
+/// Absolute path to the directory to use for incremental compilation. Example:
+/// /path/to/build/host-triple/test/ui/relative/testname.mode/testname.inc
+pub fn incremental_dir(config: &Config, testpaths: &TestPaths) -> PathBuf {
+ output_base_name(config, testpaths, None).with_extension("inc")
+}
diff --git a/src/tools/compiletest/src/compute_diff.rs b/src/tools/compiletest/src/compute_diff.rs
new file mode 100644
index 000000000..92c80c27d
--- /dev/null
+++ b/src/tools/compiletest/src/compute_diff.rs
@@ -0,0 +1,157 @@
+use std::collections::VecDeque;
+use std::fs::{File, FileType};
+use std::path::Path;
+
/// A single line of a diff, tagged by which side of the comparison it
/// belongs to.
#[derive(Debug, PartialEq)]
pub enum DiffLine {
    /// A line common to both sides, shown for context.
    Context(String),
    /// A line present only in the expected output.
    Expected(String),
    /// A line present only in the actual output.
    Resulting(String),
}
+
/// One contiguous run of differing lines, together with its surrounding
/// context lines.
#[derive(Debug, PartialEq)]
pub struct Mismatch {
    // Line number, in the *expected* text, of the first entry in `lines`
    // (see `make_diff`, which only advances the counter on expected-side
    // lines).
    pub line_number: u32,
    pub lines: Vec<DiffLine>,
}

impl Mismatch {
    fn new(line_number: u32) -> Mismatch {
        Mismatch { line_number, lines: Vec::new() }
    }
}
+
// Produces a diff between the expected output and actual output, grouped
// into `Mismatch` hunks with up to `context_size` common lines of leading
// and trailing context around each run of differences.
pub fn make_diff(expected: &str, actual: &str, context_size: usize) -> Vec<Mismatch> {
    // Tracks the current line of the *expected* text: advanced for `Left`
    // (expected-only) and `Both` lines, but not for `Right` (actual-only).
    let mut line_number = 1;
    // The most recent common lines (at most `context_size`), replayed as
    // leading context when a new mismatch begins.
    let mut context_queue: VecDeque<&str> = VecDeque::with_capacity(context_size);
    // Common lines seen since the last difference; starts past
    // `context_size` so the very first difference opens a new hunk.
    let mut lines_since_mismatch = context_size + 1;
    let mut results = Vec::new();
    // Sentinel hunk; it is pushed along with the real ones and removed at
    // the end.
    let mut mismatch = Mismatch::new(0);

    for result in diff::lines(expected, actual) {
        match result {
            diff::Result::Left(str) => {
                // Far enough from the previous hunk: close it and start a
                // new one anchored before the queued context lines.
                if lines_since_mismatch >= context_size && lines_since_mismatch > 0 {
                    results.push(mismatch);
                    mismatch = Mismatch::new(line_number - context_queue.len() as u32);
                }

                // Flush saved common lines as leading context.
                while let Some(line) = context_queue.pop_front() {
                    mismatch.lines.push(DiffLine::Context(line.to_owned()));
                }

                mismatch.lines.push(DiffLine::Expected(str.to_owned()));
                line_number += 1;
                lines_since_mismatch = 0;
            }
            diff::Result::Right(str) => {
                if lines_since_mismatch >= context_size && lines_since_mismatch > 0 {
                    results.push(mismatch);
                    mismatch = Mismatch::new(line_number - context_queue.len() as u32);
                }

                while let Some(line) = context_queue.pop_front() {
                    mismatch.lines.push(DiffLine::Context(line.to_owned()));
                }

                // Actual-only lines intentionally do not advance
                // `line_number` (it tracks the expected side).
                mismatch.lines.push(DiffLine::Resulting(str.to_owned()));
                lines_since_mismatch = 0;
            }
            diff::Result::Both(str, _) => {
                // Keep at most `context_size` recent common lines queued.
                if context_queue.len() >= context_size {
                    let _ = context_queue.pop_front();
                }

                // Within `context_size` of a difference, common lines are
                // trailing context of the open hunk; farther out they are
                // queued as potential leading context for the next one.
                if lines_since_mismatch < context_size {
                    mismatch.lines.push(DiffLine::Context(str.to_owned()));
                } else if context_size > 0 {
                    context_queue.push_back(str);
                }

                line_number += 1;
                lines_since_mismatch += 1;
            }
        }
    }

    results.push(mismatch);
    // Drop the sentinel hunk that seeded the loop.
    results.remove(0);

    results
}
+
+pub(crate) fn write_diff(expected: &str, actual: &str, context_size: usize) -> String {
+ use std::fmt::Write;
+ let mut output = String::new();
+ let diff_results = make_diff(expected, actual, context_size);
+ for result in diff_results {
+ let mut line_number = result.line_number;
+ for line in result.lines {
+ match line {
+ DiffLine::Expected(e) => {
+ writeln!(output, "-\t{}", e).unwrap();
+ line_number += 1;
+ }
+ DiffLine::Context(c) => {
+ writeln!(output, "{}\t{}", line_number, c).unwrap();
+ line_number += 1;
+ }
+ DiffLine::Resulting(r) => {
+ writeln!(output, "+\t{}", r).unwrap();
+ }
+ }
+ }
+ writeln!(output).unwrap();
+ }
+ output
+}
+
+/// Filters based on filetype and extension whether to diff a file.
+///
+/// Returns whether any data was actually written.
+pub(crate) fn write_filtered_diff<Filter>(
+ diff_filename: &str,
+ out_dir: &Path,
+ compare_dir: &Path,
+ verbose: bool,
+ filter: Filter,
+) -> bool
+where
+ Filter: Fn(FileType, Option<&str>) -> bool,
+{
+ use std::io::{Read, Write};
+ let mut diff_output = File::create(diff_filename).unwrap();
+ let mut wrote_data = false;
+ for entry in walkdir::WalkDir::new(out_dir) {
+ let entry = entry.expect("failed to read file");
+ let extension = entry.path().extension().and_then(|p| p.to_str());
+ if filter(entry.file_type(), extension) {
+ let expected_path = compare_dir.join(entry.path().strip_prefix(&out_dir).unwrap());
+ let expected = if let Ok(s) = std::fs::read(&expected_path) { s } else { continue };
+ let actual_path = entry.path();
+ let actual = std::fs::read(&actual_path).unwrap();
+ let diff = unified_diff::diff(
+ &expected,
+ &expected_path.to_string_lossy(),
+ &actual,
+ &actual_path.to_string_lossy(),
+ 3,
+ );
+ wrote_data |= !diff.is_empty();
+ diff_output.write_all(&diff).unwrap();
+ }
+ }
+
+ if !wrote_data {
+ println!("note: diff is identical to nightly rustdoc");
+ assert!(diff_output.metadata().unwrap().len() == 0);
+ return false;
+ } else if verbose {
+ eprintln!("printing diff:");
+ let mut buf = Vec::new();
+ diff_output.read_to_end(&mut buf).unwrap();
+ std::io::stderr().lock().write_all(&mut buf).unwrap();
+ }
+ true
+}
diff --git a/src/tools/compiletest/src/errors.rs b/src/tools/compiletest/src/errors.rs
new file mode 100644
index 000000000..054235ec1
--- /dev/null
+++ b/src/tools/compiletest/src/errors.rs
@@ -0,0 +1,179 @@
+use self::WhichLine::*;
+
+use std::fmt;
+use std::fs::File;
+use std::io::prelude::*;
+use std::io::BufReader;
+use std::path::Path;
+use std::str::FromStr;
+
+use lazy_static::lazy_static;
+use regex::Regex;
+use tracing::*;
+
/// The kind of diagnostic a test expects the compiler to emit.
#[derive(Clone, Debug, PartialEq)]
pub enum ErrorKind {
    Help,
    Error,
    Note,
    Suggestion,
    Warning,
}

impl FromStr for ErrorKind {
    type Err = ();

    /// Parses the leading keyword of an expectation, e.g. the `ERROR` in
    /// `//~ ERROR mismatched types`. Matching is case-insensitive and
    /// anything from the first `:` onwards is ignored.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let upper = s.to_uppercase();
        let keyword = upper.split(':').next().unwrap();
        match keyword {
            "HELP" => Ok(ErrorKind::Help),
            "ERROR" => Ok(ErrorKind::Error),
            "NOTE" => Ok(ErrorKind::Note),
            "SUGGESTION" => Ok(ErrorKind::Suggestion),
            "WARN" | "WARNING" => Ok(ErrorKind::Warning),
            _ => Err(()),
        }
    }
}

impl fmt::Display for ErrorKind {
    /// Human-readable name of the diagnostic kind.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            ErrorKind::Help => "help message",
            ErrorKind::Error => "error",
            ErrorKind::Note => "note",
            ErrorKind::Suggestion => "suggestion",
            ErrorKind::Warning => "warning",
        })
    }
}
+
/// One expected diagnostic, parsed from a `//~` annotation in a test file.
#[derive(Debug)]
pub struct Error {
    // 1-based source line the diagnostic is expected on (see `load_errors`,
    // which passes `enumerate()`'s index plus one).
    pub line_num: usize,
    /// What kind of message we expect (e.g., warning, error, suggestion).
    /// `None` if not specified or unknown message kind.
    pub kind: Option<ErrorKind>,
    // The expected message text.
    pub msg: String,
}
+
/// Where an error annotation points, relative to its own source line.
#[derive(PartialEq, Debug)]
enum WhichLine {
    /// `//~`: the error is expected on this very line.
    ThisLine,
    /// `//~|`: same target line as the previous non-follow annotation
    /// (the payload is that line number).
    FollowPrevious(usize),
    /// `//~^^...`: the error is expected this many lines up.
    AdjustBackward(usize),
}
+
/// Looks for either "//~| KIND MESSAGE" or "//~^^... KIND MESSAGE"
/// The former is a "follow" that inherits its target from the preceding line;
/// the latter is an "adjusts" that goes that many lines up.
///
/// Goal is to enable tests both like: //~^^^ ERROR go up three
/// and also //~^ ERROR message one for the preceding line, and
/// //~| ERROR message two for that same line.
///
/// If cfg is not None (i.e., in an incremental test), then we look
/// for `//[X]~` instead, where `X` is the current `cfg`.
pub fn load_errors(testfile: &Path, cfg: Option<&str>) -> Vec<Error> {
    let rdr = BufReader::new(File::open(testfile).unwrap());

    // `last_nonfollow_error` tracks the most recently seen
    // line with an error template that did not use the
    // follow-syntax, "//~| ...".
    //
    // (pnkfelix could not find an easy way to compose Iterator::scan
    // and Iterator::filter_map to pass along this information into
    // `parse_expected`. So instead I am storing that state here and
    // updating it in the map callback below.)
    let mut last_nonfollow_error = None;

    rdr.lines()
        .enumerate()
        .filter_map(|(line_num, line)| {
            // `enumerate` is 0-based but annotations use 1-based lines.
            parse_expected(last_nonfollow_error, line_num + 1, &line.unwrap(), cfg).map(
                |(which, error)| {
                    // Every non-follow annotation becomes the new anchor
                    // that subsequent `//~|` annotations refer back to.
                    match which {
                        FollowPrevious(_) => {}
                        _ => last_nonfollow_error = Some(error.line_num),
                    }

                    error
                },
            )
        })
        .collect()
}
+
/// Parses one source line for an error annotation. Returns the target
/// classification (`WhichLine`) together with the resolved `Error`, or
/// `None` when the line has no annotation that applies to `cfg`.
fn parse_expected(
    last_nonfollow_error: Option<usize>,
    line_num: usize,
    line: &str,
    cfg: Option<&str>,
) -> Option<(WhichLine, Error)> {
    // Matches comments like:
    //     //~
    //     //~|
    //     //~^
    //     //~^^^^^
    //     //[cfg1]~
    //     //[cfg1,cfg2]~^^
    lazy_static! {
        static ref RE: Regex =
            Regex::new(r"//(?:\[(?P<cfgs>[\w,]+)])?~(?P<adjust>\||\^*)").unwrap();
    }

    let captures = RE.captures(line)?;

    match (cfg, captures.name("cfgs")) {
        // Only error messages that contain our `cfg` between the square brackets apply to us.
        (Some(cfg), Some(filter)) if !filter.as_str().split(',').any(|s| s == cfg) => return None,
        (Some(_), Some(_)) => {}

        (None, Some(_)) => panic!("Only tests with revisions should use `//[X]~`"),

        // If an error has no list of revisions, it applies to all revisions.
        (Some(_), None) | (None, None) => {}
    }

    // `|` marks a "follow"; otherwise the number of `^`s is the number of
    // lines to adjust upward (zero for a plain `//~`).
    let (follow, adjusts) = match &captures["adjust"] {
        "|" => (true, 0),
        circumflexes => (false, circumflexes.len()),
    };

    // Get the part of the comment after the sigil (e.g. `~^^` or ~|).
    let whole_match = captures.get(0).unwrap();
    let (_, mut msg) = line.split_at(whole_match.end());

    let first_word = msg.split_whitespace().next().expect("Encountered unexpected empty comment");

    // If we find `//~ ERROR foo` or something like that, skip the first word.
    let kind = first_word.parse::<ErrorKind>().ok();
    if kind.is_some() {
        msg = &msg.trim_start().split_at(first_word.len()).1;
    }

    let msg = msg.trim().to_owned();

    // Resolve the annotation to an absolute target line.
    let (which, line_num) = if follow {
        assert_eq!(adjusts, 0, "use either //~| or //~^, not both.");
        let line_num = last_nonfollow_error.expect(
            "encountered //~| without \
             preceding //~^ line.",
        );
        (FollowPrevious(line_num), line_num)
    } else {
        let which = if adjusts > 0 { AdjustBackward(adjusts) } else { ThisLine };
        let line_num = line_num - adjusts;
        (which, line_num)
    };

    debug!(
        "line={} tag={:?} which={:?} kind={:?} msg={:?}",
        line_num,
        whole_match.as_str(),
        which,
        kind,
        msg
    );
    Some((which, Error { line_num, kind, msg }))
}
diff --git a/src/tools/compiletest/src/header.rs b/src/tools/compiletest/src/header.rs
new file mode 100644
index 000000000..f8f193ddf
--- /dev/null
+++ b/src/tools/compiletest/src/header.rs
@@ -0,0 +1,1060 @@
+use std::collections::HashSet;
+use std::env;
+use std::fs::File;
+use std::io::prelude::*;
+use std::io::BufReader;
+use std::path::{Path, PathBuf};
+
+use tracing::*;
+
+use crate::common::{CompareMode, Config, Debugger, FailMode, Mode, PanicStrategy, PassMode};
+use crate::util;
+use crate::{extract_cdb_version, extract_gdb_version};
+
+#[cfg(test)]
+mod tests;
+
/// The result of [`Config::parse_cfg_name_directive`]: whether a
/// config-qualified directive (e.g. `ignore-x86_64`, `only-windows`)
/// applies to the configuration currently being tested.
#[derive(Clone, Copy, PartialEq, Debug)]
enum ParsedNameDirective {
    /// No match.
    NoMatch,
    /// Match.
    Match,
}
+
/// Properties which must be known very early, before actually running
/// the test.
#[derive(Default)]
pub struct EarlyProps {
    /// Auxiliary crates that must be built first (`aux-build` directives).
    pub aux: Vec<String>,
    /// `NAME=path` pairs from `aux-crate` directives, passed via `--extern`.
    pub aux_crate: Vec<(String, String)>,
    /// Revision names declared by `revisions` directives.
    pub revisions: Vec<String>,
}
+
+impl EarlyProps {
+ pub fn from_file(config: &Config, testfile: &Path) -> Self {
+ let file = File::open(testfile).expect("open test file to parse earlyprops");
+ Self::from_reader(config, testfile, file)
+ }
+
+ pub fn from_reader<R: Read>(config: &Config, testfile: &Path, rdr: R) -> Self {
+ let mut props = EarlyProps::default();
+ iter_header(testfile, rdr, &mut |_, ln| {
+ config.push_name_value_directive(ln, directives::AUX_BUILD, &mut props.aux, |r| {
+ r.trim().to_string()
+ });
+ config.push_name_value_directive(
+ ln,
+ directives::AUX_CRATE,
+ &mut props.aux_crate,
+ Config::parse_aux_crate,
+ );
+ config.parse_and_update_revisions(ln, &mut props.revisions);
+ });
+ return props;
+ }
+}
+
/// The full set of per-test properties parsed from a test file's header
/// directives (e.g. `// compile-flags: -O`). See [`EarlyProps`] for the
/// subset needed before the test runs.
#[derive(Clone, Debug)]
pub struct TestProps {
    /// Lines that should be expected, in order, on standard out
    pub error_patterns: Vec<String>,
    /// Regexes that should be expected, in order, on standard out
    pub regex_error_patterns: Vec<String>,
    /// Extra flags to pass to the compiler
    pub compile_flags: Vec<String>,
    /// Extra flags to pass when the compiled code is run (such as --bench)
    pub run_flags: Option<String>,
    /// If present, the name of a file that this test should match when
    /// pretty-printed
    pub pp_exact: Option<PathBuf>,
    /// Other crates that should be compiled (typically from the same
    /// directory as the test, but for backwards compatibility reasons
    /// we also check the auxiliary directory)
    pub aux_builds: Vec<String>,
    /// Similar to `aux_builds`, but a list of NAME=somelib.rs of dependencies
    /// to build and pass with the `--extern` flag.
    pub aux_crates: Vec<(String, String)>,
    /// Environment settings to use for compiling
    pub rustc_env: Vec<(String, String)>,
    /// Environment variables to unset prior to compiling.
    /// Variables are unset before applying 'rustc_env'.
    pub unset_rustc_env: Vec<String>,
    /// Environment settings to use during execution
    pub exec_env: Vec<(String, String)>,
    /// Build documentation for all specified aux-builds as well
    pub build_aux_docs: bool,
    /// Flag to force a crate to be built with the host architecture
    pub force_host: bool,
    /// Check stdout for error-pattern output as well as stderr
    pub check_stdout: bool,
    /// Check stdout & stderr for output of run-pass test
    pub check_run_results: bool,
    /// For UI tests, allows compiler to generate arbitrary output to stdout
    pub dont_check_compiler_stdout: bool,
    /// For UI tests, allows compiler to generate arbitrary output to stderr
    pub dont_check_compiler_stderr: bool,
    /// Don't force a --crate-type=dylib flag on the command line
    ///
    /// Set this for example if you have an auxiliary test file that contains
    /// a proc-macro and needs `#![crate_type = "proc-macro"]`. This ensures
    /// that the aux file is compiled as a `proc-macro` and not as a `dylib`.
    pub no_prefer_dynamic: bool,
    /// Run -Zunpretty expanded when running pretty printing tests
    pub pretty_expanded: bool,
    /// Which pretty mode are we testing with, default to 'normal'
    pub pretty_mode: String,
    /// Only compare pretty output and don't try compiling
    pub pretty_compare_only: bool,
    /// Patterns which must not appear in the output of a cfail test.
    pub forbid_output: Vec<String>,
    /// Revisions to test for incremental compilation.
    pub revisions: Vec<String>,
    /// Directory (if any) to use for incremental compilation. This is
    /// not set by end-users; rather it is set by the incremental
    /// testing harness and used when generating compilation
    /// arguments. (In particular, it propagates to the aux-builds.)
    pub incremental_dir: Option<PathBuf>,
    /// If `true`, this test will use incremental compilation.
    ///
    /// This can be set manually with the `incremental` header, or implicitly
    /// by being a part of an incremental mode test. Using the `incremental`
    /// header should be avoided if possible; using an incremental mode test is
    /// preferred. Incremental mode tests support multiple passes, which can
    /// verify that the incremental cache can be loaded properly after being
    /// created. Just setting the header will only verify the behavior with
    /// creating an incremental cache, but doesn't check that it is created
    /// correctly.
    ///
    /// Compiletest will create the incremental directory, and ensure it is
    /// empty before the test starts. Incremental mode tests will reuse the
    /// incremental directory between passes in the same test.
    pub incremental: bool,
    /// If `true`, this test is a known bug.
    ///
    /// When set, some requirements are relaxed. Currently, this only means no
    /// error annotations are needed, but this may be updated in the future to
    /// include other relaxations.
    pub known_bug: bool,
    /// How far should the test proceed while still passing.
    pass_mode: Option<PassMode>,
    /// Ignore `--pass` overrides from the command line for this test.
    ignore_pass: bool,
    /// How far this test should proceed to start failing.
    pub fail_mode: Option<FailMode>,
    /// rustdoc will test the output of the `--test` option
    pub check_test_line_numbers_match: bool,
    /// customized normalization rules
    pub normalize_stdout: Vec<(String, String)>,
    pub normalize_stderr: Vec<(String, String)>,
    /// Expected process exit status of a failing run; starts as the `-1`
    /// sentinel in `new()` and is resolved by `load_from`.
    pub failure_status: i32,
    /// Whether or not `rustfix` should apply the `CodeSuggestion`s of this test and compile the
    /// resulting Rust code.
    pub run_rustfix: bool,
    /// If true, `rustfix` will only apply `MachineApplicable` suggestions.
    pub rustfix_only_machine_applicable: bool,
    pub assembly_output: Option<String>,
    /// If true, the test is expected to ICE
    pub should_ice: bool,
    /// If true, the stderr is expected to be different across bit-widths.
    pub stderr_per_bitwidth: bool,
    /// The MIR opt to unit test, if any
    pub mir_unit_test: Option<String>,
}
+
/// The canonical spellings of the header directives recognized by this
/// module, kept in one place so the parsing code and error messages agree.
mod directives {
    // `&'static` is redundant on a `const` item: `&str` in const context
    // already has a `'static` lifetime (clippy: redundant_static_lifetimes).
    pub const ERROR_PATTERN: &str = "error-pattern";
    pub const REGEX_ERROR_PATTERN: &str = "regex-error-pattern";
    pub const COMPILE_FLAGS: &str = "compile-flags";
    pub const RUN_FLAGS: &str = "run-flags";
    pub const SHOULD_ICE: &str = "should-ice";
    pub const BUILD_AUX_DOCS: &str = "build-aux-docs";
    pub const FORCE_HOST: &str = "force-host";
    pub const CHECK_STDOUT: &str = "check-stdout";
    pub const CHECK_RUN_RESULTS: &str = "check-run-results";
    pub const DONT_CHECK_COMPILER_STDOUT: &str = "dont-check-compiler-stdout";
    pub const DONT_CHECK_COMPILER_STDERR: &str = "dont-check-compiler-stderr";
    pub const NO_PREFER_DYNAMIC: &str = "no-prefer-dynamic";
    pub const PRETTY_EXPANDED: &str = "pretty-expanded";
    pub const PRETTY_MODE: &str = "pretty-mode";
    pub const PRETTY_COMPARE_ONLY: &str = "pretty-compare-only";
    pub const AUX_BUILD: &str = "aux-build";
    pub const AUX_CRATE: &str = "aux-crate";
    pub const EXEC_ENV: &str = "exec-env";
    pub const RUSTC_ENV: &str = "rustc-env";
    pub const UNSET_RUSTC_ENV: &str = "unset-rustc-env";
    pub const FORBID_OUTPUT: &str = "forbid-output";
    pub const CHECK_TEST_LINE_NUMBERS_MATCH: &str = "check-test-line-numbers-match";
    pub const IGNORE_PASS: &str = "ignore-pass";
    pub const FAILURE_STATUS: &str = "failure-status";
    pub const RUN_RUSTFIX: &str = "run-rustfix";
    pub const RUSTFIX_ONLY_MACHINE_APPLICABLE: &str = "rustfix-only-machine-applicable";
    pub const ASSEMBLY_OUTPUT: &str = "assembly-output";
    pub const STDERR_PER_BITWIDTH: &str = "stderr-per-bitwidth";
    pub const INCREMENTAL: &str = "incremental";
    pub const KNOWN_BUG: &str = "known-bug";
    pub const MIR_UNIT_TEST: &str = "unit-test";
    // This isn't a real directive, just one that is probably mistyped often
    pub const INCORRECT_COMPILER_FLAGS: &str = "compiler-flags";
}
+
impl TestProps {
    /// Creates a `TestProps` with every directive at its default (unset)
    /// value. `failure_status` starts at the `-1` sentinel; `load_from`
    /// later replaces it with `1` (or `101` for `should-ice`) when no
    /// `failure-status` directive was seen.
    pub fn new() -> Self {
        TestProps {
            error_patterns: vec![],
            regex_error_patterns: vec![],
            compile_flags: vec![],
            run_flags: None,
            pp_exact: None,
            aux_builds: vec![],
            aux_crates: vec![],
            revisions: vec![],
            rustc_env: vec![],
            unset_rustc_env: vec![],
            exec_env: vec![],
            build_aux_docs: false,
            force_host: false,
            check_stdout: false,
            check_run_results: false,
            dont_check_compiler_stdout: false,
            dont_check_compiler_stderr: false,
            no_prefer_dynamic: false,
            pretty_expanded: false,
            pretty_mode: "normal".to_string(),
            pretty_compare_only: false,
            forbid_output: vec![],
            incremental_dir: None,
            incremental: false,
            known_bug: false,
            pass_mode: None,
            fail_mode: None,
            ignore_pass: false,
            check_test_line_numbers_match: false,
            normalize_stdout: vec![],
            normalize_stderr: vec![],
            failure_status: -1,
            run_rustfix: false,
            rustfix_only_machine_applicable: false,
            assembly_output: None,
            should_ice: false,
            stderr_per_bitwidth: false,
            mir_unit_test: None,
        }
    }

    /// Builds the properties for an auxiliary file of this test, inheriting
    /// the incremental directory and always ignoring `--pass` overrides
    /// before loading the aux file's own directives.
    pub fn from_aux_file(&self, testfile: &Path, cfg: Option<&str>, config: &Config) -> Self {
        let mut props = TestProps::new();

        // copy over select properties to the aux build:
        props.incremental_dir = self.incremental_dir.clone();
        props.ignore_pass = true;
        props.load_from(testfile, cfg, config);

        props
    }

    /// Parses the header directives of `testfile` (restricted to revision
    /// `cfg` when given) and resolves the pass/fail mode defaults.
    pub fn from_file(testfile: &Path, cfg: Option<&str>, config: &Config) -> Self {
        let mut props = TestProps::new();
        props.load_from(testfile, cfg, config);

        match (props.pass_mode, props.fail_mode) {
            // A test declaring neither mode defaults to failing at the
            // check stage.
            (None, None) => props.fail_mode = Some(FailMode::Check),
            (Some(_), None) | (None, Some(_)) => {}
            (Some(_), Some(_)) => panic!("cannot use a *-fail and *-pass mode together"),
        }

        props
    }

    /// Loads properties from `testfile` into `props`. If a property is
    /// tied to a particular revision `foo` (indicated by writing
    /// `//[foo]`), then the property is ignored unless `cfg` is
    /// `Some("foo")`.
    fn load_from(&mut self, testfile: &Path, cfg: Option<&str>, config: &Config) {
        // Tracks whether any `edition` directive was seen, so the config's
        // default edition is only appended when the test didn't pick one.
        let mut has_edition = false;
        if !testfile.is_dir() {
            let file = File::open(testfile).unwrap();

            iter_header(testfile, file, &mut |revision, ln| {
                // Skip directives qualified with a revision other than `cfg`.
                if revision.is_some() && revision != cfg {
                    return;
                }

                use directives::*;

                config.push_name_value_directive(
                    ln,
                    ERROR_PATTERN,
                    &mut self.error_patterns,
                    |r| r,
                );
                config.push_name_value_directive(
                    ln,
                    REGEX_ERROR_PATTERN,
                    &mut self.regex_error_patterns,
                    |r| r,
                );

                if let Some(flags) = config.parse_name_value_directive(ln, COMPILE_FLAGS) {
                    self.compile_flags.extend(flags.split_whitespace().map(|s| s.to_owned()));
                }
                // Catch the common misspelling of `compile-flags` early.
                if config.parse_name_value_directive(ln, INCORRECT_COMPILER_FLAGS).is_some() {
                    panic!("`compiler-flags` directive should be spelled `compile-flags`");
                }

                if let Some(edition) = config.parse_edition(ln) {
                    self.compile_flags.push(format!("--edition={}", edition.trim()));
                    has_edition = true;
                }

                config.parse_and_update_revisions(ln, &mut self.revisions);

                config.set_name_value_directive(ln, RUN_FLAGS, &mut self.run_flags, |r| r);

                if self.pp_exact.is_none() {
                    self.pp_exact = config.parse_pp_exact(ln, testfile);
                }

                config.set_name_directive(ln, SHOULD_ICE, &mut self.should_ice);
                config.set_name_directive(ln, BUILD_AUX_DOCS, &mut self.build_aux_docs);
                config.set_name_directive(ln, FORCE_HOST, &mut self.force_host);
                config.set_name_directive(ln, CHECK_STDOUT, &mut self.check_stdout);
                config.set_name_directive(ln, CHECK_RUN_RESULTS, &mut self.check_run_results);
                config.set_name_directive(
                    ln,
                    DONT_CHECK_COMPILER_STDOUT,
                    &mut self.dont_check_compiler_stdout,
                );
                config.set_name_directive(
                    ln,
                    DONT_CHECK_COMPILER_STDERR,
                    &mut self.dont_check_compiler_stderr,
                );
                config.set_name_directive(ln, NO_PREFER_DYNAMIC, &mut self.no_prefer_dynamic);
                config.set_name_directive(ln, PRETTY_EXPANDED, &mut self.pretty_expanded);

                if let Some(m) = config.parse_name_value_directive(ln, PRETTY_MODE) {
                    self.pretty_mode = m;
                }

                config.set_name_directive(ln, PRETTY_COMPARE_ONLY, &mut self.pretty_compare_only);
                config.push_name_value_directive(ln, AUX_BUILD, &mut self.aux_builds, |r| {
                    r.trim().to_string()
                });
                config.push_name_value_directive(
                    ln,
                    AUX_CRATE,
                    &mut self.aux_crates,
                    Config::parse_aux_crate,
                );
                config.push_name_value_directive(
                    ln,
                    EXEC_ENV,
                    &mut self.exec_env,
                    Config::parse_env,
                );
                config.push_name_value_directive(
                    ln,
                    RUSTC_ENV,
                    &mut self.rustc_env,
                    Config::parse_env,
                );
                config.push_name_value_directive(
                    ln,
                    UNSET_RUSTC_ENV,
                    &mut self.unset_rustc_env,
                    |r| r,
                );
                config.push_name_value_directive(ln, FORBID_OUTPUT, &mut self.forbid_output, |r| r);
                config.set_name_directive(
                    ln,
                    CHECK_TEST_LINE_NUMBERS_MATCH,
                    &mut self.check_test_line_numbers_match,
                );

                self.update_pass_mode(ln, cfg, config);
                self.update_fail_mode(ln, config);

                config.set_name_directive(ln, IGNORE_PASS, &mut self.ignore_pass);

                if let Some(rule) = config.parse_custom_normalization(ln, "normalize-stdout") {
                    self.normalize_stdout.push(rule);
                }
                if let Some(rule) = config.parse_custom_normalization(ln, "normalize-stderr") {
                    self.normalize_stderr.push(rule);
                }

                // Note: a `failure-status` value that fails to parse as i32
                // is silently ignored here.
                if let Some(code) = config
                    .parse_name_value_directive(ln, FAILURE_STATUS)
                    .and_then(|code| code.trim().parse::<i32>().ok())
                {
                    self.failure_status = code;
                }

                config.set_name_directive(ln, RUN_RUSTFIX, &mut self.run_rustfix);
                config.set_name_directive(
                    ln,
                    RUSTFIX_ONLY_MACHINE_APPLICABLE,
                    &mut self.rustfix_only_machine_applicable,
                );
                config.set_name_value_directive(
                    ln,
                    ASSEMBLY_OUTPUT,
                    &mut self.assembly_output,
                    |r| r.trim().to_string(),
                );
                config.set_name_directive(ln, STDERR_PER_BITWIDTH, &mut self.stderr_per_bitwidth);
                config.set_name_directive(ln, INCREMENTAL, &mut self.incremental);

                // Unlike the other `name_value_directive`s this needs to be handled manually,
                // because it sets a `bool` flag.
                if let Some(known_bug) = config.parse_name_value_directive(ln, KNOWN_BUG) {
                    let known_bug = known_bug.trim();
                    // Accept `unknown` or a comma-separated list of issue
                    // references whose part after `#` is all digits.
                    if known_bug == "unknown"
                        || known_bug.split(',').all(|issue_ref| {
                            issue_ref
                                .trim()
                                .split_once('#')
                                .filter(|(_, number)| {
                                    number.chars().all(|digit| digit.is_numeric())
                                })
                                .is_some()
                        })
                    {
                        self.known_bug = true;
                    } else {
                        panic!(
                            "Invalid known-bug value: {known_bug}\nIt requires comma-separated issue references (`#000` or `chalk#000`) or `unknown`."
                        );
                    }
                }
                config.set_name_value_directive(ln, MIR_UNIT_TEST, &mut self.mir_unit_test, |s| {
                    s.trim().to_string()
                });
            });
        }

        // Resolve the `-1` sentinel: default exit status 1, or 101 when an
        // ICE is expected (`should-ice` takes precedence).
        if self.failure_status == -1 {
            self.failure_status = 1;
        }
        if self.should_ice {
            self.failure_status = 101;
        }

        if config.mode == Mode::Incremental {
            self.incremental = true;
        }

        // Forward test-harness env vars to the test program unless the test
        // already set them via `exec-env`.
        for key in &["RUST_TEST_NOCAPTURE", "RUST_TEST_THREADS"] {
            if let Ok(val) = env::var(key) {
                if self.exec_env.iter().find(|&&(ref x, _)| x == key).is_none() {
                    self.exec_env.push(((*key).to_owned(), val))
                }
            }
        }

        // Apply the config's default edition only if the test didn't choose one.
        if let (Some(edition), false) = (&config.edition, has_edition) {
            self.compile_flags.push(format!("--edition={}", edition));
        }
    }

    /// Handles the `check-fail` / `build-fail` / `run-fail` directives,
    /// panicking on modes that are invalid for the current test suite or on
    /// duplicate `*-fail` headers.
    fn update_fail_mode(&mut self, ln: &str, config: &Config) {
        let check_ui = |mode: &str| {
            if config.mode != Mode::Ui {
                panic!("`{}-fail` header is only supported in UI tests", mode);
            }
        };
        if config.mode == Mode::Ui && config.parse_name_directive(ln, "compile-fail") {
            panic!("`compile-fail` header is useless in UI tests");
        }
        let fail_mode = if config.parse_name_directive(ln, "check-fail") {
            check_ui("check");
            Some(FailMode::Check)
        } else if config.parse_name_directive(ln, "build-fail") {
            check_ui("build");
            Some(FailMode::Build)
        } else if config.parse_name_directive(ln, "run-fail") {
            check_ui("run");
            Some(FailMode::Run)
        } else {
            None
        };
        match (self.fail_mode, fail_mode) {
            (None, Some(_)) => self.fail_mode = fail_mode,
            (Some(_), Some(_)) => panic!("multiple `*-fail` headers in a single test"),
            (_, None) => {}
        }
    }

    /// Handles the `check-pass` / `build-pass` / `run-pass` directives,
    /// panicking on modes that are invalid for the current test suite or on
    /// duplicate `*-pass` headers.
    fn update_pass_mode(&mut self, ln: &str, revision: Option<&str>, config: &Config) {
        let check_no_run = |s| {
            if config.mode != Mode::Ui && config.mode != Mode::Incremental {
                panic!("`{}` header is only supported in UI and incremental tests", s);
            }
            if config.mode == Mode::Incremental
                && !revision.map_or(false, |r| r.starts_with("cfail"))
                && !self.revisions.iter().all(|r| r.starts_with("cfail"))
            {
                panic!("`{}` header is only supported in `cfail` incremental tests", s);
            }
        };
        let pass_mode = if config.parse_name_directive(ln, "check-pass") {
            check_no_run("check-pass");
            Some(PassMode::Check)
        } else if config.parse_name_directive(ln, "build-pass") {
            check_no_run("build-pass");
            Some(PassMode::Build)
        } else if config.parse_name_directive(ln, "run-pass") {
            if config.mode != Mode::Ui {
                panic!("`run-pass` header is only supported in UI tests")
            }
            Some(PassMode::Run)
        } else {
            None
        };
        match (self.pass_mode, pass_mode) {
            (None, Some(_)) => self.pass_mode = pass_mode,
            (Some(_), Some(_)) => panic!("multiple `*-pass` headers in a single test"),
            (_, None) => {}
        }
    }

    /// The effective pass mode, honoring a `--pass` command-line override
    /// (unless `ignore-pass` was set or the test has a fail mode).
    pub fn pass_mode(&self, config: &Config) -> Option<PassMode> {
        if !self.ignore_pass && self.fail_mode.is_none() && config.mode == Mode::Ui {
            if let (mode @ Some(_), Some(_)) = (config.force_pass_mode, self.pass_mode) {
                return mode;
            }
        }
        self.pass_mode
    }

    /// does not consider CLI override for pass mode
    pub fn local_pass_mode(&self) -> Option<PassMode> {
        self.pass_mode
    }
}
+
/// Splits a header line into its optional revision qualifier and directive
/// text: `//[foo] bar` yields `Some((Some("foo"), "bar"))`, `// bar` yields
/// `Some((None, "bar"))`, and a line not starting with `comment` yields
/// `None`. Panics on an unterminated `[...]` qualifier.
pub fn line_directive<'line>(
    comment: &str,
    ln: &'line str,
) -> Option<(Option<&'line str>, &'line str)> {
    let rest = ln.strip_prefix(comment)?.trim_start();
    if !rest.starts_with('[') {
        return Some((None, rest));
    }
    // A comment like `//[foo]` is specific to revision `foo`.
    let close_brace = rest.find(']').unwrap_or_else(|| {
        panic!("malformed condition directive: expected `{}[foo]`, found `{}`", comment, rest)
    });
    let lncfg = &rest[1..close_brace];
    Some((Some(lncfg), rest[close_brace + 1..].trim_start()))
}
+
+fn iter_header<R: Read>(testfile: &Path, rdr: R, it: &mut dyn FnMut(Option<&str>, &str)) {
+ if testfile.is_dir() {
+ return;
+ }
+
+ let comment = if testfile.extension().map(|e| e == "rs") == Some(true) { "//" } else { "#" };
+
+ let mut rdr = BufReader::new(rdr);
+ let mut ln = String::new();
+
+ loop {
+ ln.clear();
+ if rdr.read_line(&mut ln).unwrap() == 0 {
+ break;
+ }
+
+ // Assume that any directives will be found before the first
+ // module or function. This doesn't seem to be an optimization
+ // with a warm page cache. Maybe with a cold one.
+ let ln = ln.trim();
+ if ln.starts_with("fn") || ln.starts_with("mod") {
+ return;
+ } else if let Some((lncfg, ln)) = line_directive(comment, ln) {
+ it(lncfg, ln);
+ }
+ }
+}
+
impl Config {
    /// Splits an `aux-crate` value of the form `NAME=path` into its parts,
    /// panicking if either side is missing.
    fn parse_aux_crate(r: String) -> (String, String) {
        let mut parts = r.trim().splitn(2, '=');
        (
            parts.next().expect("missing aux-crate name (e.g. log=log.rs)").to_string(),
            parts.next().expect("missing aux-crate value (e.g. log=log.rs)").to_string(),
        )
    }

    /// Appends the whitespace-separated names of a `revisions` directive to
    /// `existing`, panicking on a duplicate (including one already present).
    fn parse_and_update_revisions(&self, line: &str, existing: &mut Vec<String>) {
        if let Some(raw) = self.parse_name_value_directive(line, "revisions") {
            let mut duplicates: HashSet<_> = existing.iter().cloned().collect();
            for revision in raw.split_whitespace().map(|r| r.to_string()) {
                if !duplicates.insert(revision.clone()) {
                    panic!("Duplicate revision: `{}` in line `{}`", revision, raw);
                }
                existing.push(revision);
            }
        }
    }

    /// Splits an environment directive value into `(name, value)`; a bare
    /// name gets an empty value.
    fn parse_env(nv: String) -> (String, String) {
        // nv is either FOO or FOO=BAR
        let mut strs: Vec<String> = nv.splitn(2, '=').map(str::to_owned).collect();

        match strs.len() {
            1 => (strs.pop().unwrap(), String::new()),
            2 => {
                let end = strs.pop().unwrap();
                (strs.pop().unwrap(), end)
            }
            n => panic!("Expected 1 or 2 strings, not {}", n),
        }
    }

    /// Handles `pp-exact`: with a value, that file; bare, the test's own
    /// file name; otherwise `None`.
    fn parse_pp_exact(&self, line: &str, testfile: &Path) -> Option<PathBuf> {
        if let Some(s) = self.parse_name_value_directive(line, "pp-exact") {
            Some(PathBuf::from(&s))
        } else if self.parse_name_directive(line, "pp-exact") {
            testfile.file_name().map(PathBuf::from)
        } else {
            None
        }
    }

    /// Parses a normalization rule such as
    /// `normalize-stderr-32bit: "from" -> "to"`, returning the two quoted
    /// strings when the config qualifier matches this configuration.
    fn parse_custom_normalization(&self, mut line: &str, prefix: &str) -> Option<(String, String)> {
        if self.parse_cfg_name_directive(line, prefix) == ParsedNameDirective::Match {
            let from = parse_normalization_string(&mut line)?;
            let to = parse_normalization_string(&mut line)?;
            Some((from, to))
        } else {
            None
        }
    }

    fn parse_needs_matching_clang(&self, line: &str) -> bool {
        self.parse_name_directive(line, "needs-matching-clang")
    }

    fn parse_needs_profiler_support(&self, line: &str) -> bool {
        self.parse_name_directive(line, "needs-profiler-support")
    }

    /// Parses a name-value directive which contains config-specific information, e.g., `ignore-x86`
    /// or `normalize-stderr-32bit`.
    fn parse_cfg_name_directive(&self, line: &str, prefix: &str) -> ParsedNameDirective {
        if !line.as_bytes().starts_with(prefix.as_bytes()) {
            return ParsedNameDirective::NoMatch;
        }
        if line.as_bytes().get(prefix.len()) != Some(&b'-') {
            return ParsedNameDirective::NoMatch;
        }

        // The qualifier is everything between `prefix-` and the first `:` or space.
        let name = line[prefix.len() + 1..].split(&[':', ' '][..]).next().unwrap();

        let is_match = name == "test" ||
            self.target == name || // triple
            util::matches_os(&self.target, name) || // target
            util::matches_env(&self.target, name) || // env
            self.target.ends_with(name) || // target and env
            name == util::get_arch(&self.target) || // architecture
            name == util::get_pointer_width(&self.target) || // pointer width
            name == self.stage_id.split('-').next().unwrap() || // stage
            name == self.channel || // channel
            (self.target != self.host && name == "cross-compile") ||
            (name == "endian-big" && util::is_big_endian(&self.target)) ||
            (self.remote_test_client.is_some() && name == "remote") ||
            match self.compare_mode {
                Some(CompareMode::Polonius) => name == "compare-mode-polonius",
                Some(CompareMode::Chalk) => name == "compare-mode-chalk",
                Some(CompareMode::SplitDwarf) => name == "compare-mode-split-dwarf",
                Some(CompareMode::SplitDwarfSingle) => name == "compare-mode-split-dwarf-single",
                None => false,
            } ||
            (cfg!(debug_assertions) && name == "debug") ||
            match self.debugger {
                Some(Debugger::Cdb) => name == "cdb",
                Some(Debugger::Gdb) => name == "gdb",
                Some(Debugger::Lldb) => name == "lldb",
                None => false,
            };

        if is_match { ParsedNameDirective::Match } else { ParsedNameDirective::NoMatch }
    }

    fn has_cfg_prefix(&self, line: &str, prefix: &str) -> bool {
        // returns whether this line contains this prefix or not. For prefix
        // "ignore", returns true if line says "ignore-x86_64", "ignore-arch",
        // "ignore-android" etc.
        line.starts_with(prefix) && line.as_bytes().get(prefix.len()) == Some(&b'-')
    }

    fn parse_name_directive(&self, line: &str, directive: &str) -> bool {
        // Ensure the directive is a whole word. Do not match "ignore-x86" when
        // the line says "ignore-x86_64".
        line.starts_with(directive)
            && matches!(line.as_bytes().get(directive.len()), None | Some(&b' ') | Some(&b':'))
    }

    /// Returns the value of a `directive: value` line (with `{{...}}`
    /// placeholders expanded), or `None` if the line is a different directive.
    pub fn parse_name_value_directive(&self, line: &str, directive: &str) -> Option<String> {
        let colon = directive.len();
        if line.starts_with(directive) && line.as_bytes().get(colon) == Some(&b':') {
            let value = line[(colon + 1)..].to_owned();
            debug!("{}: {}", directive, value);
            Some(expand_variables(value, self))
        } else {
            None
        }
    }

    /// Walks up from `src_base` looking for the directory that contains
    /// `src/etc/lldb_batchmode.py`, i.e. the root of the rust source tree.
    pub fn find_rust_src_root(&self) -> Option<PathBuf> {
        let mut path = self.src_base.clone();
        let path_postfix = Path::new("src/etc/lldb_batchmode.py");

        while path.pop() {
            if path.join(&path_postfix).is_file() {
                return Some(path);
            }
        }

        None
    }

    fn parse_edition(&self, line: &str) -> Option<String> {
        self.parse_name_value_directive(line, "edition")
    }

    /// Sets `value` to true if `line` is the given boolean directive; once
    /// true it stays true (directives never unset a flag).
    fn set_name_directive(&self, line: &str, directive: &str, value: &mut bool) {
        if !*value {
            *value = self.parse_name_directive(line, directive)
        }
    }

    /// Fills `value` from a name-value directive, but only the first time:
    /// later occurrences of the same directive are ignored.
    fn set_name_value_directive<T>(
        &self,
        line: &str,
        directive: &str,
        value: &mut Option<T>,
        parse: impl FnOnce(String) -> T,
    ) {
        if value.is_none() {
            *value = self.parse_name_value_directive(line, directive).map(parse);
        }
    }

    /// Appends the parsed value of a name-value directive to `values`;
    /// unlike `set_name_value_directive`, repeated directives accumulate.
    fn push_name_value_directive<T>(
        &self,
        line: &str,
        directive: &str,
        values: &mut Vec<T>,
        parse: impl FnOnce(String) -> T,
    ) {
        if let Some(value) = self.parse_name_value_directive(line, directive).map(parse) {
            values.push(value);
        }
    }
}
+
+fn expand_variables(mut value: String, config: &Config) -> String {
+ const CWD: &str = "{{cwd}}";
+ const SRC_BASE: &str = "{{src-base}}";
+ const BUILD_BASE: &str = "{{build-base}}";
+
+ if value.contains(CWD) {
+ let cwd = env::current_dir().unwrap();
+ value = value.replace(CWD, &cwd.to_string_lossy());
+ }
+
+ if value.contains(SRC_BASE) {
+ value = value.replace(SRC_BASE, &config.src_base.to_string_lossy());
+ }
+
+ if value.contains(BUILD_BASE) {
+ value = value.replace(BUILD_BASE, &config.build_base.to_string_lossy());
+ }
+
+ value
+}
+
/// Finds the next quoted string `"..."` in `line`, and extract the content from it. Move the `line`
/// variable after the end of the quoted string.
///
/// # Examples
///
/// ```
/// let mut s = "normalize-stderr-32bit: \"something (32 bits)\" -> \"something ($WORD bits)\".";
/// let first = parse_normalization_string(&mut s);
/// assert_eq!(first, Some("something (32 bits)".to_owned()));
/// assert_eq!(s, " -> \"something ($WORD bits)\".");
/// ```
fn parse_normalization_string(line: &mut &str) -> Option<String> {
    // FIXME support escapes in strings.
    let open = line.find('"')? + 1;
    let close = open + line[open..].find('"')?;
    let content = line[open..close].to_owned();
    *line = &line[close + 1..];
    Some(content)
}
+
/// Encodes an LLVM version string such as `"14.0.5"` (optionally followed by
/// a non-numeric suffix, e.g. `"10.0.0-rust"`) as a single integer of the
/// form `major * 10_000 + minor * 100 + patch`. Panics on malformed input.
pub fn extract_llvm_version(version: &str) -> Option<u32> {
    // Cut the string at the first character that is neither a digit nor a
    // dot; everything after it (e.g. a `-rust` suffix) is ignored.
    let numeric_end =
        version.find(|c: char| !c.is_ascii_digit() && c != '.').unwrap_or(version.len());
    let components: Vec<u32> = version[..numeric_end]
        .split('.')
        .map(|s| s.parse().expect("Malformed version component"))
        .collect();
    let encoded = match components[..] {
        [major] => major * 10_000,
        [major, minor] => major * 10_000 + minor * 100,
        [major, minor, patch] => major * 10_000 + minor * 100 + patch,
        _ => panic!("Malformed version"),
    };
    Some(encoded)
}
+
/// Takes a directive of the form "<version1> [- <version2>]",
/// returns the numeric representation of <version1> and <version2> as
/// tuple: (<version1> as u32, <version2> as u32)
///
/// If the <version2> part is omitted, the second component of the tuple
/// is the same as <version1>.
fn extract_version_range<F>(line: &str, parse: F) -> Option<(u32, u32)>
where
    F: Fn(&str) -> Option<u32>,
{
    let mut parts = line.splitn(2, "- ").map(str::trim);

    let min_str = parts.next().unwrap();
    // Reject an empty lower bound and a dangling `-` (a range with no space
    // after the dash never splits, so the dash survives in `min_str`).
    if min_str.is_empty() || min_str.ends_with('-') {
        return None;
    }

    let min = parse(min_str)?;
    let max = match parts.next() {
        None => min,
        Some("") => return None,
        Some(max_str) => parse(max_str)?,
    };

    Some((min, max))
}
+
/// Builds the libtest `TestDesc` for one test by scanning its header for
/// `ignore-*`, `only-*`, `needs-*` and `should-fail` directives, restricted
/// to revision `cfg` when given. `ignore_message` is always `None` here.
pub fn make_test_description<R: Read>(
    config: &Config,
    name: test::TestName,
    path: &Path,
    src: R,
    cfg: Option<&str>,
) -> test::TestDesc {
    let mut ignore = false;
    let ignore_message = None;
    let mut should_fail = false;

    // Capability probes evaluated once up front; the per-line closure below
    // only consults these booleans.
    let rustc_has_profiler_support = env::var_os("RUSTC_PROFILER_SUPPORT").is_some();
    let rustc_has_sanitizer_support = env::var_os("RUSTC_SANITIZER_SUPPORT").is_some();
    let has_asm_support = util::has_asm_support(&config.target);
    let has_asan = util::ASAN_SUPPORTED_TARGETS.contains(&&*config.target);
    let has_cfi = util::CFI_SUPPORTED_TARGETS.contains(&&*config.target);
    let has_lsan = util::LSAN_SUPPORTED_TARGETS.contains(&&*config.target);
    let has_msan = util::MSAN_SUPPORTED_TARGETS.contains(&&*config.target);
    let has_tsan = util::TSAN_SUPPORTED_TARGETS.contains(&&*config.target);
    let has_hwasan = util::HWASAN_SUPPORTED_TARGETS.contains(&&*config.target);
    let has_memtag = util::MEMTAG_SUPPORTED_TARGETS.contains(&&*config.target);
    let has_shadow_call_stack = util::SHADOWCALLSTACK_SUPPORTED_TARGETS.contains(&&*config.target);
    // for `-Z gcc-ld=lld`
    let has_rust_lld = config
        .compile_lib_path
        .join("rustlib")
        .join(&config.target)
        .join("bin")
        .join("gcc-ld")
        .join(if config.host.contains("windows") { "ld.exe" } else { "ld" })
        .exists();
    iter_header(path, src, &mut |revision, ln| {
        if revision.is_some() && revision != cfg {
            return;
        }
        // `ignore-*` can only turn `ignore` on, never back off.
        ignore = match config.parse_cfg_name_directive(ln, "ignore") {
            ParsedNameDirective::Match => true,
            ParsedNameDirective::NoMatch => ignore,
        };
        // `only-*` ignores the test on every configuration that does NOT match.
        if config.has_cfg_prefix(ln, "only") {
            ignore = match config.parse_cfg_name_directive(ln, "only") {
                ParsedNameDirective::Match => ignore,
                ParsedNameDirective::NoMatch => true,
            };
        }
        // NOTE(review): `ignore_llvm` is defined elsewhere in this file;
        // presumably it handles LLVM version/component requirements.
        ignore |= ignore_llvm(config, ln);
        ignore |=
            config.run_clang_based_tests_with.is_none() && config.parse_needs_matching_clang(ln);
        ignore |= !has_asm_support && config.parse_name_directive(ln, "needs-asm-support");
        ignore |= !rustc_has_profiler_support && config.parse_needs_profiler_support(ln);
        ignore |= !config.run_enabled() && config.parse_name_directive(ln, "needs-run-enabled");
        ignore |= !rustc_has_sanitizer_support
            && config.parse_name_directive(ln, "needs-sanitizer-support");
        ignore |= !has_asan && config.parse_name_directive(ln, "needs-sanitizer-address");
        ignore |= !has_cfi && config.parse_name_directive(ln, "needs-sanitizer-cfi");
        ignore |= !has_lsan && config.parse_name_directive(ln, "needs-sanitizer-leak");
        ignore |= !has_msan && config.parse_name_directive(ln, "needs-sanitizer-memory");
        ignore |= !has_tsan && config.parse_name_directive(ln, "needs-sanitizer-thread");
        ignore |= !has_hwasan && config.parse_name_directive(ln, "needs-sanitizer-hwaddress");
        ignore |= !has_memtag && config.parse_name_directive(ln, "needs-sanitizer-memtag");
        ignore |= !has_shadow_call_stack
            && config.parse_name_directive(ln, "needs-sanitizer-shadow-call-stack");
        ignore |= config.target_panic == PanicStrategy::Abort
            && config.parse_name_directive(ln, "needs-unwind");
        ignore |= config.target == "wasm32-unknown-unknown"
            && config.parse_name_directive(ln, directives::CHECK_RUN_RESULTS);
        ignore |= config.debugger == Some(Debugger::Cdb) && ignore_cdb(config, ln);
        ignore |= config.debugger == Some(Debugger::Gdb) && ignore_gdb(config, ln);
        ignore |= config.debugger == Some(Debugger::Lldb) && ignore_lldb(config, ln);
        ignore |= !has_rust_lld && config.parse_name_directive(ln, "needs-rust-lld");
        should_fail |= config.parse_name_directive(ln, "should-fail");
    });

    // The `should-fail` annotation doesn't apply to pretty tests,
    // since we run the pretty printer across all tests by default.
    // If desired, we could add a `should-fail-pretty` annotation.
    let should_panic = match config.mode {
        crate::common::Pretty => test::ShouldPanic::No,
        _ if should_fail => test::ShouldPanic::Yes,
        _ => test::ShouldPanic::No,
    };

    test::TestDesc {
        name,
        ignore,
        ignore_message,
        should_panic,
        compile_fail: false,
        no_run: false,
        test_type: test::TestType::Unknown,
    }
}
+
+fn ignore_cdb(config: &Config, line: &str) -> bool {
+ if let Some(actual_version) = config.cdb_version {
+ if let Some(min_version) = line.strip_prefix("min-cdb-version:").map(str::trim) {
+ let min_version = extract_cdb_version(min_version).unwrap_or_else(|| {
+ panic!("couldn't parse version range: {:?}", min_version);
+ });
+
+ // Ignore if actual version is smaller than the minimum
+ // required version
+ return actual_version < min_version;
+ }
+ }
+ false
+}
+
/// Returns whether a `min-gdb-version:` or `ignore-gdb-version:` directive on
/// `line` means the test should be ignored under the GDB version in `config`.
///
/// Panics on malformed version strings or ranges.
fn ignore_gdb(config: &Config, line: &str) -> bool {
    if let Some(actual_version) = config.gdb_version {
        if let Some(rest) = line.strip_prefix("min-gdb-version:").map(str::trim) {
            let (start_ver, end_ver) = extract_version_range(rest, extract_gdb_version)
                .unwrap_or_else(|| {
                    panic!("couldn't parse version range: {:?}", rest);
                });

            // `min-gdb-version` takes a single version, not a range; a range
            // here is a header-writing mistake.
            if start_ver != end_ver {
                panic!("Expected single GDB version")
            }
            // Ignore if actual version is smaller than the minimum
            // required version
            return actual_version < start_ver;
        } else if let Some(rest) = line.strip_prefix("ignore-gdb-version:").map(str::trim) {
            let (min_version, max_version) = extract_version_range(rest, extract_gdb_version)
                .unwrap_or_else(|| {
                    panic!("couldn't parse version range: {:?}", rest);
                });

            if max_version < min_version {
                panic!("Malformed GDB version range: max < min")
            }

            // Ignore if the actual version lies inside the range
            // (inclusive on both ends).
            return actual_version >= min_version && actual_version <= max_version;
        }
    }
    false
}
+
+fn ignore_lldb(config: &Config, line: &str) -> bool {
+ if let Some(actual_version) = config.lldb_version {
+ if let Some(min_version) = line.strip_prefix("min-lldb-version:").map(str::trim) {
+ let min_version = min_version.parse().unwrap_or_else(|e| {
+ panic!("Unexpected format of LLDB version string: {}\n{:?}", min_version, e);
+ });
+ // Ignore if actual version is smaller the minimum required
+ // version
+ actual_version < min_version
+ } else {
+ line.starts_with("rust-lldb") && !config.lldb_native_rust
+ }
+ } else {
+ false
+ }
+}
+
/// Returns whether an LLVM-related directive on `line` means the test should
/// be ignored under the LLVM configuration in `config` (system vs. bundled
/// LLVM, built-in components, version bounds).
fn ignore_llvm(config: &Config, line: &str) -> bool {
    if config.system_llvm && line.starts_with("no-system-llvm") {
        return true;
    }
    if let Some(needed_components) =
        config.parse_name_value_directive(line, "needs-llvm-components")
    {
        // Both the directive value and `config.llvm_components` are
        // whitespace-separated component lists.
        let components: HashSet<_> = config.llvm_components.split_whitespace().collect();
        if let Some(missing_component) = needed_components
            .split_whitespace()
            .find(|needed_component| !components.contains(needed_component))
        {
            // When this env var is set, a missing component is a hard error
            // instead of a silent skip.
            if env::var_os("COMPILETEST_NEEDS_ALL_LLVM_COMPONENTS").is_some() {
                panic!("missing LLVM component: {}", missing_component);
            }
            return true;
        }
    }
    if let Some(actual_version) = config.llvm_version {
        if let Some(rest) = line.strip_prefix("min-llvm-version:").map(str::trim) {
            let min_version = extract_llvm_version(rest).unwrap();
            // Ignore if actual version is smaller the minimum required
            // version
            actual_version < min_version
        } else if let Some(rest) = line.strip_prefix("min-system-llvm-version:").map(str::trim) {
            let min_version = extract_llvm_version(rest).unwrap();
            // Ignore if using system LLVM and actual version
            // is smaller the minimum required version
            config.system_llvm && actual_version < min_version
        } else if let Some(rest) = line.strip_prefix("ignore-llvm-version:").map(str::trim) {
            // Syntax is: "ignore-llvm-version: <version1> [- <version2>]"
            let (v_min, v_max) =
                extract_version_range(rest, extract_llvm_version).unwrap_or_else(|| {
                    panic!("couldn't parse version range: {:?}", rest);
                });
            if v_max < v_min {
                panic!("Malformed LLVM version range: max < min")
            }
            // Ignore if version lies inside of range (inclusive on both ends).
            actual_version >= v_min && actual_version <= v_max
        } else {
            false
        }
    } else {
        false
    }
}
diff --git a/src/tools/compiletest/src/header/tests.rs b/src/tools/compiletest/src/header/tests.rs
new file mode 100644
index 000000000..a8fd4880f
--- /dev/null
+++ b/src/tools/compiletest/src/header/tests.rs
@@ -0,0 +1,283 @@
+use std::path::Path;
+
+use crate::common::{Config, Debugger};
+use crate::header::{make_test_description, parse_normalization_string, EarlyProps};
+
#[test]
fn test_parse_normalization_string() {
    // Happy path: the first quoted string is extracted and `s` is advanced
    // past its closing quote.
    let mut s = "normalize-stderr-32bit: \"something (32 bits)\" -> \"something ($WORD bits)\".";
    let first = parse_normalization_string(&mut s);
    assert_eq!(first, Some("something (32 bits)".to_owned()));
    assert_eq!(s, " -> \"something ($WORD bits)\".");

    // Nothing to normalize (No quotes)
    let mut s = "normalize-stderr-32bit: something (32 bits) -> something ($WORD bits).";
    let first = parse_normalization_string(&mut s);
    assert_eq!(first, None);
    assert_eq!(s, r#"normalize-stderr-32bit: something (32 bits) -> something ($WORD bits)."#);

    // Nothing to normalize (Only a single quote)
    let mut s = "normalize-stderr-32bit: \"something (32 bits) -> something ($WORD bits).";
    let first = parse_normalization_string(&mut s);
    assert_eq!(first, None);
    assert_eq!(s, "normalize-stderr-32bit: \"something (32 bits) -> something ($WORD bits).");

    // Three quotes: the first balanced pair still parses; the trailing
    // unbalanced quote is simply left in the remainder.
    let mut s = "normalize-stderr-32bit: \"something (32 bits)\" -> \"something ($WORD bits).";
    let first = parse_normalization_string(&mut s);
    assert_eq!(first, Some("something (32 bits)".to_owned()));
    assert_eq!(s, " -> \"something ($WORD bits).");

    // Nothing to normalize (No quotes, 16-bit)
    let mut s = "normalize-stderr-16bit: something (16 bits) -> something ($WORD bits).";
    let first = parse_normalization_string(&mut s);
    assert_eq!(first, None);
    assert_eq!(s, r#"normalize-stderr-16bit: something (16 bits) -> something ($WORD bits)."#);
}
+
/// Builds a minimal `Config` for header tests by running the real CLI parser
/// over a bare-bones argument list (most paths are intentionally empty).
fn config() -> Config {
    let args = &[
        "compiletest",
        "--mode=ui",
        "--suite=ui",
        "--compile-lib-path=",
        "--run-lib-path=",
        "--rustc-path=",
        "--python=",
        "--jsondocck-path=",
        "--src-base=",
        "--build-base=",
        "--stage-id=stage2",
        "--cc=c",
        "--cxx=c++",
        "--cflags=",
        "--cxxflags=",
        "--llvm-components=",
        "--android-cross-path=",
        "--target=x86_64-unknown-linux-gnu",
        "--channel=nightly",
    ];
    let args = args.iter().map(ToString::to_string).collect();
    crate::parse_config(args)
}
+
/// Parses `contents` as a Rust test file (named `a.rs`) and returns its early props.
fn parse_rs(config: &Config, contents: &str) -> EarlyProps {
    let bytes = contents.as_bytes();
    EarlyProps::from_reader(config, Path::new("a.rs"), bytes)
}

/// Builds a test description for `contents` and reports whether it would be ignored.
fn check_ignore(config: &Config, contents: &str) -> bool {
    let tn = test::DynTestName(String::new());
    let p = Path::new("a.rs");
    let d = make_test_description(&config, tn, p, std::io::Cursor::new(contents), None);
    d.ignore
}

/// Parses `contents` as a Makefile; run-make headers use `#` comments.
fn parse_makefile(config: &Config, contents: &str) -> EarlyProps {
    let bytes = contents.as_bytes();
    EarlyProps::from_reader(config, Path::new("Makefile"), bytes)
}
+
// `should-fail` flips the expected panic outcome of a test.
#[test]
fn should_fail() {
    let config = config();
    let tn = test::DynTestName(String::new());
    let p = Path::new("a.rs");

    let d = make_test_description(&config, tn.clone(), p, std::io::Cursor::new(""), None);
    assert_eq!(d.should_panic, test::ShouldPanic::No);
    let d = make_test_description(&config, tn, p, std::io::Cursor::new("// should-fail"), None);
    assert_eq!(d.should_panic, test::ShouldPanic::Yes);
}

// `revisions:` is parsed from both Rust (`//`) and Makefile (`#`) comments.
#[test]
fn revisions() {
    let config = config();

    assert_eq!(parse_rs(&config, "// revisions: a b c").revisions, vec!["a", "b", "c"],);
    assert_eq!(
        parse_makefile(&config, "# revisions: hello there").revisions,
        vec!["hello", "there"],
    );
}

// Multiple `aux-build:` directives accumulate in order.
#[test]
fn aux_build() {
    let config = config();

    assert_eq!(
        parse_rs(
            &config,
            r"
        // aux-build: a.rs
        // aux-build: b.rs
        "
        )
        .aux,
        vec!["a.rs", "b.rs"],
    );
}

// `no-system-llvm` ignores the test only when building against a system LLVM.
#[test]
fn no_system_llvm() {
    let mut config = config();

    config.system_llvm = false;
    assert!(!check_ignore(&config, "// no-system-llvm"));

    config.system_llvm = true;
    assert!(check_ignore(&config, "// no-system-llvm"));
}

// `min-llvm-version` compares the encoded version number
// (e.g. "9.1.2" encodes as 90102; see `test_extract_version_range`).
#[test]
fn llvm_version() {
    let mut config = config();

    config.llvm_version = Some(80102);
    assert!(check_ignore(&config, "// min-llvm-version: 9.0"));

    config.llvm_version = Some(90001);
    assert!(check_ignore(&config, "// min-llvm-version: 9.2"));

    config.llvm_version = Some(90301);
    assert!(!check_ignore(&config, "// min-llvm-version: 9.2"));

    config.llvm_version = Some(100000);
    assert!(!check_ignore(&config, "// min-llvm-version: 9.0"));
}
+
// `ignore-*` matches the full triple and each of its components
// (arch, OS, env, pointer width).
#[test]
fn ignore_target() {
    let mut config = config();
    config.target = "x86_64-unknown-linux-gnu".to_owned();

    assert!(check_ignore(&config, "// ignore-x86_64-unknown-linux-gnu"));
    assert!(check_ignore(&config, "// ignore-x86_64"));
    assert!(check_ignore(&config, "// ignore-linux"));
    assert!(check_ignore(&config, "// ignore-gnu"));
    assert!(check_ignore(&config, "// ignore-64bit"));

    assert!(!check_ignore(&config, "// ignore-i686"));
    assert!(!check_ignore(&config, "// ignore-windows"));
    assert!(!check_ignore(&config, "// ignore-msvc"));
    assert!(!check_ignore(&config, "// ignore-32bit"));
}

// `only-*` is the inverse: the test is ignored when the component does NOT match.
#[test]
fn only_target() {
    let mut config = config();
    config.target = "x86_64-pc-windows-gnu".to_owned();

    assert!(check_ignore(&config, "// only-x86"));
    assert!(check_ignore(&config, "// only-linux"));
    assert!(check_ignore(&config, "// only-msvc"));
    assert!(check_ignore(&config, "// only-32bit"));

    assert!(!check_ignore(&config, "// only-x86_64-pc-windows-gnu"));
    assert!(!check_ignore(&config, "// only-x86_64"));
    assert!(!check_ignore(&config, "// only-windows"));
    assert!(!check_ignore(&config, "// only-gnu"));
    assert!(!check_ignore(&config, "// only-64bit"));
}

// `ignore-stageN` matches against the configured stage id prefix.
#[test]
fn stage() {
    let mut config = config();
    config.stage_id = "stage1".to_owned();

    assert!(check_ignore(&config, "// ignore-stage1"));
    assert!(!check_ignore(&config, "// ignore-stage2"));
}

// `ignore-cross-compile` triggers only when host and target triples differ.
#[test]
fn cross_compile() {
    let mut config = config();
    config.host = "x86_64-apple-darwin".to_owned();
    config.target = "wasm32-unknown-unknown".to_owned();
    assert!(check_ignore(&config, "// ignore-cross-compile"));

    config.target = config.host.clone();
    assert!(!check_ignore(&config, "// ignore-cross-compile"));
}
+
// `ignore-<debugger>` applies only when that debugger is the active one.
#[test]
fn debugger() {
    let mut config = config();
    config.debugger = None;
    assert!(!check_ignore(&config, "// ignore-cdb"));

    config.debugger = Some(Debugger::Cdb);
    assert!(check_ignore(&config, "// ignore-cdb"));

    config.debugger = Some(Debugger::Gdb);
    assert!(check_ignore(&config, "// ignore-gdb"));

    config.debugger = Some(Debugger::Lldb);
    assert!(check_ignore(&config, "// ignore-lldb"));
}

// `needs-sanitizer-*` ignores the test when the target lacks that sanitizer.
#[test]
fn sanitizers() {
    let mut config = config();

    // Target that supports all sanitizers:
    config.target = "x86_64-unknown-linux-gnu".to_owned();
    assert!(!check_ignore(&config, "// needs-sanitizer-address"));
    assert!(!check_ignore(&config, "// needs-sanitizer-leak"));
    assert!(!check_ignore(&config, "// needs-sanitizer-memory"));
    assert!(!check_ignore(&config, "// needs-sanitizer-thread"));

    // Target that doesn't support sanitizers:
    config.target = "wasm32-unknown-emscripten".to_owned();
    assert!(check_ignore(&config, "// needs-sanitizer-address"));
    assert!(check_ignore(&config, "// needs-sanitizer-leak"));
    assert!(check_ignore(&config, "// needs-sanitizer-memory"));
    assert!(check_ignore(&config, "// needs-sanitizer-thread"));
}

// `needs-asm-support` ignores the test on targets without inline-asm support.
#[test]
fn asm_support() {
    let mut config = config();

    config.target = "avr-unknown-gnu-atmega328".to_owned();
    assert!(check_ignore(&config, "// needs-asm-support"));

    config.target = "i686-unknown-netbsd".to_owned();
    assert!(!check_ignore(&config, "// needs-asm-support"));
}

// `ignore-<channel>` / `only-<channel>` compare against the configured release channel.
#[test]
fn channel() {
    let mut config = config();
    config.channel = "beta".into();

    assert!(check_ignore(&config, "// ignore-beta"));
    assert!(check_ignore(&config, "// only-nightly"));
    assert!(check_ignore(&config, "// only-stable"));

    assert!(!check_ignore(&config, "// only-beta"));
    assert!(!check_ignore(&config, "// ignore-nightly"));
    assert!(!check_ignore(&config, "// ignore-stable"));
}

// Version ranges encode "a.b.c" as a*10000 + b*100 + c; both endpoints are
// required when a `-` separator is present.
#[test]
fn test_extract_version_range() {
    use super::{extract_llvm_version, extract_version_range};

    assert_eq!(extract_version_range("1.2.3 - 4.5.6", extract_llvm_version), Some((10203, 40506)));
    assert_eq!(extract_version_range("0 - 4.5.6", extract_llvm_version), Some((0, 40506)));
    assert_eq!(extract_version_range("1.2.3 -", extract_llvm_version), None);
    assert_eq!(extract_version_range("1.2.3 - ", extract_llvm_version), None);
    assert_eq!(extract_version_range("- 4.5.6", extract_llvm_version), None);
    assert_eq!(extract_version_range("-", extract_llvm_version), None);
    assert_eq!(extract_version_range(" - 4.5.6", extract_llvm_version), None);
    assert_eq!(extract_version_range(" - 4.5.6", extract_llvm_version), None);
    assert_eq!(extract_version_range("0 -", extract_llvm_version), None);
}

// Repeating a revision name in `revisions:` is a hard error.
#[test]
#[should_panic(expected = "Duplicate revision: `rpass1` in line ` rpass1 rpass1`")]
fn test_duplicate_revisions() {
    let config = config();
    parse_rs(&config, "// revisions: rpass1 rpass1");
}
diff --git a/src/tools/compiletest/src/json.rs b/src/tools/compiletest/src/json.rs
new file mode 100644
index 000000000..10726b984
--- /dev/null
+++ b/src/tools/compiletest/src/json.rs
@@ -0,0 +1,321 @@
+//! These structs are a subset of the ones found in `rustc_errors::json`.
+//! They are only used for deserialization of JSON output provided by libtest.
+
+use crate::errors::{Error, ErrorKind};
+use crate::runtest::ProcRes;
+use serde::Deserialize;
+use std::path::{Path, PathBuf};
+use std::str::FromStr;
+
/// A single compiler diagnostic as emitted in `--error-format=json` output.
/// Mirrors (a subset of) `rustc_errors::json::Diagnostic`.
#[derive(Deserialize)]
struct Diagnostic {
    message: String,
    code: Option<DiagnosticCode>,
    // Severity as a string, e.g. "error" / "warning" (parsed via `ErrorKind::from_str`).
    level: String,
    spans: Vec<DiagnosticSpan>,
    // Sub-diagnostics (notes, helps, suggestions) nested under this one.
    children: Vec<Diagnostic>,
    // The human-readable rendering of the whole diagnostic, if present.
    rendered: Option<String>,
}

/// Notification that the compiler wrote an artifact; only recognized so it
/// can be skipped, hence the dead-code field.
#[derive(Deserialize)]
struct ArtifactNotification {
    #[allow(dead_code)]
    artifact: PathBuf,
}

/// Notification about unused `--extern` crates; recognized only to be skipped.
#[derive(Deserialize)]
struct UnusedExternNotification {
    #[allow(dead_code)]
    lint_level: String,
    #[allow(dead_code)]
    unused_extern_names: Vec<String>,
}

/// A source span attached to a diagnostic (1-based line/column coordinates,
/// per the compiler's JSON format).
#[derive(Deserialize, Clone)]
struct DiagnosticSpan {
    file_name: String,
    line_start: usize,
    line_end: usize,
    column_start: usize,
    column_end: usize,
    is_primary: bool,
    label: Option<String>,
    suggested_replacement: Option<String>,
    // The macro expansion this span came from, if any; boxed because the
    // type is recursive.
    expansion: Option<Box<DiagnosticSpanMacroExpansion>>,
}

/// Wrapper for the `future_incompat_report` JSON message.
#[derive(Deserialize)]
struct FutureIncompatReport {
    future_incompat_report: Vec<FutureBreakageItem>,
}

/// One entry of a future-incompatibility report.
#[derive(Deserialize)]
struct FutureBreakageItem {
    diagnostic: Diagnostic,
}
+
+impl DiagnosticSpan {
+ /// Returns the deepest source span in the macro call stack with a given file name.
+ /// This is either the supplied span, or the span for some macro callsite that expanded to it.
+ fn first_callsite_in_file(&self, file_name: &str) -> &DiagnosticSpan {
+ if self.file_name == file_name {
+ self
+ } else {
+ self.expansion
+ .as_ref()
+ .map(|origin| origin.span.first_callsite_in_file(file_name))
+ .unwrap_or(self)
+ }
+ }
+}
+
/// One frame of a macro expansion backtrace attached to a span.
#[derive(Deserialize, Clone)]
struct DiagnosticSpanMacroExpansion {
    /// span where macro was applied to generate this code
    span: DiagnosticSpan,

    /// name of macro that was applied (e.g., "foo!" or "#[derive(Eq)]")
    macro_decl_name: String,
}

/// The machine-readable error code attached to a diagnostic (e.g. "E0308").
#[derive(Deserialize, Clone)]
struct DiagnosticCode {
    /// The code itself.
    code: String,
}
+
+pub fn rustfix_diagnostics_only(output: &str) -> String {
+ output
+ .lines()
+ .filter(|line| line.starts_with('{') && serde_json::from_str::<Diagnostic>(line).is_ok())
+ .collect()
+}
+
/// Extracts the human-readable ("rendered") text from mixed JSON/plain
/// compiler output. Non-JSON lines (e.g. ICE backtraces) are preserved
/// verbatim; known notification messages are dropped; anything else that
/// starts with `{` but fails to deserialize is a hard error.
pub fn extract_rendered(output: &str) -> String {
    output
        .lines()
        .filter_map(|line| {
            if line.starts_with('{') {
                // Probe the known message shapes in order; the first one that
                // deserializes wins.
                if let Ok(diagnostic) = serde_json::from_str::<Diagnostic>(line) {
                    diagnostic.rendered
                } else if let Ok(report) = serde_json::from_str::<FutureIncompatReport>(line) {
                    if report.future_incompat_report.is_empty() {
                        None
                    } else {
                        Some(format!(
                            "Future incompatibility report: {}",
                            report
                                .future_incompat_report
                                .into_iter()
                                .map(|item| {
                                    format!(
                                        "Future breakage diagnostic:\n{}",
                                        item.diagnostic
                                            .rendered
                                            .unwrap_or_else(|| "Not rendered".to_string())
                                    )
                                })
                                .collect::<String>()
                        ))
                    }
                } else if serde_json::from_str::<ArtifactNotification>(line).is_ok() {
                    // Ignore the notification.
                    None
                } else if serde_json::from_str::<UnusedExternNotification>(line).is_ok() {
                    // Ignore the notification.
                    None
                } else {
                    print!(
                        "failed to decode compiler output as json: line: {}\noutput: {}",
                        line, output
                    );
                    panic!()
                }
            } else {
                // preserve non-JSON lines, such as ICEs
                Some(format!("{}\n", line))
            }
        })
        .collect()
}
+
+pub fn parse_output(file_name: &str, output: &str, proc_res: &ProcRes) -> Vec<Error> {
+ output.lines().flat_map(|line| parse_line(file_name, line, output, proc_res)).collect()
+}
+
/// Parses one line of compiler output into zero or more expected errors.
/// Non-JSON lines yield nothing; undecodable JSON aborts the test run via
/// `proc_res.fatal` (which does not return — note the `-> Vec<Error>` type
/// still checks because of that).
fn parse_line(file_name: &str, line: &str, output: &str, proc_res: &ProcRes) -> Vec<Error> {
    // The compiler sometimes intermingles non-JSON stuff into the
    // output. This hack just skips over such lines. Yuck.
    if line.starts_with('{') {
        match serde_json::from_str::<Diagnostic>(line) {
            Ok(diagnostic) => {
                let mut expected_errors = vec![];
                push_expected_errors(&mut expected_errors, &diagnostic, &[], file_name);
                expected_errors
            }
            Err(error) => {
                // Ignore the future compat report message - this is handled
                // by `extract_rendered`
                if serde_json::from_str::<FutureIncompatReport>(line).is_ok() {
                    vec![]
                } else {
                    proc_res.fatal(
                        Some(&format!(
                            "failed to decode compiler output as json: \
                             `{}`\nline: {}\noutput: {}",
                            error, line, output
                        )),
                        || (),
                    );
                }
            }
        }
    } else {
        vec![]
    }
}
+
/// Converts one `Diagnostic` (and, recursively, its children) into the
/// expected-error entries for `file_name`, appending them to
/// `expected_errors`. `default_spans` supplies spans for sub-diagnostics
/// that carry none of their own.
fn push_expected_errors(
    expected_errors: &mut Vec<Error>,
    diagnostic: &Diagnostic,
    default_spans: &[&DiagnosticSpan],
    file_name: &str,
) {
    // In case of macro expansions, we need to get the span of the callsite
    let spans_info_in_this_file: Vec<_> = diagnostic
        .spans
        .iter()
        .map(|span| (span.is_primary, span.first_callsite_in_file(file_name)))
        .filter(|(_, span)| Path::new(&span.file_name) == Path::new(&file_name))
        .collect();

    let spans_in_this_file: Vec<_> = spans_info_in_this_file.iter().map(|(_, span)| span).collect();

    let primary_spans: Vec<_> = spans_info_in_this_file
        .iter()
        .filter(|(is_primary, _)| *is_primary)
        .map(|(_, span)| span)
        .take(1) // sometimes we have more than one showing up in the json; pick first
        .cloned()
        .collect();
    let primary_spans = if primary_spans.is_empty() {
        // subdiagnostics often don't have a span of their own;
        // inherit the span from the parent in that case
        default_spans
    } else {
        &primary_spans
    };

    // We break the output into multiple lines, and then append the
    // [E123] to every line in the output. This may be overkill. The
    // intention was to match existing tests that do things like "//|
    // found `i32` [E123]" and expect to match that somewhere, and yet
    // also ensure that `//~ ERROR E123` *always* works. The
    // assumption is that these multi-line error messages are on their
    // way out anyhow.
    let with_code = |span: &DiagnosticSpan, text: &str| {
        match diagnostic.code {
            Some(ref code) =>
            // FIXME(#33000) -- it'd be better to use a dedicated
            // UI harness than to include the line/col number like
            // this, but some current tests rely on it.
            //
            // Note: Do NOT include the filename. These can easily
            // cause false matches where the expected message
            // appears in the filename, and hence the message
            // changes but the test still passes.
            {
                format!(
                    "{}:{}: {}:{}: {} [{}]",
                    span.line_start,
                    span.column_start,
                    span.line_end,
                    span.column_end,
                    text,
                    code.code.clone()
                )
            }
            None =>
            // FIXME(#33000) -- it'd be better to use a dedicated UI harness
            {
                format!(
                    "{}:{}: {}:{}: {}",
                    span.line_start, span.column_start, span.line_end, span.column_end, text
                )
            }
        }
    };

    // Convert multi-line messages into multiple expected
    // errors. We expect to replace these with something
    // more structured shortly anyhow.
    // Only the first message line carries the diagnostic's kind; the
    // continuation lines are pushed with `kind: None`.
    let mut message_lines = diagnostic.message.lines();
    if let Some(first_line) = message_lines.next() {
        for span in primary_spans {
            let msg = with_code(span, first_line);
            let kind = ErrorKind::from_str(&diagnostic.level).ok();
            expected_errors.push(Error { line_num: span.line_start, kind, msg });
        }
    }
    for next_line in message_lines {
        for span in primary_spans {
            expected_errors.push(Error {
                line_num: span.line_start,
                kind: None,
                msg: with_code(span, next_line),
            });
        }
    }

    // If the message has a suggestion, register that.
    // Each line of the replacement becomes its own Suggestion entry,
    // offset from the span's starting line.
    for span in primary_spans {
        if let Some(ref suggested_replacement) = span.suggested_replacement {
            for (index, line) in suggested_replacement.lines().enumerate() {
                expected_errors.push(Error {
                    line_num: span.line_start + index,
                    kind: Some(ErrorKind::Suggestion),
                    msg: line.to_string(),
                });
            }
        }
    }

    // Add notes for the backtrace
    for span in primary_spans {
        for frame in &span.expansion {
            push_backtrace(expected_errors, frame, file_name);
        }
    }

    // Add notes for any labels that appear in the message.
    for span in spans_in_this_file.iter().filter(|span| span.label.is_some()) {
        expected_errors.push(Error {
            line_num: span.line_start,
            kind: Some(ErrorKind::Note),
            msg: span.label.clone().unwrap(),
        });
    }

    // Flatten out the children.
    for child in &diagnostic.children {
        push_expected_errors(expected_errors, child, primary_spans, file_name);
    }
}
+
+fn push_backtrace(
+ expected_errors: &mut Vec<Error>,
+ expansion: &DiagnosticSpanMacroExpansion,
+ file_name: &str,
+) {
+ if Path::new(&expansion.span.file_name) == Path::new(&file_name) {
+ expected_errors.push(Error {
+ line_num: expansion.span.line_start,
+ kind: Some(ErrorKind::Note),
+ msg: format!("in this expansion of {}", expansion.macro_decl_name),
+ });
+ }
+
+ for previous_expansion in &expansion.span.expansion {
+ push_backtrace(expected_errors, previous_expansion, file_name);
+ }
+}
diff --git a/src/tools/compiletest/src/main.rs b/src/tools/compiletest/src/main.rs
new file mode 100644
index 000000000..a8a151ca1
--- /dev/null
+++ b/src/tools/compiletest/src/main.rs
@@ -0,0 +1,1014 @@
+#![crate_name = "compiletest"]
+// The `test` crate is the only unstable feature
+// allowed here, just to share similar code.
+#![feature(test)]
+
+extern crate test;
+
+use crate::common::{
+ expected_output_path, output_base_dir, output_relative_path, PanicStrategy, UI_EXTENSIONS,
+};
+use crate::common::{CompareMode, Config, Debugger, Mode, PassMode, TestPaths};
+use crate::util::logv;
+use getopts::Options;
+use std::env;
+use std::ffi::OsString;
+use std::fs;
+use std::io::{self, ErrorKind};
+use std::path::{Path, PathBuf};
+use std::process::{Command, Stdio};
+use std::time::SystemTime;
+use test::ColorConfig;
+use tracing::*;
+use walkdir::WalkDir;
+
+use self::header::{make_test_description, EarlyProps};
+
+#[cfg(test)]
+mod tests;
+
+pub mod common;
+pub mod compute_diff;
+pub mod errors;
+pub mod header;
+mod json;
+mod raise_fd_limit;
+mod read2;
+pub mod runtest;
+pub mod util;
+
/// Entry point: parse CLI flags into a `Config`, validate tool availability,
/// then run the selected test suite.
fn main() {
    tracing_subscriber::fmt::init();

    let config = parse_config(env::args().collect());

    // `--force-valgrind` demands a Valgrind binary; fail fast if none was found.
    if config.valgrind_path.is_none() && config.force_valgrind {
        panic!("Can't find Valgrind to run Valgrind tests");
    }

    // Rustdoc-mode diffs rely on `tidy`; warn (don't fail) when it's missing.
    if !config.has_tidy && config.mode == Mode::Rustdoc {
        eprintln!("warning: `tidy` is not installed; diffs will not be generated");
    }

    log_config(&config);
    run_tests(config);
}
+
+pub fn parse_config(args: Vec<String>) -> Config {
+ let mut opts = Options::new();
+ opts.reqopt("", "compile-lib-path", "path to host shared libraries", "PATH")
+ .reqopt("", "run-lib-path", "path to target shared libraries", "PATH")
+ .reqopt("", "rustc-path", "path to rustc to use for compiling", "PATH")
+ .optopt("", "rustdoc-path", "path to rustdoc to use for compiling", "PATH")
+ .optopt("", "rust-demangler-path", "path to rust-demangler to use in tests", "PATH")
+ .reqopt("", "python", "path to python to use for doc tests", "PATH")
+ .optopt("", "jsondocck-path", "path to jsondocck to use for doc tests", "PATH")
+ .optopt("", "valgrind-path", "path to Valgrind executable for Valgrind tests", "PROGRAM")
+ .optflag("", "force-valgrind", "fail if Valgrind tests cannot be run under Valgrind")
+ .optopt("", "run-clang-based-tests-with", "path to Clang executable", "PATH")
+ .optopt("", "llvm-filecheck", "path to LLVM's FileCheck binary", "DIR")
+ .reqopt("", "src-base", "directory to scan for test files", "PATH")
+ .reqopt("", "build-base", "directory to deposit test outputs", "PATH")
+ .reqopt("", "stage-id", "the target-stage identifier", "stageN-TARGET")
+ .reqopt(
+ "",
+ "mode",
+ "which sort of compile tests to run",
+ "run-pass-valgrind | pretty | debug-info | codegen | rustdoc \
+ | rustdoc-json | codegen-units | incremental | run-make | ui | js-doc-test | mir-opt | assembly",
+ )
+ .reqopt(
+ "",
+ "suite",
+ "which suite of compile tests to run. used for nicer error reporting.",
+ "SUITE",
+ )
+ .optopt(
+ "",
+ "pass",
+ "force {check,build,run}-pass tests to this mode.",
+ "check | build | run",
+ )
+ .optopt("", "run", "whether to execute run-* tests", "auto | always | never")
+ .optflag("", "ignored", "run tests marked as ignored")
+ .optmulti("", "skip", "skip tests matching SUBSTRING. Can be passed multiple times", "SUBSTRING")
+ .optflag("", "exact", "filters match exactly")
+ .optopt(
+ "",
+ "runtool",
+ "supervisor program to run tests under \
+ (eg. emulator, valgrind)",
+ "PROGRAM",
+ )
+ .optmulti("", "host-rustcflags", "flags to pass to rustc for host", "FLAGS")
+ .optmulti("", "target-rustcflags", "flags to pass to rustc for target", "FLAGS")
+ .optflag("", "optimize-tests", "run tests with optimizations enabled")
+ .optopt("", "target-panic", "what panic strategy the target supports", "unwind | abort")
+ .optflag("", "verbose", "run tests verbosely, showing all output")
+ .optflag(
+ "",
+ "bless",
+ "overwrite stderr/stdout files instead of complaining about a mismatch",
+ )
+ .optflag("", "quiet", "print one character per test instead of one line")
+ .optopt("", "color", "coloring: auto, always, never", "WHEN")
+ .optopt("", "logfile", "file to log test execution to", "FILE")
+ .optopt("", "target", "the target to build for", "TARGET")
+ .optopt("", "host", "the host to build for", "HOST")
+ .optopt("", "cdb", "path to CDB to use for CDB debuginfo tests", "PATH")
+ .optopt("", "gdb", "path to GDB to use for GDB debuginfo tests", "PATH")
+ .optopt("", "lldb-version", "the version of LLDB used", "VERSION STRING")
+ .optopt("", "llvm-version", "the version of LLVM used", "VERSION STRING")
+ .optflag("", "system-llvm", "is LLVM the system LLVM")
+ .optopt("", "android-cross-path", "Android NDK standalone path", "PATH")
+ .optopt("", "adb-path", "path to the android debugger", "PATH")
+ .optopt("", "adb-test-dir", "path to tests for the android debugger", "PATH")
+ .optopt("", "lldb-python-dir", "directory containing LLDB's python module", "PATH")
+ .reqopt("", "cc", "path to a C compiler", "PATH")
+ .reqopt("", "cxx", "path to a C++ compiler", "PATH")
+ .reqopt("", "cflags", "flags for the C compiler", "FLAGS")
+ .reqopt("", "cxxflags", "flags for the CXX compiler", "FLAGS")
+ .optopt("", "ar", "path to an archiver", "PATH")
+ .optopt("", "linker", "path to a linker", "PATH")
+ .reqopt("", "llvm-components", "list of LLVM components built in", "LIST")
+ .optopt("", "llvm-bin-dir", "Path to LLVM's `bin` directory", "PATH")
+ .optopt("", "nodejs", "the name of nodejs", "PATH")
+ .optopt("", "npm", "the name of npm", "PATH")
+ .optopt("", "remote-test-client", "path to the remote test client", "PATH")
+ .optopt(
+ "",
+ "compare-mode",
+ "mode describing what file the actual ui output will be compared to",
+ "COMPARE MODE",
+ )
+ .optflag(
+ "",
+ "rustfix-coverage",
+ "enable this to generate a Rustfix coverage file, which is saved in \
+ `./<build_base>/rustfix_missing_coverage.txt`",
+ )
+ .optflag("", "force-rerun", "rerun tests even if the inputs are unchanged")
+ .optflag("h", "help", "show this message")
+ .reqopt("", "channel", "current Rust channel", "CHANNEL")
+ .optopt("", "edition", "default Rust edition", "EDITION");
+
+ let (argv0, args_) = args.split_first().unwrap();
+ if args.len() == 1 || args[1] == "-h" || args[1] == "--help" {
+ let message = format!("Usage: {} [OPTIONS] [TESTNAME...]", argv0);
+ println!("{}", opts.usage(&message));
+ println!();
+ panic!()
+ }
+
+ let matches = &match opts.parse(args_) {
+ Ok(m) => m,
+ Err(f) => panic!("{:?}", f),
+ };
+
+ if matches.opt_present("h") || matches.opt_present("help") {
+ let message = format!("Usage: {} [OPTIONS] [TESTNAME...]", argv0);
+ println!("{}", opts.usage(&message));
+ println!();
+ panic!()
+ }
+
+ fn opt_path(m: &getopts::Matches, nm: &str) -> PathBuf {
+ match m.opt_str(nm) {
+ Some(s) => PathBuf::from(&s),
+ None => panic!("no option (=path) found for {}", nm),
+ }
+ }
+
+ fn make_absolute(path: PathBuf) -> PathBuf {
+ if path.is_relative() { env::current_dir().unwrap().join(path) } else { path }
+ }
+
+ let target = opt_str2(matches.opt_str("target"));
+ let android_cross_path = opt_path(matches, "android-cross-path");
+ let (cdb, cdb_version) = analyze_cdb(matches.opt_str("cdb"), &target);
+ let (gdb, gdb_version, gdb_native_rust) =
+ analyze_gdb(matches.opt_str("gdb"), &target, &android_cross_path);
+ let (lldb_version, lldb_native_rust) = matches
+ .opt_str("lldb-version")
+ .as_deref()
+ .and_then(extract_lldb_version)
+ .map(|(v, b)| (Some(v), b))
+ .unwrap_or((None, false));
+ let color = match matches.opt_str("color").as_deref() {
+ Some("auto") | None => ColorConfig::AutoColor,
+ Some("always") => ColorConfig::AlwaysColor,
+ Some("never") => ColorConfig::NeverColor,
+ Some(x) => panic!("argument for --color must be auto, always, or never, but found `{}`", x),
+ };
+ let llvm_version =
+ matches.opt_str("llvm-version").as_deref().and_then(header::extract_llvm_version);
+
+ let src_base = opt_path(matches, "src-base");
+ let run_ignored = matches.opt_present("ignored");
+ let mode = matches.opt_str("mode").unwrap().parse().expect("invalid mode");
+ let has_tidy = if mode == Mode::Rustdoc {
+ Command::new("tidy")
+ .arg("--version")
+ .stdout(Stdio::null())
+ .status()
+ .map_or(false, |status| status.success())
+ } else {
+ // Avoid spawning an external command when we know tidy won't be used.
+ false
+ };
+ Config {
+ bless: matches.opt_present("bless"),
+ compile_lib_path: make_absolute(opt_path(matches, "compile-lib-path")),
+ run_lib_path: make_absolute(opt_path(matches, "run-lib-path")),
+ rustc_path: opt_path(matches, "rustc-path"),
+ rustdoc_path: matches.opt_str("rustdoc-path").map(PathBuf::from),
+ rust_demangler_path: matches.opt_str("rust-demangler-path").map(PathBuf::from),
+ python: matches.opt_str("python").unwrap(),
+ jsondocck_path: matches.opt_str("jsondocck-path"),
+ valgrind_path: matches.opt_str("valgrind-path"),
+ force_valgrind: matches.opt_present("force-valgrind"),
+ run_clang_based_tests_with: matches.opt_str("run-clang-based-tests-with"),
+ llvm_filecheck: matches.opt_str("llvm-filecheck").map(PathBuf::from),
+ llvm_bin_dir: matches.opt_str("llvm-bin-dir").map(PathBuf::from),
+ src_base,
+ build_base: opt_path(matches, "build-base"),
+ stage_id: matches.opt_str("stage-id").unwrap(),
+ mode,
+ suite: matches.opt_str("suite").unwrap(),
+ debugger: None,
+ run_ignored,
+ filters: matches.free.clone(),
+ skip: matches.opt_strs("skip"),
+ filter_exact: matches.opt_present("exact"),
+ force_pass_mode: matches.opt_str("pass").map(|mode| {
+ mode.parse::<PassMode>()
+ .unwrap_or_else(|_| panic!("unknown `--pass` option `{}` given", mode))
+ }),
+ run: matches.opt_str("run").and_then(|mode| match mode.as_str() {
+ "auto" => None,
+ "always" => Some(true),
+ "never" => Some(false),
+ _ => panic!("unknown `--run` option `{}` given", mode),
+ }),
+ logfile: matches.opt_str("logfile").map(|s| PathBuf::from(&s)),
+ runtool: matches.opt_str("runtool"),
+ host_rustcflags: Some(matches.opt_strs("host-rustcflags").join(" ")),
+ target_rustcflags: Some(matches.opt_strs("target-rustcflags").join(" ")),
+ optimize_tests: matches.opt_present("optimize-tests"),
+ target_panic: match matches.opt_str("target-panic").as_deref() {
+ Some("unwind") | None => PanicStrategy::Unwind,
+ Some("abort") => PanicStrategy::Abort,
+ _ => panic!("unknown `--target-panic` option `{}` given", mode),
+ },
+ target,
+ host: opt_str2(matches.opt_str("host")),
+ cdb,
+ cdb_version,
+ gdb,
+ gdb_version,
+ gdb_native_rust,
+ lldb_version,
+ lldb_native_rust,
+ llvm_version,
+ system_llvm: matches.opt_present("system-llvm"),
+ android_cross_path,
+ adb_path: opt_str2(matches.opt_str("adb-path")),
+ adb_test_dir: opt_str2(matches.opt_str("adb-test-dir")),
+ adb_device_status: opt_str2(matches.opt_str("target")).contains("android")
+ && "(none)" != opt_str2(matches.opt_str("adb-test-dir"))
+ && !opt_str2(matches.opt_str("adb-test-dir")).is_empty(),
+ lldb_python_dir: matches.opt_str("lldb-python-dir"),
+ verbose: matches.opt_present("verbose"),
+ quiet: matches.opt_present("quiet"),
+ color,
+ remote_test_client: matches.opt_str("remote-test-client").map(PathBuf::from),
+ compare_mode: matches.opt_str("compare-mode").map(CompareMode::parse),
+ rustfix_coverage: matches.opt_present("rustfix-coverage"),
+ has_tidy,
+ channel: matches.opt_str("channel").unwrap(),
+ edition: matches.opt_str("edition"),
+
+ cc: matches.opt_str("cc").unwrap(),
+ cxx: matches.opt_str("cxx").unwrap(),
+ cflags: matches.opt_str("cflags").unwrap(),
+ cxxflags: matches.opt_str("cxxflags").unwrap(),
+ ar: matches.opt_str("ar").unwrap_or_else(|| String::from("ar")),
+ linker: matches.opt_str("linker"),
+ llvm_components: matches.opt_str("llvm-components").unwrap(),
+ nodejs: matches.opt_str("nodejs"),
+ npm: matches.opt_str("npm"),
+
+ force_rerun: matches.opt_present("force-rerun"),
+ }
+}
+
+pub fn log_config(config: &Config) {
+ let c = config;
+ logv(c, "configuration:".to_string());
+ logv(c, format!("compile_lib_path: {:?}", config.compile_lib_path));
+ logv(c, format!("run_lib_path: {:?}", config.run_lib_path));
+ logv(c, format!("rustc_path: {:?}", config.rustc_path.display()));
+ logv(c, format!("rustdoc_path: {:?}", config.rustdoc_path));
+ logv(c, format!("rust_demangler_path: {:?}", config.rust_demangler_path));
+ logv(c, format!("src_base: {:?}", config.src_base.display()));
+ logv(c, format!("build_base: {:?}", config.build_base.display()));
+ logv(c, format!("stage_id: {}", config.stage_id));
+ logv(c, format!("mode: {}", config.mode));
+ logv(c, format!("run_ignored: {}", config.run_ignored));
+ logv(c, format!("filters: {:?}", config.filters));
+ logv(c, format!("skip: {:?}", config.skip));
+ logv(c, format!("filter_exact: {}", config.filter_exact));
+ logv(
+ c,
+ format!("force_pass_mode: {}", opt_str(&config.force_pass_mode.map(|m| format!("{}", m))),),
+ );
+ logv(c, format!("runtool: {}", opt_str(&config.runtool)));
+ logv(c, format!("host-rustcflags: {}", opt_str(&config.host_rustcflags)));
+ logv(c, format!("target-rustcflags: {}", opt_str(&config.target_rustcflags)));
+ logv(c, format!("target: {}", config.target));
+ logv(c, format!("host: {}", config.host));
+ logv(c, format!("android-cross-path: {:?}", config.android_cross_path.display()));
+ logv(c, format!("adb_path: {:?}", config.adb_path));
+ logv(c, format!("adb_test_dir: {:?}", config.adb_test_dir));
+ logv(c, format!("adb_device_status: {}", config.adb_device_status));
+ logv(c, format!("ar: {}", config.ar));
+ logv(c, format!("linker: {:?}", config.linker));
+ logv(c, format!("verbose: {}", config.verbose));
+ logv(c, format!("quiet: {}", config.quiet));
+ logv(c, "\n".to_string());
+}
+
/// Renders an optional string for logging, substituting `"(none)"` when the
/// value is absent.
pub fn opt_str(maybestr: &Option<String>) -> &str {
    maybestr.as_deref().unwrap_or("(none)")
}
+
/// Consumes an optional string and returns it owned, substituting
/// `"(none)"` when the value is absent.
pub fn opt_str2(maybestr: Option<String>) -> String {
    maybestr.unwrap_or_else(|| "(none)".to_owned())
}
+
/// Runs the whole collected test suite and exits the process with a
/// non-zero status if any test fails.
///
/// Before running, this: deletes any stale rustfix coverage file, raises
/// the file-descriptor soft limit (macOS), and sets the `__COMPAT_LAYER`
/// and `TARGET` environment variables for child processes.
pub fn run_tests(config: Config) {
    // If we want to collect rustfix coverage information,
    // we first make sure that the coverage file does not exist.
    // It will be created later on.
    if config.rustfix_coverage {
        let mut coverage_file_path = config.build_base.clone();
        coverage_file_path.push("rustfix_missing_coverage.txt");
        if coverage_file_path.exists() {
            if let Err(e) = fs::remove_file(&coverage_file_path) {
                panic!("Could not delete {} due to {}", coverage_file_path.display(), e)
            }
        }
    }

    // sadly osx needs some file descriptor limits raised for running tests in
    // parallel (especially when we have lots and lots of child processes).
    // For context, see #8904
    unsafe {
        raise_fd_limit::raise_fd_limit();
    }
    // Prevent issue #21352 UAC blocking .exe containing 'patch' etc. on Windows
    // If #11207 is resolved (adding manifest to .exe) this becomes unnecessary
    env::set_var("__COMPAT_LAYER", "RunAsInvoker");

    // Let tests know which target they're running as
    env::set_var("TARGET", &config.target);

    let opts = test_opts(&config);

    // Debuginfo tests expand into one configuration per available debugger
    // (cdb/gdb/lldb); every other mode uses the single original config.
    let mut configs = Vec::new();
    if let Mode::DebugInfo = config.mode {
        // Debugging emscripten code doesn't make sense today
        if !config.target.contains("emscripten") {
            configs.extend(configure_cdb(&config));
            configs.extend(configure_gdb(&config));
            configs.extend(configure_lldb(&config));
        }
    } else {
        configs.push(config.clone());
    };

    let mut tests = Vec::new();
    for c in &configs {
        make_tests(c, &mut tests);
    }

    let res = test::run_tests_console(&opts, tests);
    match res {
        Ok(true) => {}
        Ok(false) => {
            // We want to report that the tests failed, but we also want to give
            // some indication of just what tests we were running. Especially on
            // CI, where there can be cross-compiled tests for a lot of
            // architectures, without this critical information it can be quite
            // easy to miss which tests failed, and as such fail to reproduce
            // the failure locally.

            eprintln!(
                "Some tests failed in compiletest suite={}{} mode={} host={} target={}",
                config.suite,
                config.compare_mode.map(|c| format!(" compare_mode={:?}", c)).unwrap_or_default(),
                config.mode,
                config.host,
                config.target
            );

            std::process::exit(1);
        }
        Err(e) => {
            // We don't know if tests passed or not, but if there was an error
            // during testing we don't want to just succeed (we may not have
            // tested something), so fail.
            //
            // This should realistically "never" happen, so don't try to make
            // this a pretty error message.
            panic!("I/O failure during tests: {:?}", e);
        }
    }
}
+
+fn configure_cdb(config: &Config) -> Option<Config> {
+ config.cdb.as_ref()?;
+
+ Some(Config { debugger: Some(Debugger::Cdb), ..config.clone() })
+}
+
+fn configure_gdb(config: &Config) -> Option<Config> {
+ config.gdb_version?;
+
+ if util::matches_env(&config.target, "msvc") {
+ return None;
+ }
+
+ if config.remote_test_client.is_some() && !config.target.contains("android") {
+ println!(
+ "WARNING: debuginfo tests are not available when \
+ testing with remote"
+ );
+ return None;
+ }
+
+ if config.target.contains("android") {
+ println!(
+ "{} debug-info test uses tcp 5039 port.\
+ please reserve it",
+ config.target
+ );
+
+ // android debug-info test uses remote debugger so, we test 1 thread
+ // at once as they're all sharing the same TCP port to communicate
+ // over.
+ //
+ // we should figure out how to lift this restriction! (run them all
+ // on different ports allocated dynamically).
+ env::set_var("RUST_TEST_THREADS", "1");
+ }
+
+ Some(Config { debugger: Some(Debugger::Gdb), ..config.clone() })
+}
+
+fn configure_lldb(config: &Config) -> Option<Config> {
+ config.lldb_python_dir.as_ref()?;
+
+ if let Some(350) = config.lldb_version {
+ println!(
+ "WARNING: The used version of LLDB (350) has a \
+ known issue that breaks debuginfo tests. See \
+ issue #32520 for more information. Skipping all \
+ LLDB-based tests!",
+ );
+ return None;
+ }
+
+ Some(Config { debugger: Some(Debugger::Lldb), ..config.clone() })
+}
+
+pub fn test_opts(config: &Config) -> test::TestOpts {
+ test::TestOpts {
+ exclude_should_panic: false,
+ filters: config.filters.clone(),
+ filter_exact: config.filter_exact,
+ run_ignored: if config.run_ignored { test::RunIgnored::Yes } else { test::RunIgnored::No },
+ format: if config.quiet { test::OutputFormat::Terse } else { test::OutputFormat::Pretty },
+ logfile: config.logfile.clone(),
+ run_tests: true,
+ bench_benchmarks: true,
+ nocapture: match env::var("RUST_TEST_NOCAPTURE") {
+ Ok(val) => &val != "0",
+ Err(_) => false,
+ },
+ color: config.color,
+ shuffle: false,
+ shuffle_seed: None,
+ test_threads: None,
+ skip: config.skip.clone(),
+ list: false,
+ options: test::Options::new(),
+ time_options: None,
+ force_run_in_process: false,
+ }
+}
+
+pub fn make_tests(config: &Config, tests: &mut Vec<test::TestDescAndFn>) {
+ debug!("making tests from {:?}", config.src_base.display());
+ let inputs = common_inputs_stamp(config);
+ collect_tests_from_dir(config, &config.src_base, &PathBuf::new(), &inputs, tests)
+ .unwrap_or_else(|_| panic!("Could not read tests from {}", config.src_base.display()));
+}
+
+/// Returns a stamp constructed from input files common to all test cases.
+fn common_inputs_stamp(config: &Config) -> Stamp {
+ let rust_src_dir = config.find_rust_src_root().expect("Could not find Rust source root");
+
+ let mut stamp = Stamp::from_path(&config.rustc_path);
+
+ // Relevant pretty printer files
+ let pretty_printer_files = [
+ "src/etc/rust_types.py",
+ "src/etc/gdb_load_rust_pretty_printers.py",
+ "src/etc/gdb_lookup.py",
+ "src/etc/gdb_providers.py",
+ "src/etc/lldb_batchmode.py",
+ "src/etc/lldb_lookup.py",
+ "src/etc/lldb_providers.py",
+ ];
+ for file in &pretty_printer_files {
+ let path = rust_src_dir.join(file);
+ stamp.add_path(&path);
+ }
+
+ stamp.add_dir(&config.run_lib_path);
+
+ if let Some(ref rustdoc_path) = config.rustdoc_path {
+ stamp.add_path(&rustdoc_path);
+ stamp.add_path(&rust_src_dir.join("src/etc/htmldocck.py"));
+ }
+
+ // Compiletest itself.
+ stamp.add_dir(&rust_src_dir.join("src/tools/compiletest/"));
+
+ stamp
+}
+
/// Recursively walks `dir`, appending a libtest entry to `tests` for every
/// test file found. `relative_dir_path` mirrors `dir` relative to the suite
/// root and determines where build artifacts are placed.
fn collect_tests_from_dir(
    config: &Config,
    dir: &Path,
    relative_dir_path: &Path,
    inputs: &Stamp,
    tests: &mut Vec<test::TestDescAndFn>,
) -> io::Result<()> {
    // Ignore directories that contain a file named `compiletest-ignore-dir`.
    if dir.join("compiletest-ignore-dir").exists() {
        return Ok(());
    }

    // A run-make test is a whole directory with a Makefile; register it as a
    // single test instead of recursing into it.
    if config.mode == Mode::RunMake && dir.join("Makefile").exists() {
        let paths = TestPaths {
            file: dir.to_path_buf(),
            relative_dir: relative_dir_path.parent().unwrap().to_path_buf(),
        };
        tests.extend(make_test(config, &paths, inputs));
        return Ok(());
    }

    // If we find a test foo/bar.rs, we have to build the
    // output directory `$build/foo` so we can write
    // `$build/foo/bar` into it. We do this *now* in this
    // sequential loop because otherwise, if we do it in the
    // tests themselves, they race for the privilege of
    // creating the directories and sometimes fail randomly.
    let build_dir = output_relative_path(config, relative_dir_path);
    fs::create_dir_all(&build_dir).unwrap();

    // Add each `.rs` file as a test, and recurse further on any
    // subdirectories we find, except for `aux` directories.
    for file in fs::read_dir(dir)? {
        let file = file?;
        let file_path = file.path();
        let file_name = file.file_name();
        if is_test(&file_name) {
            debug!("found test file: {:?}", file_path.display());
            let paths =
                TestPaths { file: file_path, relative_dir: relative_dir_path.to_path_buf() };

            tests.extend(make_test(config, &paths, inputs))
        } else if file_path.is_dir() {
            // `auxiliary` directories hold support files for other tests,
            // not tests of their own.
            let relative_file_path = relative_dir_path.join(file.file_name());
            if &file_name != "auxiliary" {
                debug!("found directory: {:?}", file_path.display());
                collect_tests_from_dir(config, &file_path, &relative_file_path, inputs, tests)?;
            }
        } else {
            debug!("found other file/directory: {:?}", file_path.display());
        }
    }
    Ok(())
}
+
/// Returns true if `file_name` looks like a proper test file name: it must
/// end in `.rs` and must not begin with a character commonly used for
/// editor temp files.
pub fn is_test(file_name: &OsString) -> bool {
    let name = file_name.to_str().unwrap();
    // `.`, `#`, and `~` are common temp-file prefixes.
    const INVALID_PREFIXES: &[char] = &['.', '#', '~'];
    name.ends_with(".rs") && !name.starts_with(INVALID_PREFIXES)
}
+
/// Builds the libtest entries for a single test: one per revision, or a
/// single entry when there are no revisions or in incremental mode.
fn make_test(config: &Config, testpaths: &TestPaths, inputs: &Stamp) -> Vec<test::TestDescAndFn> {
    let test_path = if config.mode == Mode::RunMake {
        // Parse directives in the Makefile
        testpaths.file.join("Makefile")
    } else {
        PathBuf::from(&testpaths.file)
    };
    let early_props = EarlyProps::from_file(config, &test_path);

    // Incremental tests are special, they inherently cannot be run in parallel.
    // `runtest::run` will be responsible for iterating over revisions.
    let revisions = if early_props.revisions.is_empty() || config.mode == Mode::Incremental {
        vec![None]
    } else {
        early_props.revisions.iter().map(Some).collect()
    };
    revisions
        .into_iter()
        .map(|revision| {
            // Re-open the file per revision so the ignore-directive parser
            // gets a fresh reader each time.
            let src_file =
                std::fs::File::open(&test_path).expect("open test file to parse ignores");
            let cfg = revision.map(|v| &**v);
            let test_name = crate::make_test_name(config, testpaths, revision);
            let mut desc = make_test_description(config, test_name, &test_path, src_file, cfg);
            // Ignore tests that already run and are up to date with respect to inputs.
            if !config.force_rerun {
                desc.ignore |= is_up_to_date(
                    config,
                    testpaths,
                    &early_props,
                    revision.map(|s| s.as_str()),
                    inputs,
                );
            }
            test::TestDescAndFn { desc, testfn: make_test_closure(config, testpaths, revision) }
        })
        .collect()
}
+
+fn stamp(config: &Config, testpaths: &TestPaths, revision: Option<&str>) -> PathBuf {
+ output_base_dir(config, testpaths, revision).join("stamp")
+}
+
+fn files_related_to_test(
+ config: &Config,
+ testpaths: &TestPaths,
+ props: &EarlyProps,
+ revision: Option<&str>,
+) -> Vec<PathBuf> {
+ let mut related = vec![];
+
+ if testpaths.file.is_dir() {
+ // run-make tests use their individual directory
+ for entry in WalkDir::new(&testpaths.file) {
+ let path = entry.unwrap().into_path();
+ if path.is_file() {
+ related.push(path);
+ }
+ }
+ } else {
+ related.push(testpaths.file.clone());
+ }
+
+ for aux in &props.aux {
+ let path = testpaths.file.parent().unwrap().join("auxiliary").join(aux);
+ related.push(path);
+ }
+
+ // UI test files.
+ for extension in UI_EXTENSIONS {
+ let path = expected_output_path(testpaths, revision, &config.compare_mode, extension);
+ related.push(path);
+ }
+
+ related
+}
+
/// Returns `true` when a test's recorded stamp is still valid: its contents
/// match the current configuration hash and the stamp file is newer than
/// every input the test depends on.
fn is_up_to_date(
    config: &Config,
    testpaths: &TestPaths,
    props: &EarlyProps,
    revision: Option<&str>,
    inputs: &Stamp,
) -> bool {
    let stamp_name = stamp(config, testpaths, revision);
    // Check hash.
    let contents = match fs::read_to_string(&stamp_name) {
        Ok(f) => f,
        // Non-UTF-8 stamp data is a hard error; any other read failure
        // (typically a missing stamp) just means the test must run.
        Err(ref e) if e.kind() == ErrorKind::InvalidData => panic!("Can't read stamp contents"),
        Err(_) => return false,
    };
    let expected_hash = runtest::compute_stamp_hash(config);
    if contents != expected_hash {
        return false;
    }

    // Check timestamps.
    let mut inputs = inputs.clone();
    for path in files_related_to_test(config, testpaths, props, revision) {
        inputs.add_path(&path);
    }

    // `Stamp` comparison is by modification time: every input must be older
    // than the stamp file itself.
    inputs < Stamp::from_path(&stamp_name)
}
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+struct Stamp {
+ time: SystemTime,
+}
+
+impl Stamp {
+ fn from_path(path: &Path) -> Self {
+ let mut stamp = Stamp { time: SystemTime::UNIX_EPOCH };
+ stamp.add_path(path);
+ stamp
+ }
+
+ fn add_path(&mut self, path: &Path) {
+ let modified = fs::metadata(path)
+ .and_then(|metadata| metadata.modified())
+ .unwrap_or(SystemTime::UNIX_EPOCH);
+ self.time = self.time.max(modified);
+ }
+
+ fn add_dir(&mut self, path: &Path) {
+ for entry in WalkDir::new(path) {
+ let entry = entry.unwrap();
+ if entry.file_type().is_file() {
+ let modified = entry
+ .metadata()
+ .ok()
+ .and_then(|metadata| metadata.modified().ok())
+ .unwrap_or(SystemTime::UNIX_EPOCH);
+ self.time = self.time.max(modified);
+ }
+ }
+ }
+}
+
+fn make_test_name(
+ config: &Config,
+ testpaths: &TestPaths,
+ revision: Option<&String>,
+) -> test::TestName {
+ // Print the name of the file, relative to the repository root.
+ // `src_base` looks like `/path/to/rust/src/test/ui`
+ let root_directory = config.src_base.parent().unwrap().parent().unwrap().parent().unwrap();
+ let path = testpaths.file.strip_prefix(root_directory).unwrap();
+ let debugger = match config.debugger {
+ Some(d) => format!("-{}", d),
+ None => String::new(),
+ };
+ let mode_suffix = match config.compare_mode {
+ Some(ref mode) => format!(" ({})", mode.to_str()),
+ None => String::new(),
+ };
+
+ test::DynTestName(format!(
+ "[{}{}{}] {}{}",
+ config.mode,
+ debugger,
+ mode_suffix,
+ path.display(),
+ revision.map_or("".to_string(), |rev| format!("#{}", rev))
+ ))
+}
+
+fn make_test_closure(
+ config: &Config,
+ testpaths: &TestPaths,
+ revision: Option<&String>,
+) -> test::TestFn {
+ let config = config.clone();
+ let testpaths = testpaths.clone();
+ let revision = revision.cloned();
+ test::DynTestFn(Box::new(move || runtest::run(config, &testpaths, revision.as_deref())))
+}
+
/// Returns `true` if the given target is an Android target for the
/// purposes of GDB testing.
fn is_android_gdb_target(target: &str) -> bool {
    ["arm-linux-androideabi", "armv7-linux-androideabi", "aarch64-linux-android"]
        .contains(&target)
}
+
/// Returns `true` if the given target is an MSVC target for the purposes
/// of CDB testing.
fn is_pc_windows_msvc_target(target: &str) -> bool {
    const MSVC_SUFFIX: &str = "-pc-windows-msvc";
    target.ends_with(MSVC_SUFFIX)
}
+
/// Probes the Windows 10 SDK install location for a `cdb.exe` matching the
/// host architecture. Returns `None` when not running on Windows, for
/// non-MSVC targets, for host architectures without a CDB build, or when
/// the executable is simply absent.
fn find_cdb(target: &str) -> Option<OsString> {
    if !(cfg!(windows) && is_pc_windows_msvc_target(target)) {
        return None;
    }

    // Prefer the 32-bit Program Files variable; fall back to the plain one.
    let pf86 = env::var_os("ProgramFiles(x86)").or_else(|| env::var_os("ProgramFiles"))?;
    let cdb_arch = if cfg!(target_arch = "x86") {
        "x86"
    } else if cfg!(target_arch = "x86_64") {
        "x64"
    } else if cfg!(target_arch = "aarch64") {
        "arm64"
    } else if cfg!(target_arch = "arm") {
        "arm"
    } else {
        return None; // No compatible CDB.exe in the Windows 10 SDK
    };

    let mut path = PathBuf::new();
    path.push(pf86);
    path.push(r"Windows Kits\10\Debuggers"); // We could check 8.1 etc. too?
    path.push(cdb_arch);
    path.push(r"cdb.exe");

    if !path.exists() {
        return None;
    }

    Some(path.into_os_string())
}
+
+/// Returns Path to CDB
+fn analyze_cdb(cdb: Option<String>, target: &str) -> (Option<OsString>, Option<[u16; 4]>) {
+ let cdb = cdb.map(OsString::from).or_else(|| find_cdb(target));
+
+ let mut version = None;
+ if let Some(cdb) = cdb.as_ref() {
+ if let Ok(output) = Command::new(cdb).arg("/version").output() {
+ if let Some(first_line) = String::from_utf8_lossy(&output.stdout).lines().next() {
+ version = extract_cdb_version(&first_line);
+ }
+ }
+ }
+
+ (cdb, version)
+}
+
/// Parses the first line of `cdb /version` output into a 4-component
/// version, e.g. `"cdb version 10.0.18362.1"` -> `[10, 0, 18362, 1]`.
///
/// Missing patch/build components default to 0. Returns `None` (instead of
/// panicking, as the previous `unwrap`s did) when the line is malformed.
fn extract_cdb_version(full_version_line: &str) -> Option<[u16; 4]> {
    // Example full_version_line: "cdb version 10.0.18362.1"
    let version = full_version_line.rsplit(' ').next()?;
    let mut components = version.split('.');
    let major: u16 = components.next()?.parse().ok()?;
    let minor: u16 = components.next()?.parse().ok()?;
    let patch: u16 = components.next().unwrap_or("0").parse().ok()?;
    let build: u16 = components.next().unwrap_or("0").parse().ok()?;
    Some([major, minor, patch, build])
}
+
+/// Returns (Path to GDB, GDB Version, GDB has Rust Support)
+fn analyze_gdb(
+ gdb: Option<String>,
+ target: &str,
+ android_cross_path: &PathBuf,
+) -> (Option<String>, Option<u32>, bool) {
+ #[cfg(not(windows))]
+ const GDB_FALLBACK: &str = "gdb";
+ #[cfg(windows)]
+ const GDB_FALLBACK: &str = "gdb.exe";
+
+ const MIN_GDB_WITH_RUST: u32 = 7011010;
+
+ let fallback_gdb = || {
+ if is_android_gdb_target(target) {
+ let mut gdb_path = match android_cross_path.to_str() {
+ Some(x) => x.to_owned(),
+ None => panic!("cannot find android cross path"),
+ };
+ gdb_path.push_str("/bin/gdb");
+ gdb_path
+ } else {
+ GDB_FALLBACK.to_owned()
+ }
+ };
+
+ let gdb = match gdb {
+ None => fallback_gdb(),
+ Some(ref s) if s.is_empty() => fallback_gdb(), // may be empty if configure found no gdb
+ Some(ref s) => s.to_owned(),
+ };
+
+ let mut version_line = None;
+ if let Ok(output) = Command::new(&gdb).arg("--version").output() {
+ if let Some(first_line) = String::from_utf8_lossy(&output.stdout).lines().next() {
+ version_line = Some(first_line.to_string());
+ }
+ }
+
+ let version = match version_line {
+ Some(line) => extract_gdb_version(&line),
+ None => return (None, None, false),
+ };
+
+ let gdb_native_rust = version.map_or(false, |v| v >= MIN_GDB_WITH_RUST);
+
+ (Some(gdb), version, gdb_native_rust)
+}
+
/// Parses a GDB `--version` first line into a single comparable integer:
/// `((major * 1000) + minor) * 1000 + patch`.
fn extract_gdb_version(full_version_line: &str) -> Option<u32> {
    let full_version_line = full_version_line.trim();

    // GDB versions look like this: "major.minor.patch?.yyyymmdd?", with both
    // of the ? sections being optional

    // We will parse up to 3 digits for each component, ignoring the date

    // We skip text in parentheses. This avoids accidentally parsing
    // the openSUSE version, which looks like:
    // GNU gdb (GDB; openSUSE Leap 15.0) 8.1
    // This particular form is documented in the GNU coding standards:
    // https://www.gnu.org/prep/standards/html_node/_002d_002dversion.html#g_t_002d_002dversion
    //
    // NOTE(review): the comment above talks about parentheses, but the code
    // below splits on `[` and relies on `rsplit(' ')` (the version being the
    // last space-separated token) to skip the parenthesized text — confirm
    // that bracketed suffixes are the only thing `split('[')` must handle.

    let unbracketed_part = full_version_line.split('[').next().unwrap();
    let mut splits = unbracketed_part.trim_end().rsplit(' ');
    let version_string = splits.next().unwrap();

    let mut splits = version_string.split('.');
    let major = splits.next().unwrap();
    let minor = splits.next().unwrap();
    let patch = splits.next();

    // NOTE(review): these `unwrap`s panic on a non-numeric major component
    // or a version string without a `.` — presumably acceptable for tool
    // output, but worth confirming.
    let major: u32 = major.parse().unwrap();
    let (minor, patch): (u32, u32) = match minor.find(not_a_digit) {
        None => {
            // Minor is all digits; the patch component may carry a trailing
            // suffix, of which at most 3 leading digits are used.
            let minor = minor.parse().unwrap();
            let patch: u32 = match patch {
                Some(patch) => match patch.find(not_a_digit) {
                    None => patch.parse().unwrap(),
                    Some(idx) if idx > 3 => 0,
                    Some(idx) => patch[..idx].parse().unwrap(),
                },
                None => 0,
            };
            (minor, patch)
        }
        // There is no patch version after minor-date (e.g. "4-2012").
        Some(idx) => {
            let minor = minor[..idx].parse().unwrap();
            (minor, 0)
        }
    };

    Some(((major * 1000) + minor) * 1000 + patch)
}
+
/// Returns (LLDB version, LLDB is rust-enabled)
fn extract_lldb_version(full_version_line: &str) -> Option<(u32, bool)> {
    // Extract the major LLDB version from the given version string.
    // LLDB version strings are different for Apple and non-Apple platforms.
    // The Apple variant looks like this:
    //
    // LLDB-179.5 (older versions)
    // lldb-300.2.51 (new versions)
    //
    // We are only interested in the major version number, so this function
    // will return `Some(179)` and `Some(300)` respectively.
    //
    // Upstream versions look like:
    // lldb version 6.0.1
    //
    // There doesn't seem to be a way to correlate the Apple version
    // with the upstream version, and since the tests were originally
    // written against Apple versions, we make a fake Apple version by
    // multiplying the first number by 100. This is a hack, but
    // normally fine because the only non-Apple version we test is
    // rust-enabled.

    /// Leading run of ASCII digits of `s` (possibly empty).
    fn leading_digits(s: &str) -> &str {
        let end = s.find(|c: char| !c.is_ascii_digit()).unwrap_or(s.len());
        &s[..end]
    }

    let full_version_line = full_version_line.trim();
    let rust_enabled = full_version_line.contains("rust-enabled");

    if let Some(apple_ver) =
        full_version_line.strip_prefix("LLDB-").or_else(|| full_version_line.strip_prefix("lldb-"))
    {
        // Previously a version with no trailing non-digit (e.g. "LLDB-300")
        // was rejected outright, and a malformed number panicked; both now
        // simply yield `None` or the parsed value as appropriate.
        let version: u32 = leading_digits(apple_ver).parse().ok()?;
        return Some((version, rust_enabled));
    }

    if let Some(lldb_ver) = full_version_line.strip_prefix("lldb version ") {
        let version: u32 = leading_digits(lldb_ver).parse().ok()?;
        return Some((version * 100, rust_enabled));
    }

    None
}
+
/// Character predicate matching anything other than an ASCII decimal digit.
fn not_a_digit(c: char) -> bool {
    !c.is_ascii_digit()
}
diff --git a/src/tools/compiletest/src/raise_fd_limit.rs b/src/tools/compiletest/src/raise_fd_limit.rs
new file mode 100644
index 000000000..bc2946e2c
--- /dev/null
+++ b/src/tools/compiletest/src/raise_fd_limit.rs
@@ -0,0 +1,54 @@
/// darwin_fd_limit exists to work around an issue where launchctl on macOS
/// defaults the rlimit maxfiles to 256/unlimited. The default soft limit of 256
/// ends up being far too low for our multithreaded scheduler testing, depending
/// on the number of cores available.
///
/// This fixes issue #7772.
///
/// # Safety
///
/// Calls the raw `libc` APIs `sysctl`, `getrlimit` and `setrlimit`. The
/// pointers handed to `sysctl` come from local variables whose size is
/// computed with `size_of_val`, so the buffers are always large enough;
/// any failing call panics with the OS error.
#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
#[allow(non_camel_case_types)]
pub unsafe fn raise_fd_limit() {
    use std::cmp;
    use std::io;
    use std::mem::size_of_val;
    use std::ptr::null_mut;

    // sysctl MIB components selecting the kern.maxfilesperproc value below.
    static CTL_KERN: libc::c_int = 1;
    static KERN_MAXFILESPERPROC: libc::c_int = 29;

    // The strategy here is to fetch the current resource limits, read the
    // kern.maxfilesperproc sysctl value, and bump the soft resource limit for
    // maxfiles up to the sysctl value.

    // Fetch the kern.maxfilesperproc value
    let mut mib: [libc::c_int; 2] = [CTL_KERN, KERN_MAXFILESPERPROC];
    let mut maxfiles: libc::c_int = 0;
    let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
    if libc::sysctl(&mut mib[0], 2, &mut maxfiles as *mut _ as *mut _, &mut size, null_mut(), 0)
        != 0
    {
        let err = io::Error::last_os_error();
        panic!("raise_fd_limit: error calling sysctl: {}", err);
    }

    // Fetch the current resource limits
    let mut rlim = libc::rlimit { rlim_cur: 0, rlim_max: 0 };
    if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 {
        let err = io::Error::last_os_error();
        panic!("raise_fd_limit: error calling getrlimit: {}", err);
    }

    // Make sure we're only ever going to increase the rlimit.
    if rlim.rlim_cur < maxfiles as libc::rlim_t {
        // Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit.
        rlim.rlim_cur = cmp::min(maxfiles as libc::rlim_t, rlim.rlim_max);

        // Set our newly-increased resource limit.
        if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 {
            let err = io::Error::last_os_error();
            panic!("raise_fd_limit: error calling setrlimit: {}", err);
        }
    }
}
+
/// No-op on platforms that do not need their file-descriptor limit raised.
///
/// The exclusion list must mirror the cfg on the darwin implementation
/// above (macos/ios/watchos); omitting `watchos` here would define the
/// function twice on that target.
#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "watchos")))]
pub unsafe fn raise_fd_limit() {}
diff --git a/src/tools/compiletest/src/read2.rs b/src/tools/compiletest/src/read2.rs
new file mode 100644
index 000000000..7640e6517
--- /dev/null
+++ b/src/tools/compiletest/src/read2.rs
@@ -0,0 +1,317 @@
+// FIXME: This is a complete copy of `cargo/src/cargo/util/read2.rs`
+// Consider unify the read2() in libstd, cargo and this to prevent further code duplication.
+
+#[cfg(test)]
+mod tests;
+
+pub use self::imp::read2;
+use std::io::{self, Write};
+use std::mem::replace;
+use std::process::{Child, Output};
+
/// Drives `child` to completion, capturing stdout and stderr through the
/// platform-specific `read2` and bounding each stream's size via `ProcOutput`
/// (see `HEAD_LEN`/`TAIL_LEN`).
///
/// `filter_paths_from_len` lists substrings (paths) whose occurrences should
/// not count their full length toward the truncation threshold.
pub fn read2_abbreviated(mut child: Child, filter_paths_from_len: &[String]) -> io::Result<Output> {
    let mut stdout = ProcOutput::new();
    let mut stderr = ProcOutput::new();

    // Close stdin so the child cannot block waiting for input.
    drop(child.stdin.take());
    read2(
        child.stdout.take().unwrap(),
        child.stderr.take().unwrap(),
        &mut |is_stdout, data, _| {
            if is_stdout { &mut stdout } else { &mut stderr }.extend(data, filter_paths_from_len);
            data.clear();
        },
    )?;
    let status = child.wait()?;

    Ok(Output { status, stdout: stdout.into_bytes(), stderr: stderr.into_bytes() })
}
+
// Bounds for abbreviated process output: at most HEAD_LEN bytes from the
// start and TAIL_LEN bytes from the end are kept; everything in between is
// dropped and replaced with a "SKIPPED N BYTES" marker.
const HEAD_LEN: usize = 160 * 1024;
const TAIL_LEN: usize = 256 * 1024;

// Whenever a path is filtered when counting the length of the output, we need to add some
// placeholder length to ensure a compiler emitting only filtered paths doesn't cause a OOM.
//
// 32 was chosen semi-arbitrarily: it was the highest power of two that still allowed the test
// suite to pass at the moment of implementing path filtering.
const FILTERED_PATHS_PLACEHOLDER_LEN: usize = 32;
+
/// Captured output of one stream, either stored in full or abbreviated once
/// the (filtered) length crosses `HEAD_LEN + TAIL_LEN`.
enum ProcOutput {
    // All bytes so far, plus the length used for threshold checks (real
    // length minus filtered-path lengths, plus fixed placeholders).
    Full { bytes: Vec<u8>, filtered_len: usize },
    // First HEAD_LEN bytes, a count of dropped bytes, and a fixed-size
    // buffer holding the most recent TAIL_LEN bytes.
    Abbreviated { head: Vec<u8>, skipped: usize, tail: Box<[u8]> },
}
+
impl ProcOutput {
    /// Starts in the `Full` state with no captured bytes.
    fn new() -> Self {
        ProcOutput::Full { bytes: Vec::new(), filtered_len: 0 }
    }

    /// Appends `data`, switching to `Abbreviated` once the *filtered* length
    /// exceeds `HEAD_LEN + TAIL_LEN`. Matches of any string in
    /// `filter_paths_from_len` contribute `FILTERED_PATHS_PLACEHOLDER_LEN`
    /// to the threshold instead of their real length.
    fn extend(&mut self, data: &[u8], filter_paths_from_len: &[String]) {
        let new_self = match *self {
            ProcOutput::Full { ref mut bytes, ref mut filtered_len } => {
                let old_len = bytes.len();
                bytes.extend_from_slice(data);
                *filtered_len += data.len();

                // We had problems in the past with tests failing only in some environments,
                // due to the length of the base path pushing the output size over the limit.
                //
                // To make those failures deterministic across all environments we ignore known
                // paths when calculating the string length, while still including the full
                // path in the output. This could result in some output being larger than the
                // threshold, but it's better than having nondeterministic failures.
                //
                // The compiler emitting only excluded strings is addressed by adding a
                // placeholder size for each excluded segment, which will eventually reach
                // the configured threshold.
                for path in filter_paths_from_len {
                    let path_bytes = path.as_bytes();
                    // We start matching `path_bytes - 1` into the previously loaded data,
                    // to account for the fact a path_bytes might be included across multiple
                    // `extend` calls. Starting from `- 1` avoids double-counting paths.
                    let matches = (&bytes[(old_len.saturating_sub(path_bytes.len() - 1))..])
                        .windows(path_bytes.len())
                        .filter(|window| window == &path_bytes)
                        .count();
                    *filtered_len -= matches * path_bytes.len();

                    // We can't just remove the length of the filtered path from the output length,
                    // otherwise a compiler emitting only filtered paths would OOM compiletest. Add
                    // a fixed placeholder length for each path to prevent that.
                    *filtered_len += matches * FILTERED_PATHS_PLACEHOLDER_LEN;
                }

                let new_len = bytes.len();
                if *filtered_len <= HEAD_LEN + TAIL_LEN {
                    return;
                }

                // NOTE(review): this assumes `bytes.len() >= HEAD_LEN + TAIL_LEN`
                // whenever `filtered_len` crosses the threshold; if many short
                // filtered paths (shorter than the placeholder) ever inflated
                // `filtered_len` past the real length, `split_off` below could
                // panic — TODO confirm this is unreachable in practice.
                let mut head = replace(bytes, Vec::new());
                let mut middle = head.split_off(HEAD_LEN);
                let tail = middle.split_off(middle.len() - TAIL_LEN).into_boxed_slice();
                let skipped = new_len - HEAD_LEN - TAIL_LEN;
                ProcOutput::Abbreviated { head, skipped, tail }
            }
            ProcOutput::Abbreviated { ref mut skipped, ref mut tail, .. } => {
                // Keep a sliding window of the most recent TAIL_LEN bytes:
                // overwrite the oldest bytes, then rotate so the newest data
                // ends up at the back of `tail`.
                *skipped += data.len();
                if data.len() <= TAIL_LEN {
                    tail[..data.len()].copy_from_slice(data);
                    tail.rotate_left(data.len());
                } else {
                    tail.copy_from_slice(&data[(data.len() - TAIL_LEN)..]);
                }
                return;
            }
        };
        *self = new_self;
    }

    /// Renders the captured output; abbreviated output gets a
    /// `<<<<<< SKIPPED N BYTES >>>>>>` marker between head and tail.
    fn into_bytes(self) -> Vec<u8> {
        match self {
            ProcOutput::Full { bytes, .. } => bytes,
            ProcOutput::Abbreviated { mut head, skipped, tail } => {
                write!(&mut head, "\n\n<<<<<< SKIPPED {} BYTES >>>>>>\n\n", skipped).unwrap();
                head.extend_from_slice(&tail);
                head
            }
        }
    }
}
+
#[cfg(not(any(unix, windows)))]
mod imp {
    use std::io::{self, Read};
    use std::process::{ChildStderr, ChildStdout};

    /// Fallback for platforms without `poll`/IOCP support: drains stdout
    /// fully, then stderr, then invokes `data` once per stream.
    ///
    /// Fix: `Read::read_to_end` takes `&mut self`, so the pipe parameters
    /// must be `mut` bindings (as the unix implementation already does);
    /// without them this module fails to compile on targets that use it.
    pub fn read2(
        mut out_pipe: ChildStdout,
        mut err_pipe: ChildStderr,
        data: &mut dyn FnMut(bool, &mut Vec<u8>, bool),
    ) -> io::Result<()> {
        let mut buffer = Vec::new();
        out_pipe.read_to_end(&mut buffer)?;
        data(true, &mut buffer, true);
        buffer.clear();
        err_pipe.read_to_end(&mut buffer)?;
        data(false, &mut buffer, true);
        Ok(())
    }
}
+
#[cfg(unix)]
mod imp {
    use std::io;
    use std::io::prelude::*;
    use std::mem;
    use std::os::unix::prelude::*;
    use std::process::{ChildStderr, ChildStdout};

    /// Reads both pipes concurrently using non-blocking fds and `poll`,
    /// invoking `data` after each read with (is_stdout, buffer, is_done).
    pub fn read2(
        mut out_pipe: ChildStdout,
        mut err_pipe: ChildStderr,
        data: &mut dyn FnMut(bool, &mut Vec<u8>, bool),
    ) -> io::Result<()> {
        unsafe {
            // Switch both pipes to non-blocking mode.
            // NOTE(review): the fcntl return values are unchecked — TODO
            // confirm a failure here is acceptable to ignore.
            libc::fcntl(out_pipe.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK);
            libc::fcntl(err_pipe.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK);
        }

        let mut out_done = false;
        let mut err_done = false;
        let mut out = Vec::new();
        let mut err = Vec::new();

        let mut fds: [libc::pollfd; 2] = unsafe { mem::zeroed() };
        fds[0].fd = out_pipe.as_raw_fd();
        fds[0].events = libc::POLLIN;
        fds[1].fd = err_pipe.as_raw_fd();
        fds[1].events = libc::POLLIN;
        let mut nfds = 2;
        // Index of the stderr entry in `fds`; becomes 0 once stdout hits
        // EOF and its slot is reused for stderr (poll needs a dense array).
        let mut errfd = 1;

        while nfds > 0 {
            // wait for either pipe to become readable using `poll`
            let r = unsafe { libc::poll(fds.as_mut_ptr(), nfds, -1) };
            if r == -1 {
                let err = io::Error::last_os_error();
                if err.kind() == io::ErrorKind::Interrupted {
                    continue;
                }
                return Err(err);
            }

            // Read as much as we can from each pipe, ignoring EWOULDBLOCK or
            // EAGAIN. If we hit EOF, then this will happen because the underlying
            // reader will return Ok(0), in which case we'll see `Ok` ourselves. In
            // this case we flip the other fd back into blocking mode and read
            // whatever's leftover on that file descriptor.
            let handle = |res: io::Result<_>| match res {
                Ok(_) => Ok(true),
                Err(e) => {
                    if e.kind() == io::ErrorKind::WouldBlock {
                        Ok(false)
                    } else {
                        Err(e)
                    }
                }
            };
            if !err_done && fds[errfd].revents != 0 && handle(err_pipe.read_to_end(&mut err))? {
                err_done = true;
                nfds -= 1;
            }
            data(false, &mut err, err_done);
            if !out_done && fds[0].revents != 0 && handle(out_pipe.read_to_end(&mut out))? {
                out_done = true;
                fds[0].fd = err_pipe.as_raw_fd();
                errfd = 0;
                nfds -= 1;
            }
            data(true, &mut out, out_done);
        }
        Ok(())
    }
}
+
#[cfg(windows)]
mod imp {
    use std::io;
    use std::os::windows::prelude::*;
    use std::process::{ChildStderr, ChildStdout};
    use std::slice;

    use miow::iocp::{CompletionPort, CompletionStatus};
    use miow::pipe::NamedPipe;
    use miow::Overlapped;
    use winapi::shared::winerror::ERROR_BROKEN_PIPE;

    /// One end of the child's output, read via overlapped (async) I/O.
    struct Pipe<'a> {
        dst: &'a mut Vec<u8>,
        overlapped: Overlapped,
        pipe: NamedPipe,
        done: bool,
    }

    /// Reads stdout and stderr concurrently via an I/O completion port,
    /// invoking `data` after each completed read with
    /// (is_stdout, buffer, is_done).
    pub fn read2(
        out_pipe: ChildStdout,
        err_pipe: ChildStderr,
        data: &mut dyn FnMut(bool, &mut Vec<u8>, bool),
    ) -> io::Result<()> {
        let mut out = Vec::new();
        let mut err = Vec::new();

        // Token 0 identifies stdout completions, token 1 stderr.
        let port = CompletionPort::new(1)?;
        port.add_handle(0, &out_pipe)?;
        port.add_handle(1, &err_pipe)?;

        unsafe {
            let mut out_pipe = Pipe::new(out_pipe, &mut out);
            let mut err_pipe = Pipe::new(err_pipe, &mut err);

            out_pipe.read()?;
            err_pipe.read()?;

            let mut status = [CompletionStatus::zero(), CompletionStatus::zero()];

            while !out_pipe.done || !err_pipe.done {
                for status in port.get_many(&mut status, None)? {
                    if status.token() == 0 {
                        out_pipe.complete(status);
                        data(true, out_pipe.dst, out_pipe.done);
                        out_pipe.read()?;
                    } else {
                        err_pipe.complete(status);
                        data(false, err_pipe.dst, err_pipe.done);
                        err_pipe.read()?;
                    }
                }
            }

            Ok(())
        }
    }

    impl<'a> Pipe<'a> {
        /// Takes ownership of the raw handle and wraps it as a `NamedPipe`.
        unsafe fn new<P: IntoRawHandle>(p: P, dst: &'a mut Vec<u8>) -> Pipe<'a> {
            Pipe {
                dst: dst,
                pipe: NamedPipe::from_raw_handle(p.into_raw_handle()),
                overlapped: Overlapped::zero(),
                done: false,
            }
        }

        /// Starts an overlapped read into the spare capacity of `dst`.
        /// A broken pipe means the child closed its end: mark done.
        unsafe fn read(&mut self) -> io::Result<()> {
            let dst = slice_to_end(self.dst);
            match self.pipe.read_overlapped(dst, self.overlapped.raw()) {
                Ok(_) => Ok(()),
                Err(e) => {
                    if e.raw_os_error() == Some(ERROR_BROKEN_PIPE as i32) {
                        self.done = true;
                        Ok(())
                    } else {
                        Err(e)
                    }
                }
            }
        }

        /// Commits the bytes written by a finished overlapped read into
        /// `dst`'s length; a zero-byte transfer signals EOF.
        unsafe fn complete(&mut self, status: &CompletionStatus) {
            let prev = self.dst.len();
            self.dst.set_len(prev + status.bytes_transferred() as usize);
            if status.bytes_transferred() == 0 {
                self.done = true;
            }
        }
    }

    /// Returns the uninitialized spare capacity of `v` as a slice, growing
    /// the vector first so the returned slice is never empty.
    unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] {
        if v.capacity() == 0 {
            v.reserve(16);
        }
        if v.capacity() == v.len() {
            v.reserve(1);
        }
        slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize), v.capacity() - v.len())
    }
}
diff --git a/src/tools/compiletest/src/read2/tests.rs b/src/tools/compiletest/src/read2/tests.rs
new file mode 100644
index 000000000..1ca682a46
--- /dev/null
+++ b/src/tools/compiletest/src/read2/tests.rs
@@ -0,0 +1,123 @@
+use crate::read2::{ProcOutput, FILTERED_PATHS_PLACEHOLDER_LEN, HEAD_LEN, TAIL_LEN};
+
/// Output below the threshold is returned verbatim.
#[test]
fn test_abbreviate_short_string() {
    let mut output = ProcOutput::new();
    output.extend(b"Hello world!", &[]);
    let bytes = output.into_bytes();
    assert_eq!(bytes, b"Hello world!");
}
+
/// Short output accumulated across several `extend` calls is unchanged.
#[test]
fn test_abbreviate_short_string_multiple_steps() {
    let mut output = ProcOutput::new();
    for chunk in [&b"Hello "[..], &b"world!"[..]] {
        output.extend(chunk, &[]);
    }
    assert_eq!(output.into_bytes(), b"Hello world!");
}
+
/// A single oversized write is abbreviated to head + marker + tail.
#[test]
fn test_abbreviate_long_string() {
    let mut output = ProcOutput::new();

    let input = vec![b'.'; HEAD_LEN + TAIL_LEN + 16];
    output.extend(&input, &[]);

    let mut expected = Vec::new();
    expected.extend_from_slice(&input[..HEAD_LEN]);
    expected.extend_from_slice(b"\n\n<<<<<< SKIPPED 16 BYTES >>>>>>\n\n");
    expected.extend_from_slice(&vec![b'.'; TAIL_LEN]);

    // Compare lengths first so a mismatch doesn't dump hundreds of KBs of
    // bytes to the terminal.
    let actual = output.into_bytes();
    assert_eq!(expected.len(), actual.len());
    assert_eq!(expected, actual);
}
+
/// Abbreviation across several writes, including tail-buffer rotation.
#[test]
fn test_abbreviate_long_string_multiple_steps() {
    let mut output = ProcOutput::new();

    output.extend(&vec![b'.'; HEAD_LEN], &[]);
    output.extend(&vec![b'.'; TAIL_LEN], &[]);
    // Also test whether the rotation works
    output.extend(&vec![b'!'; 16], &[]);
    output.extend(&vec![b'?'; 16], &[]);

    let mut expected = vec![b'.'; HEAD_LEN];
    expected.extend_from_slice(b"\n\n<<<<<< SKIPPED 32 BYTES >>>>>>\n\n");
    expected.extend_from_slice(&vec![b'.'; TAIL_LEN - 32]);
    expected.extend_from_slice(&vec![b'!'; 16]);
    expected.extend_from_slice(&vec![b'?'; 16]);

    // Compare lengths first so a mismatch doesn't dump hundreds of KBs of
    // bytes to the terminal.
    let actual = output.into_bytes();
    assert_eq!(expected.len(), actual.len());
    assert_eq!(expected, actual);
}
+
/// Filtered paths subtract their real length from `filtered_len` and add the
/// fixed placeholder instead, including matches that span `extend` calls.
/// (Renamed from the typo'd `test_abbreviate_filterss_are_detected`.)
#[test]
fn test_abbreviate_filters_are_detected() {
    let mut out = ProcOutput::new();
    let filters = &["foo".to_string(), "quux".to_string()];

    out.extend(b"Hello foo", filters);
    // Check items from a previous extension are not double-counted.
    out.extend(b"! This is a qu", filters);
    // Check items are detected across extensions.
    out.extend(b"ux.", filters);

    match &out {
        ProcOutput::Full { bytes, filtered_len } => assert_eq!(
            *filtered_len,
            bytes.len() + FILTERED_PATHS_PLACEHOLDER_LEN * filters.len()
                - filters.iter().map(|i| i.len()).sum::<usize>()
        ),
        ProcOutput::Abbreviated { .. } => panic!("out should not be abbreviated"),
    }

    assert_eq!(b"Hello foo! This is a quux.", &*out.into_bytes());
}
+
/// A filtered path that would push the raw length over the threshold must
/// not trigger abbreviation: only the placeholder length is counted.
#[test]
fn test_abbreviate_filters_avoid_abbreviations() {
    let mut out = ProcOutput::new();
    // `str::repeat` replaces the former `iter::repeat().take().collect()`
    // chain; `FILTERED_PATHS_PLACEHOLDER_LEN` is already `usize`, so the
    // redundant `as usize` cast is dropped as well.
    let filters = &["a".repeat(64)];

    let mut expected = vec![b'.'; HEAD_LEN - FILTERED_PATHS_PLACEHOLDER_LEN];
    expected.extend_from_slice(filters[0].as_bytes());
    expected.extend_from_slice(&vec![b'.'; TAIL_LEN]);

    out.extend(&expected, filters);

    // We first check the length to avoid endless terminal output if the length differs, since
    // `out` is hundreds of KBs in size.
    let out = out.into_bytes();
    assert_eq!(expected.len(), out.len());
    assert_eq!(expected, out);
}
+
/// Filtered paths still count their placeholder length, so enough of them
/// past the threshold eventually abbreviates the output anyway.
#[test]
fn test_abbreviate_filters_can_still_cause_abbreviations() {
    let mut output = ProcOutput::new();
    let filters = &["a".repeat(64)];

    let mut input = vec![b'.'; HEAD_LEN + TAIL_LEN];
    input.extend_from_slice(filters[0].as_bytes());

    let mut expected = vec![b'.'; HEAD_LEN];
    expected.extend_from_slice(b"\n\n<<<<<< SKIPPED 64 BYTES >>>>>>\n\n");
    expected.extend_from_slice(&vec![b'.'; TAIL_LEN - 64]);
    expected.extend_from_slice(&vec![b'a'; 64]);

    output.extend(&input, filters);

    // Compare lengths first so a mismatch doesn't dump hundreds of KBs of
    // bytes to the terminal.
    let actual = output.into_bytes();
    assert_eq!(expected.len(), actual.len());
    assert_eq!(expected, actual);
}
diff --git a/src/tools/compiletest/src/runtest.rs b/src/tools/compiletest/src/runtest.rs
new file mode 100644
index 000000000..d3e5a2dd6
--- /dev/null
+++ b/src/tools/compiletest/src/runtest.rs
@@ -0,0 +1,3948 @@
+// ignore-tidy-filelength
+
+use crate::common::{expected_output_path, UI_EXTENSIONS, UI_FIXED, UI_STDERR, UI_STDOUT};
+use crate::common::{incremental_dir, output_base_dir, output_base_name, output_testname_unique};
+use crate::common::{Assembly, Incremental, JsDocTest, MirOpt, RunMake, RustdocJson, Ui};
+use crate::common::{Codegen, CodegenUnits, DebugInfo, Debugger, Rustdoc};
+use crate::common::{CompareMode, FailMode, PassMode};
+use crate::common::{Config, TestPaths};
+use crate::common::{Pretty, RunPassValgrind};
+use crate::common::{UI_RUN_STDERR, UI_RUN_STDOUT};
+use crate::compute_diff::{write_diff, write_filtered_diff};
+use crate::errors::{self, Error, ErrorKind};
+use crate::header::TestProps;
+use crate::json;
+use crate::read2::read2_abbreviated;
+use crate::util::get_pointer_width;
+use crate::util::{logv, PathBufExt};
+use crate::ColorConfig;
+use regex::{Captures, Regex};
+use rustfix::{apply_suggestions, get_suggestions_from_json, Filter};
+
+use std::collections::hash_map::DefaultHasher;
+use std::collections::{HashMap, HashSet};
+use std::env;
+use std::ffi::{OsStr, OsString};
+use std::fs::{self, create_dir_all, File, OpenOptions};
+use std::hash::{Hash, Hasher};
+use std::io::prelude::*;
+use std::io::{self, BufReader};
+use std::path::{Path, PathBuf};
+use std::process::{Child, Command, ExitStatus, Output, Stdio};
+use std::str;
+
+use glob::glob;
+use lazy_static::lazy_static;
+use tracing::*;
+
+use crate::extract_gdb_version;
+use crate::is_android_gdb_target;
+
+mod debugger;
+use debugger::{check_debugger_output, DebuggerCommands};
+
+#[cfg(test)]
+mod tests;
+
/// Runs `f` with Windows error-reporting UI suppressed.
///
/// `SetErrorMode` manipulates process-global state, so a mutex serializes
/// concurrent callers; the previous mode is restored before returning.
#[cfg(windows)]
fn disable_error_reporting<F: FnOnce() -> R, R>(f: F) -> R {
    use std::sync::Mutex;
    use winapi::um::errhandlingapi::SetErrorMode;
    use winapi::um::winbase::SEM_NOGPFAULTERRORBOX;

    lazy_static! {
        static ref LOCK: Mutex<()> = Mutex::new(());
    }
    // Error mode is a global variable, so lock it so only one thread will change it
    let _lock = LOCK.lock().unwrap();

    // Tell Windows to not show any UI on errors (such as terminating abnormally).
    // This is important for running tests, since some of them use abnormal
    // termination by design. This mode is inherited by all child processes.
    unsafe {
        // SetErrorMode returns the previous mode, so it is called twice:
        // once to learn the inherited flags, once to apply the union.
        let old_mode = SetErrorMode(SEM_NOGPFAULTERRORBOX); // read inherited flags
        SetErrorMode(old_mode | SEM_NOGPFAULTERRORBOX);
        let r = f();
        SetErrorMode(old_mode);
        r
    }
}
+
/// Non-Windows platforms need no error-mode handling; just run `f`.
#[cfg(not(windows))]
fn disable_error_reporting<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}
+
/// The name of the environment variable that holds dynamic library locations.
pub fn dylib_env_var() -> &'static str {
    if cfg!(windows) {
        return "PATH";
    }
    if cfg!(target_os = "macos") {
        return "DYLD_LIBRARY_PATH";
    }
    if cfg!(target_os = "haiku") {
        return "LIBRARY_PATH";
    }
    "LD_LIBRARY_PATH"
}
+
/// The platform-specific library name for `lib`.
///
/// In some cases (e.g. MUSL) a static library (`.rlib`) is built rather
/// than a dynamic one, in which case the rlib name is returned regardless
/// of platform.
pub fn get_lib_name(lib: &str, dylib: bool) -> String {
    if !dylib {
        return format!("lib{}.rlib", lib);
    }

    // Dynamic-library prefix/suffix per platform.
    let (prefix, suffix) = if cfg!(windows) {
        ("", "dll")
    } else if cfg!(target_os = "macos") {
        ("lib", "dylib")
    } else {
        ("lib", "so")
    };
    format!("{}{}.{}", prefix, lib, suffix)
}
+
/// Entry point for executing one test (and all of its revisions).
///
/// Panics early when the environment cannot support the test (missing
/// Android device, missing gdb), parses the test's header properties,
/// prepares output/incremental directories, runs each revision, and finally
/// writes the stamp file.
pub fn run(config: Config, testpaths: &TestPaths, revision: Option<&str>) {
    match &*config.target {
        "arm-linux-androideabi"
        | "armv7-linux-androideabi"
        | "thumbv7neon-linux-androideabi"
        | "aarch64-linux-android" => {
            if !config.adb_device_status {
                panic!("android device not available");
            }
        }

        _ => {
            // android has its own gdb handling
            if config.debugger == Some(Debugger::Gdb) && config.gdb.is_none() {
                panic!("gdb not available but debuginfo gdb debuginfo test requested");
            }
        }
    }

    if config.verbose {
        // We're going to be dumping a lot of info. Start on a new line.
        print!("\n\n");
    }
    debug!("running {:?}", testpaths.file.display());
    let mut props = TestProps::from_file(&testpaths.file, revision, &config);
    if props.incremental {
        props.incremental_dir = Some(incremental_dir(&config, testpaths));
    }

    let cx = TestCx { config: &config, props: &props, testpaths, revision };
    create_dir_all(&cx.output_base_dir()).unwrap();
    if props.incremental {
        cx.init_incremental_test();
    }

    if config.mode == Incremental {
        // Incremental tests are special because they cannot be run in
        // parallel.
        assert!(!props.revisions.is_empty(), "Incremental tests require revisions.");
        for revision in &props.revisions {
            // Re-parse props per revision so revision-specific headers take
            // effect, while sharing the single incremental directory.
            let mut revision_props = TestProps::from_file(&testpaths.file, Some(revision), &config);
            revision_props.incremental_dir = props.incremental_dir.clone();
            let rev_cx = TestCx {
                config: &config,
                props: &revision_props,
                testpaths,
                revision: Some(revision),
            };
            rev_cx.run_revision();
        }
    } else {
        cx.run_revision();
    }

    cx.create_stamp();
}
+
/// Computes the hash recorded in a test's stamp file.
///
/// Hashes everything that should invalidate a cached result when changed:
/// the stage id, the `run` setting, the configured debugger binaries plus
/// `PATH`/`PYTHONPATH` (which affect debugger behavior), and — for UI
/// tests — the forced pass mode.
pub fn compute_stamp_hash(config: &Config) -> String {
    let mut hash = DefaultHasher::new();
    config.stage_id.hash(&mut hash);
    config.run.hash(&mut hash);

    match config.debugger {
        Some(Debugger::Cdb) => {
            config.cdb.hash(&mut hash);
        }

        Some(Debugger::Gdb) => {
            config.gdb.hash(&mut hash);
            env::var_os("PATH").hash(&mut hash);
            env::var_os("PYTHONPATH").hash(&mut hash);
        }

        Some(Debugger::Lldb) => {
            config.python.hash(&mut hash);
            config.lldb_python_dir.hash(&mut hash);
            env::var_os("PATH").hash(&mut hash);
            env::var_os("PYTHONPATH").hash(&mut hash);
        }

        None => {}
    }

    if let Ui = config.mode {
        config.force_pass_mode.hash(&mut hash);
    }

    format!("{:x}", hash.finish())
}
+
/// Context for one test execution: the global config, the test's parsed
/// header properties, its paths, and the active revision (if any).
#[derive(Copy, Clone)]
struct TestCx<'test> {
    config: &'test Config,
    props: &'test TestProps,
    testpaths: &'test TestPaths,
    revision: Option<&'test str>,
}
+
/// Where the pretty-printer reads its input from: the test file on disk, or
/// a string piped through stdin (used for later re-printing rounds).
enum ReadFrom {
    Path,
    Stdin(String),
}
+
/// Which phase produced the output being compared against expectations.
enum TestOutput {
    Compile,
    Run,
}
+
/// Will this test be executed? Should we use `make_exe_name`?
#[derive(Copy, Clone, PartialEq)]
enum WillExecute {
    Yes,
    No,
    // Running was turned off via the config (`run_enabled()` is false), so
    // the test is compiled but not executed.
    Disabled,
}
+
/// Should `--emit metadata` be used? A two-state flag passed to
/// `compile_test` instead of a bare bool for readability at call sites.
#[derive(Copy, Clone)]
enum EmitMetadata {
    Yes,
    No,
}
+
+impl<'test> TestCx<'test> {
    /// Code executed for each revision in turn (or, if there are no
    /// revisions, exactly once, with revision == None).
    fn run_revision(&self) {
        // `should-ice` is only meaningful where a compile failure is expected.
        if self.props.should_ice && self.config.mode != Incremental {
            self.fatal("cannot use should-ice in a test that is not cfail");
        }
        // Dispatch to the mode-specific driver.
        match self.config.mode {
            RunPassValgrind => self.run_valgrind_test(),
            Pretty => self.run_pretty_test(),
            DebugInfo => self.run_debuginfo_test(),
            Codegen => self.run_codegen_test(),
            Rustdoc => self.run_rustdoc_test(),
            RustdocJson => self.run_rustdoc_json_test(),
            CodegenUnits => self.run_codegen_units_test(),
            Incremental => self.run_incremental_test(),
            RunMake => self.run_rmake_test(),
            Ui => self.run_ui_test(),
            MirOpt => self.run_mir_opt_test(),
            Assembly => self.run_assembly_test(),
            JsDocTest => self.run_js_doc_test(),
        }
    }
+
    /// Convenience wrapper: this test's pass mode under the current config.
    fn pass_mode(&self) -> Option<PassMode> {
        self.props.pass_mode(self.config)
    }
+
    /// Decides whether the compiled test binary should actually be executed
    /// for the current mode and pass mode. Only implemented for the modes
    /// that can run binaries (Ui, MirOpt); other modes panic.
    fn should_run(&self, pm: Option<PassMode>) -> WillExecute {
        let test_should_run = match self.config.mode {
            Ui if pm == Some(PassMode::Run) || self.props.fail_mode == Some(FailMode::Run) => true,
            MirOpt if pm == Some(PassMode::Run) => true,
            Ui | MirOpt => false,
            mode => panic!("unimplemented for mode {:?}", mode),
        };
        if test_should_run { self.run_if_enabled() } else { WillExecute::No }
    }
+
    /// `Yes` when the config allows running test binaries, `Disabled`
    /// otherwise (compile-only).
    fn run_if_enabled(&self) -> WillExecute {
        if self.config.run_enabled() { WillExecute::Yes } else { WillExecute::Disabled }
    }
+
    /// Whether an executed test binary is expected to exit successfully.
    fn should_run_successfully(&self, pm: Option<PassMode>) -> bool {
        match self.config.mode {
            Ui | MirOpt => pm == Some(PassMode::Run),
            mode => panic!("unimplemented for mode {:?}", mode),
        }
    }
+
    /// Whether compilation is expected to succeed for this test/revision.
    /// For incremental tests, the expectation is encoded in the revision
    /// name prefix (rpass/rfail compile; cfail depends on the pass mode).
    fn should_compile_successfully(&self, pm: Option<PassMode>) -> bool {
        match self.config.mode {
            JsDocTest => true,
            Ui => pm.is_some() || self.props.fail_mode > Some(FailMode::Build),
            Incremental => {
                let revision =
                    self.revision.expect("incremental tests require a list of revisions");
                if revision.starts_with("rpass") || revision.starts_with("rfail") {
                    true
                } else if revision.starts_with("cfail") {
                    // FIXME: would be nice if incremental revs could start with "cpass"
                    pm.is_some()
                } else {
                    panic!("revision name must begin with rpass, rfail, or cfail");
                }
            }
            mode => panic!("unimplemented for mode {:?}", mode),
        }
    }
+
+ fn check_if_test_should_compile(&self, proc_res: &ProcRes, pm: Option<PassMode>) {
+ if self.should_compile_successfully(pm) {
+ if !proc_res.status.success() {
+ self.fatal_proc_rec("test compilation failed although it shouldn't!", proc_res);
+ }
+ } else {
+ if proc_res.status.success() {
+ self.fatal_proc_rec(
+ &format!("{} test compiled successfully!", self.config.mode)[..],
+ proc_res,
+ );
+ }
+
+ self.check_correct_failure_status(proc_res);
+ }
+ }
+
    /// Runs a compile-fail test: compiles, checks the expected compile
    /// outcome, then matches `//~` expected errors or error patterns, the
    /// optional should-ice expectation (ICEs exit with code 101), and any
    /// forbidden output.
    fn run_cfail_test(&self) {
        let pm = self.pass_mode();
        let proc_res = self.compile_test(WillExecute::No, self.should_emit_metadata(pm));
        self.check_if_test_should_compile(&proc_res, pm);
        self.check_no_compiler_crash(&proc_res, self.props.should_ice);

        let output_to_check = self.get_output(&proc_res);
        let expected_errors = errors::load_errors(&self.testpaths.file, self.revision);
        if !expected_errors.is_empty() {
            // `//~` annotations and error-pattern headers are mutually
            // exclusive ways of specifying expectations.
            if !self.props.error_patterns.is_empty() || !self.props.regex_error_patterns.is_empty()
            {
                self.fatal("both error pattern and expected errors specified");
            }
            self.check_expected_errors(expected_errors, &proc_res);
        } else {
            self.check_all_error_patterns(&output_to_check, &proc_res, pm);
        }
        if self.props.should_ice {
            match proc_res.status.code() {
                Some(101) => (),
                _ => self.fatal("expected ICE"),
            }
        }

        self.check_forbid_output(&output_to_check, &proc_res);
    }
+
    /// Runs a run-fail test: the program must compile, then fail at runtime
    /// with the expected exit status and error patterns.
    fn run_rfail_test(&self) {
        let pm = self.pass_mode();
        let should_run = self.run_if_enabled();
        let proc_res = self.compile_test(should_run, self.should_emit_metadata(pm));

        if !proc_res.status.success() {
            self.fatal_proc_rec("compilation failed!", &proc_res);
        }

        // Compile-only configuration: nothing left to check.
        if let WillExecute::Disabled = should_run {
            return;
        }

        let proc_res = self.exec_compiled_test();

        // The value our Makefile configures valgrind to return on failure
        const VALGRIND_ERR: i32 = 100;
        if proc_res.status.code() == Some(VALGRIND_ERR) {
            self.fatal_proc_rec("run-fail test isn't valgrind-clean!", &proc_res);
        }

        let output_to_check = self.get_output(&proc_res);
        self.check_correct_failure_status(&proc_res);
        self.check_all_error_patterns(&output_to_check, &proc_res, pm);
    }
+
+ fn get_output(&self, proc_res: &ProcRes) -> String {
+ if self.props.check_stdout {
+ format!("{}{}", proc_res.stdout, proc_res.stderr)
+ } else {
+ proc_res.stderr.clone()
+ }
+ }
+
+ fn check_correct_failure_status(&self, proc_res: &ProcRes) {
+ let expected_status = Some(self.props.failure_status);
+ let received_status = proc_res.status.code();
+
+ if expected_status != received_status {
+ self.fatal_proc_rec(
+ &format!(
+ "Error: expected failure status ({:?}) but received status {:?}.",
+ expected_status, received_status
+ ),
+ proc_res,
+ );
+ }
+ }
+
    /// Runs a run-pass test: the program must compile and then exit
    /// successfully. Expected-error annotations are rejected here — such
    /// tests belong in the UI suite.
    fn run_rpass_test(&self) {
        let emit_metadata = self.should_emit_metadata(self.pass_mode());
        let should_run = self.run_if_enabled();
        let proc_res = self.compile_test(should_run, emit_metadata);

        if !proc_res.status.success() {
            self.fatal_proc_rec("compilation failed!", &proc_res);
        }

        // Compile-only configuration: nothing left to check.
        if let WillExecute::Disabled = should_run {
            return;
        }

        // FIXME(#41968): Move this check to tidy?
        let expected_errors = errors::load_errors(&self.testpaths.file, self.revision);
        assert!(
            expected_errors.is_empty(),
            "run-pass tests with expected warnings should be moved to ui/"
        );

        let proc_res = self.exec_compiled_test();
        if !proc_res.status.success() {
            self.fatal_proc_rec("test run failed!", &proc_res);
        }
    }
+
    /// Runs a test under valgrind by re-running it with `runtool` set to the
    /// configured valgrind binary. Falls back to a plain run-pass test when
    /// no valgrind path is configured (and valgrind is not forced).
    fn run_valgrind_test(&self) {
        assert!(self.revision.is_none(), "revisions not relevant here");

        if self.config.valgrind_path.is_none() {
            assert!(!self.config.force_valgrind);
            return self.run_rpass_test();
        }

        let should_run = self.run_if_enabled();
        let mut proc_res = self.compile_test(should_run, EmitMetadata::No);

        if !proc_res.status.success() {
            self.fatal_proc_rec("compilation failed!", &proc_res);
        }

        // Compile-only configuration: nothing left to check.
        if let WillExecute::Disabled = should_run {
            return;
        }

        // Execute through valgrind by cloning the config with valgrind as
        // the run tool.
        let mut new_config = self.config.clone();
        new_config.runtool = new_config.valgrind_path.clone();
        let new_cx = TestCx { config: &new_config, ..*self };
        proc_res = new_cx.exec_compiled_test();

        if !proc_res.status.success() {
            self.fatal_proc_rec("test run failed!", &proc_res);
        }
    }
+
    /// Runs a pretty-printing test: pretty-prints the source (twice when
    /// checking for convergence, once when an exact expected file is given),
    /// compares or blesses the result, then verifies the printed source
    /// still typechecks (and, optionally, that the `-Zunpretty=expanded`
    /// output does too).
    fn run_pretty_test(&self) {
        if self.props.pp_exact.is_some() {
            logv(self.config, "testing for exact pretty-printing".to_owned());
        } else {
            logv(self.config, "testing for converging pretty-printing".to_owned());
        }

        // Exact comparison needs one round; convergence needs two (the
        // second round's output must equal the first's).
        let rounds = match self.props.pp_exact {
            Some(_) => 1,
            None => 2,
        };

        let src = fs::read_to_string(&self.testpaths.file).unwrap();
        let mut srcs = vec![src];

        let mut round = 0;
        while round < rounds {
            logv(
                self.config,
                format!("pretty-printing round {} revision {:?}", round, self.revision),
            );
            // Round 0 reads the file; later rounds feed the previous output
            // back through stdin.
            let read_from =
                if round == 0 { ReadFrom::Path } else { ReadFrom::Stdin(srcs[round].to_owned()) };

            let proc_res = self.print_source(read_from, &self.props.pretty_mode);
            if !proc_res.status.success() {
                self.fatal_proc_rec(
                    &format!(
                        "pretty-printing failed in round {} revision {:?}",
                        round, self.revision
                    ),
                    &proc_res,
                );
            }

            let ProcRes { stdout, .. } = proc_res;
            srcs.push(stdout);
            round += 1;
        }

        let mut expected = match self.props.pp_exact {
            Some(ref file) => {
                let filepath = self.testpaths.file.parent().unwrap().join(file);
                fs::read_to_string(&filepath).unwrap()
            }
            None => srcs[srcs.len() - 2].clone(),
        };
        let mut actual = srcs[srcs.len() - 1].clone();

        if self.props.pp_exact.is_some() {
            // Now we have to care about line endings
            let cr = "\r".to_owned();
            actual = actual.replace(&cr, "");
            expected = expected.replace(&cr, "");
        }

        if !self.config.bless {
            self.compare_source(&expected, &actual);
        } else if expected != actual {
            // `--bless`: overwrite the expected file (or the test itself)
            // with the actual output instead of failing.
            let filepath_buf;
            let filepath = match &self.props.pp_exact {
                Some(file) => {
                    filepath_buf = self.testpaths.file.parent().unwrap().join(file);
                    &filepath_buf
                }
                None => &self.testpaths.file,
            };
            fs::write(filepath, &actual).unwrap();
        }

        // If we're only making sure that the output matches then just stop here
        if self.props.pretty_compare_only {
            return;
        }

        // Finally, let's make sure it actually appears to remain valid code
        let proc_res = self.typecheck_source(actual);
        if !proc_res.status.success() {
            self.fatal_proc_rec("pretty-printed source does not typecheck", &proc_res);
        }

        if !self.props.pretty_expanded {
            return;
        }

        // additionally, run `-Zunpretty=expanded` and try to build it.
        let proc_res = self.print_source(ReadFrom::Path, "expanded");
        if !proc_res.status.success() {
            self.fatal_proc_rec("pretty-printing (expanded) failed", &proc_res);
        }

        let ProcRes { stdout: expanded_src, .. } = proc_res;
        let proc_res = self.typecheck_source(expanded_src);
        if !proc_res.status.success() {
            self.fatal_proc_rec("pretty-printed source (expanded) does not typecheck", &proc_res);
        }
    }
+
    /// Invokes rustc with `-Zunpretty=<pretty_type>` on the test file (or on
    /// stdin for later rounds) and returns the process result, whose stdout
    /// is the pretty-printed source.
    fn print_source(&self, read_from: ReadFrom, pretty_type: &str) -> ProcRes {
        let aux_dir = self.aux_output_dir_name();
        // `-` tells rustc to read from stdin.
        let input: &str = match read_from {
            ReadFrom::Stdin(_) => "-",
            ReadFrom::Path => self.testpaths.file.to_str().unwrap(),
        };

        let mut rustc = Command::new(&self.config.rustc_path);
        rustc
            .arg(input)
            .args(&["-Z", &format!("unpretty={}", pretty_type)])
            .args(&["--target", &self.config.target])
            .arg("-L")
            .arg(&aux_dir)
            .args(&self.props.compile_flags)
            .envs(self.props.rustc_env.clone());
        self.maybe_add_external_args(
            &mut rustc,
            self.split_maybe_args(&self.config.target_rustcflags),
        );

        // The source string, if any, is piped to the child's stdin.
        let src = match read_from {
            ReadFrom::Stdin(src) => Some(src),
            ReadFrom::Path => None,
        };

        self.compose_and_run(
            rustc,
            self.config.compile_lib_path.to_str().unwrap(),
            Some(aux_dir.to_str().unwrap()),
            src,
        )
    }
+
+ fn compare_source(&self, expected: &str, actual: &str) {
+ if expected != actual {
+ self.fatal(&format!(
+ "pretty-printed source does not match expected source\n\
+ expected:\n\
+ ------------------------------------------\n\
+ {}\n\
+ ------------------------------------------\n\
+ actual:\n\
+ ------------------------------------------\n\
+ {}\n\
+ ------------------------------------------\n\
+ diff:\n\
+ ------------------------------------------\n\
+ {}\n",
+ expected,
+ actual,
+ write_diff(expected, actual, 3),
+ ));
+ }
+ }
+
+ fn set_revision_flags(&self, cmd: &mut Command) {
+ if let Some(revision) = self.revision {
+ // Normalize revisions to be lowercase and replace `-`s with `_`s.
+ // Otherwise the `--cfg` flag is not valid.
+ let normalized_revision = revision.to_lowercase().replace("-", "_");
+ cmd.args(&["--cfg", &normalized_revision]);
+ }
+ }
+
    /// Type-checks `src` (a pretty-printed source string) by feeding it to
    /// rustc on stdin with `-Zno-codegen`, returning the process result.
    fn typecheck_source(&self, src: String) -> ProcRes {
        let mut rustc = Command::new(&self.config.rustc_path);

        // Fresh scratch output directory for this check.
        let out_dir = self.output_base_name().with_extension("pretty-out");
        let _ = fs::remove_dir_all(&out_dir);
        create_dir_all(&out_dir).unwrap();

        let target = if self.props.force_host { &*self.config.host } else { &*self.config.target };

        let aux_dir = self.aux_output_dir_name();

        rustc
            .arg("-")
            .arg("-Zno-codegen")
            .arg("--out-dir")
            .arg(&out_dir)
            .arg(&format!("--target={}", target))
            .arg("-L")
            .arg(&self.config.build_base)
            .arg("-L")
            .arg(aux_dir);
        self.set_revision_flags(&mut rustc);
        self.maybe_add_external_args(
            &mut rustc,
            self.split_maybe_args(&self.config.target_rustcflags),
        );
        rustc.args(&self.props.compile_flags);

        self.compose_and_run_compiler(rustc, Some(src))
    }
+
+ fn run_debuginfo_test(&self) {
+ match self.config.debugger.unwrap() {
+ Debugger::Cdb => self.run_debuginfo_cdb_test(),
+ Debugger::Gdb => self.run_debuginfo_gdb_test(),
+ Debugger::Lldb => self.run_debuginfo_lldb_test(),
+ }
+ }
+
    /// Prepares a CDB debuginfo test by stripping debugger-unfriendly
    /// options from the rustc flags, then runs the unoptimized variant.
    fn run_debuginfo_cdb_test(&self) {
        let config = Config {
            target_rustcflags: self.cleanup_debug_info_options(&self.config.target_rustcflags),
            host_rustcflags: self.cleanup_debug_info_options(&self.config.host_rustcflags),
            ..self.config.clone()
        };

        let test_cx = TestCx { config: &config, ..*self };

        test_cx.run_debuginfo_cdb_test_no_opt();
    }
+
+    /// Runs a CDB debuginfo test: compiles the test file, generates a CDB
+    /// command script from the `cdb-command:`/`#break` annotations, runs CDB
+    /// over the produced executable, and checks the output against the
+    /// `cdb-check:` lines.
+    fn run_debuginfo_cdb_test_no_opt(&self) {
+        let exe_file = self.make_exe_name();
+
+        // Existing PDB files are update in-place. When changing the debuginfo
+        // the compiler generates for something, this can lead to the situation
+        // where both the old and the new version of the debuginfo for the same
+        // type is present in the PDB, which is very confusing.
+        // Therefore we delete any existing PDB file before compiling the test
+        // case.
+        // FIXME: If can reliably detect that MSVC's link.exe is used, then
+        // passing `/INCREMENTAL:NO` might be a cleaner way to do this.
+        //
+        // NOTE: `with_extension` expects the extension *without* the leading
+        // dot; passing ".pdb" would produce `name..pdb` and the stale PDB
+        // would never be found or deleted.
+        let pdb_file = exe_file.with_extension("pdb");
+        if pdb_file.exists() {
+            std::fs::remove_file(pdb_file).unwrap();
+        }
+
+        // compile test file (it should have 'compile-flags:-g' in the header)
+        let should_run = self.run_if_enabled();
+        let compile_result = self.compile_test(should_run, EmitMetadata::No);
+        if !compile_result.status.success() {
+            self.fatal_proc_rec("compilation failed!", &compile_result);
+        }
+        if let WillExecute::Disabled = should_run {
+            return;
+        }
+
+        let prefixes = {
+            static PREFIXES: &[&str] = &["cdb", "cdbg"];
+            // No "native rust support" variation for CDB yet.
+            PREFIXES
+        };
+
+        // Parse debugger commands etc from test files
+        let DebuggerCommands { commands, check_lines, breakpoint_lines, .. } =
+            match DebuggerCommands::parse_from(
+                &self.testpaths.file,
+                self.config,
+                prefixes,
+                self.revision,
+            ) {
+                Ok(cmds) => cmds,
+                Err(e) => self.fatal(&e),
+            };
+
+        // https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/debugger-commands
+        let mut script_str = String::with_capacity(2048);
+        script_str.push_str("version\n"); // List CDB (and more) version info in test output
+        script_str.push_str(".nvlist\n"); // List loaded `*.natvis` files, bulk of custom MSVC debug
+
+        // If a .js file exists next to the source file being tested, then this is a JavaScript
+        // debugging extension that needs to be loaded.
+        let mut js_extension = self.testpaths.file.clone();
+        js_extension.set_extension("cdb.js");
+        if js_extension.exists() {
+            script_str.push_str(&format!(".scriptload \"{}\"\n", js_extension.to_string_lossy()));
+        }
+
+        // Set breakpoints on every line that contains the string "#break"
+        let source_file_name = self.testpaths.file.file_name().unwrap().to_string_lossy();
+        for line in &breakpoint_lines {
+            script_str.push_str(&format!("bp `{}:{}`\n", source_file_name, line));
+        }
+
+        // Append the other `cdb-command:`s
+        for line in &commands {
+            script_str.push_str(line);
+            script_str.push_str("\n");
+        }
+
+        script_str.push_str("\nqq\n"); // Quit the debugger (including remote debugger, if any)
+
+        // Write the script into a file
+        debug!("script_str = {}", script_str);
+        self.dump_output_file(&script_str, "debugger.script");
+        let debugger_script = self.make_out_name("debugger.script");
+
+        let cdb_path = &self.config.cdb.as_ref().unwrap();
+        let mut cdb = Command::new(cdb_path);
+        cdb.arg("-lines") // Enable source line debugging.
+            .arg("-cf")
+            .arg(&debugger_script)
+            .arg(&exe_file);
+
+        let debugger_run_result = self.compose_and_run(
+            cdb,
+            self.config.run_lib_path.to_str().unwrap(),
+            None, // aux_path
+            None, // input
+        );
+
+        if !debugger_run_result.status.success() {
+            self.fatal_proc_rec("Error while running CDB", &debugger_run_result);
+        }
+
+        if let Err(e) = check_debugger_output(&debugger_run_result, &check_lines) {
+            self.fatal_proc_rec(&e, &debugger_run_result);
+        }
+    }
+
+    /// Entry point for GDB debuginfo tests: removes debug-info flags that
+    /// would duplicate or conflict with the test's own, then runs the
+    /// unoptimized variant.
+    fn run_debuginfo_gdb_test(&self) {
+        let target_rustcflags = self.cleanup_debug_info_options(&self.config.target_rustcflags);
+        let host_rustcflags = self.cleanup_debug_info_options(&self.config.host_rustcflags);
+        let config = Config { target_rustcflags, host_rustcflags, ..self.config.clone() };
+
+        TestCx { config: &config, ..*self }.run_debuginfo_gdb_test_no_opt();
+    }
+
+    /// Runs a GDB debuginfo test: compiles the test file, builds a GDB
+    /// command script from the `gdb-command`/`#break` annotations, executes
+    /// GDB (remotely via adb + gdbserver for Android targets, locally
+    /// otherwise), and checks the output against the `gdb-check` lines.
+    fn run_debuginfo_gdb_test_no_opt(&self) {
+        // Which comment prefixes apply depends on whether this GDB has
+        // native Rust support (`gdbr`) or not (`gdbg`).
+        let prefixes = if self.config.gdb_native_rust {
+            // GDB with Rust
+            static PREFIXES: &[&str] = &["gdb", "gdbr"];
+            println!("NOTE: compiletest thinks it is using GDB with native rust support");
+            PREFIXES
+        } else {
+            // Generic GDB
+            static PREFIXES: &[&str] = &["gdb", "gdbg"];
+            println!("NOTE: compiletest thinks it is using GDB without native rust support");
+            PREFIXES
+        };
+
+        let DebuggerCommands { commands, check_lines, breakpoint_lines } =
+            match DebuggerCommands::parse_from(
+                &self.testpaths.file,
+                self.config,
+                prefixes,
+                self.revision,
+            ) {
+                Ok(cmds) => cmds,
+                Err(e) => self.fatal(&e),
+            };
+        let mut cmds = commands.join("\n");
+
+        // compile test file (it should have 'compile-flags:-g' in the header)
+        let should_run = self.run_if_enabled();
+        let compiler_run_result = self.compile_test(should_run, EmitMetadata::No);
+        if !compiler_run_result.status.success() {
+            self.fatal_proc_rec("compilation failed!", &compiler_run_result);
+        }
+        if let WillExecute::Disabled = should_run {
+            return;
+        }
+
+        let exe_file = self.make_exe_name();
+
+        let debugger_run_result;
+        if is_android_gdb_target(&self.config.target) {
+            // Android: push the binary to the device, start gdbserver there,
+            // and drive it from a host-side GDB over tcp:5039.
+            cmds = cmds.replace("run", "continue");
+
+            let tool_path = match self.config.android_cross_path.to_str() {
+                Some(x) => x.to_owned(),
+                None => self.fatal("cannot find android cross path"),
+            };
+
+            // write debugger script
+            let mut script_str = String::with_capacity(2048);
+            script_str.push_str(&format!("set charset {}\n", Self::charset()));
+            script_str.push_str(&format!("set sysroot {}\n", tool_path));
+            script_str.push_str(&format!("file {}\n", exe_file.to_str().unwrap()));
+            script_str.push_str("target remote :5039\n");
+            script_str.push_str(&format!(
+                "set solib-search-path \
+                 ./{}/stage2/lib/rustlib/{}/lib/\n",
+                self.config.host, self.config.target
+            ));
+            for line in &breakpoint_lines {
+                script_str.push_str(
+                    &format!(
+                        "break {:?}:{}\n",
+                        self.testpaths.file.file_name().unwrap().to_string_lossy(),
+                        *line
+                    )[..],
+                );
+            }
+            script_str.push_str(&cmds);
+            script_str.push_str("\nquit\n");
+
+            debug!("script_str = {}", script_str);
+            self.dump_output_file(&script_str, "debugger.script");
+
+            let adb_path = &self.config.adb_path;
+
+            Command::new(adb_path)
+                .arg("push")
+                .arg(&exe_file)
+                .arg(&self.config.adb_test_dir)
+                .status()
+                .unwrap_or_else(|_| panic!("failed to exec `{:?}`", adb_path));
+
+            Command::new(adb_path)
+                .args(&["forward", "tcp:5039", "tcp:5039"])
+                .status()
+                .unwrap_or_else(|_| panic!("failed to exec `{:?}`", adb_path));
+
+            let adb_arg = format!(
+                "export LD_LIBRARY_PATH={}; \
+                 gdbserver{} :5039 {}/{}",
+                self.config.adb_test_dir.clone(),
+                if self.config.target.contains("aarch64") { "64" } else { "" },
+                self.config.adb_test_dir.clone(),
+                exe_file.file_name().unwrap().to_str().unwrap()
+            );
+
+            debug!("adb arg: {}", adb_arg);
+            let mut adb = Command::new(adb_path)
+                .args(&["shell", &adb_arg])
+                .stdout(Stdio::piped())
+                .stderr(Stdio::inherit())
+                .spawn()
+                .unwrap_or_else(|_| panic!("failed to exec `{:?}`", adb_path));
+
+            // Wait for the gdbserver to print out "Listening on port ..."
+            // at which point we know that it's started and then we can
+            // execute the debugger below.
+            let mut stdout = BufReader::new(adb.stdout.take().unwrap());
+            let mut line = String::new();
+            loop {
+                line.truncate(0);
+                stdout.read_line(&mut line).unwrap();
+                if line.starts_with("Listening on port 5039") {
+                    break;
+                }
+            }
+            drop(stdout);
+
+            let mut debugger_script = OsString::from("-command=");
+            debugger_script.push(self.make_out_name("debugger.script"));
+            let debugger_opts: &[&OsStr] =
+                &["-quiet".as_ref(), "-batch".as_ref(), "-nx".as_ref(), &debugger_script];
+
+            let gdb_path = self.config.gdb.as_ref().unwrap();
+            let Output { status, stdout, stderr } = Command::new(&gdb_path)
+                .args(debugger_opts)
+                .output()
+                .unwrap_or_else(|_| panic!("failed to exec `{:?}`", gdb_path));
+            // The cmdline is reported with the target-prefixed gdb name for
+            // readability; the actual invocation above used `config.gdb`.
+            let cmdline = {
+                let mut gdb = Command::new(&format!("{}-gdb", self.config.target));
+                gdb.args(debugger_opts);
+                let cmdline = self.make_cmdline(&gdb, "");
+                logv(self.config, format!("executing {}", cmdline));
+                cmdline
+            };
+
+            debugger_run_result = ProcRes {
+                status,
+                stdout: String::from_utf8(stdout).unwrap(),
+                stderr: String::from_utf8(stderr).unwrap(),
+                cmdline,
+            };
+            if adb.kill().is_err() {
+                println!("Adb process is already finished.");
+            }
+        } else {
+            // Local (non-Android) GDB.
+            let rust_src_root =
+                self.config.find_rust_src_root().expect("Could not find Rust source root");
+            let rust_pp_module_rel_path = Path::new("./src/etc");
+            let rust_pp_module_abs_path =
+                rust_src_root.join(rust_pp_module_rel_path).to_str().unwrap().to_owned();
+            // write debugger script
+            let mut script_str = String::with_capacity(2048);
+            script_str.push_str(&format!("set charset {}\n", Self::charset()));
+            script_str.push_str("show version\n");
+
+            match self.config.gdb_version {
+                Some(version) => {
+                    println!("NOTE: compiletest thinks it is using GDB version {}", version);
+
+                    if version > extract_gdb_version("7.4").unwrap() {
+                        // Add the directory containing the pretty printers to
+                        // GDB's script auto loading safe path
+                        script_str.push_str(&format!(
+                            "add-auto-load-safe-path {}\n",
+                            rust_pp_module_abs_path.replace(r"\", r"\\")
+                        ));
+
+                        let output_base_dir = self.output_base_dir().to_str().unwrap().to_owned();
+
+                        // Add the directory containing the output binary to
+                        // include embedded pretty printers to GDB's script
+                        // auto loading safe path
+                        script_str.push_str(&format!(
+                            "add-auto-load-safe-path {}\n",
+                            output_base_dir.replace(r"\", r"\\")
+                        ));
+                    }
+                }
+                _ => {
+                    println!(
+                        "NOTE: compiletest does not know which version of \
+                         GDB it is using"
+                    );
+                }
+            }
+
+            // The following line actually doesn't have to do anything with
+            // pretty printing, it just tells GDB to print values on one line:
+            script_str.push_str("set print pretty off\n");
+
+            // Add the pretty printer directory to GDB's source-file search path
+            script_str
+                .push_str(&format!("directory {}\n", rust_pp_module_abs_path.replace(r"\", r"\\")));
+
+            // Load the target executable
+            script_str
+                .push_str(&format!("file {}\n", exe_file.to_str().unwrap().replace(r"\", r"\\")));
+
+            // Force GDB to print values in the Rust format.
+            if self.config.gdb_native_rust {
+                script_str.push_str("set language rust\n");
+            }
+
+            // Add line breakpoints
+            for line in &breakpoint_lines {
+                script_str.push_str(&format!(
+                    "break '{}':{}\n",
+                    self.testpaths.file.file_name().unwrap().to_string_lossy(),
+                    *line
+                ));
+            }
+
+            script_str.push_str(&cmds);
+            script_str.push_str("\nquit\n");
+
+            debug!("script_str = {}", script_str);
+            self.dump_output_file(&script_str, "debugger.script");
+
+            let mut debugger_script = OsString::from("-command=");
+            debugger_script.push(self.make_out_name("debugger.script"));
+
+            let debugger_opts: &[&OsStr] =
+                &["-quiet".as_ref(), "-batch".as_ref(), "-nx".as_ref(), &debugger_script];
+
+            let mut gdb = Command::new(self.config.gdb.as_ref().unwrap());
+            gdb.args(debugger_opts).env("PYTHONPATH", rust_pp_module_abs_path);
+
+            debugger_run_result =
+                self.compose_and_run(gdb, self.config.run_lib_path.to_str().unwrap(), None, None);
+        }
+
+        if !debugger_run_result.status.success() {
+            self.fatal_proc_rec("gdb failed to execute", &debugger_run_result);
+        }
+
+        if let Err(e) = check_debugger_output(&debugger_run_result, &check_lines) {
+            self.fatal_proc_rec(&e, &debugger_run_result);
+        }
+    }
+
+    /// Entry point for LLDB debuginfo tests. Requires LLDB's python path to
+    /// be configured; strips conflicting debug-info flags from the inherited
+    /// rustc flags before running unoptimized.
+    fn run_debuginfo_lldb_test(&self) {
+        if self.config.lldb_python_dir.is_none() {
+            self.fatal("Can't run LLDB test because LLDB's python path is not set.");
+        }
+
+        let target_rustcflags = self.cleanup_debug_info_options(&self.config.target_rustcflags);
+        let host_rustcflags = self.cleanup_debug_info_options(&self.config.host_rustcflags);
+        let config = Config { target_rustcflags, host_rustcflags, ..self.config.clone() };
+
+        TestCx { config: &config, ..*self }.run_debuginfo_lldb_test_no_opt();
+    }
+
+    /// Runs an LLDB debuginfo test: compiles the test file, builds an LLDB
+    /// command script (loading the Rust formatters from `src/etc`), runs
+    /// LLDB via `lldb_batchmode.py`, and checks output against the
+    /// `lldb-check` lines.
+    fn run_debuginfo_lldb_test_no_opt(&self) {
+        // compile test file (it should have 'compile-flags:-g' in the header)
+        let should_run = self.run_if_enabled();
+        let compile_result = self.compile_test(should_run, EmitMetadata::No);
+        if !compile_result.status.success() {
+            self.fatal_proc_rec("compilation failed!", &compile_result);
+        }
+        if let WillExecute::Disabled = should_run {
+            return;
+        }
+
+        let exe_file = self.make_exe_name();
+
+        match self.config.lldb_version {
+            Some(ref version) => {
+                println!("NOTE: compiletest thinks it is using LLDB version {}", version);
+            }
+            _ => {
+                println!(
+                    "NOTE: compiletest does not know which version of \
+                     LLDB it is using"
+                );
+            }
+        }
+
+        // Comment prefixes depend on whether this LLDB has native Rust
+        // support (`lldbr`) or not (`lldbg`).
+        let prefixes = if self.config.lldb_native_rust {
+            static PREFIXES: &[&str] = &["lldb", "lldbr"];
+            println!("NOTE: compiletest thinks it is using LLDB with native rust support");
+            PREFIXES
+        } else {
+            static PREFIXES: &[&str] = &["lldb", "lldbg"];
+            println!("NOTE: compiletest thinks it is using LLDB without native rust support");
+            PREFIXES
+        };
+
+        // Parse debugger commands etc from test files
+        let DebuggerCommands { commands, check_lines, breakpoint_lines, .. } =
+            match DebuggerCommands::parse_from(
+                &self.testpaths.file,
+                self.config,
+                prefixes,
+                self.revision,
+            ) {
+                Ok(cmds) => cmds,
+                Err(e) => self.fatal(&e),
+            };
+
+        // Write debugger script:
+        // We don't want to hang when calling `quit` while the process is still running
+        let mut script_str = String::from("settings set auto-confirm true\n");
+
+        // Make LLDB emit its version, so we have it documented in the test output
+        script_str.push_str("version\n");
+
+        // Switch LLDB into "Rust mode"
+        let rust_src_root =
+            self.config.find_rust_src_root().expect("Could not find Rust source root");
+        let rust_pp_module_rel_path = Path::new("./src/etc/lldb_lookup.py");
+        let rust_pp_module_abs_path =
+            rust_src_root.join(rust_pp_module_rel_path).to_str().unwrap().to_owned();
+
+        // Type-name patterns that get the Rust summary/synthetic providers
+        // registered below.
+        let rust_type_regexes = vec![
+            "^(alloc::([a-z_]+::)+)String$",
+            "^&(mut )?str$",
+            "^&(mut )?\\[.+\\]$",
+            "^(std::ffi::([a-z_]+::)+)OsString$",
+            "^(alloc::([a-z_]+::)+)Vec<.+>$",
+            "^(alloc::([a-z_]+::)+)VecDeque<.+>$",
+            "^(alloc::([a-z_]+::)+)BTreeSet<.+>$",
+            "^(alloc::([a-z_]+::)+)BTreeMap<.+>$",
+            "^(std::collections::([a-z_]+::)+)HashMap<.+>$",
+            "^(std::collections::([a-z_]+::)+)HashSet<.+>$",
+            "^(alloc::([a-z_]+::)+)Rc<.+>$",
+            "^(alloc::([a-z_]+::)+)Arc<.+>$",
+            "^(core::([a-z_]+::)+)Cell<.+>$",
+            "^(core::([a-z_]+::)+)Ref<.+>$",
+            "^(core::([a-z_]+::)+)RefMut<.+>$",
+            "^(core::([a-z_]+::)+)RefCell<.+>$",
+        ];
+
+        script_str
+            .push_str(&format!("command script import {}\n", &rust_pp_module_abs_path[..])[..]);
+        script_str.push_str("type synthetic add -l lldb_lookup.synthetic_lookup -x '.*' ");
+        script_str.push_str("--category Rust\n");
+        for type_regex in rust_type_regexes {
+            script_str.push_str("type summary add -F lldb_lookup.summary_lookup  -e -x -h ");
+            script_str.push_str(&format!("'{}' ", type_regex));
+            script_str.push_str("--category Rust\n");
+        }
+        script_str.push_str("type category enable Rust\n");
+
+        // Set breakpoints on every line that contains the string "#break"
+        let source_file_name = self.testpaths.file.file_name().unwrap().to_string_lossy();
+        for line in &breakpoint_lines {
+            script_str.push_str(&format!(
+                "breakpoint set --file '{}' --line {}\n",
+                source_file_name, line
+            ));
+        }
+
+        // Append the other commands
+        for line in &commands {
+            script_str.push_str(line);
+            script_str.push_str("\n");
+        }
+
+        // Finally, quit the debugger
+        script_str.push_str("\nquit\n");
+
+        // Write the script into a file
+        debug!("script_str = {}", script_str);
+        self.dump_output_file(&script_str, "debugger.script");
+        let debugger_script = self.make_out_name("debugger.script");
+
+        // Let LLDB execute the script via lldb_batchmode.py
+        let debugger_run_result = self.run_lldb(&exe_file, &debugger_script, &rust_src_root);
+
+        if !debugger_run_result.status.success() {
+            self.fatal_proc_rec("Error while running LLDB", &debugger_run_result);
+        }
+
+        if let Err(e) = check_debugger_output(&debugger_run_result, &check_lines) {
+            self.fatal_proc_rec(&e, &debugger_run_result);
+        }
+    }
+
+    /// Invokes LLDB through the `lldb_batchmode.py` wrapper, which feeds the
+    /// generated debugger script to LLDB driving `test_executable`.
+    /// PYTHONPATH points at the configured LLDB python bindings.
+    fn run_lldb(
+        &self,
+        test_executable: &Path,
+        debugger_script: &Path,
+        rust_src_root: &Path,
+    ) -> ProcRes {
+        // Prepare the lldb_batchmode which executes the debugger script
+        let lldb_script_path = rust_src_root.join("src/etc/lldb_batchmode.py");
+        self.cmd2procres(
+            Command::new(&self.config.python)
+                .arg(&lldb_script_path)
+                .arg(test_executable)
+                .arg(debugger_script)
+                .env("PYTHONUNBUFFERED", "1") // Help debugging #78665
+                .env("PYTHONPATH", self.config.lldb_python_dir.as_ref().unwrap()),
+        )
+    }
+
+    /// Runs `cmd` to completion, decodes stdout/stderr as UTF-8, dumps them
+    /// to the test's output files, and packages everything into a `ProcRes`.
+    fn cmd2procres(&self, cmd: &mut Command) -> ProcRes {
+        let output = match cmd.output() {
+            Ok(output) => output,
+            Err(e) => self.fatal(&format!(
+                "Failed to setup Python process for \
+                 LLDB script: {}",
+                e
+            )),
+        };
+
+        let stdout = String::from_utf8(output.stdout).unwrap();
+        let stderr = String::from_utf8(output.stderr).unwrap();
+        self.dump_output(&stdout, &stderr);
+        ProcRes { status: output.status, stdout, stderr, cmdline: format!("{:?}", cmd) }
+    }
+
+    /// Strips `-O`, `-g`, and `--debuginfo` from an option string; debuginfo
+    /// tests provide their own flags, and duplicates may arrive via
+    /// RUSTFLAGS. Returns `None` exactly when `options` is `None`.
+    fn cleanup_debug_info_options(&self, options: &Option<String>) -> Option<String> {
+        options.as_ref()?;
+
+        // Flags that are either unwanted (-O) or may lead to duplicates.
+        let options_to_remove = ["-O".to_owned(), "-g".to_owned(), "--debuginfo".to_owned()];
+        let kept: Vec<String> = self
+            .split_maybe_args(options)
+            .into_iter()
+            .filter(|flag| !options_to_remove.contains(flag))
+            .collect();
+
+        Some(kept.join(" "))
+    }
+
+    /// Appends externally-supplied `args` to `cmd`, skipping optimization /
+    /// debug-info flags whenever the test's own `compile-flags` already set
+    /// one (e.g. `compile-flags: -Copt-level=x` must win over ours).
+    fn maybe_add_external_args(&self, cmd: &mut Command, args: Vec<String>) {
+        const OPT_FLAGS: &[&str] = &["-O", "-Copt-level=", /*-C<space>*/ "opt-level="];
+        const DEBUG_FLAGS: &[&str] = &["-g", "-Cdebuginfo=", /*-C<space>*/ "debuginfo="];
+
+        // FIXME: ideally we would "just" check the `cmd` itself, but it does not allow inspecting
+        // its arguments. They need to be collected separately. For now I cannot be bothered to
+        // implement this the "right" way.
+        let matches_any = |arg: &str, flags: &[&str]| flags.iter().any(|f| arg.starts_with(f));
+        let have_opt_flag =
+            self.props.compile_flags.iter().any(|arg| matches_any(arg, OPT_FLAGS));
+        let have_debug_flag =
+            self.props.compile_flags.iter().any(|arg| matches_any(arg, DEBUG_FLAGS));
+
+        cmd.args(args.into_iter().filter(|arg| {
+            !(have_opt_flag && matches_any(arg, OPT_FLAGS))
+                && !(have_debug_flag && matches_any(arg, DEBUG_FLAGS))
+        }));
+    }
+
+    /// Verifies that every `error-pattern` and `regex-error-pattern` from
+    /// the test header appears in `output_to_check`; all missing patterns
+    /// are reported and the test fails. Having no patterns at all is itself
+    /// an error, unless a pass mode is set (FIXME(#65865)).
+    fn check_all_error_patterns(
+        &self,
+        output_to_check: &str,
+        proc_res: &ProcRes,
+        pm: Option<PassMode>,
+    ) {
+        if self.props.error_patterns.is_empty() && self.props.regex_error_patterns.is_empty() {
+            if pm.is_some() {
+                // FIXME(#65865)
+                return;
+            } else {
+                self.fatal(&format!(
+                    "no error pattern specified in {:?}",
+                    self.testpaths.file.display()
+                ));
+            }
+        }
+
+        let mut missing_patterns: Vec<String> = Vec::new();
+
+        self.check_error_patterns(output_to_check, &mut missing_patterns);
+        self.check_regex_error_patterns(output_to_check, proc_res, &mut missing_patterns);
+
+        // All patterns found: the test passes this check.
+        if missing_patterns.is_empty() {
+            return;
+        }
+
+        // Report each missing pattern individually before failing.
+        if missing_patterns.len() == 1 {
+            self.fatal_proc_rec(
+                &format!("error pattern '{}' not found!", missing_patterns[0]),
+                proc_res,
+            );
+        } else {
+            for pattern in missing_patterns {
+                self.error(&format!("error pattern '{}' not found!", pattern));
+            }
+            self.fatal_proc_rec("multiple error patterns not found", proc_res);
+        }
+    }
+
+    /// Records, into `missing_patterns`, every plain-substring
+    /// `error-pattern` (trimmed) that does not occur in the output.
+    fn check_error_patterns(&self, output_to_check: &str, missing_patterns: &mut Vec<String>) {
+        debug!("check_error_patterns");
+        for pattern in &self.props.error_patterns {
+            let needle = pattern.trim();
+            if !output_to_check.contains(needle) {
+                missing_patterns.push(pattern.to_string());
+            } else {
+                debug!("found error pattern {}", pattern);
+            }
+        }
+    }
+
+    /// Records, into `missing_patterns`, every `regex-error-pattern` that
+    /// fails to match the output. A pattern that is not valid regex syntax
+    /// is a hard test error.
+    fn check_regex_error_patterns(
+        &self,
+        output_to_check: &str,
+        proc_res: &ProcRes,
+        missing_patterns: &mut Vec<String>,
+    ) {
+        debug!("check_regex_error_patterns");
+
+        for pattern in &self.props.regex_error_patterns {
+            let pattern = pattern.trim();
+            let re = match Regex::new(pattern) {
+                Ok(re) => re,
+                Err(err) => {
+                    // `fatal_proc_rec` diverges, so the match still yields a Regex.
+                    self.fatal_proc_rec(
+                        &format!("invalid regex error pattern '{}': {:?}", pattern, err),
+                        proc_res,
+                    );
+                }
+            };
+            if re.is_match(output_to_check) {
+                debug!("found regex error pattern {}", pattern);
+            } else {
+                missing_patterns.push(pattern.to_string());
+            }
+        }
+    }
+
+    /// Fails the test when the compiler appears to have crashed: a missing
+    /// exit code means it was terminated by a signal, and exit code 101 is
+    /// rustc's internal-error status (tolerated only when an ICE is expected).
+    fn check_no_compiler_crash(&self, proc_res: &ProcRes, should_ice: bool) {
+        match proc_res.status.code() {
+            None => self.fatal_proc_rec("compiler terminated by signal", proc_res),
+            Some(101) if !should_ice => {
+                self.fatal_proc_rec("compiler encountered internal error", proc_res)
+            }
+            Some(_) => (),
+        }
+    }
+
+    /// Fails the test if any `forbid-output` pattern occurs in the output.
+    fn check_forbid_output(&self, output_to_check: &str, proc_res: &ProcRes) {
+        let forbidden_found =
+            self.props.forbid_output.iter().any(|pat| output_to_check.contains(pat));
+        if forbidden_found {
+            self.fatal_proc_rec("forbidden pattern found in compiler output", proc_res);
+        }
+    }
+
+    /// Matches the compiler's JSON diagnostics against the `//~` annotations
+    /// in the test file. Every annotated error must appear; unannotated
+    /// errors/warnings are always reported, while helps/notes are only
+    /// required when the test annotates at least one of that kind.
+    fn check_expected_errors(&self, expected_errors: Vec<errors::Error>, proc_res: &ProcRes) {
+        debug!(
+            "check_expected_errors: expected_errors={:?} proc_res.status={:?}",
+            expected_errors, proc_res.status
+        );
+        if proc_res.status.success()
+            && expected_errors.iter().any(|x| x.kind == Some(ErrorKind::Error))
+        {
+            self.fatal_proc_rec("process did not return an error status", proc_res);
+        }
+
+        if self.props.known_bug {
+            if !expected_errors.is_empty() {
+                // Fixed grammar of the diagnostic message ("an expected errors").
+                self.fatal_proc_rec(
+                    "`known_bug` tests should not have expected errors",
+                    proc_res,
+                );
+            }
+            return;
+        }
+
+        // On Windows, keep all '\' path separators to match the paths reported in the JSON output
+        // from the compiler
+        let os_file_name = self.testpaths.file.display().to_string();
+
+        // on windows, translate all '\' path separators to '/'
+        let file_name = format!("{}", self.testpaths.file.display()).replace(r"\", "/");
+
+        // If the testcase being checked contains at least one expected "help"
+        // message, then we'll ensure that all "help" messages are expected.
+        // Otherwise, all "help" messages reported by the compiler will be ignored.
+        // This logic also applies to "note" messages.
+        let expect_help = expected_errors.iter().any(|ee| ee.kind == Some(ErrorKind::Help));
+        let expect_note = expected_errors.iter().any(|ee| ee.kind == Some(ErrorKind::Note));
+
+        // Parse the JSON output from the compiler and extract out the messages.
+        let actual_errors = json::parse_output(&os_file_name, &proc_res.stderr, proc_res);
+        let mut unexpected = Vec::new();
+        let mut found = vec![false; expected_errors.len()];
+        for actual_error in &actual_errors {
+            // Greedily pair each actual diagnostic with the first unmatched
+            // expectation on the same line whose kind and message agree.
+            let opt_index =
+                expected_errors.iter().enumerate().position(|(index, expected_error)| {
+                    !found[index]
+                        && actual_error.line_num == expected_error.line_num
+                        && (expected_error.kind.is_none()
+                            || actual_error.kind == expected_error.kind)
+                        && actual_error.msg.contains(&expected_error.msg)
+                });
+
+            match opt_index {
+                Some(index) => {
+                    // found a match, everybody is happy
+                    assert!(!found[index]);
+                    found[index] = true;
+                }
+
+                None => {
+                    // If the test is a known bug, don't require that the error is annotated
+                    if self.is_unexpected_compiler_message(actual_error, expect_help, expect_note) {
+                        self.error(&format!(
+                            "{}:{}: unexpected {}: '{}'",
+                            file_name,
+                            actual_error.line_num,
+                            actual_error
+                                .kind
+                                .as_ref()
+                                .map_or(String::from("message"), |k| k.to_string()),
+                            actual_error.msg
+                        ));
+                        unexpected.push(actual_error);
+                    }
+                }
+            }
+        }
+
+        let mut not_found = Vec::new();
+        // anything not yet found is a problem
+        for (index, expected_error) in expected_errors.iter().enumerate() {
+            if !found[index] {
+                self.error(&format!(
+                    "{}:{}: expected {} not found: {}",
+                    file_name,
+                    expected_error.line_num,
+                    expected_error.kind.as_ref().map_or("message".into(), |k| k.to_string()),
+                    expected_error.msg
+                ));
+                not_found.push(expected_error);
+            }
+        }
+
+        // Summarize and fail if anything was off in either direction.
+        if !unexpected.is_empty() || !not_found.is_empty() {
+            self.error(&format!(
+                "{} unexpected errors found, {} expected errors not found",
+                unexpected.len(),
+                not_found.len()
+            ));
+            println!("status: {}\ncommand: {}", proc_res.status, proc_res.cmdline);
+            if !unexpected.is_empty() {
+                println!("unexpected errors (from JSON output): {:#?}\n", unexpected);
+            }
+            if !not_found.is_empty() {
+                println!("not found errors (from test file): {:#?}\n", not_found);
+            }
+            panic!();
+        }
+    }
+
+    /// Returns `true` if we should report an error about `actual_error`,
+    /// which did not match any of the expected errors. Errors and warnings
+    /// must always be listed explicitly; helps/notes are only required when
+    /// the test gives explicit helps/notes of its own.
+    fn is_unexpected_compiler_message(
+        &self,
+        actual_error: &Error,
+        expect_help: bool,
+        expect_note: bool,
+    ) -> bool {
+        match actual_error.kind {
+            Some(ErrorKind::Error | ErrorKind::Warning) => true,
+            Some(ErrorKind::Help) => expect_help,
+            Some(ErrorKind::Note) => expect_note,
+            Some(ErrorKind::Suggestion) | None => false,
+        }
+    }
+
+    /// Decides whether compilation should stop at metadata: check-mode tests
+    /// (`PassMode::Check`, or `FailMode::Check` under the UI suite) don't
+    /// need full codegen.
+    fn should_emit_metadata(&self, pm: Option<PassMode>) -> EmitMetadata {
+        match (pm, self.props.fail_mode, self.config.mode) {
+            (Some(PassMode::Check), ..) | (_, Some(FailMode::Check), Ui) => EmitMetadata::Yes,
+            _ => EmitMetadata::No,
+        }
+    }
+
+    /// Compiles the test file using the test's own (local) pass mode.
+    fn compile_test(&self, will_execute: WillExecute, emit_metadata: EmitMetadata) -> ProcRes {
+        self.compile_test_general(will_execute, emit_metadata, self.props.local_pass_mode())
+    }
+
+    /// Compiles the test file. `will_execute` selects between a named
+    /// executable and a plain artifact directory as the output location;
+    /// non-run UI tests additionally get unused-code lints allowed.
+    fn compile_test_general(
+        &self,
+        will_execute: WillExecute,
+        emit_metadata: EmitMetadata,
+        local_pm: Option<PassMode>,
+    ) -> ProcRes {
+        // Only use `make_exe_name` when the test ends up being executed.
+        let output_file = match will_execute {
+            WillExecute::Yes => TargetLocation::ThisFile(self.make_exe_name()),
+            WillExecute::No | WillExecute::Disabled => {
+                TargetLocation::ThisDirectory(self.output_base_dir())
+            }
+        };
+
+        let allow_unused = match self.config.mode {
+            Ui => {
+                // UI tests tend to have tons of unused code as
+                // it's just testing various pieces of the compile, but we don't
+                // want to actually assert warnings about all this code. Instead
+                // let's just ignore unused code warnings by defaults and tests
+                // can turn it back on if needed.
+                if !self.is_rustdoc()
+                    // Note that we use the local pass mode here as we don't want
+                    // to set unused to allow if we've overridden the pass mode
+                    // via command line flags.
+                    && local_pm != Some(PassMode::Run)
+                {
+                    AllowUnused::Yes
+                } else {
+                    AllowUnused::No
+                }
+            }
+            _ => AllowUnused::No,
+        };
+
+        let mut rustc =
+            self.make_compile_args(&self.testpaths.file, output_file, emit_metadata, allow_unused);
+
+        // Make auxiliary crates discoverable via `-L`.
+        rustc.arg("-L").arg(&self.aux_output_dir_name());
+
+        self.compose_and_run_compiler(rustc, None)
+    }
+
+    /// Runs rustdoc on the test file, writing the docs into `out_dir`.
+    /// When `build-aux-docs` is set, auxiliary crates are documented first
+    /// (recursively), bailing out on the first aux failure. Warnings are
+    /// denied; RustdocJson mode switches the output format to JSON.
+    fn document(&self, out_dir: &Path) -> ProcRes {
+        if self.props.build_aux_docs {
+            for rel_ab in &self.props.aux_builds {
+                let aux_testpaths = self.compute_aux_test_paths(rel_ab);
+                let aux_props =
+                    self.props.from_aux_file(&aux_testpaths.file, self.revision, self.config);
+                let aux_cx = TestCx {
+                    config: self.config,
+                    props: &aux_props,
+                    testpaths: &aux_testpaths,
+                    revision: self.revision,
+                };
+                // Create the directory for the stdout/stderr files.
+                create_dir_all(aux_cx.output_base_dir()).unwrap();
+                let auxres = aux_cx.document(out_dir);
+                if !auxres.status.success() {
+                    return auxres;
+                }
+            }
+        }
+
+        let aux_dir = self.aux_output_dir_name();
+
+        let rustdoc_path = self.config.rustdoc_path.as_ref().expect("--rustdoc-path not passed");
+        let mut rustdoc = Command::new(rustdoc_path);
+
+        rustdoc
+            .arg("-L")
+            .arg(self.config.run_lib_path.to_str().unwrap())
+            .arg("-L")
+            .arg(aux_dir)
+            .arg("-o")
+            .arg(out_dir)
+            .arg("--deny")
+            .arg("warnings")
+            .arg(&self.testpaths.file)
+            .args(&self.props.compile_flags);
+
+        if self.config.mode == RustdocJson {
+            rustdoc.arg("--output-format").arg("json").arg("-Zunstable-options");
+        }
+
+        if let Some(ref linker) = self.config.linker {
+            rustdoc.arg(format!("-Clinker={}", linker));
+        }
+
+        self.compose_and_run_compiler(rustdoc, None)
+    }
+
+    /// Executes the compiled test binary: via `remote-test-client` for
+    /// remote/emulator targets, via `wr-run` on VxWorks, or directly
+    /// otherwise. On success the binary is deleted to save disk space.
+    fn exec_compiled_test(&self) -> ProcRes {
+        let env = &self.props.exec_env;
+
+        let proc_res = match &*self.config.target {
+            // This is pretty similar to below, we're transforming:
+            //
+            //      program arg1 arg2
+            //
+            // into
+            //
+            //      remote-test-client run program 2 support-lib.so support-lib2.so arg1 arg2
+            //
+            // The test-client program will upload `program` to the emulator
+            // along with all other support libraries listed (in this case
+            // `support-lib.so` and `support-lib2.so`. It will then execute
+            // the program on the emulator with the arguments specified
+            // (in the environment we give the process) and then report back
+            // the same result.
+            _ if self.config.remote_test_client.is_some() => {
+                let aux_dir = self.aux_output_dir_name();
+                let ProcArgs { prog, args } = self.make_run_args();
+                // Every file in the aux dir is shipped as a support library.
+                let mut support_libs = Vec::new();
+                if let Ok(entries) = aux_dir.read_dir() {
+                    for entry in entries {
+                        let entry = entry.unwrap();
+                        if !entry.path().is_file() {
+                            continue;
+                        }
+                        support_libs.push(entry.path());
+                    }
+                }
+                let mut test_client =
+                    Command::new(self.config.remote_test_client.as_ref().unwrap());
+                test_client
+                    .args(&["run", &support_libs.len().to_string(), &prog])
+                    .args(support_libs)
+                    .args(args)
+                    .envs(env.clone());
+                self.compose_and_run(
+                    test_client,
+                    self.config.run_lib_path.to_str().unwrap(),
+                    Some(aux_dir.to_str().unwrap()),
+                    None,
+                )
+            }
+            _ if self.config.target.contains("vxworks") => {
+                let aux_dir = self.aux_output_dir_name();
+                let ProcArgs { prog, args } = self.make_run_args();
+                let mut wr_run = Command::new("wr-run");
+                wr_run.args(&[&prog]).args(args).envs(env.clone());
+                self.compose_and_run(
+                    wr_run,
+                    self.config.run_lib_path.to_str().unwrap(),
+                    Some(aux_dir.to_str().unwrap()),
+                    None,
+                )
+            }
+            _ => {
+                // Plain local execution from the test's output directory.
+                let aux_dir = self.aux_output_dir_name();
+                let ProcArgs { prog, args } = self.make_run_args();
+                let mut program = Command::new(&prog);
+                program.args(args).current_dir(&self.output_base_dir()).envs(env.clone());
+                self.compose_and_run(
+                    program,
+                    self.config.run_lib_path.to_str().unwrap(),
+                    Some(aux_dir.to_str().unwrap()),
+                    None,
+                )
+            }
+        };
+
+        if proc_res.status.success() {
+            // delete the executable after running it to save space.
+            // it is ok if the deletion failed.
+            let _ = fs::remove_file(self.make_exe_name());
+        }
+
+        proc_res
+    }
+
+    /// For each `aux-build: foo/bar` annotation, we check to find the
+    /// file in an `auxiliary` directory relative to the test itself.
+    /// Fatal if the aux source file does not exist. The returned
+    /// `relative_dir` is made unique per test so parallel builds of the same
+    /// aux crate don't collide.
+    fn compute_aux_test_paths(&self, rel_ab: &str) -> TestPaths {
+        let test_ab = self
+            .testpaths
+            .file
+            .parent()
+            .expect("test file path has no parent")
+            .join("auxiliary")
+            .join(rel_ab);
+        if !test_ab.exists() {
+            self.fatal(&format!("aux-build `{}` source not found", test_ab.display()))
+        }
+
+        TestPaths {
+            file: test_ab,
+            relative_dir: self
+                .testpaths
+                .relative_dir
+                .join(self.output_testname_unique())
+                .join("auxiliary")
+                .join(rel_ab)
+                .parent()
+                .expect("aux-build path has no parent")
+                .to_path_buf(),
+        }
+    }
+
+    /// On VxWorks targets, tests are linked statically unless the
+    /// environment opts into dynamic linking via `RUST_VXWORKS_TEST_DYLINK=1`.
+    /// Always `false` for non-VxWorks targets.
+    fn is_vxworks_pure_static(&self) -> bool {
+        self.config.target.contains("vxworks")
+            && env::var("RUST_VXWORKS_TEST_DYLINK").map_or(true, |s| s != "1")
+    }
+
+    /// True when targeting VxWorks with dynamic linking enabled
+    /// (i.e. `RUST_VXWORKS_TEST_DYLINK=1` in the environment).
+    fn is_vxworks_pure_dynamic(&self) -> bool {
+        self.config.target.contains("vxworks") && !self.is_vxworks_pure_static()
+    }
+
+    /// Builds every `aux-build` / `aux-crates` dependency into a freshly
+    /// recreated aux directory, adding `--extern name=path` flags to `rustc`
+    /// for the named aux crates. Returns the aux output directory.
+    fn build_all_auxiliary(&self, rustc: &mut Command) -> PathBuf {
+        let aux_dir = self.aux_output_dir_name();
+
+        // Wipe stale artifacts so old aux libraries can't leak in.
+        if !self.props.aux_builds.is_empty() {
+            let _ = fs::remove_dir_all(&aux_dir);
+            create_dir_all(&aux_dir).unwrap();
+        }
+
+        for rel_ab in &self.props.aux_builds {
+            self.build_auxiliary(rel_ab, &aux_dir);
+        }
+
+        for (aux_name, aux_path) in &self.props.aux_crates {
+            let is_dylib = self.build_auxiliary(&aux_path, &aux_dir);
+            // The library file name depends on crate type (dylib vs rlib).
+            let lib_name =
+                get_lib_name(&aux_path.trim_end_matches(".rs").replace('-', "_"), is_dylib);
+            rustc.arg("--extern").arg(format!("{}={}/{}", aux_name, aux_dir.display(), lib_name));
+        }
+
+        aux_dir
+    }
+
+ fn compose_and_run_compiler(&self, mut rustc: Command, input: Option<String>) -> ProcRes {
+ let aux_dir = self.build_all_auxiliary(&mut rustc);
+ self.props.unset_rustc_env.clone().iter().fold(&mut rustc, |rustc, v| rustc.env_remove(v));
+ rustc.envs(self.props.rustc_env.clone());
+ self.compose_and_run(
+ rustc,
+ self.config.compile_lib_path.to_str().unwrap(),
+ Some(aux_dir.to_str().unwrap()),
+ input,
+ )
+ }
+
    /// Builds an aux dependency.
    ///
    /// `source_path` is the path from the `aux-build`/`aux-crate` directive,
    /// resolved relative to the test's `auxiliary` directory; artifacts are
    /// emitted into `aux_dir`.
    ///
    /// Returns whether or not it is a dylib.
    fn build_auxiliary(&self, source_path: &str, aux_dir: &Path) -> bool {
        let aux_testpaths = self.compute_aux_test_paths(source_path);
        let aux_props = self.props.from_aux_file(&aux_testpaths.file, self.revision, self.config);
        let aux_output = TargetLocation::ThisDirectory(self.aux_output_dir_name());
        // The aux file gets its own TestCx so its own headers
        // (compile-flags, rustc-env, ...) are honored independently.
        let aux_cx = TestCx {
            config: self.config,
            props: &aux_props,
            testpaths: &aux_testpaths,
            revision: self.revision,
        };
        // Create the directory for the stdout/stderr files.
        create_dir_all(aux_cx.output_base_dir()).unwrap();
        let input_file = &aux_testpaths.file;
        let mut aux_rustc =
            aux_cx.make_compile_args(input_file, aux_output, EmitMetadata::No, AllowUnused::No);

        // Apply the aux file's own env directives.
        for key in &aux_props.unset_rustc_env {
            aux_rustc.env_remove(key);
        }
        aux_rustc.envs(aux_props.rustc_env.clone());

        // Decide crate type: `(is_dylib, --crate-type value)`. Targets with
        // no dynamic-library support fall back to a plain rlib.
        let (dylib, crate_type) = if aux_props.no_prefer_dynamic {
            (true, None)
        } else if self.config.target.contains("emscripten")
            || (self.config.target.contains("musl")
                && !aux_props.force_host
                && !self.config.host.contains("musl"))
            || self.config.target.contains("wasm32")
            || self.config.target.contains("nvptx")
            || self.is_vxworks_pure_static()
            || self.config.target.contains("sgx")
            || self.config.target.contains("bpf")
        {
            // We primarily compile all auxiliary libraries as dynamic libraries
            // to avoid code size bloat and large binaries as much as possible
            // for the test suite (otherwise including libstd statically in all
            // executables takes up quite a bit of space).
            //
            // For targets like MUSL or Emscripten, however, there is no support for
            // dynamic libraries so we just go back to building a normal library. Note,
            // however, that for MUSL if the library is built with `force_host` then
            // it's ok to be a dylib as the host should always support dylibs.
            (false, Some("lib"))
        } else {
            (true, Some("dylib"))
        };

        if let Some(crate_type) = crate_type {
            aux_rustc.args(&["--crate-type", crate_type]);
        }

        aux_rustc.arg("-L").arg(&aux_dir);

        let auxres = aux_cx.compose_and_run(
            aux_rustc,
            aux_cx.config.compile_lib_path.to_str().unwrap(),
            Some(aux_dir.to_str().unwrap()),
            None,
        );
        if !auxres.status.success() {
            self.fatal_proc_rec(
                &format!(
                    "auxiliary build of {:?} failed to compile: ",
                    aux_testpaths.file.display()
                ),
                &auxres,
            );
        }
        dylib
    }
+
+ fn read2_abbreviated(&self, child: Child) -> Output {
+ let mut filter_paths_from_len = Vec::new();
+ let mut add_path = |path: &Path| {
+ let path = path.display().to_string();
+ let windows = path.replace("\\", "\\\\");
+ if windows != path {
+ filter_paths_from_len.push(windows);
+ }
+ filter_paths_from_len.push(path);
+ };
+
+ // List of paths that will not be measured when determining whether the output is larger
+ // than the output truncation threshold.
+ //
+ // Note: avoid adding a subdirectory of an already filtered directory here, otherwise the
+ // same slice of text will be double counted and the truncation might not happen.
+ add_path(&self.config.src_base);
+ add_path(&self.config.build_base);
+
+ read2_abbreviated(child, &filter_paths_from_len).expect("failed to read output")
+ }
+
+ fn compose_and_run(
+ &self,
+ mut command: Command,
+ lib_path: &str,
+ aux_path: Option<&str>,
+ input: Option<String>,
+ ) -> ProcRes {
+ let cmdline = {
+ let cmdline = self.make_cmdline(&command, lib_path);
+ logv(self.config, format!("executing {}", cmdline));
+ cmdline
+ };
+
+ command.stdout(Stdio::piped()).stderr(Stdio::piped()).stdin(Stdio::piped());
+
+ // Need to be sure to put both the lib_path and the aux path in the dylib
+ // search path for the child.
+ let mut path =
+ env::split_paths(&env::var_os(dylib_env_var()).unwrap_or_default()).collect::<Vec<_>>();
+ if let Some(p) = aux_path {
+ path.insert(0, PathBuf::from(p))
+ }
+ path.insert(0, PathBuf::from(lib_path));
+
+ // Add the new dylib search path var
+ let newpath = env::join_paths(&path).unwrap();
+ command.env(dylib_env_var(), newpath);
+
+ let mut child = disable_error_reporting(|| command.spawn())
+ .unwrap_or_else(|_| panic!("failed to exec `{:?}`", &command));
+ if let Some(input) = input {
+ child.stdin.as_mut().unwrap().write_all(input.as_bytes()).unwrap();
+ }
+
+ let Output { status, stdout, stderr } = self.read2_abbreviated(child);
+
+ let result = ProcRes {
+ status,
+ stdout: String::from_utf8_lossy(&stdout).into_owned(),
+ stderr: String::from_utf8_lossy(&stderr).into_owned(),
+ cmdline,
+ };
+
+ self.dump_output(&result.stdout, &result.stderr);
+
+ result
+ }
+
+ fn is_rustdoc(&self) -> bool {
+ self.config.src_base.ends_with("rustdoc-ui")
+ || self.config.src_base.ends_with("rustdoc-js")
+ || self.config.src_base.ends_with("rustdoc-json")
+ }
+
    /// Constructs the `rustc` (or, for rustdoc suites, `rustdoc`) command
    /// for compiling `input_file`: target selection, revision cfgs,
    /// mode-specific flags, compare-mode flags, and finally the test's own
    /// `compile-flags` (last, so they can override everything else).
    fn make_compile_args(
        &self,
        input_file: &Path,
        output_file: TargetLocation,
        emit_metadata: EmitMetadata,
        allow_unused: AllowUnused,
    ) -> Command {
        // Aux files are always compiled with rustc, even inside rustdoc suites.
        let is_aux = input_file.components().map(|c| c.as_os_str()).any(|c| c == "auxiliary");
        let is_rustdoc = self.is_rustdoc() && !is_aux;
        let mut rustc = if !is_rustdoc {
            Command::new(&self.config.rustc_path)
        } else {
            Command::new(&self.config.rustdoc_path.clone().expect("no rustdoc built yet"))
        };
        rustc.arg(input_file);

        // Use a single thread for efficiency and a deterministic error message order
        rustc.arg("-Zthreads=1");

        // Optionally prevent default --target if specified in test compile-flags.
        let custom_target = self.props.compile_flags.iter().any(|x| x.starts_with("--target"));

        if !custom_target {
            let target =
                if self.props.force_host { &*self.config.host } else { &*self.config.target };

            rustc.arg(&format!("--target={}", target));
        }
        self.set_revision_flags(&mut rustc);

        if !is_rustdoc {
            if let Some(ref incremental_dir) = self.props.incremental_dir {
                rustc.args(&["-C", &format!("incremental={}", incremental_dir.display())]);
                rustc.args(&["-Z", "incremental-verify-ich"]);
            }

            if self.config.mode == CodegenUnits {
                rustc.args(&["-Z", "human_readable_cgu_names"]);
            }
        }

        if self.config.optimize_tests && !is_rustdoc {
            match self.config.mode {
                Ui => {
                    // If optimize-tests is true we still only want to optimize tests that actually get
                    // executed and that don't specify their own optimization levels.
                    // Note: aux libs don't have a pass-mode, so they won't get optimized
                    // unless compile-flags are set in the aux file.
                    if self.config.optimize_tests
                        && self.props.pass_mode(&self.config) == Some(PassMode::Run)
                        && !self
                            .props
                            .compile_flags
                            .iter()
                            .any(|arg| arg == "-O" || arg.contains("opt-level"))
                    {
                        rustc.arg("-O");
                    }
                }
                DebugInfo => { /* debuginfo tests must be unoptimized */ }
                _ => {
                    rustc.arg("-O");
                }
            }
        }

        // Mode-specific diagnostics / output configuration.
        match self.config.mode {
            Incremental => {
                // If we are extracting and matching errors in the new
                // fashion, then you want JSON mode. Old-skool error
                // patterns still match the raw compiler output.
                if self.props.error_patterns.is_empty()
                    && self.props.regex_error_patterns.is_empty()
                {
                    rustc.args(&["--error-format", "json"]);
                    rustc.args(&["--json", "future-incompat"]);
                }
                rustc.arg("-Zui-testing");
                rustc.arg("-Zdeduplicate-diagnostics=no");
            }
            Ui => {
                if !self.props.compile_flags.iter().any(|s| s.starts_with("--error-format")) {
                    rustc.args(&["--error-format", "json"]);
                    rustc.args(&["--json", "future-incompat"]);
                }
                rustc.arg("-Ccodegen-units=1");
                rustc.arg("-Zui-testing");
                rustc.arg("-Zdeduplicate-diagnostics=no");
                // FIXME: use this for other modes too, for perf?
                rustc.arg("-Cstrip=debuginfo");
            }
            MirOpt => {
                rustc.args(&[
                    "-Copt-level=1",
                    "-Zdump-mir=all",
                    "-Zvalidate-mir",
                    "-Zdump-mir-exclude-pass-number",
                    "-Zmir-pretty-relative-line-numbers=yes",
                ]);
                if let Some(pass) = &self.props.mir_unit_test {
                    rustc.args(&["-Zmir-opt-level=0", &format!("-Zmir-enable-passes=+{}", pass)]);
                } else {
                    rustc.arg("-Zmir-opt-level=4");
                }

                // Recreate the MIR dump directory from scratch for this run.
                let mir_dump_dir = self.get_mir_dump_dir();
                let _ = fs::remove_dir_all(&mir_dump_dir);
                create_dir_all(mir_dump_dir.as_path()).unwrap();
                let mut dir_opt = "-Zdump-mir-dir=".to_string();
                dir_opt.push_str(mir_dump_dir.to_str().unwrap());
                debug!("dir_opt: {:?}", dir_opt);

                rustc.arg(dir_opt);
            }
            RunPassValgrind | Pretty | DebugInfo | Codegen | Rustdoc | RustdocJson | RunMake
            | CodegenUnits | JsDocTest | Assembly => {
                // do not use JSON output
            }
        }

        if let (false, EmitMetadata::Yes) = (is_rustdoc, emit_metadata) {
            rustc.args(&["--emit", "metadata"]);
        }

        if !is_rustdoc {
            if self.config.target == "wasm32-unknown-unknown" || self.is_vxworks_pure_static() {
                // rustc.arg("-g"); // get any backtrace at all on errors
            } else if !self.props.no_prefer_dynamic {
                rustc.args(&["-C", "prefer-dynamic"]);
            }
        }

        match output_file {
            TargetLocation::ThisFile(path) => {
                rustc.arg("-o").arg(path);
            }
            TargetLocation::ThisDirectory(path) => {
                if is_rustdoc {
                    // `rustdoc` uses `-o` for the output directory.
                    rustc.arg("-o").arg(path);
                } else {
                    rustc.arg("--out-dir").arg(path);
                }
            }
        }

        match self.config.compare_mode {
            Some(CompareMode::Polonius) => {
                rustc.args(&["-Zpolonius"]);
            }
            Some(CompareMode::Chalk) => {
                rustc.args(&["-Zchalk"]);
            }
            Some(CompareMode::SplitDwarf) => {
                rustc.args(&["-Csplit-debuginfo=unpacked", "-Zunstable-options"]);
            }
            Some(CompareMode::SplitDwarfSingle) => {
                rustc.args(&["-Csplit-debuginfo=packed", "-Zunstable-options"]);
            }
            None => {}
        }

        // Add `-A unused` before `config` flags and in-test (`props`) flags, so that they can
        // overwrite this.
        if let AllowUnused::Yes = allow_unused {
            rustc.args(&["-A", "unused"]);
        }

        if self.props.force_host {
            self.maybe_add_external_args(
                &mut rustc,
                self.split_maybe_args(&self.config.host_rustcflags),
            );
        } else {
            self.maybe_add_external_args(
                &mut rustc,
                self.split_maybe_args(&self.config.target_rustcflags),
            );
            if !is_rustdoc {
                if let Some(ref linker) = self.config.linker {
                    rustc.arg(format!("-Clinker={}", linker));
                }
            }
        }

        // Use dynamic musl for tests because static doesn't allow creating dylibs
        if self.config.host.contains("musl") || self.is_vxworks_pure_dynamic() {
            rustc.arg("-Ctarget-feature=-crt-static");
        }

        // The test's own `compile-flags` go last so they win over everything above.
        rustc.args(&self.props.compile_flags);

        rustc
    }
+
+ fn make_exe_name(&self) -> PathBuf {
+ // Using a single letter here to keep the path length down for
+ // Windows. Some test names get very long. rustc creates `rcgu`
+ // files with the module name appended to it which can more than
+ // double the length.
+ let mut f = self.output_base_dir().join("a");
+ // FIXME: This is using the host architecture exe suffix, not target!
+ if self.config.target.contains("emscripten") {
+ f = f.with_extra_extension("js");
+ } else if self.config.target.contains("wasm32") {
+ f = f.with_extra_extension("wasm");
+ } else if self.config.target.contains("spirv") {
+ f = f.with_extra_extension("spv");
+ } else if !env::consts::EXE_SUFFIX.is_empty() {
+ f = f.with_extra_extension(env::consts::EXE_SUFFIX);
+ }
+ f
+ }
+
+ fn make_run_args(&self) -> ProcArgs {
+ // If we've got another tool to run under (valgrind),
+ // then split apart its command
+ let mut args = self.split_maybe_args(&self.config.runtool);
+
+ // If this is emscripten, then run tests under nodejs
+ if self.config.target.contains("emscripten") {
+ if let Some(ref p) = self.config.nodejs {
+ args.push(p.clone());
+ } else {
+ self.fatal("no NodeJS binary found (--nodejs)");
+ }
+ // If this is otherwise wasm, then run tests under nodejs with our
+ // shim
+ } else if self.config.target.contains("wasm32") {
+ if let Some(ref p) = self.config.nodejs {
+ args.push(p.clone());
+ } else {
+ self.fatal("no NodeJS binary found (--nodejs)");
+ }
+
+ let src = self
+ .config
+ .src_base
+ .parent()
+ .unwrap() // chop off `ui`
+ .parent()
+ .unwrap() // chop off `test`
+ .parent()
+ .unwrap(); // chop off `src`
+ args.push(src.join("src/etc/wasm32-shim.js").display().to_string());
+ }
+
+ let exe_file = self.make_exe_name();
+
+ // FIXME (#9639): This needs to handle non-utf8 paths
+ args.push(exe_file.to_str().unwrap().to_owned());
+
+ // Add the arguments in the run_flags directive
+ args.extend(self.split_maybe_args(&self.props.run_flags));
+
+ let prog = args.remove(0);
+ ProcArgs { prog, args }
+ }
+
+ fn split_maybe_args(&self, argstr: &Option<String>) -> Vec<String> {
+ match *argstr {
+ Some(ref s) => s
+ .split(' ')
+ .filter_map(|s| {
+ if s.chars().all(|c| c.is_whitespace()) { None } else { Some(s.to_owned()) }
+ })
+ .collect(),
+ None => Vec::new(),
+ }
+ }
+
+ fn make_cmdline(&self, command: &Command, libpath: &str) -> String {
+ use crate::util;
+
+ // Linux and mac don't require adjusting the library search path
+ if cfg!(unix) {
+ format!("{:?}", command)
+ } else {
+ // Build the LD_LIBRARY_PATH variable as it would be seen on the command line
+ // for diagnostic purposes
+ fn lib_path_cmd_prefix(path: &str) -> String {
+ format!("{}=\"{}\"", util::lib_path_env_var(), util::make_new_path(path))
+ }
+
+ format!("{} {:?}", lib_path_cmd_prefix(libpath), command)
+ }
+ }
+
+ fn dump_output(&self, out: &str, err: &str) {
+ let revision = if let Some(r) = self.revision { format!("{}.", r) } else { String::new() };
+
+ self.dump_output_file(out, &format!("{}out", revision));
+ self.dump_output_file(err, &format!("{}err", revision));
+ self.maybe_dump_to_stdout(out, err);
+ }
+
+ fn dump_output_file(&self, out: &str, extension: &str) {
+ let outfile = self.make_out_name(extension);
+ fs::write(&outfile, out).unwrap();
+ }
+
    /// Creates a filename for output with the given extension.
    /// E.g., `/.../testname.revision.mode/testname.extension`.
    /// Note: `with_extension` replaces any extension already on the base name.
    fn make_out_name(&self, extension: &str) -> PathBuf {
        self.output_base_name().with_extension(extension)
    }
+
+ /// Gets the directory where auxiliary files are written.
+ /// E.g., `/.../testname.revision.mode/auxiliary/`.
+ fn aux_output_dir_name(&self) -> PathBuf {
+ self.output_base_dir()
+ .join("auxiliary")
+ .with_extra_extension(self.config.mode.disambiguator())
+ }
+
    /// Generates a unique name for the test, such as `testname.revision.mode`.
    /// Delegates to the free `output_testname_unique` helper with this
    /// test's config, paths, and (incremental-safe) revision.
    fn output_testname_unique(&self) -> PathBuf {
        output_testname_unique(self.config, self.testpaths, self.safe_revision())
    }
+
+ /// The revision, ignored for incremental compilation since it wants all revisions in
+ /// the same directory.
+ fn safe_revision(&self) -> Option<&str> {
+ if self.config.mode == Incremental { None } else { self.revision }
+ }
+
    /// Gets the absolute path to the directory where all output for the given
    /// test/revision should reside.
    /// E.g., `/path/to/build/host-triple/test/ui/relative/testname.revision.mode/`.
    /// Delegates to the free `output_base_dir` helper.
    fn output_base_dir(&self) -> PathBuf {
        output_base_dir(self.config, self.testpaths, self.safe_revision())
    }
+
    /// Gets the absolute path to the base filename used as output for the given
    /// test/revision.
    /// E.g., `/.../relative/testname.revision.mode/testname`.
    /// Delegates to the free `output_base_name` helper.
    fn output_base_name(&self) -> PathBuf {
        output_base_name(self.config, self.testpaths, self.safe_revision())
    }
+
+ fn maybe_dump_to_stdout(&self, out: &str, err: &str) {
+ if self.config.verbose {
+ println!("------stdout------------------------------");
+ println!("{}", out);
+ println!("------stderr------------------------------");
+ println!("{}", err);
+ println!("------------------------------------------");
+ }
+ }
+
+ fn error(&self, err: &str) {
+ match self.revision {
+ Some(rev) => println!("\nerror in revision `{}`: {}", rev, err),
+ None => println!("\nerror: {}", err),
+ }
+ }
+
    /// Reports `err` (via `error`), logs it, and aborts the current test
    /// with a panic.
    fn fatal(&self, err: &str) -> ! {
        self.error(err);
        error!("fatal error, panic: {:?}", err);
        panic!("fatal error");
    }
+
    /// Reports `err` and aborts the test, dumping the failing process
    /// result (`proc_res`) for diagnosis.
    fn fatal_proc_rec(&self, err: &str, proc_res: &ProcRes) -> ! {
        self.error(err);
        proc_res.fatal(None, || ());
    }
+
    /// Like `fatal_proc_rec`, but runs `on_failure` (e.g. to emit extra
    /// diagnostic context) before the process result aborts the test.
    fn fatal_proc_rec_with_ctx(
        &self,
        err: &str,
        proc_res: &ProcRes,
        on_failure: impl FnOnce(Self),
    ) -> ! {
        self.error(err);
        proc_res.fatal(None, || on_failure(*self));
    }
+
+ // codegen tests (using FileCheck)
+
+ fn compile_test_and_save_ir(&self) -> ProcRes {
+ let aux_dir = self.aux_output_dir_name();
+
+ let output_file = TargetLocation::ThisDirectory(self.output_base_dir());
+ let input_file = &self.testpaths.file;
+ let mut rustc =
+ self.make_compile_args(input_file, output_file, EmitMetadata::No, AllowUnused::No);
+ rustc.arg("-L").arg(aux_dir).arg("--emit=llvm-ir");
+
+ self.compose_and_run_compiler(rustc, None)
+ }
+
+ fn compile_test_and_save_assembly(&self) -> (ProcRes, PathBuf) {
+ // This works with both `--emit asm` (as default output name for the assembly)
+ // and `ptx-linker` because the latter can write output at requested location.
+ let output_path = self.output_base_name().with_extension("s");
+
+ let output_file = TargetLocation::ThisFile(output_path.clone());
+ let input_file = &self.testpaths.file;
+ let mut rustc =
+ self.make_compile_args(input_file, output_file, EmitMetadata::No, AllowUnused::No);
+
+ rustc.arg("-L").arg(self.aux_output_dir_name());
+
+ match self.props.assembly_output.as_ref().map(AsRef::as_ref) {
+ Some("emit-asm") => {
+ rustc.arg("--emit=asm");
+ }
+
+ Some("ptx-linker") => {
+ // No extra flags needed.
+ }
+
+ Some(_) => self.fatal("unknown 'assembly-output' header"),
+ None => self.fatal("missing 'assembly-output' header"),
+ }
+
+ (self.compose_and_run_compiler(rustc, None), output_path)
+ }
+
+ fn verify_with_filecheck(&self, output: &Path) -> ProcRes {
+ let mut filecheck = Command::new(self.config.llvm_filecheck.as_ref().unwrap());
+ filecheck.arg("--input-file").arg(output).arg(&self.testpaths.file);
+ // It would be more appropriate to make most of the arguments configurable through
+ // a comment-attribute similar to `compile-flags`. For example, --check-prefixes is a very
+ // useful flag.
+ //
+ // For now, though…
+ let prefix_for_target =
+ if self.config.target.contains("msvc") { "MSVC" } else { "NONMSVC" };
+ let prefixes = if let Some(rev) = self.revision {
+ format!("CHECK,{},{}", prefix_for_target, rev)
+ } else {
+ format!("CHECK,{}", prefix_for_target)
+ };
+ if self.config.llvm_version.unwrap_or(0) >= 130000 {
+ filecheck.args(&["--allow-unused-prefixes", "--check-prefixes", &prefixes]);
+ } else {
+ filecheck.args(&["--check-prefixes", &prefixes]);
+ }
+ self.compose_and_run(filecheck, "", None, None)
+ }
+
+ fn run_codegen_test(&self) {
+ if self.config.llvm_filecheck.is_none() {
+ self.fatal("missing --llvm-filecheck");
+ }
+
+ let proc_res = self.compile_test_and_save_ir();
+ if !proc_res.status.success() {
+ self.fatal_proc_rec("compilation failed!", &proc_res);
+ }
+
+ let output_path = self.output_base_name().with_extension("ll");
+ let proc_res = self.verify_with_filecheck(&output_path);
+ if !proc_res.status.success() {
+ self.fatal_proc_rec("verification with 'FileCheck' failed", &proc_res);
+ }
+ }
+
+ fn run_assembly_test(&self) {
+ if self.config.llvm_filecheck.is_none() {
+ self.fatal("missing --llvm-filecheck");
+ }
+
+ let (proc_res, output_path) = self.compile_test_and_save_assembly();
+ if !proc_res.status.success() {
+ self.fatal_proc_rec("compilation failed!", &proc_res);
+ }
+
+ let proc_res = self.verify_with_filecheck(&output_path);
+ if !proc_res.status.success() {
+ self.fatal_proc_rec("verification with 'FileCheck' failed", &proc_res);
+ }
+ }
+
+ fn charset() -> &'static str {
+ // FreeBSD 10.1 defaults to GDB 6.1.1 which doesn't support "auto" charset
+ if cfg!(target_os = "freebsd") { "ISO-8859-1" } else { "UTF-8" }
+ }
+
+ fn run_rustdoc_test(&self) {
+ assert!(self.revision.is_none(), "revisions not relevant here");
+
+ let out_dir = self.output_base_dir();
+ let _ = fs::remove_dir_all(&out_dir);
+ create_dir_all(&out_dir).unwrap();
+
+ let proc_res = self.document(&out_dir);
+ if !proc_res.status.success() {
+ self.fatal_proc_rec("rustdoc failed!", &proc_res);
+ }
+
+ if self.props.check_test_line_numbers_match {
+ self.check_rustdoc_test_option(proc_res);
+ } else {
+ let root = self.config.find_rust_src_root().unwrap();
+ let mut cmd = Command::new(&self.config.python);
+ cmd.arg(root.join("src/etc/htmldocck.py")).arg(&out_dir).arg(&self.testpaths.file);
+ if self.config.bless {
+ cmd.arg("--bless");
+ }
+ let res = self.cmd2procres(&mut cmd);
+ if !res.status.success() {
+ self.fatal_proc_rec_with_ctx("htmldocck failed!", &res, |mut this| {
+ this.compare_to_default_rustdoc(&out_dir)
+ });
+ }
+ }
+ }
+
    /// On htmldocck failure, regenerates the docs with a stock nightly
    /// `rustdoc`, normalizes both HTML trees with `tidy`, and prints a
    /// (paged, colorized) diff to help diagnose the failure.
    /// Does nothing when `tidy` is unavailable.
    fn compare_to_default_rustdoc(&mut self, out_dir: &Path) {
        if !self.config.has_tidy {
            return;
        }
        println!("info: generating a diff against nightly rustdoc");

        let suffix =
            self.safe_revision().map_or("nightly".into(), |path| path.to_owned() + "-nightly");
        let compare_dir = output_base_dir(self.config, self.testpaths, Some(&suffix));
        // Don't give an error if the directory didn't already exist
        let _ = fs::remove_dir_all(&compare_dir);
        create_dir_all(&compare_dir).unwrap();

        // We need to create a new struct for the lifetimes on `config` to work.
        let new_rustdoc = TestCx {
            config: &Config {
                // FIXME: use beta or a user-specified rustdoc instead of
                // hardcoding the default toolchain
                rustdoc_path: Some("rustdoc".into()),
                // Needed for building auxiliary docs below
                rustc_path: "rustc".into(),
                ..self.config.clone()
            },
            ..*self
        };

        let output_file = TargetLocation::ThisDirectory(new_rustdoc.aux_output_dir_name());
        let mut rustc = new_rustdoc.make_compile_args(
            &new_rustdoc.testpaths.file,
            output_file,
            EmitMetadata::No,
            AllowUnused::Yes,
        );
        rustc.arg("-L").arg(&new_rustdoc.aux_output_dir_name());
        new_rustdoc.build_all_auxiliary(&mut rustc);

        let proc_res = new_rustdoc.document(&compare_dir);
        if !proc_res.status.success() {
            eprintln!("failed to run nightly rustdoc");
            return;
        }

        #[rustfmt::skip]
        let tidy_args = [
            "--indent", "yes",
            "--indent-spaces", "2",
            "--wrap", "0",
            "--show-warnings", "no",
            "--markup", "yes",
            "--quiet", "yes",
            "-modify",
        ];
        // Normalize the HTML in both trees so the diff shows semantic
        // differences rather than formatting noise.
        let tidy_dir = |dir| {
            for entry in walkdir::WalkDir::new(dir) {
                let entry = entry.expect("failed to read file");
                if entry.file_type().is_file()
                    && entry.path().extension().and_then(|p| p.to_str()) == Some("html".into())
                {
                    let status =
                        Command::new("tidy").args(&tidy_args).arg(entry.path()).status().unwrap();
                    // `tidy` returns 1 if it modified the file.
                    assert!(status.success() || status.code() == Some(1));
                }
            }
        };
        tidy_dir(out_dir);
        tidy_dir(&compare_dir);

        // Respect the user's configured git pager, if any.
        let pager = {
            let output = Command::new("git").args(&["config", "--get", "core.pager"]).output().ok();
            output.and_then(|out| {
                if out.status.success() {
                    Some(String::from_utf8(out.stdout).expect("invalid UTF8 in git pager"))
                } else {
                    None
                }
            })
        };

        let diff_filename = format!("build/tmp/rustdoc-compare-{}.diff", std::process::id());

        if !write_filtered_diff(
            &diff_filename,
            out_dir,
            &compare_dir,
            self.config.verbose,
            |file_type, extension| {
                file_type.is_file()
                    && (extension == Some("html".into()) || extension == Some("js".into()))
            },
        ) {
            return;
        }

        match self.config.color {
            ColorConfig::AlwaysColor => colored::control::set_override(true),
            ColorConfig::NeverColor => colored::control::set_override(false),
            _ => {}
        }

        if let Some(pager) = pager {
            let pager = pager.trim();
            if self.config.verbose {
                eprintln!("using pager {}", pager);
            }
            let output = Command::new(pager)
                // disable paging; we want this to be non-interactive
                .env("PAGER", "")
                .stdin(File::open(&diff_filename).unwrap())
                // Capture output and print it explicitly so it will in turn be
                // captured by libtest.
                .output()
                .unwrap();
            assert!(output.status.success());
            println!("{}", String::from_utf8_lossy(&output.stdout));
            eprintln!("{}", String::from_utf8_lossy(&output.stderr));
        } else {
            use colored::Colorize;
            eprintln!("warning: no pager configured, falling back to unified diff");
            eprintln!(
                "help: try configuring a git pager (e.g. `delta`) with `git config --global core.pager delta`"
            );
            // Manually colorize the unified diff line-by-line: additions
            // green, removals red, hunk headers blue.
            let mut out = io::stdout();
            let mut diff = BufReader::new(File::open(&diff_filename).unwrap());
            let mut line = Vec::new();
            loop {
                line.truncate(0);
                match diff.read_until(b'\n', &mut line) {
                    Ok(0) => break,
                    Ok(_) => {}
                    Err(e) => eprintln!("ERROR: {:?}", e),
                }
                match String::from_utf8(line.clone()) {
                    Ok(line) => {
                        if line.starts_with("+") {
                            write!(&mut out, "{}", line.green()).unwrap();
                        } else if line.starts_with("-") {
                            write!(&mut out, "{}", line.red()).unwrap();
                        } else if line.starts_with("@") {
                            write!(&mut out, "{}", line.blue()).unwrap();
                        } else {
                            out.write_all(line.as_bytes()).unwrap();
                        }
                    }
                    Err(_) => {
                        write!(&mut out, "{}", String::from_utf8_lossy(&line).reversed()).unwrap();
                    }
                }
            }
        };
    }
+
+ fn run_rustdoc_json_test(&self) {
+ //FIXME: Add bless option.
+
+ assert!(self.revision.is_none(), "revisions not relevant here");
+
+ let out_dir = self.output_base_dir();
+ let _ = fs::remove_dir_all(&out_dir);
+ create_dir_all(&out_dir).unwrap();
+
+ let proc_res = self.document(&out_dir);
+ if !proc_res.status.success() {
+ self.fatal_proc_rec("rustdoc failed!", &proc_res);
+ }
+
+ let root = self.config.find_rust_src_root().unwrap();
+ let mut json_out = out_dir.join(self.testpaths.file.file_stem().unwrap());
+ json_out.set_extension("json");
+ let res = self.cmd2procres(
+ Command::new(self.config.jsondocck_path.as_ref().unwrap())
+ .arg("--doc-dir")
+ .arg(root.join(&out_dir))
+ .arg("--template")
+ .arg(&self.testpaths.file),
+ );
+
+ if !res.status.success() {
+ self.fatal_proc_rec_with_ctx("jsondocck failed!", &res, |_| {
+ println!("Rustdoc Output:");
+ proc_res.print_info();
+ })
+ }
+
+ let mut json_out = out_dir.join(self.testpaths.file.file_stem().unwrap());
+ json_out.set_extension("json");
+ let res = self.cmd2procres(
+ Command::new(&self.config.python)
+ .arg(root.join("src/etc/check_missing_items.py"))
+ .arg(&json_out),
+ );
+
+ if !res.status.success() {
+ self.fatal_proc_rec("check_missing_items failed!", &res);
+ }
+ }
+
    /// Scans a source file and returns the 1-based line numbers on which a
    /// doc-comment code fence ("```") is *opened*.
    ///
    /// Lines of the form `mod foo;` / `pub mod foo;` are not scanned
    /// themselves; instead the module name is appended to `other_files`
    /// (when provided) so the caller can scan the referenced file.
    fn get_lines<P: AsRef<Path>>(
        &self,
        path: &P,
        mut other_files: Option<&mut Vec<String>>,
    ) -> Vec<usize> {
        let content = fs::read_to_string(&path).unwrap();
        // True while inside an already-opened fence, so only the opening
        // "```" line of each pair is recorded.
        let mut ignore = false;
        content
            .lines()
            .enumerate()
            .filter_map(|(line_nb, line)| {
                if (line.trim_start().starts_with("pub mod ")
                    || line.trim_start().starts_with("mod "))
                    && line.ends_with(';')
                {
                    if let Some(ref mut other_files) = other_files {
                        other_files.push(line.rsplit("mod ").next().unwrap().replace(";", ""));
                    }
                    None
                } else {
                    // Take the text after the last `///` (this is the full
                    // line when there is no `///` on it).
                    let sline = line.split("///").last().unwrap_or("");
                    let line = sline.trim_start();
                    if line.starts_with("```") {
                        if ignore {
                            ignore = false;
                            None
                        } else {
                            ignore = true;
                            // `enumerate` is 0-based; report 1-based lines.
                            Some(line_nb + 1)
                        }
                    } else {
                        None
                    }
                }
            })
            .collect()
    }
+
+ fn check_rustdoc_test_option(&self, res: ProcRes) {
+ let mut other_files = Vec::new();
+ let mut files: HashMap<String, Vec<usize>> = HashMap::new();
+ let cwd = env::current_dir().unwrap();
+ files.insert(
+ self.testpaths
+ .file
+ .strip_prefix(&cwd)
+ .unwrap_or(&self.testpaths.file)
+ .to_str()
+ .unwrap()
+ .replace('\\', "/"),
+ self.get_lines(&self.testpaths.file, Some(&mut other_files)),
+ );
+ for other_file in other_files {
+ let mut path = self.testpaths.file.clone();
+ path.set_file_name(&format!("{}.rs", other_file));
+ files.insert(
+ path.strip_prefix(&cwd).unwrap_or(&path).to_str().unwrap().replace('\\', "/"),
+ self.get_lines(&path, None),
+ );
+ }
+
+ let mut tested = 0;
+ for _ in res.stdout.split('\n').filter(|s| s.starts_with("test ")).inspect(|s| {
+ if let Some((left, right)) = s.split_once(" - ") {
+ let path = left.rsplit("test ").next().unwrap();
+ if let Some(ref mut v) = files.get_mut(&path.replace('\\', "/")) {
+ tested += 1;
+ let mut iter = right.split("(line ");
+ iter.next();
+ let line = iter
+ .next()
+ .unwrap_or(")")
+ .split(')')
+ .next()
+ .unwrap_or("0")
+ .parse()
+ .unwrap_or(0);
+ if let Ok(pos) = v.binary_search(&line) {
+ v.remove(pos);
+ } else {
+ self.fatal_proc_rec(
+ &format!("Not found doc test: \"{}\" in \"{}\":{:?}", s, path, v),
+ &res,
+ );
+ }
+ }
+ }
+ }) {}
+ if tested == 0 {
+ self.fatal_proc_rec(&format!("No test has been found... {:?}", files), &res);
+ } else {
+ for (entry, v) in &files {
+ if !v.is_empty() {
+ self.fatal_proc_rec(
+ &format!(
+ "Not found test at line{} \"{}\":{:?}",
+ if v.len() > 1 { "s" } else { "" },
+ entry,
+ v
+ ),
+ &res,
+ );
+ }
+ }
+ }
+ }
+
+ fn run_codegen_units_test(&self) {
+ assert!(self.revision.is_none(), "revisions not relevant here");
+
+ let proc_res = self.compile_test(WillExecute::No, EmitMetadata::No);
+
+ if !proc_res.status.success() {
+ self.fatal_proc_rec("compilation failed!", &proc_res);
+ }
+
+ self.check_no_compiler_crash(&proc_res, self.props.should_ice);
+
+ const PREFIX: &str = "MONO_ITEM ";
+ const CGU_MARKER: &str = "@@";
+
+ let actual: Vec<MonoItem> = proc_res
+ .stdout
+ .lines()
+ .filter(|line| line.starts_with(PREFIX))
+ .map(|line| str_to_mono_item(line, true))
+ .collect();
+
+ let expected: Vec<MonoItem> = errors::load_errors(&self.testpaths.file, None)
+ .iter()
+ .map(|e| str_to_mono_item(&e.msg[..], false))
+ .collect();
+
+ let mut missing = Vec::new();
+ let mut wrong_cgus = Vec::new();
+
+ for expected_item in &expected {
+ let actual_item_with_same_name = actual.iter().find(|ti| ti.name == expected_item.name);
+
+ if let Some(actual_item) = actual_item_with_same_name {
+ if !expected_item.codegen_units.is_empty() &&
+ // Also check for codegen units
+ expected_item.codegen_units != actual_item.codegen_units
+ {
+ wrong_cgus.push((expected_item.clone(), actual_item.clone()));
+ }
+ } else {
+ missing.push(expected_item.string.clone());
+ }
+ }
+
+ let unexpected: Vec<_> = actual
+ .iter()
+ .filter(|acgu| !expected.iter().any(|ecgu| acgu.name == ecgu.name))
+ .map(|acgu| acgu.string.clone())
+ .collect();
+
+ if !missing.is_empty() {
+ missing.sort();
+
+ println!("\nThese items should have been contained but were not:\n");
+
+ for item in &missing {
+ println!("{}", item);
+ }
+
+ println!("\n");
+ }
+
+ if !unexpected.is_empty() {
+ let sorted = {
+ let mut sorted = unexpected.clone();
+ sorted.sort();
+ sorted
+ };
+
+ println!("\nThese items were contained but should not have been:\n");
+
+ for item in sorted {
+ println!("{}", item);
+ }
+
+ println!("\n");
+ }
+
+ if !wrong_cgus.is_empty() {
+ wrong_cgus.sort_by_key(|pair| pair.0.name.clone());
+ println!("\nThe following items were assigned to wrong codegen units:\n");
+
+ for &(ref expected_item, ref actual_item) in &wrong_cgus {
+ println!("{}", expected_item.name);
+ println!(" expected: {}", codegen_units_to_str(&expected_item.codegen_units));
+ println!(" actual: {}", codegen_units_to_str(&actual_item.codegen_units));
+ println!();
+ }
+ }
+
+ if !(missing.is_empty() && unexpected.is_empty() && wrong_cgus.is_empty()) {
+ panic!();
+ }
+
+ // A single mono item parsed either from compiler stdout or from the
+ // test file's expected-item annotations.
+ #[derive(Clone, Eq, PartialEq)]
+ struct MonoItem {
+ // Item name (with crate disambiguators stripped); used as the match key.
+ name: String,
+ // Names of the codegen units the item was assigned to (empty if unspecified).
+ codegen_units: HashSet<String>,
+ // The original "[MONO_ITEM] ..."-prefixed line, kept for error reporting.
+ string: String,
+ }
+
+ // Parses a line of the form: [MONO_ITEM] name [@@ (cgu)+]
+ // `cgu_has_crate_disambiguator` is true for compiler output, where CGU
+ // names carry crate disambiguators that must be stripped before comparison.
+ fn str_to_mono_item(s: &str, cgu_has_crate_disambiguator: bool) -> MonoItem {
+ // Drop the "[MONO_ITEM]" prefix if present, then re-attach it below so
+ // `string` has a canonical form regardless of the input's shape.
+ let s = if s.starts_with(PREFIX) { (&s[PREFIX.len()..]).trim() } else { s.trim() };
+
+ let full_string = format!("{}{}", PREFIX, s);
+
+ // Split into [name, cgu-list] on the "@@" marker.
+ let parts: Vec<&str> =
+ s.split(CGU_MARKER).map(str::trim).filter(|s| !s.is_empty()).collect();
+
+ let name = parts[0].trim();
+
+ let cgus = if parts.len() > 1 {
+ let cgus_str = parts[1];
+
+ // CGU names are space-separated after the marker.
+ cgus_str
+ .split(' ')
+ .map(str::trim)
+ .filter(|s| !s.is_empty())
+ .map(|s| {
+ if cgu_has_crate_disambiguator {
+ remove_crate_disambiguators_from_set_of_cgu_names(s)
+ } else {
+ s.to_string()
+ }
+ })
+ .collect()
+ } else {
+ // No "@@" section: the item has no expected CGU assignment.
+ HashSet::new()
+ };
+
+ MonoItem { name: name.to_owned(), codegen_units: cgus, string: full_string }
+ }
+
+ // Renders a set of CGU names as a sorted, space-separated string for
+ // error messages. Note: the result carries a trailing space; that is
+ // harmless here since it is only printed for humans.
+ fn codegen_units_to_str(cgus: &HashSet<String>) -> String {
+ // Sort so the output is deterministic across runs.
+ let mut cgus: Vec<_> = cgus.iter().collect();
+ cgus.sort();
+
+ let mut string = String::new();
+ for cgu in cgus {
+ string.push_str(&cgu[..]);
+ string.push_str(" ");
+ }
+
+ string
+ }
+
+ // Given a cgu-name-prefix of the form <crate-name>.<crate-disambiguator> or
+ // the form <crate-name1>.<crate-disambiguator1>-in-<crate-name2>.<crate-disambiguator2>,
+ // remove all crate-disambiguators.
+ fn remove_crate_disambiguator_from_cgu(cgu: &str) -> String {
+ lazy_static! {
+ // d1 captures the first ".<disambiguator>"; d2 the optional one after "-in-".
+ static ref RE: Regex =
+ Regex::new(r"^[^\.]+(?P<d1>\.[[:alnum:]]+)(-in-[^\.]+(?P<d2>\.[[:alnum:]]+))?")
+ .unwrap();
+ }
+
+ // Panics on malformed CGU names: that indicates a compiler/test bug,
+ // not a recoverable condition.
+ let captures =
+ RE.captures(cgu).unwrap_or_else(|| panic!("invalid cgu name encountered: {}", cgu));
+
+ let mut new_name = cgu.to_owned();
+
+ // Remove d2 first so d1's byte offsets (relative to the original string)
+ // remain valid for the second replace_range.
+ if let Some(d2) = captures.name("d2") {
+ new_name.replace_range(d2.start()..d2.end(), "");
+ }
+
+ let d1 = captures.name("d1").unwrap();
+ new_name.replace_range(d1.start()..d1.end(), "");
+
+ new_name
+ }
+
+ // The name of merged CGUs is constructed as the names of the original
+ // CGUs joined with "--". This function splits such composite CGU names
+ // and handles each component individually.
+ fn remove_crate_disambiguators_from_set_of_cgu_names(cgus: &str) -> String {
+ cgus.split("--")
+ .map(|cgu| remove_crate_disambiguator_from_cgu(cgu))
+ .collect::<Vec<_>>()
+ .join("--")
+ }
+ }
+ }
+
+ /// Prepares a fresh incremental-compilation cache directory before any
+ /// revision of an incremental test runs.
+ fn init_incremental_test(&self) {
+ // (See `run_incremental_test` for an overview of how incremental tests work.)
+
+ // Before any of the revisions have executed, create the
+ // incremental workproduct directory. Delete any old
+ // incremental work products that may be there from prior
+ // runs.
+ let incremental_dir = self.props.incremental_dir.as_ref().unwrap();
+ if incremental_dir.exists() {
+ // Canonicalizing the path will convert it to the //?/ format
+ // on Windows, which enables paths longer than 260 character
+ let canonicalized = incremental_dir.canonicalize().unwrap();
+ fs::remove_dir_all(canonicalized).unwrap();
+ }
+ fs::create_dir_all(&incremental_dir).unwrap();
+
+ if self.config.verbose {
+ println!("init_incremental_test: incremental_dir={}", incremental_dir.display());
+ }
+ }
+
+ /// Runs a single revision of an incremental test, dispatching to the
+ /// rpass/rfail/cfail runner based on the revision name's prefix.
+ fn run_incremental_test(&self) {
+ // Basic plan for a test incremental/foo/bar.rs:
+ // - load list of revisions rpass1, cfail2, rpass3
+ // - each should begin with `rpass`, `cfail`, or `rfail`
+ // - if `rpass`, expect compile and execution to succeed
+ // - if `cfail`, expect compilation to fail
+ // - if `rfail`, expect execution to fail
+ // - create a directory build/foo/bar.incremental
+ // - compile foo/bar.rs with -C incremental=.../foo/bar.incremental and -C rpass1
+ // - because name of revision starts with "rpass", expect success
+ // - compile foo/bar.rs with -C incremental=.../foo/bar.incremental and -C cfail2
+ // - because name of revision starts with "cfail", expect an error
+ // - load expected errors as usual, but filter for those that end in `[rfail2]`
+ // - compile foo/bar.rs with -C incremental=.../foo/bar.incremental and -C rpass3
+ // - because name of revision starts with "rpass", expect success
+ // - execute build/foo/bar.exe and save output
+ //
+ // FIXME -- use non-incremental mode as an oracle? That doesn't apply
+ // to #[rustc_dirty] and clean tests I guess
+
+ let revision = self.revision.expect("incremental tests require a list of revisions");
+
+ // Incremental workproduct directory should have already been created.
+ let incremental_dir = self.props.incremental_dir.as_ref().unwrap();
+ assert!(incremental_dir.exists(), "init_incremental_test failed to create incremental dir");
+
+ if self.config.verbose {
+ print!("revision={:?} props={:#?}", revision, self.props);
+ }
+
+ // Dispatch on the revision-name prefix; should-ice only makes sense for
+ // compile-fail revisions, so reject it for the run-* variants.
+ if revision.starts_with("rpass") {
+ if self.props.should_ice {
+ self.fatal("can only use should-ice in cfail tests");
+ }
+ self.run_rpass_test();
+ } else if revision.starts_with("rfail") {
+ if self.props.should_ice {
+ self.fatal("can only use should-ice in cfail tests");
+ }
+ self.run_rfail_test();
+ } else if revision.starts_with("cfail") {
+ self.run_cfail_test();
+ } else {
+ self.fatal("revision name must begin with rpass, rfail, or cfail");
+ }
+ }
+
+ /// Runs a `run-make` test: invokes `make` (or `gmake` on the BSDs) in the
+ /// test's directory with a large set of environment variables describing
+ /// the toolchain, and fails the test if make exits unsuccessfully.
+ fn run_rmake_test(&self) {
+ let cwd = env::current_dir().unwrap();
+ // src_base is .../src/test/<suite>; three `parent()`s up is the source root.
+ let src_root = self.config.src_base.parent().unwrap().parent().unwrap().parent().unwrap();
+ let src_root = cwd.join(&src_root);
+
+ // Give each test a private scratch dir, wiped between runs.
+ let tmpdir = cwd.join(self.output_base_name());
+ if tmpdir.exists() {
+ self.aggressive_rm_rf(&tmpdir).unwrap();
+ }
+ create_dir_all(&tmpdir).unwrap();
+
+ // GNU make is `gmake` rather than `make` on the BSDs.
+ let host = &self.config.host;
+ let make = if host.contains("dragonfly")
+ || host.contains("freebsd")
+ || host.contains("netbsd")
+ || host.contains("openbsd")
+ {
+ "gmake"
+ } else {
+ "make"
+ };
+
+ let mut cmd = Command::new(make);
+ cmd.current_dir(&self.testpaths.file)
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .env("TARGET", &self.config.target)
+ .env("PYTHON", &self.config.python)
+ .env("S", src_root)
+ .env("RUST_BUILD_STAGE", &self.config.stage_id)
+ .env("RUSTC", cwd.join(&self.config.rustc_path))
+ .env("TMPDIR", &tmpdir)
+ .env("LD_LIB_PATH_ENVVAR", dylib_env_var())
+ .env("HOST_RPATH_DIR", cwd.join(&self.config.compile_lib_path))
+ .env("TARGET_RPATH_DIR", cwd.join(&self.config.run_lib_path))
+ .env("LLVM_COMPONENTS", &self.config.llvm_components)
+ // We for sure don't want these tests to run in parallel, so make
+ // sure they don't have access to these vars if we run via `make`
+ // at the top level
+ .env_remove("MAKEFLAGS")
+ .env_remove("MFLAGS")
+ .env_remove("CARGO_MAKEFLAGS");
+
+ // Optional tools: only exported when configured.
+ if let Some(ref rustdoc) = self.config.rustdoc_path {
+ cmd.env("RUSTDOC", cwd.join(rustdoc));
+ }
+
+ if let Some(ref rust_demangler) = self.config.rust_demangler_path {
+ cmd.env("RUST_DEMANGLER", cwd.join(rust_demangler));
+ }
+
+ if let Some(ref node) = self.config.nodejs {
+ cmd.env("NODE", node);
+ }
+
+ if let Some(ref linker) = self.config.linker {
+ cmd.env("RUSTC_LINKER", linker);
+ }
+
+ if let Some(ref clang) = self.config.run_clang_based_tests_with {
+ cmd.env("CLANG", clang);
+ }
+
+ if let Some(ref filecheck) = self.config.llvm_filecheck {
+ cmd.env("LLVM_FILECHECK", filecheck);
+ }
+
+ if let Some(ref llvm_bin_dir) = self.config.llvm_bin_dir {
+ cmd.env("LLVM_BIN_DIR", llvm_bin_dir);
+ }
+
+ // We don't want RUSTFLAGS set from the outside to interfere with
+ // compiler flags set in the test cases:
+ cmd.env_remove("RUSTFLAGS");
+
+ // Use dynamic musl for tests because static doesn't allow creating dylibs
+ if self.config.host.contains("musl") {
+ cmd.env("RUSTFLAGS", "-Ctarget-feature=-crt-static").env("IS_MUSL_HOST", "1");
+ }
+
+ if self.config.bless {
+ cmd.env("RUSTC_BLESS_TEST", "--bless");
+ // Assume this option is active if the environment variable is "defined", with _any_ value.
+ // As an example, a `Makefile` can use this option by:
+ //
+ // ifdef RUSTC_BLESS_TEST
+ // cp "$(TMPDIR)"/actual_something.ext expected_something.ext
+ // else
+ // $(DIFF) expected_something.ext "$(TMPDIR)"/actual_something.ext
+ // endif
+ }
+
+ if self.config.target.contains("msvc") && self.config.cc != "" {
+ // We need to pass a path to `lib.exe`, so assume that `cc` is `cl.exe`
+ // and that `lib.exe` lives next to it.
+ let lib = Path::new(&self.config.cc).parent().unwrap().join("lib.exe");
+
+ // MSYS doesn't like passing flags of the form `/foo` as it thinks it's
+ // a path and instead passes `C:\msys64\foo`, so convert all
+ // `/`-arguments to MSVC here to `-` arguments.
+ let cflags = self
+ .config
+ .cflags
+ .split(' ')
+ .map(|s| s.replace("/", "-"))
+ .collect::<Vec<_>>()
+ .join(" ");
+ let cxxflags = self
+ .config
+ .cxxflags
+ .split(' ')
+ .map(|s| s.replace("/", "-"))
+ .collect::<Vec<_>>()
+ .join(" ");
+
+ cmd.env("IS_MSVC", "1")
+ .env("IS_WINDOWS", "1")
+ .env("MSVC_LIB", format!("'{}' -nologo", lib.display()))
+ .env("CC", format!("'{}' {}", self.config.cc, cflags))
+ .env("CXX", format!("'{}' {}", &self.config.cxx, cxxflags));
+ } else {
+ cmd.env("CC", format!("{} {}", self.config.cc, self.config.cflags))
+ .env("CXX", format!("{} {}", self.config.cxx, self.config.cxxflags))
+ .env("AR", &self.config.ar);
+
+ if self.config.target.contains("windows") {
+ cmd.env("IS_WINDOWS", "1");
+ }
+ }
+
+ // Run make, capturing (possibly truncated) output; any non-zero exit
+ // fails the test with the captured output attached.
+ let output = self.read2_abbreviated(cmd.spawn().expect("failed to spawn `make`"));
+ if !output.status.success() {
+ let res = ProcRes {
+ status: output.status,
+ stdout: String::from_utf8_lossy(&output.stdout).into_owned(),
+ stderr: String::from_utf8_lossy(&output.stderr).into_owned(),
+ cmdline: format!("{:?}", cmd),
+ };
+ self.fatal_proc_rec("make failed", &res);
+ }
+ }
+
+ /// Recursively deletes a directory tree, working around read-only files
+ /// on Windows (which `fs::remove_file` cannot delete by default).
+ fn aggressive_rm_rf(&self, path: &Path) -> io::Result<()> {
+ for e in path.read_dir()? {
+ let entry = e?;
+ let path = entry.path();
+ if entry.file_type()?.is_dir() {
+ self.aggressive_rm_rf(&path)?;
+ } else {
+ // Remove readonly files as well on windows (by default we can't)
+ fs::remove_file(&path).or_else(|e| {
+ if cfg!(windows) && e.kind() == io::ErrorKind::PermissionDenied {
+ // Clear the read-only bit, then retry the removal.
+ let mut meta = entry.metadata()?.permissions();
+ meta.set_readonly(false);
+ fs::set_permissions(&path, meta)?;
+ fs::remove_file(&path)
+ } else {
+ Err(e)
+ }
+ })?;
+ }
+ }
+ // Directory is empty by now; remove it last.
+ fs::remove_dir(path)
+ }
+
+ /// Runs a rustdoc-js test: documents the crate, then runs the Node.js
+ /// `tester.js` harness against the generated docs. Fatal if Node.js is
+ /// not configured.
+ fn run_js_doc_test(&self) {
+ if let Some(nodejs) = &self.config.nodejs {
+ let out_dir = self.output_base_dir();
+
+ // Generate the rustdoc output the JS tester will inspect.
+ self.document(&out_dir);
+
+ let root = self.config.find_rust_src_root().unwrap();
+ let file_stem =
+ self.testpaths.file.file_stem().and_then(|f| f.to_str()).expect("no file stem");
+ let res = self.cmd2procres(
+ Command::new(&nodejs)
+ .arg(root.join("src/tools/rustdoc-js/tester.js"))
+ .arg("--doc-folder")
+ .arg(out_dir)
+ .arg("--crate-name")
+ // Crate names use underscores where file names use hyphens.
+ .arg(file_stem.replace("-", "_"))
+ .arg("--test-file")
+ .arg(self.testpaths.file.with_extension("js")),
+ );
+ if !res.status.success() {
+ self.fatal_proc_rec("rustdoc-js test failed!", &res);
+ }
+ } else {
+ self.fatal("no nodeJS");
+ }
+ }
+
+ /// Normalizes the process's stdout/stderr and compares them against the
+ /// expected `.stdout`/`.stderr` snapshot files for the given output kind.
+ /// Returns the number of mismatching streams (0, 1, or 2).
+ fn load_compare_outputs(
+ &self,
+ proc_res: &ProcRes,
+ output_kind: TestOutput,
+ explicit_format: bool,
+ ) -> usize {
+ // Pointer-width-specific stderr file name, used when the test opted
+ // into per-bitwidth snapshots.
+ let stderr_bits = format!("{}.stderr", get_pointer_width(&self.config.target));
+ let (stderr_kind, stdout_kind) = match output_kind {
+ TestOutput::Compile => (
+ {
+ if self.props.stderr_per_bitwidth { &stderr_bits } else { UI_STDERR }
+ },
+ UI_STDOUT,
+ ),
+ TestOutput::Run => (UI_RUN_STDERR, UI_RUN_STDOUT),
+ };
+
+ let expected_stderr = self.load_expected_output(stderr_kind);
+ let expected_stdout = self.load_expected_output(stdout_kind);
+
+ let normalized_stdout = match output_kind {
+ TestOutput::Run if self.config.remote_test_client.is_some() => {
+ // When tests are run using the remote-test-client, the string
+ // 'uploaded "$TEST_BUILD_DIR/<test_executable>, waiting for result"'
+ // is printed to stdout by the client and then captured in the ProcRes,
+ // so it needs to be removed when comparing the run-pass test execution output
+ lazy_static! {
+ static ref REMOTE_TEST_RE: Regex = Regex::new(
+ "^uploaded \"\\$TEST_BUILD_DIR(/[[:alnum:]_\\-.]+)+\", waiting for result\n"
+ )
+ .unwrap();
+ }
+ REMOTE_TEST_RE
+ .replace(
+ &self.normalize_output(&proc_res.stdout, &self.props.normalize_stdout),
+ "",
+ )
+ .to_string()
+ }
+ _ => self.normalize_output(&proc_res.stdout, &self.props.normalize_stdout),
+ };
+
+ // Unless the test chose its own --error-format, stderr is JSON and the
+ // human-readable text must be extracted from the "rendered" field.
+ let stderr = if explicit_format {
+ proc_res.stderr.clone()
+ } else {
+ json::extract_rendered(&proc_res.stderr)
+ };
+
+ let normalized_stderr = self.normalize_output(&stderr, &self.props.normalize_stderr);
+ let mut errors = 0;
+ match output_kind {
+ TestOutput::Compile => {
+ // Compile output checks can be individually disabled by test headers.
+ if !self.props.dont_check_compiler_stdout {
+ errors +=
+ self.compare_output(stdout_kind, &normalized_stdout, &expected_stdout);
+ }
+ if !self.props.dont_check_compiler_stderr {
+ errors +=
+ self.compare_output(stderr_kind, &normalized_stderr, &expected_stderr);
+ }
+ }
+ TestOutput::Run => {
+ errors += self.compare_output(stdout_kind, &normalized_stdout, &expected_stdout);
+ errors += self.compare_output(stderr_kind, &normalized_stderr, &expected_stderr);
+ }
+ }
+ errors
+ }
+
+ /// Runs a UI test end-to-end: compiles the file, compares (normalized)
+ /// compiler output against snapshot files, optionally executes the binary
+ /// and compares run output, checks `//~` error annotations and
+ /// `error-pattern` headers, and — for `run-rustfix` tests — applies the
+ /// compiler's suggestions and verifies the fixed code compiles cleanly.
+ fn run_ui_test(&self) {
+ if let Some(FailMode::Build) = self.props.fail_mode {
+ // Make sure a build-fail test cannot fail due to failing analysis (e.g. typeck).
+ let pm = Some(PassMode::Check);
+ let proc_res = self.compile_test_general(WillExecute::No, EmitMetadata::Yes, pm);
+ self.check_if_test_should_compile(&proc_res, pm);
+ }
+
+ let pm = self.pass_mode();
+ let should_run = self.should_run(pm);
+ let emit_metadata = self.should_emit_metadata(pm);
+ let proc_res = self.compile_test(should_run, emit_metadata);
+ self.check_if_test_should_compile(&proc_res, pm);
+
+ // if the user specified a format in the ui test
+ // print the output to the stderr file, otherwise extract
+ // the rendered error messages from json and print them
+ let explicit = self.props.compile_flags.iter().any(|s| s.contains("--error-format"));
+
+ let expected_fixed = self.load_expected_output(UI_FIXED);
+
+ self.check_and_prune_duplicate_outputs(&proc_res, &[], &[]);
+
+ let mut errors = self.load_compare_outputs(&proc_res, TestOutput::Compile, explicit);
+ let rustfix_input = json::rustfix_diagnostics_only(&proc_res.stderr);
+
+ if self.config.compare_mode.is_some() {
+ // don't test rustfix with nll right now
+ } else if self.config.rustfix_coverage {
+ // Find out which tests have `MachineApplicable` suggestions but are missing
+ // `run-rustfix` or `run-rustfix-only-machine-applicable` headers.
+ //
+ // This will return an empty `Vec` in case the executed test file has a
+ // `compile-flags: --error-format=xxxx` header with a value other than `json`.
+ let suggestions = get_suggestions_from_json(
+ &rustfix_input,
+ &HashSet::new(),
+ Filter::MachineApplicableOnly,
+ )
+ .unwrap_or_default();
+ if !suggestions.is_empty()
+ && !self.props.run_rustfix
+ && !self.props.rustfix_only_machine_applicable
+ {
+ // Record this test file in the shared coverage report.
+ let mut coverage_file_path = self.config.build_base.clone();
+ coverage_file_path.push("rustfix_missing_coverage.txt");
+ debug!("coverage_file_path: {}", coverage_file_path.display());
+
+ let mut file = OpenOptions::new()
+ .create(true)
+ .append(true)
+ .open(coverage_file_path.as_path())
+ .expect("could not create or open file");
+
+ if writeln!(file, "{}", self.testpaths.file.display()).is_err() {
+ panic!("couldn't write to {}", coverage_file_path.display());
+ }
+ }
+ } else if self.props.run_rustfix {
+ // Apply suggestions from rustc to the code itself
+ let unfixed_code = self.load_expected_output_from_path(&self.testpaths.file).unwrap();
+ let suggestions = get_suggestions_from_json(
+ &rustfix_input,
+ &HashSet::new(),
+ if self.props.rustfix_only_machine_applicable {
+ Filter::MachineApplicableOnly
+ } else {
+ Filter::Everything
+ },
+ )
+ .unwrap();
+ let fixed_code = apply_suggestions(&unfixed_code, &suggestions).unwrap_or_else(|e| {
+ panic!(
+ "failed to apply suggestions for {:?} with rustfix: {}",
+ self.testpaths.file, e
+ )
+ });
+
+ // The rustfix result must match the checked-in `.fixed` snapshot.
+ errors += self.compare_output("fixed", &fixed_code, &expected_fixed);
+ } else if !expected_fixed.is_empty() {
+ panic!(
+ "the `// run-rustfix` directive wasn't found but a `*.fixed` \
+ file was found"
+ );
+ }
+
+ if errors > 0 {
+ println!("To update references, rerun the tests and pass the `--bless` flag");
+ let relative_path_to_file =
+ self.testpaths.relative_dir.join(self.testpaths.file.file_name().unwrap());
+ println!(
+ "To only update this specific test, also pass `--test-args {}`",
+ relative_path_to_file.display(),
+ );
+ self.fatal_proc_rec(
+ &format!("{} errors occurred comparing output.", errors),
+ &proc_res,
+ );
+ }
+
+ let expected_errors = errors::load_errors(&self.testpaths.file, self.revision);
+
+ if let WillExecute::Yes = should_run {
+ let proc_res = self.exec_compiled_test();
+ let run_output_errors = if self.props.check_run_results {
+ self.load_compare_outputs(&proc_res, TestOutput::Run, explicit)
+ } else {
+ 0
+ };
+ if run_output_errors > 0 {
+ self.fatal_proc_rec(
+ &format!("{} errors occurred comparing run output.", run_output_errors),
+ &proc_res,
+ );
+ }
+ // The expected exit status of the run depends on the pass mode.
+ if self.should_run_successfully(pm) {
+ if !proc_res.status.success() {
+ self.fatal_proc_rec("test run failed!", &proc_res);
+ }
+ } else if proc_res.status.success() {
+ self.fatal_proc_rec("test run succeeded!", &proc_res);
+ }
+
+ if !self.props.error_patterns.is_empty() || !self.props.regex_error_patterns.is_empty()
+ {
+ // "// error-pattern" comments
+ let output_to_check = self.get_output(&proc_res);
+ self.check_all_error_patterns(&output_to_check, &proc_res, pm);
+ }
+ }
+
+ debug!(
+ "run_ui_test: explicit={:?} config.compare_mode={:?} expected_errors={:?} \
+ proc_res.status={:?} props.error_patterns={:?}",
+ explicit,
+ self.config.compare_mode,
+ expected_errors,
+ proc_res.status,
+ self.props.error_patterns
+ );
+ // Annotation/pattern checks against *compiler* output only apply when
+ // output is in the default (JSON) format and no compare-mode is active.
+ if !explicit && self.config.compare_mode.is_none() {
+ let check_patterns = should_run == WillExecute::No
+ && (!self.props.error_patterns.is_empty()
+ || !self.props.regex_error_patterns.is_empty());
+
+ let check_annotations = !check_patterns || !expected_errors.is_empty();
+
+ if check_patterns {
+ // "// error-pattern" comments
+ let output_to_check = self.get_output(&proc_res);
+ self.check_all_error_patterns(&output_to_check, &proc_res, pm);
+ }
+
+ if check_annotations {
+ // "//~ERROR comments"
+ self.check_expected_errors(expected_errors, &proc_res);
+ }
+ }
+
+ if self.props.run_rustfix && self.config.compare_mode.is_none() {
+ // And finally, compile the fixed code and make sure it both
+ // succeeds and has no diagnostics.
+ let mut rustc = self.make_compile_args(
+ &self.testpaths.file.with_extension(UI_FIXED),
+ TargetLocation::ThisFile(self.make_exe_name()),
+ emit_metadata,
+ AllowUnused::No,
+ );
+ rustc.arg("-L").arg(&self.aux_output_dir_name());
+ let res = self.compose_and_run_compiler(rustc, None);
+ if !res.status.success() {
+ self.fatal_proc_rec("failed to compile fixed code", &res);
+ }
+ if !res.stderr.is_empty()
+ && !self.props.rustfix_only_machine_applicable
+ && !json::rustfix_diagnostics_only(&res.stderr).is_empty()
+ {
+ self.fatal_proc_rec("fixed code is still producing diagnostics", &res);
+ }
+ }
+ }
+
+ /// Runs a mir-opt test: compiles the file, verifies the emitted MIR dumps
+ /// against their checked-in snapshots, and (when the pass mode calls for
+ /// it) runs the resulting binary.
+ fn run_mir_opt_test(&self) {
+ let pm = self.pass_mode();
+ let should_run = self.should_run(pm);
+ let emit_metadata = self.should_emit_metadata(pm);
+ let proc_res = self.compile_test(should_run, emit_metadata);
+
+ if !proc_res.status.success() {
+ self.fatal_proc_rec("compilation failed!", &proc_res);
+ }
+
+ // Compare MIR dumps regardless of whether the test also executes.
+ self.check_mir_dump();
+
+ if let WillExecute::Yes = should_run {
+ let proc_res = self.exec_compiled_test();
+
+ if !proc_res.status.success() {
+ self.fatal_proc_rec("test run failed!", &proc_res);
+ }
+ }
+ }
+
+ /// Processes every `// EMIT_MIR` directive in the test file, comparing
+ /// the emitted MIR dump (or a before/after diff) against the checked-in
+ /// expected file; under `--bless`, regenerates the expected files instead.
+ fn check_mir_dump(&self) {
+ let test_file_contents = fs::read_to_string(&self.testpaths.file).unwrap();
+
+ let test_dir = self.testpaths.file.parent().unwrap();
+ // Crate name derived from the file stem; hyphens become underscores.
+ let test_crate =
+ self.testpaths.file.file_stem().unwrap().to_str().unwrap().replace("-", "_");
+
+ // With EMIT_MIR_FOR_EACH_BIT_WIDTH, expected files are suffixed with
+ // the pointer width (e.g. ".64bit").
+ let mut bit_width = String::new();
+ if test_file_contents.lines().any(|l| l == "// EMIT_MIR_FOR_EACH_BIT_WIDTH") {
+ bit_width = format!(".{}", get_pointer_width(&self.config.target));
+ }
+
+ if self.config.bless {
+ // Blow away stale expected files so removed directives don't leave
+ // orphaned snapshots behind.
+ for e in
+ glob(&format!("{}/{}.*{}.mir", test_dir.display(), test_crate, bit_width)).unwrap()
+ {
+ std::fs::remove_file(e.unwrap()).unwrap();
+ }
+ for e in
+ glob(&format!("{}/{}.*{}.diff", test_dir.display(), test_crate, bit_width)).unwrap()
+ {
+ std::fs::remove_file(e.unwrap()).unwrap();
+ }
+ }
+
+ for l in test_file_contents.lines() {
+ if l.starts_with("// EMIT_MIR ") {
+ let test_name = l.trim_start_matches("// EMIT_MIR ").trim();
+ let mut test_names = test_name.split(' ');
+ // sometimes we specify two files so that we get a diff between the two files
+ let test_name = test_names.next().unwrap();
+ let mut expected_file;
+ let from_file;
+ let to_file;
+
+ // Three directive shapes:
+ // 1. `foo.diff` — diff of the pass's before/after dumps.
+ // 2. `foo pass1 pass2` — diff between two named passes' dumps.
+ // 3. `foo.mir`/`.dot`/`.html` — plain dump comparison.
+ if test_name.ends_with(".diff") {
+ let trimmed = test_name.trim_end_matches(".diff");
+ let test_against = format!("{}.after.mir", trimmed);
+ from_file = format!("{}.before.mir", trimmed);
+ expected_file = format!("{}{}.diff", trimmed, bit_width);
+ assert!(
+ test_names.next().is_none(),
+ "two mir pass names specified for MIR diff"
+ );
+ to_file = Some(test_against);
+ } else if let Some(first_pass) = test_names.next() {
+ let second_pass = test_names.next().unwrap();
+ assert!(
+ test_names.next().is_none(),
+ "three mir pass names specified for MIR diff"
+ );
+ expected_file =
+ format!("{}{}.{}-{}.diff", test_name, bit_width, first_pass, second_pass);
+ let second_file = format!("{}.{}.mir", test_name, second_pass);
+ from_file = format!("{}.{}.mir", test_name, first_pass);
+ to_file = Some(second_file);
+ } else {
+ // Insert the bit-width suffix just before the file extension.
+ let ext_re = Regex::new(r#"(\.(mir|dot|html))$"#).unwrap();
+ let cap = ext_re
+ .captures_iter(test_name)
+ .next()
+ .expect("test_name has an invalid extension");
+ let extension = cap.get(1).unwrap().as_str();
+ expected_file = format!(
+ "{}{}{}",
+ test_name.trim_end_matches(extension),
+ bit_width,
+ extension,
+ );
+ from_file = test_name.to_string();
+ assert!(
+ test_names.next().is_none(),
+ "two mir pass names specified for MIR dump"
+ );
+ to_file = None;
+ };
+ // Expected files are conventionally prefixed with the crate name.
+ if !expected_file.starts_with(&test_crate) {
+ expected_file = format!("{}.{}", test_crate, expected_file);
+ }
+ let expected_file = test_dir.join(expected_file);
+
+ let dumped_string = if let Some(after) = to_file {
+ self.diff_mir_files(from_file.into(), after.into())
+ } else {
+ let mut output_file = PathBuf::new();
+ output_file.push(self.get_mir_dump_dir());
+ output_file.push(&from_file);
+ debug!(
+ "comparing the contents of: {} with {}",
+ output_file.display(),
+ expected_file.display()
+ );
+ if !output_file.exists() {
+ panic!(
+ "Output file `{}` from test does not exist, available files are in `{}`",
+ output_file.display(),
+ output_file.parent().unwrap().display()
+ );
+ }
+ // Guard against comparing a stale dump from an earlier run.
+ self.check_mir_test_timestamp(&from_file, &output_file);
+ let dumped_string = fs::read_to_string(&output_file).unwrap();
+ self.normalize_output(&dumped_string, &[])
+ };
+
+ if self.config.bless {
+ let _ = std::fs::remove_file(&expected_file);
+ std::fs::write(expected_file, dumped_string.as_bytes()).unwrap();
+ } else {
+ if !expected_file.exists() {
+ panic!(
+ "Output file `{}` from test does not exist",
+ expected_file.display()
+ );
+ }
+ let expected_string = fs::read_to_string(&expected_file).unwrap();
+ if dumped_string != expected_string {
+ print!("{}", write_diff(&expected_string, &dumped_string, 3));
+ panic!(
+ "Actual MIR output differs from expected MIR output {}",
+ expected_file.display()
+ );
+ }
+ }
+ }
+ }
+ }
+
+ /// Produces a line-based textual diff ("- "/"+ "/"  " prefixed) between
+ /// two MIR dump files from the dump directory, after normalization.
+ /// Panics if either dump file is missing.
+ fn diff_mir_files(&self, before: PathBuf, after: PathBuf) -> String {
+ let to_full_path = |path: PathBuf| {
+ let full = self.get_mir_dump_dir().join(&path);
+ if !full.exists() {
+ panic!(
+ "the mir dump file for {} does not exist (requested in {})",
+ path.display(),
+ self.testpaths.file.display(),
+ );
+ }
+ full
+ };
+ let before = to_full_path(before);
+ let after = to_full_path(after);
+ debug!("comparing the contents of: {} with {}", before.display(), after.display());
+ let before = fs::read_to_string(before).unwrap();
+ let after = fs::read_to_string(after).unwrap();
+ // Normalize both sides so the diff is stable across machines.
+ let before = self.normalize_output(&before, &[]);
+ let after = self.normalize_output(&after, &[]);
+ let mut dumped_string = String::new();
+ for result in diff::lines(&before, &after) {
+ use std::fmt::Write;
+ match result {
+ diff::Result::Left(s) => writeln!(dumped_string, "- {}", s).unwrap(),
+ diff::Result::Right(s) => writeln!(dumped_string, "+ {}", s).unwrap(),
+ diff::Result::Both(s, _) => writeln!(dumped_string, " {}", s).unwrap(),
+ }
+ }
+ dumped_string
+ }
+
+ /// Panics if the test source is newer than the given MIR dump file,
+ /// which would mean we are about to compare against a stale dump.
+ /// Note: `test_name` is the dump file's name, used only for the message.
+ fn check_mir_test_timestamp(&self, test_name: &str, output_file: &Path) {
+ let t = |file| fs::metadata(file).unwrap().modified().unwrap();
+ let source_file = &self.testpaths.file;
+ let output_time = t(output_file);
+ let source_time = t(source_file);
+ if source_time > output_time {
+ debug!("source file time: {:?} output file time: {:?}", source_time, output_time);
+ panic!(
+ "test source file `{}` is newer than potentially stale output file `{}`.",
+ source_file.display(),
+ test_name
+ );
+ }
+ }
+
+ /// Returns the per-test directory where the compiler writes MIR dumps:
+ /// `<build_base>/<relative_dir>/<file_stem>`.
+ fn get_mir_dump_dir(&self) -> PathBuf {
+ let mut mir_dump_dir = PathBuf::from(self.config.build_base.as_path());
+ debug!("input_file: {:?}", self.testpaths.file);
+ mir_dump_dir.push(&self.testpaths.relative_dir);
+ mir_dump_dir.push(self.testpaths.file.file_stem().unwrap());
+ mir_dump_dir
+ }
+
+ /// Rewrites compiler/test output into a machine-independent form so it can
+ /// be compared against checked-in snapshots: replaces source/build paths
+ /// with `$DIR`-style placeholders, collapses line/column numbers and v0
+ /// mangling hashes, strips `//~` annotations, and finally applies the
+ /// test's own `normalize-*` rules.
+ fn normalize_output(&self, output: &str, custom_rules: &[(String, String)]) -> String {
+ // In JSON output, backslashes in paths are escaped, so path
+ // replacements below must be escaped the same way to match.
+ let cflags = self.props.compile_flags.join(" ");
+ let json = cflags.contains("--error-format json")
+ || cflags.contains("--error-format pretty-json")
+ || cflags.contains("--error-format=json")
+ || cflags.contains("--error-format=pretty-json")
+ || cflags.contains("--output-format json")
+ || cflags.contains("--output-format=json");
+
+ let mut normalized = output.to_string();
+
+ let mut normalize_path = |from: &Path, to: &str| {
+ let mut from = from.display().to_string();
+ if json {
+ from = from.replace("\\", "\\\\");
+ }
+ normalized = normalized.replace(&from, to);
+ };
+
+ let parent_dir = self.testpaths.file.parent().unwrap();
+ normalize_path(parent_dir, "$DIR");
+
+ // Paths into the libstd/libcore
+ let base_dir = self.config.src_base.parent().unwrap().parent().unwrap().parent().unwrap();
+ let src_dir = base_dir.join("library");
+ normalize_path(&src_dir, "$SRC_DIR");
+
+ // `ui-fulldeps` tests can show paths to the compiler source when testing macros from
+ // `rustc_macros`
+ // eg. /home/user/rust/compiler
+ let compiler_src_dir = base_dir.join("compiler");
+ normalize_path(&compiler_src_dir, "$COMPILER_DIR");
+
+ // Remapped source paths (set at rustc build time) need the same treatment.
+ if let Some(virtual_rust_source_base_dir) =
+ option_env!("CFG_VIRTUAL_RUST_SOURCE_BASE_DIR").map(PathBuf::from)
+ {
+ normalize_path(&virtual_rust_source_base_dir.join("library"), "$SRC_DIR");
+ normalize_path(&virtual_rust_source_base_dir.join("compiler"), "$COMPILER_DIR");
+ }
+
+ // Paths into the build directory
+ let test_build_dir = &self.config.build_base;
+ let parent_build_dir = test_build_dir.parent().unwrap().parent().unwrap().parent().unwrap();
+
+ // eg. /home/user/rust/build/x86_64-unknown-linux-gnu/test/ui
+ normalize_path(test_build_dir, "$TEST_BUILD_DIR");
+ // eg. /home/user/rust/build
+ normalize_path(parent_build_dir, "$BUILD_DIR");
+
+ // Paths into lib directory.
+ normalize_path(&parent_build_dir.parent().unwrap().join("lib"), "$LIB_DIR");
+
+ if json {
+ // escaped newlines in json strings should be readable
+ // in the stderr files. There's no point in being correct,
+ // since only humans process the stderr files.
+ // Thus we just turn escaped newlines back into newlines.
+ normalized = normalized.replace("\\n", "\n");
+ }
+
+ // If there are `$SRC_DIR` normalizations with line and column numbers, then replace them
+ // with placeholders as we do not want tests needing updated when compiler source code
+ // changes.
+ // eg. $SRC_DIR/libcore/mem.rs:323:14 becomes $SRC_DIR/libcore/mem.rs:LL:COL
+ lazy_static! {
+ static ref SRC_DIR_RE: Regex =
+ Regex::new("SRC_DIR(.+):\\d+:\\d+(: \\d+:\\d+)?").unwrap();
+ }
+
+ normalized = SRC_DIR_RE.replace_all(&normalized, "SRC_DIR$1:LL:COL").into_owned();
+
+ normalized = Self::normalize_platform_differences(&normalized);
+ normalized = normalized.replace("\t", "\\t"); // makes tabs visible
+
+ // Remove test annotations like `//~ ERROR text` from the output,
+ // since they duplicate actual errors and make the output hard to read.
+ // This mirrors the regex in src/tools/tidy/src/style.rs, please update
+ // both if either are changed.
+ lazy_static! {
+ static ref ANNOTATION_RE: Regex = Regex::new("\\s*//(\\[.*\\])?~.*").unwrap();
+ }
+
+ normalized = ANNOTATION_RE.replace_all(&normalized, "").into_owned();
+
+ // This code normalizes various hashes in v0 symbol mangling that is
+ // emitted in the ui and mir-opt tests.
+ lazy_static! {
+ static ref V0_CRATE_HASH_PREFIX_RE: Regex =
+ Regex::new(r"_R.*?Cs[0-9a-zA-Z]+_").unwrap();
+ static ref V0_CRATE_HASH_RE: Regex = Regex::new(r"Cs[0-9a-zA-Z]+_").unwrap();
+ }
+
+ const V0_CRATE_HASH_PLACEHOLDER: &str = r"CsCRATE_HASH_";
+ if V0_CRATE_HASH_PREFIX_RE.is_match(&normalized) {
+ // Normalize crate hash
+ normalized =
+ V0_CRATE_HASH_RE.replace_all(&normalized, V0_CRATE_HASH_PLACEHOLDER).into_owned();
+ }
+
+ lazy_static! {
+ static ref V0_BACK_REF_PREFIX_RE: Regex = Regex::new(r"\(_R.*?B[0-9a-zA-Z]_").unwrap();
+ static ref V0_BACK_REF_RE: Regex = Regex::new(r"B[0-9a-zA-Z]_").unwrap();
+ }
+
+ const V0_BACK_REF_PLACEHOLDER: &str = r"B<REF>_";
+ if V0_BACK_REF_PREFIX_RE.is_match(&normalized) {
+ // Normalize back references (see RFC 2603)
+ normalized =
+ V0_BACK_REF_RE.replace_all(&normalized, V0_BACK_REF_PLACEHOLDER).into_owned();
+ }
+
+ // Custom normalization rules
+ for rule in custom_rules {
+ let re = Regex::new(&rule.0).expect("bad regex in custom normalization rule");
+ normalized = re.replace_all(&normalized, &rule.1[..]).into_owned();
+ }
+ normalized
+ }
+
+ /// Normalize output differences across platforms. Generally changes Windows output to be more
+ /// Unix-like.
+ ///
+ /// Replaces backslashes in paths with forward slashes, and replaces CRLF line endings
+ /// with LF.
+ fn normalize_platform_differences(output: &str) -> String {
+ lazy_static! {
+ /// Used to find Windows paths.
+ ///
+ /// It's not possible to detect paths in the error messages generally, but this is a
+ /// decent enough heuristic.
+ static ref PATH_BACKSLASH_RE: Regex = Regex::new(r#"(?x)
+ (?:
+ # Match paths that don't include spaces.
+ (?:\\[\pL\pN\.\-_']+)+\.\pL+
+ |
+ # If the path starts with a well-known root, then allow spaces.
+ \$(?:DIR|SRC_DIR|TEST_BUILD_DIR|BUILD_DIR|LIB_DIR)(?:\\[\pL\pN\.\-_' ]+)+
+ )"#
+ ).unwrap();
+ }
+
+ // Collapse doubled (JSON-escaped) backslashes first so the path regex
+ // sees single separators.
+ let output = output.replace(r"\\", r"\");
+
+ PATH_BACKSLASH_RE
+ .replace_all(&output, |caps: &Captures<'_>| {
+ // NOTE(review): this println! prints every matched path to stdout;
+ // it looks like leftover debug output — confirm it is intentional.
+ println!("{}", &caps[0]);
+ caps[0].replace(r"\", "/")
+ })
+ .replace("\r\n", "\n")
+ }
+
+ /// Resolves the snapshot file path for the given output kind, preferring a
+ /// compare-mode-specific file and falling back to the plain one.
+ fn expected_output_path(&self, kind: &str) -> PathBuf {
+ let mut path =
+ expected_output_path(&self.testpaths, self.revision, &self.config.compare_mode, kind);
+
+ // NOTE(review): this Polonius-specific fallback computes the same path
+ // as the unconditional fallback below, so it appears redundant.
+ if !path.exists() {
+ if let Some(CompareMode::Polonius) = self.config.compare_mode {
+ path = expected_output_path(&self.testpaths, self.revision, &None, kind);
+ }
+ }
+
+ // Fall back to the compare-mode-less snapshot when no mode-specific
+ // file exists.
+ if !path.exists() {
+ path = expected_output_path(&self.testpaths, self.revision, &None, kind);
+ }
+
+ path
+ }
+
+ /// Loads the snapshot file for `kind`, returning an empty string when no
+ /// snapshot exists (i.e. the test expects no output of that kind).
+ fn load_expected_output(&self, kind: &str) -> String {
+ let path = self.expected_output_path(kind);
+ if path.exists() {
+ match self.load_expected_output_from_path(&path) {
+ Ok(x) => x,
+ // Unreadable (as opposed to absent) snapshots are fatal.
+ Err(x) => self.fatal(&x),
+ }
+ } else {
+ String::new()
+ }
+ }
+
+ /// Reads a snapshot file to a string, mapping I/O errors to a message
+ /// that includes the offending path.
+ fn load_expected_output_from_path(&self, path: &Path) -> Result<String, String> {
+ fs::read_to_string(path).map_err(|err| {
+ format!("failed to load expected output from `{}`: {}", path.display(), err)
+ })
+ }
+
+ fn delete_file(&self, file: &PathBuf) {
+ if !file.exists() {
+ // Deleting a nonexistant file would error.
+ return;
+ }
+ if let Err(e) = fs::remove_file(file) {
+ self.fatal(&format!("failed to delete `{}`: {}", file.display(), e,));
+ }
+ }
+
    /// Compare the (already normalized) `actual` output against the `expected`
    /// snapshot of the given `kind` ("stderr", "stdout", …).
    ///
    /// Returns the number of mismatches contributed by this comparison: 0 when the
    /// outputs match (or when `--bless` rewrote the snapshot), 1 otherwise.
    fn compare_output(&self, kind: &str, actual: &str, expected: &str) -> usize {
        if actual == expected {
            return 0;
        }

        // Without --bless, show the developer what changed before saving anything.
        if !self.config.bless {
            if expected.is_empty() {
                println!("normalized {}:\n{}\n", kind, actual);
            } else {
                println!("diff of {}:\n", kind);
                print!("{}", write_diff(expected, actual, 3));
            }
        }

        // The actual output is always dumped next to the build artifacts so it can
        // be inspected after the run.
        let mode = self.config.compare_mode.as_ref().map_or("", |m| m.to_str());
        let output_file = self
            .output_base_name()
            .with_extra_extension(self.revision.unwrap_or(""))
            .with_extra_extension(mode)
            .with_extra_extension(kind);

        let mut files = vec![output_file];
        if self.config.bless {
            // Delete non-revision .stderr/.stdout file if revisions are used.
            // Without this, we'd just generate the new files and leave the old files around.
            if self.revision.is_some() {
                let old =
                    expected_output_path(self.testpaths, None, &self.config.compare_mode, kind);
                self.delete_file(&old);
            }
            // With --bless, additionally overwrite the checked-in snapshot itself.
            files.push(expected_output_path(
                self.testpaths,
                self.revision,
                &self.config.compare_mode,
                kind,
            ));
        }

        for output_file in &files {
            if actual.is_empty() {
                // Empty actual output means the snapshot file should not exist at all.
                self.delete_file(output_file);
            } else if let Err(err) = fs::write(&output_file, &actual) {
                self.fatal(&format!(
                    "failed to write {} to `{}`: {}",
                    kind,
                    output_file.display(),
                    err,
                ));
            }
        }

        println!("\nThe actual {0} differed from the expected {0}.", kind);
        for output_file in files {
            println!("Actual {} saved to {}", kind, output_file.display());
        }
        // A blessed run counts as success; otherwise report a single mismatch.
        if self.config.bless { 0 } else { 1 }
    }
+
    /// For each UI output kind, compare every compare-mode-specific snapshot
    /// against the base (mode-less) snapshot.
    ///
    /// When blessing, a mode snapshot identical to the base is deleted as
    /// redundant. Modes listed in `require_same_modes` must match the base;
    /// a divergence there is a fatal error regardless of blessing.
    fn check_and_prune_duplicate_outputs(
        &self,
        proc_res: &ProcRes,
        modes: &[CompareMode],
        require_same_modes: &[CompareMode],
    ) {
        for kind in UI_EXTENSIONS {
            // The canonical (compare-mode-less) snapshot all modes are compared to.
            let canon_comparison_path =
                expected_output_path(&self.testpaths, self.revision, &None, kind);

            // No base snapshot of this kind — nothing to compare against.
            let canon = match self.load_expected_output_from_path(&canon_comparison_path) {
                Ok(canon) => canon,
                _ => continue,
            };
            let bless = self.config.bless;
            let check_and_prune_duplicate_outputs = |mode: &CompareMode, require_same: bool| {
                let examined_path =
                    expected_output_path(&self.testpaths, self.revision, &Some(mode.clone()), kind);

                // If there is no output, there is nothing to do
                let examined_content = match self.load_expected_output_from_path(&examined_path) {
                    Ok(content) => content,
                    _ => return,
                };

                let is_duplicate = canon == examined_content;

                match (bless, require_same, is_duplicate) {
                    // If we're blessing and the output is the same, then delete the file.
                    (true, _, true) => {
                        self.delete_file(&examined_path);
                    }
                    // If we want them to be the same, but they are different, then error.
                    // We do this whether we bless or not
                    (_, true, false) => {
                        self.fatal_proc_rec(
                            &format!("`{}` should not have different output from base test!", kind),
                            proc_res,
                        );
                    }
                    _ => {}
                }
            };
            for mode in modes {
                check_and_prune_duplicate_outputs(mode, false);
            }
            for mode in require_same_modes {
                check_and_prune_duplicate_outputs(mode, true);
            }
        }
    }
+
+ fn create_stamp(&self) {
+ let stamp = crate::stamp(&self.config, self.testpaths, self.revision);
+ fs::write(&stamp, compute_stamp_hash(&self.config)).unwrap();
+ }
+}
+
/// A process invocation: the program to execute plus its command-line arguments.
struct ProcArgs {
    /// Name or path of the executable.
    prog: String,
    /// Arguments passed to the executable, one element per argument.
    args: Vec<String>,
}
+
/// The captured result of running a process: its exit status, both output
/// streams, and the command line that produced it (for diagnostics).
pub struct ProcRes {
    /// Exit status of the finished process.
    status: ExitStatus,
    /// Captured standard output, decoded to a `String`.
    stdout: String,
    /// Captured standard error, decoded to a `String`.
    stderr: String,
    /// Human-readable rendering of the command that was run.
    cmdline: String,
}
+
+impl ProcRes {
+ pub fn print_info(&self) {
+ fn render(name: &str, contents: &str) -> String {
+ let contents = json::extract_rendered(contents);
+ let contents = contents.trim();
+ if contents.is_empty() {
+ format!("{name}: none")
+ } else {
+ format!(
+ "\
+ --- {name} -------------------------------\n\
+ {contents}\n\
+ ------------------------------------------",
+ )
+ }
+ }
+
+ println!(
+ "status: {}\ncommand: {}\n{}\n{}\n",
+ self.status,
+ self.cmdline,
+ render("stdout", &self.stdout),
+ render("stderr", &self.stderr),
+ );
+ }
+
+ pub fn fatal(&self, err: Option<&str>, on_failure: impl FnOnce()) -> ! {
+ if let Some(e) = err {
+ println!("\nerror: {}", e);
+ }
+ self.print_info();
+ on_failure();
+ // Use resume_unwind instead of panic!() to prevent a panic message + backtrace from
+ // compiletest, which is unnecessary noise.
+ std::panic::resume_unwind(Box::new(()));
+ }
+}
+
/// Where a compilation's output artifact should be placed: a specific file, or a
/// directory into which the compiler chooses the file name.
#[derive(Debug)]
enum TargetLocation {
    ThisFile(PathBuf),
    ThisDirectory(PathBuf),
}
+
/// Whether unused-code lints should be tolerated for a particular compilation.
enum AllowUnused {
    Yes,
    No,
}
diff --git a/src/tools/compiletest/src/runtest/debugger.rs b/src/tools/compiletest/src/runtest/debugger.rs
new file mode 100644
index 000000000..379ff0bab
--- /dev/null
+++ b/src/tools/compiletest/src/runtest/debugger.rs
@@ -0,0 +1,122 @@
+use crate::common::Config;
+use crate::header::line_directive;
+use crate::runtest::ProcRes;
+
+use std::fs::File;
+use std::io::{BufRead, BufReader};
+use std::path::Path;
+
/// Debugger directives parsed out of a test source file.
pub(super) struct DebuggerCommands {
    /// Commands to feed to the debugger (from `<prefix>-command` directives).
    pub commands: Vec<String>,
    /// Expected output patterns (from `<prefix>-check` directives).
    pub check_lines: Vec<String>,
    /// 1-based source line numbers containing a `#break` marker.
    pub breakpoint_lines: Vec<usize>,
}
+
+impl DebuggerCommands {
+ pub(super) fn parse_from(
+ file: &Path,
+ config: &Config,
+ debugger_prefixes: &[&str],
+ rev: Option<&str>,
+ ) -> Result<Self, String> {
+ let directives = debugger_prefixes
+ .iter()
+ .map(|prefix| (format!("{}-command", prefix), format!("{}-check", prefix)))
+ .collect::<Vec<_>>();
+
+ let mut breakpoint_lines = vec![];
+ let mut commands = vec![];
+ let mut check_lines = vec![];
+ let mut counter = 0;
+ let reader = BufReader::new(File::open(file).unwrap());
+ for line in reader.lines() {
+ counter += 1;
+ match line {
+ Ok(line) => {
+ let (lnrev, line) = line_directive("//", &line).unwrap_or((None, &line));
+
+ // Skip any revision specific directive that doesn't match the current
+ // revision being tested
+ if lnrev.is_some() && lnrev != rev {
+ continue;
+ }
+
+ if line.contains("#break") {
+ breakpoint_lines.push(counter);
+ }
+
+ for &(ref command_directive, ref check_directive) in &directives {
+ config
+ .parse_name_value_directive(&line, command_directive)
+ .map(|cmd| commands.push(cmd));
+
+ config
+ .parse_name_value_directive(&line, check_directive)
+ .map(|cmd| check_lines.push(cmd));
+ }
+ }
+ Err(e) => return Err(format!("Error while parsing debugger commands: {}", e)),
+ }
+ }
+
+ Ok(Self { commands, check_lines, breakpoint_lines })
+ }
+}
+
+pub(super) fn check_debugger_output(
+ debugger_run_result: &ProcRes,
+ check_lines: &[String],
+) -> Result<(), String> {
+ let num_check_lines = check_lines.len();
+
+ let mut check_line_index = 0;
+ for line in debugger_run_result.stdout.lines() {
+ if check_line_index >= num_check_lines {
+ break;
+ }
+
+ if check_single_line(line, &(check_lines[check_line_index])[..]) {
+ check_line_index += 1;
+ }
+ }
+ if check_line_index != num_check_lines && num_check_lines > 0 {
+ Err(format!("line not found in debugger output: {}", check_lines[check_line_index]))
+ } else {
+ Ok(())
+ }
+}
+
/// Check whether `line` satisfies `check_line`, where `[...]` in the check line
/// matches any (possibly empty) run of characters.
///
/// Both sides are trimmed first. Fragments between wildcards are located by
/// substring search, so the start of the line is never strictly anchored; unless
/// the check line ends with `[...]`, the last fragment must reach the end.
fn check_single_line(line: &str, check_line: &str) -> bool {
    let line = line.trim();
    let check_line = check_line.trim();
    let can_end_anywhere = check_line.ends_with("[...]");

    // The literal pieces between `[...]` wildcards, in order.
    let fragments: Vec<&str> =
        check_line.split("[...]").filter(|frag| !frag.is_empty()).collect();

    // A check line consisting only of wildcards matches anything.
    if fragments.is_empty() {
        return true;
    }

    // Consume each fragment left-to-right from the unmatched remainder.
    let mut remaining = line;
    for fragment in &fragments {
        match remaining.find(fragment) {
            Some(pos) => remaining = &remaining[pos + fragment.len()..],
            None => return false,
        }
    }

    can_end_anywhere || remaining.is_empty()
}
diff --git a/src/tools/compiletest/src/runtest/tests.rs b/src/tools/compiletest/src/runtest/tests.rs
new file mode 100644
index 000000000..511051111
--- /dev/null
+++ b/src/tools/compiletest/src/runtest/tests.rs
@@ -0,0 +1,50 @@
+use super::*;
+
/// Backslash-separated path fragments must be rewritten to forward slashes,
/// while backslash escape sequences in ordinary strings must survive untouched.
#[test]
fn normalize_platform_differences() {
    // Paths — bare, rooted in well-known variables, with dots, dashes,
    // underscores, spaces, and line:column suffixes — all get flipped to `/`.
    assert_eq!(TestCx::normalize_platform_differences(r"$DIR\foo.rs"), "$DIR/foo.rs");
    assert_eq!(
        TestCx::normalize_platform_differences(r"$BUILD_DIR\..\parser.rs"),
        "$BUILD_DIR/../parser.rs"
    );
    assert_eq!(
        TestCx::normalize_platform_differences(r"$DIR\bar.rs hello\nworld"),
        r"$DIR/bar.rs hello\nworld"
    );
    assert_eq!(
        TestCx::normalize_platform_differences(r"either bar\baz.rs or bar\baz\mod.rs"),
        r"either bar/baz.rs or bar/baz/mod.rs",
    );
    assert_eq!(TestCx::normalize_platform_differences(r"`.\some\path.rs`"), r"`./some/path.rs`",);
    assert_eq!(TestCx::normalize_platform_differences(r"`some\path.rs`"), r"`some/path.rs`",);
    assert_eq!(
        TestCx::normalize_platform_differences(r"$DIR\path-with-dashes.rs"),
        r"$DIR/path-with-dashes.rs"
    );
    assert_eq!(
        TestCx::normalize_platform_differences(r"$DIR\path_with_underscores.rs"),
        r"$DIR/path_with_underscores.rs",
    );
    assert_eq!(TestCx::normalize_platform_differences(r"$DIR\foo.rs:12:11"), "$DIR/foo.rs:12:11",);
    assert_eq!(
        TestCx::normalize_platform_differences(r"$DIR\path with spaces 'n' quotes"),
        "$DIR/path with spaces 'n' quotes",
    );
    assert_eq!(
        TestCx::normalize_platform_differences(r"$DIR\file_with\no_extension"),
        "$DIR/file_with/no_extension",
    );

    // Non-path backslashes (escape sequences, stray braces/brackets, and
    // escapes inside string literals) must be left exactly as-is.
    assert_eq!(TestCx::normalize_platform_differences(r"\n"), r"\n");
    assert_eq!(TestCx::normalize_platform_differences(r"{ \n"), r"{ \n");
    assert_eq!(TestCx::normalize_platform_differences(r"`\]`"), r"`\]`");
    assert_eq!(TestCx::normalize_platform_differences(r#""\{""#), r#""\{""#);
    assert_eq!(
        TestCx::normalize_platform_differences(r#"write!(&mut v, "Hello\n")"#),
        r#"write!(&mut v, "Hello\n")"#
    );
    assert_eq!(
        TestCx::normalize_platform_differences(r#"println!("test\ntest")"#),
        r#"println!("test\ntest")"#,
    );
}
diff --git a/src/tools/compiletest/src/tests.rs b/src/tools/compiletest/src/tests.rs
new file mode 100644
index 000000000..e6725dba2
--- /dev/null
+++ b/src/tools/compiletest/src/tests.rs
@@ -0,0 +1,78 @@
+use super::header::extract_llvm_version;
+use super::*;
+
/// Version banners from many GDB distributions must decode to a single integer
/// of the form `major * 1_000_000 + minor * 1_000 + patch`.
#[test]
fn test_extract_gdb_version() {
    // Each `expected: input` pair expands to one `assert_eq!` via the macro.
    macro_rules! test { ($($expectation:literal: $input:literal,)*) => {{$(
        assert_eq!(extract_gdb_version($input), Some($expectation));
    )*}}}

    test! {
        7000001: "GNU gdb (GDB) CentOS 7.0.1-45.el5.centos",

        7002000: "GNU gdb (GDB) Red Hat Enterprise Linux 7.2-90.el6",

        7004000: "GNU gdb (Ubuntu/Linaro 7.4-2012.04-0ubuntu2.1) 7.4-2012.04",
        7004001: "GNU gdb (GDB) 7.4.1-debian",

        7006001: "GNU gdb (GDB) Red Hat Enterprise Linux 7.6.1-80.el7",

        7007001: "GNU gdb (Ubuntu 7.7.1-0ubuntu5~14.04.2) 7.7.1",
        7007001: "GNU gdb (Debian 7.7.1+dfsg-5) 7.7.1",
        7007001: "GNU gdb (GDB) Fedora 7.7.1-21.fc20",

        7008000: "GNU gdb (GDB; openSUSE 13.2) 7.8",
        7009001: "GNU gdb (GDB) Fedora 7.9.1-20.fc22",
        7010001: "GNU gdb (GDB) Fedora 7.10.1-31.fc23",

        7011000: "GNU gdb (Ubuntu 7.11-0ubuntu1) 7.11",
        7011001: "GNU gdb (Ubuntu 7.11.1-0ubuntu1~16.04) 7.11.1",
        7011001: "GNU gdb (Debian 7.11.1-2) 7.11.1",
        7011001: "GNU gdb (GDB) Fedora 7.11.1-86.fc24",
        7011001: "GNU gdb (GDB; openSUSE Leap 42.1) 7.11.1",
        7011001: "GNU gdb (GDB; openSUSE Tumbleweed) 7.11.1",

        7011090: "7.11.90",
        7011090: "GNU gdb (Ubuntu 7.11.90.20161005-0ubuntu1) 7.11.90.20161005-git",

        7012000: "7.12",
        7012000: "GNU gdb (GDB) 7.12",
        7012000: "GNU gdb (GDB) 7.12.20161027-git",
        7012050: "GNU gdb (GDB) 7.12.50.20161027-git",

        9002000: "GNU gdb (Ubuntu 9.2-0ubuntu1~20.04) 9.2",
        10001000: "GNU gdb (GDB) 10.1 [GDB v10.1 for FreeBSD]",
    }
}
+
/// Both Apple-style (`LLDB-NNN`/`lldb-NNN`) and upstream-style
/// (`lldb version X.Y.Z`) banners must decode to `(version, is_rust_enabled)`.
#[test]
fn test_extract_lldb_version() {
    for (banner, expected) in [
        // Apple variants
        ("LLDB-179.5", Some((179, false))),
        ("lldb-300.2.51", Some((300, false))),
        // Upstream versions
        ("lldb version 6.0.1", Some((600, false))),
        ("lldb version 9.0.0", Some((900, false))),
    ] {
        assert_eq!(extract_lldb_version(banner), expected);
    }
}
+
/// Ordinary source files count as tests; hidden, editor-backup, and
/// non-source files do not.
#[test]
fn is_test_test() {
    assert!(is_test(&OsString::from("a_test.rs")));
    assert!(!is_test(&OsString::from(".a_test.rs")));
    assert!(!is_test(&OsString::from("a_cat.gif")));
    assert!(!is_test(&OsString::from("#a_dog_gif")));
    assert!(!is_test(&OsString::from("~a_temp_file")));
}
+
/// LLVM version strings decode to `major * 10_000 + minor * 100 + patch`;
/// vendor suffixes like `-rust…`, `libcxx`, `-rc3`, and `git` are ignored.
#[test]
fn test_extract_llvm_version() {
    for (version_string, expected) in [
        ("8.1.2-rust", 80102),
        ("9.0.1-rust-1.43.0-dev", 90001),
        ("9.3.1-rust-1.43.0-dev", 90301),
        ("10.0.0-rust", 100000),
        ("11.1.0", 110100),
        ("12.0.0libcxx", 120000),
        ("12.0.0-rc3", 120000),
        ("13.0.0git", 130000),
    ] {
        assert_eq!(extract_llvm_version(version_string), Some(expected));
    }
}
diff --git a/src/tools/compiletest/src/util.rs b/src/tools/compiletest/src/util.rs
new file mode 100644
index 000000000..22df18ee9
--- /dev/null
+++ b/src/tools/compiletest/src/util.rs
@@ -0,0 +1,259 @@
+use crate::common::Config;
+use std::env;
+use std::ffi::OsStr;
+use std::path::PathBuf;
+
+use tracing::*;
+
+#[cfg(test)]
+mod tests;
+
/// Conversion table from triple OS name to Rust SYSNAME
///
/// Lookup scans a triple's `-`-separated components against the first column;
/// the first match wins (see `matches_os`).
const OS_TABLE: &[(&str, &str)] = &[
    ("android", "android"),
    ("androideabi", "android"),
    ("cuda", "cuda"),
    ("darwin", "macos"),
    ("dragonfly", "dragonfly"),
    ("emscripten", "emscripten"),
    ("freebsd", "freebsd"),
    ("fuchsia", "fuchsia"),
    ("haiku", "haiku"),
    ("hermit", "hermit"),
    ("illumos", "illumos"),
    ("ios", "ios"),
    ("l4re", "l4re"),
    ("linux", "linux"),
    ("mingw32", "windows"),
    ("none", "none"),
    ("netbsd", "netbsd"),
    ("openbsd", "openbsd"),
    ("redox", "redox"),
    ("sgx", "sgx"),
    ("solaris", "solaris"),
    ("watchos", "watchos"),
    ("win32", "windows"),
    ("windows", "windows"),
    ("vxworks", "vxworks"),
];
+
/// Conversion table from triple architecture component to Rust arch name.
///
/// Lookup scans a triple's `-`-separated components against the first column;
/// the first match wins (see `get_arch`).
///
/// Fix: the original table listed the four `mipsisa32r6`/`mipsisa32r6el`/
/// `mipsisa64r6`/`mipsisa64r6el` entries twice; the second copies were dead
/// (lookup stops at the first match) and have been removed.
const ARCH_TABLE: &[(&str, &str)] = &[
    ("aarch64", "aarch64"),
    ("aarch64_be", "aarch64"),
    ("amd64", "x86_64"),
    ("arm", "arm"),
    ("arm64", "aarch64"),
    ("armv4t", "arm"),
    ("armv5te", "arm"),
    ("armv7", "arm"),
    ("armv7s", "arm"),
    ("asmjs", "asmjs"),
    ("avr", "avr"),
    ("bpfeb", "bpf"),
    ("bpfel", "bpf"),
    ("hexagon", "hexagon"),
    ("i386", "x86"),
    ("i586", "x86"),
    ("i686", "x86"),
    ("m68k", "m68k"),
    ("mips", "mips"),
    ("mips64", "mips64"),
    ("mips64el", "mips64"),
    ("mipsisa32r6", "mips"),
    ("mipsisa32r6el", "mips"),
    ("mipsisa64r6", "mips64"),
    ("mipsisa64r6el", "mips64"),
    ("mipsel", "mips"),
    ("msp430", "msp430"),
    ("nvptx64", "nvptx64"),
    ("powerpc", "powerpc"),
    ("powerpc64", "powerpc64"),
    ("powerpc64le", "powerpc64"),
    ("riscv64gc", "riscv64"),
    ("s390x", "s390x"),
    ("sparc", "sparc"),
    ("sparc64", "sparc64"),
    ("sparcv9", "sparc64"),
    ("thumbv6m", "thumb"),
    ("thumbv7em", "thumb"),
    ("thumbv7m", "thumb"),
    ("wasm32", "wasm32"),
    ("x86_64", "x86_64"),
    ("xcore", "xcore"),
];
+
/// Targets on which AddressSanitizer tests can run.
pub const ASAN_SUPPORTED_TARGETS: &[&str] = &[
    "aarch64-apple-darwin",
    "aarch64-fuchsia",
    "aarch64-unknown-linux-gnu",
    "x86_64-apple-darwin",
    "x86_64-fuchsia",
    "x86_64-unknown-freebsd",
    "x86_64-unknown-linux-gnu",
];

/// Targets on which Control Flow Integrity tests can run.
// FIXME(rcvalle): More targets are likely supported.
pub const CFI_SUPPORTED_TARGETS: &[&str] = &[
    "aarch64-apple-darwin",
    "aarch64-fuchsia",
    "aarch64-linux-android",
    "aarch64-unknown-freebsd",
    "aarch64-unknown-linux-gnu",
    "x86_64-apple-darwin",
    "x86_64-fuchsia",
    "x86_64-pc-solaris",
    "x86_64-unknown-freebsd",
    "x86_64-unknown-illumos",
    "x86_64-unknown-linux-gnu",
    "x86_64-unknown-linux-musl",
    "x86_64-unknown-netbsd",
];

/// Targets on which LeakSanitizer tests can run.
pub const LSAN_SUPPORTED_TARGETS: &[&str] = &[
    // FIXME: currently broken, see #88132
    // "aarch64-apple-darwin",
    "aarch64-unknown-linux-gnu",
    "x86_64-apple-darwin",
    "x86_64-unknown-linux-gnu",
];

/// Targets on which MemorySanitizer tests can run.
pub const MSAN_SUPPORTED_TARGETS: &[&str] =
    &["aarch64-unknown-linux-gnu", "x86_64-unknown-freebsd", "x86_64-unknown-linux-gnu"];

/// Targets on which ThreadSanitizer tests can run.
pub const TSAN_SUPPORTED_TARGETS: &[&str] = &[
    "aarch64-apple-darwin",
    "aarch64-unknown-linux-gnu",
    "x86_64-apple-darwin",
    "x86_64-unknown-freebsd",
    "x86_64-unknown-linux-gnu",
];

/// Targets on which hardware-assisted AddressSanitizer tests can run.
pub const HWASAN_SUPPORTED_TARGETS: &[&str] =
    &["aarch64-linux-android", "aarch64-unknown-linux-gnu"];

/// Targets on which memory-tagging tests can run.
pub const MEMTAG_SUPPORTED_TARGETS: &[&str] =
    &["aarch64-linux-android", "aarch64-unknown-linux-gnu"];

/// Targets on which shadow-call-stack tests can run.
pub const SHADOWCALLSTACK_SUPPORTED_TARGETS: &[&str] = &["aarch64-linux-android"];

/// Architecture components (first element of a triple) that are big-endian;
/// consulted by `is_big_endian`.
const BIG_ENDIAN: &[&str] = &[
    "aarch64_be",
    "armebv7r",
    "mips",
    "mips64",
    "mipsisa32r6",
    "mipsisa64r6",
    "powerpc",
    "powerpc64",
    "s390x",
    "sparc",
    "sparc64",
    "sparcv9",
];
+
/// Rust arch names (as returned by `get_arch`) for which stable inline `asm!`
/// tests can run; consulted by `has_asm_support`.
static ASM_SUPPORTED_ARCHS: &[&str] = &[
    "x86", "x86_64", "arm", "aarch64", "riscv32",
    "riscv64",
    // These targets require an additional asm_experimental_arch feature.
    // "nvptx64", "hexagon", "mips", "mips64", "spirv", "wasm32",
];
+
+pub fn has_asm_support(triple: &str) -> bool {
+ ASM_SUPPORTED_ARCHS.contains(&get_arch(triple))
+}
+
+pub fn matches_os(triple: &str, name: &str) -> bool {
+ // For the wasm32 bare target we ignore anything also ignored on emscripten
+ // and then we also recognize `wasm32-bare` as the os for the target
+ if triple == "wasm32-unknown-unknown" {
+ return name == "emscripten" || name == "wasm32-bare";
+ }
+ let triple: Vec<_> = triple.split('-').collect();
+ for &(triple_os, os) in OS_TABLE {
+ if triple.contains(&triple_os) {
+ return os == name;
+ }
+ }
+ panic!("Cannot determine OS from triple");
+}
+
+/// Determine the architecture from `triple`
+pub fn get_arch(triple: &str) -> &'static str {
+ let triple: Vec<_> = triple.split('-').collect();
+ for &(triple_arch, arch) in ARCH_TABLE {
+ if triple.contains(&triple_arch) {
+ return arch;
+ }
+ }
+ panic!("Cannot determine Architecture from triple");
+}
+
+/// Determine the endianness from `triple`
+pub fn is_big_endian(triple: &str) -> bool {
+ let triple_arch = triple.split('-').next().unwrap();
+ BIG_ENDIAN.contains(&triple_arch)
+}
+
/// Whether `name` is a prefix of the environment (fourth) component of `triple`,
/// e.g. `gnu` matches `arm-unknown-linux-gnueabihf`.
pub fn matches_env(triple: &str, name: &str) -> bool {
    triple.split('-').nth(3).map_or(false, |env| env.starts_with(name))
}
+
/// Determine the pointer width ("16bit", "32bit", or "64bit") of `triple`.
///
/// Triples containing "64" are 64-bit unless they use an ILP32 ABI
/// (`gnux32`/`gnu_ilp32`); `s390x` is 64-bit despite lacking "64"; `avr` is
/// 16-bit; everything else is 32-bit.
pub fn get_pointer_width(triple: &str) -> &'static str {
    let is_64bit = (triple.contains("64")
        && !triple.ends_with("gnux32")
        && !triple.ends_with("gnu_ilp32"))
        || triple.starts_with("s390x");
    if is_64bit {
        "64bit"
    } else if triple.starts_with("avr") {
        "16bit"
    } else {
        "32bit"
    }
}
+
+pub fn make_new_path(path: &str) -> String {
+ assert!(cfg!(windows));
+ // Windows just uses PATH as the library search path, so we have to
+ // maintain the current value while adding our own
+ match env::var(lib_path_env_var()) {
+ Ok(curr) => format!("{}{}{}", path, path_div(), curr),
+ Err(..) => path.to_owned(),
+ }
+}
+
/// The environment variable that holds the dynamic-library search path on
/// Windows (where `PATH` serves that role).
pub fn lib_path_env_var() -> &'static str {
    "PATH"
}
/// The separator between entries of the Windows `PATH` variable.
fn path_div() -> &'static str {
    ";"
}
+
+pub fn logv(config: &Config, s: String) {
+ debug!("{}", s);
+ if config.verbose {
+ println!("{}", s);
+ }
+}
+
/// Extension methods for [`PathBuf`].
pub trait PathBufExt {
    /// Append an extension to the path, even if it already has one.
    fn with_extra_extension<S: AsRef<OsStr>>(&self, extension: S) -> PathBuf;
}

impl PathBufExt for PathBuf {
    fn with_extra_extension<S: AsRef<OsStr>>(&self, extension: S) -> PathBuf {
        // An empty extension leaves the path untouched.
        if extension.as_ref().is_empty() {
            return self.clone();
        }
        let mut file_name = self.file_name().unwrap().to_os_string();
        // Insert a dot only when the caller didn't already supply a leading one.
        if !extension.as_ref().to_str().unwrap().starts_with('.') {
            file_name.push(".");
        }
        file_name.push(extension);
        self.with_file_name(file_name)
    }
}
diff --git a/src/tools/compiletest/src/util/tests.rs b/src/tools/compiletest/src/util/tests.rs
new file mode 100644
index 000000000..663027173
--- /dev/null
+++ b/src/tools/compiletest/src/util/tests.rs
@@ -0,0 +1,51 @@
+use super::*;
+
/// An unrecognized triple must panic rather than silently defaulting.
#[test]
#[should_panic(expected = "Cannot determine Architecture from triple")]
fn test_get_arch_failure() {
    get_arch("abc");
}

/// Architecture lookup matches any `-`-separated component of the triple.
#[test]
fn test_get_arch() {
    assert_eq!("x86_64", get_arch("x86_64-unknown-linux-gnu"));
    assert_eq!("x86_64", get_arch("amd64"));
    assert_eq!("nvptx64", get_arch("nvptx64-nvidia-cuda"));
}

/// An unrecognized triple must panic rather than silently defaulting.
#[test]
#[should_panic(expected = "Cannot determine OS from triple")]
fn test_matches_os_failure() {
    matches_os("abc", "abc");
}

/// OS matching covers regular triples plus the bare-wasm32 special cases.
#[test]
fn test_matches_os() {
    assert!(matches_os("x86_64-unknown-linux-gnu", "linux"));
    assert!(matches_os("wasm32-unknown-unknown", "emscripten"));
    assert!(matches_os("wasm32-unknown-unknown", "wasm32-bare"));
    assert!(!matches_os("wasm32-unknown-unknown", "windows"));
    assert!(matches_os("thumbv6m0-none-eabi", "none"));
    assert!(matches_os("riscv32imc-unknown-none-elf", "none"));
    assert!(matches_os("nvptx64-nvidia-cuda", "cuda"));
    assert!(matches_os("x86_64-fortanix-unknown-sgx", "sgx"));
}

/// Endianness is decided by the first (architecture) component only.
#[test]
fn is_big_endian_test() {
    assert!(!is_big_endian("no"));
    assert!(is_big_endian("sparc-unknown-unknown"));
}
+
+#[test]
+fn path_buf_with_extra_extension_test() {
+ assert_eq!(
+ PathBuf::from("foo.rs.stderr"),
+ PathBuf::from("foo.rs").with_extra_extension("stderr")
+ );
+ assert_eq!(
+ PathBuf::from("foo.rs.stderr"),
+ PathBuf::from("foo.rs").with_extra_extension(".stderr")
+ );
+ assert_eq!(PathBuf::from("foo.rs"), PathBuf::from("foo.rs").with_extra_extension(""));
+}